Here are the updated glibc patch and the patches for mutexes and rwlocks.

The initial patch for libpthread, as published at http://lists.gnu.org/archive/html/bug-hurd/2016-05/msg00027.html, remains the same.


* hurd/hurdlock.h: New file.
* hurd/hurdlock.c: New file.
* hurd/Makefile: Add hurdlock.
* hurd/Versions: Add a new entry to export the above interface.
* hurd/hurdpid.c: Include <lowlevellock.h>.
  (_S_msg_proc_newids): Use lll_wake to notify waiting threads.
* hurd/hurdsig.c (reauth_proc): Use __mutex_lock and __mutex_unlock.
* hurd/setauth.c: Include <hurdlock.h>; use an integer for synchronization.
* hurd/sysvshm.c: Include <hurdlock.h>; use an integer for synchronization.
* mach/lowlevellock.h: New file.
* mach/Makefile: Remove unneeded files; add lowlevellock.h.
* mach/lock-intern.h: Use low-level locks to implement spin locks.
* mach/mutex-init.c: Rewrite mutex initialization.
* mach/mutex-solid.c: Remove file.
* mach/spin-solid.c: Remove file.
* manual/errno.texi: Add documentation for EOWNERDEAD and ENOTRECOVERABLE.
* sysdeps/mach/Makefile: Add libmachuser as a dependency for some libraries.
* sysdeps/mach/bits/libc-lock.h: Reimplement libc internal locks with
  low-level locks; cleanup routines now use gcc's cleanup attribute, and
  libc keys are now implemented with pthreads instead of cthreads.
* sysdeps/mach/hurd/bits/errno.h: Add new errno values.
* sysdeps/mach/hurd/bits/libc-lock.h: Remove file.
* sysdeps/mach/hurd/bits/posix_opt.h: Add support for the process-shared
  attribute.
* sysdeps/mach/hurd/cthreads.c: Remove definition of __libc_getspecific.
* sysdeps/mach/hurd/malloc-machine.h: Reimplement malloc locks.
* sysdeps/mach/hurd/setpgid.c (setpgid): Use gsync for synchronization.
* sysdeps/mach/hurd/setsid.c (setsid): Likewise.

--- 
diff --git a/hurd/Makefile b/hurd/Makefile
index 40bfdd9..b7e2ed4 100644
--- a/hurd/Makefile
+++ b/hurd/Makefile
@@ -60,6 +60,7 @@ routines = hurdstartup hurdinit \
 	   ports-get ports-set hurdports hurdmsg \
 	   errno-loc \
 	   sysvshm \
+	   hurdlock \
 	   $(sig) $(dtable) $(inlines) port-cleanup report-wait xattr
 sig	= hurdsig hurdfault siginfo hurd-raise preempt-sig \
 	  trampoline longjmp-ts catch-exc exc2signal hurdkill sigunwind \
diff --git a/hurd/Versions b/hurd/Versions
index 691c6df..3e376c3 100644
--- a/hurd/Versions
+++ b/hurd/Versions
@@ -182,4 +182,13 @@ libc {
     __libc_getspecific;
   }
 %endif
+
+  GLIBC_PRIVATE {
+    # Used by other libs.
+    lll_xwait; lll_timed_wait; lll_timed_xwait;
+    __lll_abstimed_wait; __lll_abstimed_xwait;
+    __lll_abstimed_lock; lll_robust_lock;
+    __lll_robust_abstimed_lock; lll_robust_trylock;
+    lll_set_wake; lll_robust_unlock; lll_requeue;
+  }
 }
diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c
new file mode 100644
index 0000000..d6e4731
--- /dev/null
+++ b/hurd/hurdlock.c
@@ -0,0 +1,247 @@
+/* Copyright (C) 1999-2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#include "hurdlock.h"
+#include <hurd.h>
+#include <time.h>
+#include <errno.h>
+
+int lll_xwait (void *ptr, int lo, int hi, int flags)
+{
+  return (__gsync_wait (__mach_task_self (),
+    (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD));
+}
+
+int lll_timed_wait (void *ptr, int val, int mlsec, int flags)
+{
+  return (__gsync_wait (__mach_task_self (),
+    (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED));
+}
+
+int lll_timed_xwait (void *ptr, int lo,
+  int hi, int mlsec, int flags)
+{
+  return (__gsync_wait (__mach_task_self (), (vm_offset_t)ptr,
+    lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD));
+}
+
+/* Convert an absolute timeout in nanoseconds to a relative
+ * timeout in milliseconds. */
+static inline int __attribute__ ((gnu_inline))
+compute_reltime (const struct timespec *abstime, clockid_t clk)
+{
+  struct timespec ts;
+  __clock_gettime (clk, &ts);
+
+  ts.tv_sec = abstime->tv_sec - ts.tv_sec;
+  ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec;
+
+  if (ts.tv_nsec < 0)
+    {
+      --ts.tv_sec;
+      ts.tv_nsec += 1000000000;
+    }
+
+  return (ts.tv_sec < 0 ? -1 :
+    (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000));
+}
+
+int __lll_abstimed_wait (void *ptr, int val,
+  const struct timespec *tsp, int flags, int clk)
+{
+  int mlsec = compute_reltime (tsp, clk);
+  return (mlsec < 0 ? KERN_TIMEDOUT :
+    lll_timed_wait (ptr, val, mlsec, flags));
+}
+
+int __lll_abstimed_xwait (void *ptr, int lo, int hi,
+  const struct timespec *tsp, int flags, int clk)
+{
+  int mlsec = compute_reltime (tsp, clk);
+  return (mlsec < 0 ? KERN_TIMEDOUT :
+    lll_timed_xwait (ptr, lo, hi, mlsec, flags));
+}
+
+int __lll_abstimed_lock (void *ptr,
+  const struct timespec *tsp, int flags, int clk)
+{
+  if (lll_trylock (ptr) == 0)
+    return (0);
+
+  while (1)
+    {
+      if (atomic_exchange_acq ((int *)ptr, 2) == 0)
+        return (0);
+      else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000)
+        return (EINVAL);
+
+      int mlsec = compute_reltime (tsp, clk);
+      if (mlsec < 0 || lll_timed_wait (ptr,
+          2, mlsec, flags) == KERN_TIMEDOUT)
+        return (ETIMEDOUT);
+    }
+}
+
+void lll_set_wake (void *ptr, int val, int flags)
+{
+  __gsync_wake (__mach_task_self (),
+    (vm_offset_t)ptr, val, flags | GSYNC_MUTATE);
+}
+
+void lll_requeue (void *src, void *dst, int wake_one, int flags)
+{
+  __gsync_requeue (__mach_task_self (), (vm_offset_t)src,
+    (vm_offset_t)dst, (boolean_t)wake_one, flags);
+}
+
+/* Robust locks. */
+
+extern int __getpid (void) __attribute__ ((const));
+extern task_t __pid2task (int);
+
+/* Test if a given process id is still valid. */
+static inline int valid_pid (int pid)
+{
+  task_t task = __pid2task (pid);
+  if (task == MACH_PORT_NULL)
+    return (0);
+
+  __mach_port_deallocate (__mach_task_self (), task);
+  return (1);
+}
+
+/* Robust locks currently have no support from the kernel; they are
+ * simply implemented with periodic polling. When sleeping, the
+ * maximum blocking time is determined by this constant. */
+#define MAX_WAIT_TIME   1500
+
+int lll_robust_lock (void *ptr, int flags)
+{
+  int *iptr = (int *)ptr;
+  int id = __getpid ();
+  int wait_time = 25;
+  unsigned int val;
+
+  /* Try to set the lock word to our PID if it's clear. Otherwise,
+   * mark it as having waiters. */
+  while (1)
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (atomic_compare_and_exchange_bool_acq (iptr,
+          val | LLL_WAITERS, val) == 0)
+        break;
+    }
+
+  for (id |= LLL_WAITERS ; ; )
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (val && !valid_pid (val & LLL_OWNER_MASK))
+        {
+          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
+            return (EOWNERDEAD);
+        }
+      else
+        {
+          lll_timed_wait (iptr, val, wait_time, flags);
+          if (wait_time < MAX_WAIT_TIME)
+            wait_time <<= 1;
+        }
+    }
+}
+
+int __lll_robust_abstimed_lock (void *ptr,
+  const struct timespec *tsp, int flags, int clk)
+{
+  int *iptr = (int *)ptr;
+  int id = __getpid ();
+  int wait_time = 25;
+  unsigned int val;
+
+  while (1)
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (atomic_compare_and_exchange_bool_acq (iptr,
+          val | LLL_WAITERS, val) == 0)
+        break;
+    }
+
+  for (id |= LLL_WAITERS ; ; )
+    {
+      val = *iptr;
+      if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+      else if (val && !valid_pid (val & LLL_OWNER_MASK))
+        {
+          if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
+            return (EOWNERDEAD);
+        }
+      else
+        {
+          int mlsec = compute_reltime (tsp, clk);
+          if (mlsec < 0)
+            return (ETIMEDOUT);
+          else if (mlsec > wait_time)
+            mlsec = wait_time;
+
+          int res = lll_timed_wait (iptr, val, mlsec, flags);
+          if (res == KERN_TIMEDOUT)
+            return (ETIMEDOUT);
+          else if (wait_time < MAX_WAIT_TIME)
+            wait_time <<= 1;
+        }
+    }
+}
+
+int lll_robust_trylock (void *ptr)
+{
+  int *iptr = (int *)ptr;
+  int id = __getpid ();
+  unsigned int val = *iptr;
+
+  if (!val)
+    {
+      if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0)
+        return (0);
+    }
+  else if (!valid_pid (val & LLL_OWNER_MASK) &&
+      atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0)
+    return (EOWNERDEAD);
+
+  return (EBUSY);
+}
+
+void lll_robust_unlock (void *ptr, int flags)
+{
+  while (1)
+    {
+      unsigned int val = *(unsigned int *)ptr;
+      if (val & LLL_WAITERS)
+        {
+          lll_set_wake (ptr, 0, flags);
+          break;
+        }
+      else if (atomic_compare_and_exchange_bool_rel ((int *)ptr, 0, val) == 0)
+        break;
+    }
+}
+
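
For reference, here is a minimal sketch of how a caller is expected to
use the robust-lock interface above; the function name is hypothetical
and error handling is trimmed:

  #include <errno.h>
  #include <hurdlock.h>

  static unsigned int robust_lock = LLL_INITIALIZER;

  static void enter_critical (void)
  {
    /* lll_robust_lock stores the caller's PID in the lock word; a
       later locker that finds the PID of a dead process acquires the
       lock and is told so via EOWNERDEAD.  */
    int err = lll_robust_lock (&robust_lock, 0);
    if (err == EOWNERDEAD)
      {
        /* The previous owner died; repair the protected state here.  */
      }
    /* ... critical section ...  */
    lll_robust_unlock (&robust_lock, 0);
  }
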
diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h
new file mode 100644
index 0000000..405ffaf
--- /dev/null
+++ b/hurd/hurdlock.h
@@ -0,0 +1,117 @@
+/* Copyright (C) 1999-2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef _HURD_LOCK_H
+#define _HURD_LOCK_H   1
+
+#include <mach/lowlevellock.h>
+
+struct timespec;
+
+/* Flags for robust locks. */
+#define LLL_WAITERS      (1U << 31)
+#define LLL_DEAD_OWNER   (1U << 30)
+
+#define LLL_OWNER_MASK   ~(LLL_WAITERS | LLL_DEAD_OWNER)
+
+/* Wait on 64-bit address PTR, without blocking if its contents
+ * are different from the pair <LO, HI>. */
+extern int lll_xwait (void *__ptr, int __lo,
+  int __hi, int __flags);
+
+/* Same as 'lll_wait', but only block for MLSEC milliseconds. */
+extern int lll_timed_wait (void *__ptr, int __val,
+  int __mlsec, int __flags);
+
+/* Same as 'lll_xwait', but only block for MLSEC milliseconds. */
+extern int lll_timed_xwait (void *__ptr, int __lo,
+  int __hi, int __mlsec, int __flags);
+
+/* Same as 'lll_wait', but only block until TSP elapses,
+ * using clock CLK. */
+extern int __lll_abstimed_wait (void *__ptr, int __val,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Same as 'lll_xwait', but only block until TSP elapses,
+ * using clock CLK. */
+extern int __lll_abstimed_xwait (void *__ptr, int __lo, int __hi,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Same as 'lll_lock', but return with an error if TSP elapses,
+ * using clock CLK. */
+extern int __lll_abstimed_lock (void *__ptr,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Acquire the lock at PTR, but return with an error if
+ * the process containing the owner thread dies. */
+extern int lll_robust_lock (void *__ptr, int __flags);
+
+/* Same as 'lll_robust_lock', but only block until TSP
+ * elapses, using clock CLK. */
+extern int __lll_robust_abstimed_lock (void *__ptr,
+  const struct timespec *__tsp, int __flags, int __clk);
+
+/* Same as 'lll_robust_lock', but return with an error
+ * if the lock cannot be acquired without blocking. */
+extern int lll_robust_trylock (void *__ptr);
+
+/* Wake one or more threads waiting on address PTR,
+ * setting its value to VAL before doing so. */
+extern void lll_set_wake (void *__ptr, int __val, int __flags);
+
+/* Release the robust lock at PTR. */
+extern void lll_robust_unlock (void *__ptr, int __flags);
+
+/* Rearrange threads waiting on address SRC to instead wait on
+ * DST, waking one of them if WAKE_ONE is non-zero. */
+extern void lll_requeue (void *__src, void *__dst,
+  int __wake_one, int __flags);
+
+/* The following are hacks that allow us to simulate optional
+ * parameters in C, to avoid having to pass the clock id for
+ * every one of these calls, defaulting to CLOCK_REALTIME if
+ * no argument is passed. */
+
+#define lll_abstimed_wait(ptr, val, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_abstimed_wait ((ptr), (val), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#define lll_abstimed_lock(ptr, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_abstimed_lock ((ptr), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#define lll_robust_abstimed_lock(ptr, tsp, flags, ...)   \
+  ({   \
+     const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ };   \
+     __lll_robust_abstimed_lock ((ptr), (tsp), (flags),   \
+       __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]);   \
+   })
+
+#endif
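
To make the default-clock trick concrete: the __VA_ARGS__ list appends
any explicit clock after CLOCK_REALTIME, and the macro picks the last
element of __clk. A short sketch (the lock word and timeouts are
hypothetical, just to show the argument selection):

  #include <time.h>
  #include <hurdlock.h>

  static unsigned int lock_word = LLL_INITIALIZER;

  static void demo (void)
  {
    struct timespec ts;
    clock_gettime (CLOCK_REALTIME, &ts);
    ts.tv_sec += 1;   /* Time out one second from now.  */

    /* No clock argument: __clk is { CLOCK_REALTIME }, so its last
       (and only) element, CLOCK_REALTIME, is used.  */
    lll_abstimed_wait (&lock_word, 1, &ts, 0);

    /* Explicit clock: __clk is { CLOCK_REALTIME, CLOCK_MONOTONIC },
       and the last element, CLOCK_MONOTONIC, wins.  */
    clock_gettime (CLOCK_MONOTONIC, &ts);
    ts.tv_sec += 1;
    lll_abstimed_wait (&lock_word, 1, &ts, 0, CLOCK_MONOTONIC);
  }
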
diff --git a/hurd/hurdpid.c b/hurd/hurdpid.c
index 3fac897..859a774 100644
--- a/hurd/hurdpid.c
+++ b/hurd/hurdpid.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
+/* Copyright (C) 1991-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -16,6 +16,8 @@
    <http://www.gnu.org/licenses/>.  */
 
 #include <hurd.h>
+#include <lowlevellock.h>
+
 pid_t _hurd_pid, _hurd_ppid, _hurd_pgrp;
 int _hurd_orphaned;
 
@@ -66,6 +68,7 @@ _S_msg_proc_newids (mach_port_t me,
 
   /* Notify any waiting user threads that the id change as been completed.  */
   ++_hurd_pids_changed_stamp;
+  lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST);
 
   return 0;
 }
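
The lll_wake above is the writer side of a generation counter; waiters
(see the setpgid and setsid changes below) sample the stamp and sleep
until it moves on. A minimal sketch of the waiter side:

  /* Sample the stamp before triggering the change, then block until
     the signal thread bumps it and broadcasts; lll_wait returns
     immediately if the stamp has already changed.  */
  unsigned int stamp = _hurd_pids_changed_stamp;
  /* ... ask the proc server to change the ids ... */
  while (_hurd_pids_changed_stamp == stamp)
    lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
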
diff --git a/hurd/hurdsig.c b/hurd/hurdsig.c
index 5b63a06..1b7aaed 100644
--- a/hurd/hurdsig.c
+++ b/hurd/hurdsig.c
@@ -1571,14 +1571,14 @@ reauth_proc (mach_port_t new)
   __mach_port_destroy (__mach_task_self (), ref);
 
   /* Set the owner of the process here too. */
-  mutex_lock (&_hurd_id.lock);
+  __mutex_lock (&_hurd_id.lock);
   if (!_hurd_check_ids ())
     HURD_PORT_USE (&_hurd_ports[INIT_PORT_PROC],
 		   __proc_setowner (port,
 				    (_hurd_id.gen.nuids
 				     ? _hurd_id.gen.uids[0] : 0),
 				    !_hurd_id.gen.nuids));
-  mutex_unlock (&_hurd_id.lock);
+  __mutex_unlock (&_hurd_id.lock);
 
   (void) &reauth_proc;		/* Silence compiler warning.  */
 }
diff --git a/hurd/setauth.c b/hurd/setauth.c
index 5af7ff1..be76e93 100644
--- a/hurd/setauth.c
+++ b/hurd/setauth.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1991-2014 Free Software Foundation, Inc.
+/* Copyright (C) 1991-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -18,14 +18,13 @@
 #include <hurd.h>
 #include <hurd/port.h>
 #include <hurd/id.h>
+#include <hurdlock.h>
 #include "set-hooks.h"
 
 /* Things in the library which want to be run when the auth port changes.  */
 DEFINE_HOOK (_hurd_reauth_hook, (auth_t new_auth));
 
-#include <cthreads.h>
-static struct mutex reauth_lock = MUTEX_INITIALIZER;
-
+static unsigned int reauth_lock = LLL_INITIALIZER;
 
 /* Set the auth port to NEW, and reauthenticate
    everything used by the library.  */
diff --git a/hurd/sysvshm.c b/hurd/sysvshm.c
index 5d538a6..f58fa38 100644
--- a/hurd/sysvshm.c
+++ b/hurd/sysvshm.c
@@ -1,5 +1,5 @@
 /* SysV shared memory for Hurd.
-   Copyright (C) 2005-2015 Free Software Foundation, Inc.
+   Copyright (C) 2005-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -26,6 +26,7 @@
 #include <dirent.h>
 #include <sys/stat.h>
 #include <sys/shm.h>
+#include <hurdlock.h>
 
 
 /* Description of an shm attachment.  */
@@ -45,7 +46,7 @@ struct sysvshm_attach
 static struct sysvshm_attach *sysvshm_list;
 
 /* A lock to protect the linked list of shared memory attachments.  */
-static struct mutex sysvshm_lock = MUTEX_INITIALIZER;
+static unsigned int sysvshm_lock = LLL_INITIALIZER;
 
 
 /* Adds a segment attachment.  */
diff --git a/mach/Makefile b/mach/Makefile
index 5131e26..0bf3df0 100644
--- a/mach/Makefile
+++ b/mach/Makefile
@@ -1,4 +1,4 @@
-# Copyright (C) 1991-2014 Free Software Foundation, Inc.
+# Copyright (C) 1991-2016 Free Software Foundation, Inc.
 # This file is part of the GNU C Library.
 
 # The GNU C Library is free software; you can redistribute it and/or
@@ -26,8 +26,8 @@ include ../Makeconfig
 headers = mach_init.h mach.h mach_error.h mach-shortcuts.h mach/mach_traps.h \
 	  $(interface-headers) mach/mach.h mach/mig_support.h mach/error.h \
 	  $(lock-headers) machine-sp.h
-lock = spin-solid spin-lock mutex-init mutex-solid
-lock-headers = lock-intern.h machine-lock.h spin-lock.h
+lock = spin-lock mutex-init
+lock-headers = lock-intern.h spin-lock.h lowlevellock.h
 routines = $(mach-syscalls) $(mach-shortcuts) \
 	   mach_init mig_strncpy msg \
 	   mig-alloc mig-dealloc mig-reply \
diff --git a/mach/lock-intern.h b/mach/lock-intern.h
index 6d315bb..4cf43cb 100644
--- a/mach/lock-intern.h
+++ b/mach/lock-intern.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 1994-2014 Free Software Foundation, Inc.
+/* Copyright (C) 1994-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -19,12 +19,17 @@
 #define	_LOCK_INTERN_H
 
 #include <sys/cdefs.h>
-#include <machine-lock.h>
+#include <lowlevellock.h>
 
 #ifndef _EXTERN_INLINE
 #define _EXTERN_INLINE __extern_inline
 #endif
 
+/* The type of a spin lock variable. */
+typedef unsigned int __spin_lock_t;
+
+/* Static initializer for spinlocks. */
+#define __SPIN_LOCK_INITIALIZER   0
 
 /* Initialize LOCK.  */
 
@@ -34,14 +39,11 @@ void __spin_lock_init (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_lock_init (__spin_lock_t *__lock)
 {
-  *__lock = __SPIN_LOCK_INITIALIZER;
+  *__lock = LLL_INITIALIZER;
 }
 #endif
 
 
-/* Lock LOCK, blocking if we can't get it.  */
-extern void __spin_lock_solid (__spin_lock_t *__lock);
-
 /* Lock the spin lock LOCK.  */
 
 void __spin_lock (__spin_lock_t *__lock);
@@ -50,31 +52,47 @@ void __spin_lock (__spin_lock_t *__lock);
 _EXTERN_INLINE void
 __spin_lock (__spin_lock_t *__lock)
 {
-  if (! __spin_try_lock (__lock))
-    __spin_lock_solid (__lock);
+  lll_lock (__lock, 0);
 }
 #endif
-
-/* Name space-clean internal interface to mutex locks.
 
-   Code internal to the C library uses these functions to lock and unlock
-   mutex locks.  These locks are of type `struct mutex', defined in
-   <cthreads.h>.  The functions here are name space-clean.  If the program
-   is linked with the cthreads library, `__mutex_lock_solid' and
-   `__mutex_unlock_solid' will invoke the corresponding cthreads functions
-   to implement real mutex locks.  If not, simple stub versions just use
-   spin locks.  */
+/* Unlock LOCK. */
+void __spin_unlock (__spin_lock_t *__lock);
 
+#ifdef __USE_EXTERN_INLINES
+_EXTERN_INLINE void
+__spin_unlock (__spin_lock_t *__lock)
+{
+  lll_unlock (__lock, 0);
+}
+#endif
 
-/* Initialize the newly allocated mutex lock LOCK for further use.  */
-extern void __mutex_init (void *__lock);
+/* Try to lock LOCK; return nonzero if we locked it, zero if another has. */
+int __spin_try_lock (__spin_lock_t *__lock);
 
-/* Lock LOCK, blocking if we can't get it.  */
-extern void __mutex_lock_solid (void *__lock);
+#ifdef __USE_EXTERN_INLINES
+_EXTERN_INLINE int
+__spin_try_lock (__spin_lock_t *__lock)
+{
+  return (lll_trylock (__lock) == 0);
+}
+#endif
+
+/* Return nonzero if LOCK is locked. */
+int __spin_lock_locked (__spin_lock_t *__lock);
+
+#ifdef __USE_EXTERN_INLINES
+_EXTERN_INLINE int
+__spin_lock_locked (__spin_lock_t *__lock)
+{
+  return (*(volatile __spin_lock_t *)__lock != 0);
+}
+#endif
+
+/* Name space-clean internal interface to mutex locks. */
 
-/* Finish unlocking LOCK, after the spin lock LOCK->held has already been
-   unlocked.  This function will wake up any thread waiting on LOCK.  */
-extern void __mutex_unlock_solid (void *__lock);
+/* Initialize the newly allocated mutex lock LOCK for further use.  */
+extern void __mutex_init (void *__lock);
 
 /* Lock the mutex lock LOCK.  */
 
@@ -84,8 +102,7 @@ void __mutex_lock (void *__lock);
 _EXTERN_INLINE void
 __mutex_lock (void *__lock)
 {
-  if (! __spin_try_lock ((__spin_lock_t *) __lock))
-    __mutex_lock_solid (__lock);
+  __spin_lock ((__spin_lock_t *)__lock);
 }
 #endif
 
@@ -97,8 +114,7 @@ void __mutex_unlock (void *__lock);
 _EXTERN_INLINE void
 __mutex_unlock (void *__lock)
 {
-  __spin_unlock ((__spin_lock_t *) __lock);
-  __mutex_unlock_solid (__lock);
+  __spin_unlock ((__spin_lock_t *)__lock);
 }
 #endif
 
@@ -109,7 +125,7 @@ int __mutex_trylock (void *__lock);
 _EXTERN_INLINE int
 __mutex_trylock (void *__lock)
 {
-  return __spin_try_lock ((__spin_lock_t *) __lock);
+  return (__spin_try_lock ((__spin_lock_t *)__lock));
 }
 #endif
 
diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h
new file mode 100644
index 0000000..b6ce939
--- /dev/null
+++ b/mach/lowlevellock.h
@@ -0,0 +1,80 @@
+/* Copyright (C) 1994-2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, see
+   <http://www.gnu.org/licenses/>.  */
+
+#ifndef __MACH_LOWLEVELLOCK_H__
+#define __MACH_LOWLEVELLOCK_H__   1
+
+#include <mach/gnumach.h>
+#include <atomic.h>
+
+/* Gsync flags. */
+#ifndef GSYNC_SHARED
+  #define GSYNC_SHARED      0x01
+  #define GSYNC_QUAD        0x02
+  #define GSYNC_TIMED       0x04
+  #define GSYNC_BROADCAST   0x08
+  #define GSYNC_MUTATE      0x10
+#endif
+
+/* Static initializer for low-level locks. */
+#define LLL_INITIALIZER   0
+
+/* Wait on address PTR, without blocking if its contents
+ * are different from VAL. */
+#define lll_wait(ptr, val, flags)   \
+  __gsync_wait (__mach_task_self (),   \
+    (vm_offset_t)(ptr), (val), 0, 0, (flags))
+
+/* Wake one or more threads waiting on address PTR. */
+#define lll_wake(ptr, flags)   \
+  __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags))
+
+/* Acquire the lock at PTR. */
+#define lll_lock(ptr, flags)   \
+  ({   \
+     int *__iptr = (int *)(ptr);   \
+     int __flags = (flags);   \
+     if (*__iptr != 0 ||   \
+         atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0)   \
+       while (1)   \
+         {   \
+           if (atomic_exchange_acq (__iptr, 2) == 0)   \
+             break;   \
+           lll_wait (__iptr, 2, __flags);   \
+         }   \
+     (void)0;   \
+   })
+
+/* Try to acquire the lock at PTR, without blocking.
+ * Evaluates to zero on success. */
+#define lll_trylock(ptr)   \
+  ({   \
+     int *__iptr = (int *)(ptr);   \
+     *__iptr == 0 &&   \
+       atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1;   \
+   })
+
+/* Release the lock at PTR. */
+#define lll_unlock(ptr, flags)   \
+  ({   \
+     int *__iptr = (int *)(ptr);   \
+     if (atomic_exchange_rel (__iptr, 0) == 2)   \
+       lll_wake (__iptr, (flags));   \
+     (void)0;   \
+   })
+
+#endif
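
The lock word protocol above is the classic three-state futex mutex:
0 means unlocked, 1 locked with no contention, and 2 locked with
possible waiters, which is why lll_unlock only issues a wake when it
observes 2. Typical usage, as a sketch:

  #include <mach/lowlevellock.h>

  static int lock_word = LLL_INITIALIZER;   /* 0: unlocked.  */

  static void with_lock (void)
  {
    lll_lock (&lock_word, 0);    /* Fast path: 0 -> 1.  Contended path:
                                    set to 2 and sleep in gsync_wait.  */
    /* ... critical section ...  */
    lll_unlock (&lock_word, 0);  /* Set to 0; wake a sleeper only if
                                    the word was 2.  */
  }
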
diff --git a/mach/mutex-init.c b/mach/mutex-init.c
index fc3a5e5..a2ede46 100644
--- a/mach/mutex-init.c
+++ b/mach/mutex-init.c
@@ -1,5 +1,5 @@
-/* Initialize a cthreads mutex structure.
-   Copyright (C) 1995-2014 Free Software Foundation, Inc.
+/* Initialize a mutex.
+   Copyright (C) 1995-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -17,13 +17,10 @@
    <http://www.gnu.org/licenses/>.  */
 
 #include <lock-intern.h>
-#include <cthreads.h>
+#include <lowlevellock.h>
 
 void
 __mutex_init (void *lock)
 {
-  /* This happens to be name space-safe because it is a macro.
-     It invokes only spin_lock_init, which is a macro for __spin_lock_init;
-     and cthread_queue_init, which is a macro for some simple code.  */
-  mutex_init ((struct mutex *) lock);
+  *(int *)lock = LLL_INITIALIZER;
 }
diff --git a/mach/mutex-solid.c b/mach/mutex-solid.c
deleted file mode 100644
index 70e8333..0000000
--- a/mach/mutex-solid.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Stub versions of mutex_lock_solid/mutex_unlock_solid for no -lthreads.
-   Copyright (C) 1995-2014 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <lock-intern.h>
-#include <cthreads.h>
-
-/* If cthreads is linked in, it will define these functions itself to do
-   real cthreads mutex locks.  This file will only be linked in when
-   cthreads is not used, and `mutexes' are in fact just spin locks (and
-   some unused storage).  */
-
-void
-__mutex_lock_solid (void *lock)
-{
-  __spin_lock_solid (lock);
-}
-
-void
-__mutex_unlock_solid (void *lock)
-{
-}
diff --git a/mach/spin-solid.c b/mach/spin-solid.c
deleted file mode 100644
index e1e154b..0000000
--- a/mach/spin-solid.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (C) 1994-2014 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#include <spin-lock.h>
-#include <mach/mach_traps.h>
-
-void
-__spin_lock_solid (spin_lock_t *lock)
-{
-  while (__spin_lock_locked (lock) || ! __spin_try_lock (lock))
-    /* Yield to another thread (system call).  */
-    __swtch_pri (0);
-}
-weak_alias (__spin_lock_solid, spin_lock_solid);
diff --git a/manual/errno.texi b/manual/errno.texi
index 6a691fc..70c574a 100644
--- a/manual/errno.texi
+++ b/manual/errno.texi
@@ -993,6 +993,20 @@ the normal result is for the operations affected to complete with this
 error; @pxref{Cancel AIO Operations}.
 @end deftypevr
 
+@comment errno.h
+@comment POSIX.1: Robust mutex owner died
+@deftypevr Macro int EOWNERDEAD
+@comment errno 120
+The owner of a POSIX robust mutex has died.
+@end deftypevr
+
+@comment errno.h
+@comment POSIX.1: Robust mutex irrecoverable
+@deftypevr Macro int ENOTRECOVERABLE
+@comment errno 121
+An inconsistent POSIX robust mutex has been unlocked without first being
+marked consistent again, so the state it protects is not recoverable.
+@end deftypevr
 
 @emph{The following error codes are defined by the Linux/i386 kernel.
 They are not yet documented.}
diff --git a/sysdeps/mach/Makefile b/sysdeps/mach/Makefile
index 634ba80..68afa6d 100644
--- a/sysdeps/mach/Makefile
+++ b/sysdeps/mach/Makefile
@@ -1,4 +1,4 @@
-# Copyright (C) 1993-2014 Free Software Foundation, Inc.
+# Copyright (C) 1993-2016 Free Software Foundation, Inc.
 # This file is part of the GNU C Library.
 
 # The GNU C Library is free software; you can redistribute it and/or
@@ -48,4 +48,18 @@ $(patsubst mach%,m\%h%,$(mach-before-compile)): # Run only if doesn't exist.
 before-compile += $(mach-before-compile)
 endif
 
+ifeq (crypt,$(subdir))
+  LDLIBS-crypt.so += -lmachuser
+else ifeq (dlfcn,$(subdir))
+  LDLIBS-dl.so += -lmachuser
+else ifeq (nis,$(subdir))
+  LDLIBS-nsl.so += -lmachuser
+  LDLIBS-nss_nis.so += -lmachuser
+  LDLIBS-nss_nisplus.so += -lmachuser
+  LDLIBS-nss_compat.so += -lmachuser
+else ifeq (nss,$(subdir))
+  LDLIBS-nss.so += -lmachuser
+  LDLIBS-nss_files.so += -lmachuser
+endif
+
 endif	# in-Makerules
diff --git a/sysdeps/mach/bits/libc-lock.h b/sysdeps/mach/bits/libc-lock.h
index 40b7f2b..f6f3e05 100644
--- a/sysdeps/mach/bits/libc-lock.h
+++ b/sysdeps/mach/bits/libc-lock.h
@@ -1,5 +1,5 @@
-/* libc-internal interface for mutex locks.  Mach cthreads version.
-   Copyright (C) 1996-2014 Free Software Foundation, Inc.
+/* libc-internal interface for mutex locks.  Hurd version using gnumach gsync.
+   Copyright (C) 1996-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,15 +20,33 @@
 #define _BITS_LIBC_LOCK_H 1
 
 #ifdef _LIBC
+
+#include <tls.h>
 #include <cthreads.h>
-#define __libc_lock_t struct mutex
+#include <lowlevellock.h>
+
+/* The locking here is very inexpensive, even for inlining. */
+#define _IO_lock_inexpensive   1
+
+typedef unsigned int __libc_lock_t;
+typedef struct
+{
+  __libc_lock_t lock;
+  int cnt;
+  void *owner;
+} __libc_lock_recursive_t;
+
+typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
+
+extern char __libc_lock_self0[0];
+#define __libc_lock_owner_self()   \
+  (__LIBC_NO_TLS() ? (void *)&__libc_lock_self0 : THREAD_SELF)
+
 #else
 typedef struct __libc_lock_opaque__ __libc_lock_t;
+typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
 #endif
 
-/* Type for key of thread specific data.  */
-typedef cthread_key_t __libc_key_t;
-
 /* Define a lock variable NAME with storage class CLASS.  The lock must be
    initialized with __libc_lock_init before it can be used (or define it
    with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
@@ -41,26 +59,92 @@ typedef cthread_key_t __libc_key_t;
 
 /* Define an initialized lock variable NAME with storage class CLASS.  */
 #define __libc_lock_define_initialized(CLASS,NAME) \
-  CLASS __libc_lock_t NAME = MUTEX_INITIALIZER;
+  CLASS __libc_lock_t NAME = LLL_INITIALIZER;
 
 /* Initialize the named lock variable, leaving it in a consistent, unlocked
    state.  */
-#define __libc_lock_init(NAME) __mutex_init (&(NAME))
+#define __libc_lock_init(NAME) (NAME) = LLL_INITIALIZER
 
 /* Finalize the named lock variable, which must be locked.  It cannot be
    used again until __libc_lock_init is called again on it.  This must be
    called on a lock variable before the containing storage is reused.  */
-#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME))
+#define __libc_lock_fini             __libc_lock_unlock
+#define __libc_lock_fini_recursive   __libc_lock_unlock_recursive
+#define __rtld_lock_fini_recursive   __rtld_lock_unlock_recursive
 
 /* Lock the named lock variable.  */
-#define __libc_lock_lock(NAME) __mutex_lock (&(NAME))
+#define __libc_lock_lock(NAME)   \
+  ({ lll_lock (&(NAME), 0); 0; })
 
 /* Lock the named lock variable.  */
-#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME)))
+#define __libc_lock_trylock(NAME) lll_trylock (&(NAME))
 
 /* Unlock the named lock variable.  */
-#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME))
-
+#define __libc_lock_unlock(NAME)   \
+  ({ lll_unlock (&(NAME), 0); 0; })
+
+#define __libc_lock_define_recursive(CLASS,NAME) \
+  CLASS __libc_lock_recursive_t NAME;
+
+#define _LIBC_LOCK_RECURSIVE_INITIALIZER { LLL_INITIALIZER, 0, 0 }
+
+#define __libc_lock_define_initialized_recursive(CLASS,NAME) \
+  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
+
+#define __rtld_lock_define_recursive(CLASS,NAME) \
+  __libc_lock_define_recursive (CLASS, NAME)
+#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
+  _LIBC_LOCK_RECURSIVE_INITIALIZER
+#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
+  __libc_lock_define_initialized_recursive (CLASS, NAME)
+
+#define __libc_lock_init_recursive(NAME)   \
+  ((NAME) = (__libc_lock_recursive_t)_LIBC_LOCK_RECURSIVE_INITIALIZER, 0)
+
+#define __libc_lock_trylock_recursive(NAME)   \
+  ({   \
+     __libc_lock_recursive_t *const __lock = &(NAME);   \
+     void *__self = __libc_lock_owner_self ();   \
+     int __r = 0;   \
+     if (__self == __lock->owner)   \
+       ++__lock->cnt;   \
+     else if ((__r = lll_trylock (&__lock->lock)) == 0)   \
+       __lock->owner = __self, __lock->cnt = 1;   \
+     __r;   \
+   })
+
+#define __libc_lock_lock_recursive(NAME)   \
+  ({   \
+     __libc_lock_recursive_t *const __lock = &(NAME);   \
+     void *__self = __libc_lock_owner_self ();   \
+     if (__self != __lock->owner)   \
+       {   \
+         lll_lock (&__lock->lock, 0);   \
+         __lock->owner = __self;   \
+       }   \
+     ++__lock->cnt;   \
+     (void)0;   \
+   })
+
+#define __libc_lock_unlock_recursive(NAME)   \
+  ({   \
+     __libc_lock_recursive_t *const __lock = &(NAME);   \
+     if (--__lock->cnt == 0)   \
+       {   \
+         __lock->owner = 0;   \
+         lll_unlock (&__lock->lock, 0);   \
+       }   \
+   })
+
+
+#define __rtld_lock_initialize(NAME) \
+  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
+#define __rtld_lock_trylock_recursive(NAME) \
+  __libc_lock_trylock_recursive (NAME)
+#define __rtld_lock_lock_recursive(NAME) \
+  __libc_lock_lock_recursive(NAME)
+#define __rtld_lock_unlock_recursive(NAME) \
+  __libc_lock_unlock_recursive (NAME)
 
 /* XXX for now */
 #define __libc_rwlock_define		__libc_lock_define
@@ -73,25 +157,38 @@ typedef cthread_key_t __libc_key_t;
 #define __libc_rwlock_trywrlock		__libc_lock_trylock
 #define __libc_rwlock_unlock		__libc_lock_unlock
 
+struct __libc_cleanup_frame
+{
+  void (*__fct) (void *);
+  void *__argp;
+  int __doit;
+};
+
+__extern_inline void
+__libc_cleanup_fct (struct __libc_cleanup_frame *framep)
+{
+  if (framep->__doit)
+    framep->__fct (framep->__argp);
+}
 
 /* Start a critical region with a cleanup function */
-#define __libc_cleanup_region_start(DOIT, FCT, ARG)			    \
-{									    \
-  typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0;			    \
-  typeof (ARG) __save_ARG = ARG;					    \
-  /* close brace is in __libc_cleanup_region_end below. */
-
-/* End a critical region started with __libc_cleanup_region_start. */
-#define __libc_cleanup_region_end(DOIT)					    \
-  if ((DOIT) && __save_FCT != 0)					    \
-    (*__save_FCT)(__save_ARG);						    \
-}
+#define __libc_cleanup_region_start(DOIT, FCT, ARG)   \
+  do   \
+    {   \
+      struct __libc_cleanup_frame __cleanup   \
+        __attribute__ ((__cleanup__ (__libc_cleanup_fct))) =   \
+        { .__fct = (FCT), .__argp = (ARG), .__doit = (DOIT) };
+
+/* This one closes the brace above. */
+#define __libc_cleanup_region_end(DOIT)   \
+      __cleanup.__doit = (DOIT);   \
+    }   \
+  while (0)
 
-/* Sometimes we have to exit the block in the middle.  */
-#define __libc_cleanup_end(DOIT)					    \
-  if ((DOIT) && __save_FCT != 0)					    \
-    (*__save_FCT)(__save_ARG);						    \
+#define __libc_cleanup_end(DOIT)   __cleanup.__doit = (DOIT);
 
+#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg)
+#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute)
 
 /* Use mutexes as once control variables. */
 
@@ -102,8 +199,7 @@ struct __libc_once
   };
 
 #define __libc_once_define(CLASS,NAME) \
-  CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 }
-
+  CLASS struct __libc_once NAME = { LLL_INITIALIZER, 0 }
 
 /* Call handler iff the first call.  */
 #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
@@ -121,25 +217,11 @@ struct __libc_once
 #ifdef _LIBC
 /* We need portable names for some functions.  E.g., when they are
    used as argument to __libc_cleanup_region_start.  */
-#define __libc_mutex_unlock __mutex_unlock
-#endif
+#define __libc_mutex_unlock __libc_lock_unlock
 
-#define __libc_key_create(KEY,DEST) cthread_keycreate (KEY)
-#define __libc_setspecific(KEY,VAL) cthread_setspecific (KEY, VAL)
-void *__libc_getspecific (__libc_key_t key);
-
-/* XXX until cthreads supports recursive locks */
-#define __libc_lock_define_initialized_recursive __libc_lock_define_initialized
-#define __libc_lock_init_recursive __libc_lock_init
-#define __libc_lock_fini_recursive __libc_lock_fini
-#define __libc_lock_trylock_recursive __libc_lock_trylock
-#define __libc_lock_unlock_recursive __libc_lock_unlock
-#define __libc_lock_lock_recursive __libc_lock_lock
-
-#define __rtld_lock_define_initialized_recursive __libc_lock_define_initialized
-#define __rtld_lock_fini_recursive __libc_lock_fini
-#define __rtld_lock_trylock_recursive __libc_lock_trylock
-#define __rtld_lock_unlock_recursive __libc_lock_unlock
-#define __rtld_lock_lock_recursive __libc_lock_lock
+/* Hide the definitions which are only supposed to be used inside libc in
+   a separate file.  This file is not present in the installation!  */
+# include <bits/libc-lockP.h>
+#endif
 
 #endif	/* bits/libc-lock.h */
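
The rewritten cleanup region relies on gcc's cleanup variable
attribute: the handler attached to __cleanup runs whenever the block is
left, so the region no longer needs the old brace-pairing trick. A
self-contained sketch of the mechanism (all names hypothetical):

  #include <stdio.h>

  struct frame { void (*fct) (void *); void *argp; int doit; };

  static void run_cleanup (struct frame *f)
  {
    if (f->doit)
      f->fct (f->argp);
  }

  static void release (void *arg)
  {
    printf ("cleanup ran for %s\n", (const char *) arg);
  }

  int main (void)
  {
    {
      struct frame c __attribute__ ((__cleanup__ (run_cleanup))) =
        { .fct = release, .argp = (void *) "lock", .doit = 1 };
      /* The critical region; run_cleanup fires when c goes out of
         scope, even on an early return.  */
    }
    return 0;
  }
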
diff --git a/sysdeps/mach/hurd/bits/errno.h b/sysdeps/mach/hurd/bits/errno.h
index d20ffe6..c5db66e 100644
--- a/sysdeps/mach/hurd/bits/errno.h
+++ b/sysdeps/mach/hurd/bits/errno.h
@@ -222,6 +222,10 @@ enum __error_t_codes
 #define	ETIME           _HURD_ERRNO (117)/* Timer expired */
 	ECANCELED       = _HURD_ERRNO (119),
 #define	ECANCELED       _HURD_ERRNO (119)/* Operation canceled */
+	EOWNERDEAD      = _HURD_ERRNO (120),
+#define	EOWNERDEAD      _HURD_ERRNO (120)/* Robust mutex owner died */
+	ENOTRECOVERABLE = _HURD_ERRNO (121),
+#define	ENOTRECOVERABLE _HURD_ERRNO (121)/* Robust mutex irrecoverable */
 
 	/* Errors from <mach/message.h>.  */
 	EMACH_SEND_IN_PROGRESS          = 0x10000001,
@@ -278,6 +282,8 @@ enum __error_t_codes
 	EKERN_MEMORY_PRESENT            = 23,
 	EKERN_WRITE_PROTECTION_FAILURE  = 24,
 	EKERN_TERMINATED                = 26,
+	EKERN_TIMEDOUT                  = 27,
+	EKERN_INTERRUPTED               = 28,
 
 	/* Errors from <mach/mig_errors.h>.  */
 	EMIG_TYPE_ERROR         = -300  /* client type check failure */,
@@ -305,7 +311,7 @@ enum __error_t_codes
 
 };
 
-#define	_HURD_ERRNOS	120
+#define	_HURD_ERRNOS	122
 
 /* User-visible type of error codes.  It is ok to use `int' or
    `kern_return_t' for these, but with `error_t' the debugger prints
diff --git a/sysdeps/mach/hurd/bits/libc-lock.h b/sysdeps/mach/hurd/bits/libc-lock.h
deleted file mode 100644
index c9872c6..0000000
--- a/sysdeps/mach/hurd/bits/libc-lock.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* libc-internal interface for mutex locks.  Hurd version using Mach cthreads.
-   Copyright (C) 1996-2014 Free Software Foundation, Inc.
-   This file is part of the GNU C Library.
-
-   The GNU C Library is free software; you can redistribute it and/or
-   modify it under the terms of the GNU Lesser General Public
-   License as published by the Free Software Foundation; either
-   version 2.1 of the License, or (at your option) any later version.
-
-   The GNU C Library is distributed in the hope that it will be useful,
-   but WITHOUT ANY WARRANTY; without even the implied warranty of
-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
-   Lesser General Public License for more details.
-
-   You should have received a copy of the GNU Lesser General Public
-   License along with the GNU C Library; if not, see
-   <http://www.gnu.org/licenses/>.  */
-
-#ifndef _BITS_LIBC_LOCK_H
-#define _BITS_LIBC_LOCK_H 1
-
-#if (_LIBC - 0) || (_CTHREADS_ - 0)
-# if (_LIBC - 0)
-#  include <tls.h>
-# endif
-#include <cthreads.h>
-
-typedef struct mutex __libc_lock_t;
-typedef struct
-{
-  struct mutex mutex;
-  void *owner;
-  int count;
-} __libc_lock_recursive_t;
-typedef __libc_lock_recursive_t __rtld_lock_recursive_t;
-
-extern char __libc_lock_self0[0];
-#define __libc_lock_owner_self() (__LIBC_NO_TLS() ? &__libc_lock_self0 : THREAD_SELF)
-
-#else
-typedef struct __libc_lock_opaque__ __libc_lock_t;
-typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t;
-#endif
-
-/* Define a lock variable NAME with storage class CLASS.  The lock must be
-   initialized with __libc_lock_init before it can be used (or define it
-   with __libc_lock_define_initialized, below).  Use `extern' for CLASS to
-   declare a lock defined in another module.  In public structure
-   definitions you must use a pointer to the lock structure (i.e., NAME
-   begins with a `*'), because its storage size will not be known outside
-   of libc.  */
-#define __libc_lock_define(CLASS,NAME) \
-  CLASS __libc_lock_t NAME;
-
-/* Define an initialized lock variable NAME with storage class CLASS.  */
-#define __libc_lock_define_initialized(CLASS,NAME) \
-  CLASS __libc_lock_t NAME = MUTEX_INITIALIZER;
-
-/* Initialize the named lock variable, leaving it in a consistent, unlocked
-   state.  */
-#define __libc_lock_init(NAME) __mutex_init (&(NAME))
-
-/* Finalize the named lock variable, which must be locked.  It cannot be
-   used again until __libc_lock_init is called again on it.  This must be
-   called on a lock variable before the containing storage is reused.  */
-#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME))
-#define __libc_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex)
-#define __rtld_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex)
-
-
-/* Lock the named lock variable.  */
-#define __libc_lock_lock(NAME) __mutex_lock (&(NAME))
-
-/* Lock the named lock variable.  */
-#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME)))
-
-/* Unlock the named lock variable.  */
-#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME))
-
-
-#define __libc_lock_define_recursive(CLASS,NAME) \
-  CLASS __libc_lock_recursive_t NAME;
-#define _LIBC_LOCK_RECURSIVE_INITIALIZER { MUTEX_INITIALIZER, 0, 0 }
-#define __libc_lock_define_initialized_recursive(CLASS,NAME) \
-  CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER;
-
-#define __rtld_lock_define_recursive(CLASS,NAME) \
-  __libc_lock_define_recursive (CLASS, NAME)
-#define _RTLD_LOCK_RECURSIVE_INITIALIZER \
-  _LIBC_LOCK_RECURSIVE_INITIALIZER
-#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \
-  __libc_lock_define_initialized_recursive (CLASS, NAME)
-
-#define __libc_lock_init_recursive(NAME) \
-  ({ __libc_lock_recursive_t *const __lock = &(NAME); \
-     __lock->owner = 0; mutex_init (&__lock->mutex); })
-
-#define __libc_lock_trylock_recursive(NAME)				      \
-  ({ __libc_lock_recursive_t *const __lock = &(NAME);			      \
-     void *__self = __libc_lock_owner_self ();				      \
-     __mutex_trylock (&__lock->mutex)					      \
-     ? (__lock->owner = __self, __lock->count = 1, 0)			      \
-     : __lock->owner == __self ? (++__lock->count, 0) : 1; })
-
-#define __libc_lock_lock_recursive(NAME)				      \
-  ({ __libc_lock_recursive_t *const __lock = &(NAME);			      \
-     void *__self = __libc_lock_owner_self ();				      \
-     if (__mutex_trylock (&__lock->mutex)				      \
-	 || (__lock->owner != __self					      \
-	     && (__mutex_lock (&__lock->mutex), 1)))			      \
-       __lock->owner = __self, __lock->count = 1;			      \
-     else								      \
-       ++__lock->count;							      \
-  })
-#define __libc_lock_unlock_recursive(NAME)				      \
-  ({ __libc_lock_recursive_t *const __lock = &(NAME);			      \
-     if (--__lock->count == 0)						      \
-       {								      \
-	 __lock->owner = 0;						      \
-	 __mutex_unlock (&__lock->mutex);				      \
-       }								      \
-  })
-
-
-#define __rtld_lock_initialize(NAME) \
-  (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER)
-#define __rtld_lock_trylock_recursive(NAME) \
-  __libc_lock_trylock_recursive (NAME)
-#define __rtld_lock_lock_recursive(NAME) \
-  __libc_lock_lock_recursive(NAME)
-#define __rtld_lock_unlock_recursive(NAME) \
-  __libc_lock_unlock_recursive (NAME)
-
-
-/* XXX for now */
-#define __libc_rwlock_define		__libc_lock_define
-#define __libc_rwlock_define_initialized __libc_lock_define_initialized
-#define __libc_rwlock_init		__libc_lock_init
-#define __libc_rwlock_fini		__libc_lock_fini
-#define __libc_rwlock_rdlock		__libc_lock_lock
-#define __libc_rwlock_wrlock		__libc_lock_lock
-#define __libc_rwlock_tryrdlock		__libc_lock_trylock
-#define __libc_rwlock_trywrlock		__libc_lock_trylock
-#define __libc_rwlock_unlock		__libc_lock_unlock
-
-
-/* Start a critical region with a cleanup function */
-#define __libc_cleanup_region_start(DOIT, FCT, ARG)			    \
-{									    \
-  typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0;			    \
-  typeof (ARG) __save_ARG = ARG;					    \
-  /* close brace is in __libc_cleanup_region_end below. */
-
-/* End a critical region started with __libc_cleanup_region_start. */
-#define __libc_cleanup_region_end(DOIT)					    \
-  if ((DOIT) && __save_FCT != 0)					    \
-    (*__save_FCT)(__save_ARG);						    \
-}
-
-/* Sometimes we have to exit the block in the middle.  */
-#define __libc_cleanup_end(DOIT)					    \
-  if ((DOIT) && __save_FCT != 0)					    \
-    (*__save_FCT)(__save_ARG);						    \
-
-#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg)
-#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute)
-
-#if (_CTHREADS_ - 0)
-
-/* Use mutexes as once control variables. */
-
-struct __libc_once
-  {
-    __libc_lock_t lock;
-    int done;
-  };
-
-#define __libc_once_define(CLASS,NAME) \
-  CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 }
-
-/* Call handler iff the first call.  */
-#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \
-  do {									      \
-    __libc_lock_lock (ONCE_CONTROL.lock);				      \
-    if (!ONCE_CONTROL.done)						      \
-      (INIT_FUNCTION) ();						      \
-    ONCE_CONTROL.done = 1;						      \
-    __libc_lock_unlock (ONCE_CONTROL.lock);				      \
-  } while (0)
-
-/* Get once control variable.  */
-#define __libc_once_get(ONCE_CONTROL)	((ONCE_CONTROL).done != 0)
-
-#ifdef _LIBC
-/* We need portable names for some functions.  E.g., when they are
-   used as argument to __libc_cleanup_region_start.  */
-#define __libc_mutex_unlock __mutex_unlock
-#endif
-
-/* Type for key of thread specific data.  */
-typedef cthread_key_t __libc_key_t;
-
-#define __libc_key_create(KEY,DEST) cthread_keycreate (KEY)
-#define __libc_setspecific(KEY,VAL) cthread_setspecific (KEY, VAL)
-void *__libc_getspecific (__libc_key_t key);
-
-#endif /* _CTHREADS_ */
-
-/* Hide the definitions which are only supposed to be used inside libc in
-   a separate file.  This file is not present in the installation!  */
-#ifdef _LIBC
-# include <bits/libc-lockP.h>
-#endif
-
-#endif	/* bits/libc-lock.h */
diff --git a/sysdeps/mach/hurd/bits/posix_opt.h b/sysdeps/mach/hurd/bits/posix_opt.h
index 9ab9c9f..8ad8858 100644
--- a/sysdeps/mach/hurd/bits/posix_opt.h
+++ b/sysdeps/mach/hurd/bits/posix_opt.h
@@ -159,8 +159,8 @@
 /* POSIX message queues could be available in future.  */
 #define	_POSIX_MESSAGE_PASSING	0
 
-/* Thread process-shared synchronization is not supported.  */
-#define _POSIX_THREAD_PROCESS_SHARED	-1
+/* Thread process-shared synchronization is supported.  */
+#define _POSIX_THREAD_PROCESS_SHARED	200809L
 
 /* The monotonic clock might be available.  */
 #define _POSIX_MONOTONIC_CLOCK	0
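
With gsync, synchronization objects marked PTHREAD_PROCESS_SHARED can
work across tasks (the GSYNC_SHARED flag tells the kernel the word may
be mapped by more than one task), which is what advertising
_POSIX_THREAD_PROCESS_SHARED implies. A sketch of the user-visible
effect (error handling omitted):

  #include <pthread.h>
  #include <sys/mman.h>

  /* Place a process-shared mutex in anonymous shared memory; after a
     fork, parent and child can both lock it.  */
  static pthread_mutex_t *make_shared_mutex (void)
  {
    pthread_mutex_t *mtx = mmap (NULL, sizeof *mtx,
                                 PROT_READ | PROT_WRITE,
                                 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    pthread_mutexattr_t attr;
    pthread_mutexattr_init (&attr);
    pthread_mutexattr_setpshared (&attr, PTHREAD_PROCESS_SHARED);
    pthread_mutex_init (mtx, &attr);
    pthread_mutexattr_destroy (&attr);
    return mtx;
  }
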
diff --git a/sysdeps/mach/hurd/cthreads.c b/sysdeps/mach/hurd/cthreads.c
index e25fcd0..2b65d36 100644
--- a/sysdeps/mach/hurd/cthreads.c
+++ b/sysdeps/mach/hurd/cthreads.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1997-2014 Free Software Foundation, Inc.
+/* Copyright (C) 1997-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -55,13 +55,3 @@ cthread_setspecific (key, val)
   return -1;
 }
 
-/* Call cthread_getspecific which gets a pointer to the return value instead
-   of just returning it.  */
-void *
-__libc_getspecific (key)
-     cthread_key_t key;
-{
-  void *val;
-  cthread_getspecific (key, &val);
-  return val;
-}
diff --git a/sysdeps/mach/hurd/malloc-machine.h b/sysdeps/mach/hurd/malloc-machine.h
index cd86642..577aed6 100644
--- a/sysdeps/mach/hurd/malloc-machine.h
+++ b/sysdeps/mach/hurd/malloc-machine.h
@@ -1,6 +1,6 @@
 /* Basic platform-independent macro definitions for mutexes,
    thread-specific data and parameters for malloc.
-   Copyright (C) 2003-2014 Free Software Foundation, Inc.
+   Copyright (C) 2003-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -22,24 +22,23 @@
 
 #undef thread_atfork_static
 
-#include <atomic.h>
 #include <bits/libc-lock.h>
+#include <mach/lock-intern.h>
 
-/* Assume hurd, with cthreads */
-
-/* Cthreads `mutex_t' is a pointer to a mutex, and malloc wants just the
-   mutex itself.  */
 #undef mutex_t
-#define mutex_t struct mutex
+#define mutex_t unsigned int
+
+#undef MUTEX_INITIALIZER
+#define MUTEX_INITIALIZER   LLL_INITIALIZER
 
 #undef mutex_init
-#define mutex_init(m) (__mutex_init(m), 0)
+#define mutex_init(m) ({ __mutex_init(m); 0; })
 
 #undef mutex_lock
-#define mutex_lock(m) (__mutex_lock(m), 0)
+#define mutex_lock(m) ({ __mutex_lock(m); 0; })
 
 #undef mutex_unlock
-#define mutex_unlock(m) (__mutex_unlock(m), 0)
+#define mutex_unlock(m) ({ __mutex_unlock(m); 0; })
 
 #define mutex_trylock(m) (!__mutex_trylock(m))
 
diff --git a/sysdeps/mach/hurd/setpgid.c b/sysdeps/mach/hurd/setpgid.c
index 6662d3d..655bc3c 100644
--- a/sysdeps/mach/hurd/setpgid.c
+++ b/sysdeps/mach/hurd/setpgid.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1993-2014 Free Software Foundation, Inc.
+/* Copyright (C) 1993-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -19,6 +19,7 @@
 #include <unistd.h>
 #include <hurd.h>
 #include <hurd/port.h>
+#include <lowlevellock.h>
 
 /* Set the process group ID of the process matching PID to PGID.
    If PID is zero, the current process's process group ID is set.
@@ -40,14 +41,7 @@ __setpgid (pid, pgid)
     /* Synchronize with the signal thread to make sure we have
        received and processed proc_newids before returning to the user.  */
     while (_hurd_pids_changed_stamp == stamp)
-      {
-#ifdef noteven
-	/* XXX we have no need for a mutex, but cthreads demands one.  */
-	__condition_wait (&_hurd_pids_changed_sync, NULL);
-#else
-	__swtch_pri(0);
-#endif
-      }
+      lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
 
   return 0;
 
diff --git a/sysdeps/mach/hurd/setsid.c b/sysdeps/mach/hurd/setsid.c
index 36ec3d8..cf6140c 100644
--- a/sysdeps/mach/hurd/setsid.c
+++ b/sysdeps/mach/hurd/setsid.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 1993-2014 Free Software Foundation, Inc.
+/* Copyright (C) 1993-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,6 +20,8 @@
 #include <hurd.h>
 #include <hurd/port.h>
 #include <hurd/fd.h>
+#include <hurd/ioctl.h>
+#include <lowlevellock.h>
 
 /* Create a new session with the calling process as its leader.
    The process group IDs of the session and the calling process
@@ -54,14 +56,7 @@ __setsid (void)
 	 returned by `getpgrp ()' in other threads) has been updated before
 	 we return.  */
       while (_hurd_pids_changed_stamp == stamp)
-	{
-#ifdef noteven
-	  /* XXX we have no need for a mutex, but cthreads demands one.  */
-	  __condition_wait (&_hurd_pids_changed_sync, NULL);
-#else
-	  __swtch_pri (0);
-#endif
-	}
+        lll_wait (&_hurd_pids_changed_stamp, stamp, 0);
     }
 
   HURD_CRITICAL_END;

And here is the corresponding libpthread patch for the mutexes:

* Makefile (libpthread-routines): Remove the individual mutex objects
  and add pt-mutex.
* include/pthread/pthread.h: Add declaration of the POSIX function
  'pthread_mutex_consistent' and of the GNU extension with the '_np' suffix.
* sysdeps/generic/pt-mutex.c: New file. Contains the implementation of
  POSIX mutexes and their attributes, plus GNU/Hurd extensions.
* sysdeps/pthread/bits/mutex.h: Redefine the mutex type.

---
diff --git a/Makefile b/Makefile
index bfdae7b..8f3144b 100644
--- a/Makefile
+++ b/Makefile
@@ -81,18 +81,7 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate	    \
 	pt-testcancel							    \
 	pt-cancel							    \
 									    \
-	pt-mutexattr							    \
-	pt-mutexattr-destroy pt-mutexattr-init				    \
-	pt-mutexattr-getprioceiling pt-mutexattr-getprotocol		    \
-	pt-mutexattr-getpshared pt-mutexattr-gettype			    \
-	pt-mutexattr-setprioceiling pt-mutexattr-setprotocol		    \
-	pt-mutexattr-setpshared pt-mutexattr-settype			    \
-									    \
-	pt-mutex-init pt-mutex-destroy					    \
-	pt-mutex-lock pt-mutex-trylock pt-mutex-timedlock		    \
-	pt-mutex-unlock							    \
-	pt-mutex-transfer-np						    \
-	pt-mutex-getprioceiling pt-mutex-setprioceiling			    \
+	pt-mutex   \
 									    \
 	pt-rwlock-attr							    \
 	pt-rwlockattr-init pt-rwlockattr-destroy			    \
diff --git a/include/pthread/pthread.h b/include/pthread/pthread.h
index 3aa6a93..d73a3e7 100644
--- a/include/pthread/pthread.h
+++ b/include/pthread/pthread.h
@@ -399,6 +399,18 @@ extern int pthread_mutex_setprioceiling (pthread_mutex_t *__restrict __mutex,
 	__THROW __nonnull ((1, 3));
 #endif
 
+#ifdef __USE_XOPEN2K8
+
+/* Declare the state protected by robust mutex MTXP as consistent. */
+extern int pthread_mutex_consistent (pthread_mutex_t *__mtxp)
+  __THROW __nonnull ((1));
+
+#  ifdef __USE_GNU
+extern int pthread_mutex_consistent_np (pthread_mutex_t *__mtxp)
+  __THROW __nonnull ((1));
+#  endif
+#endif
+
 
 
 /* Condition attributes.  */
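
The intended recovery protocol for the new declarations, as a sketch
(standard POSIX robust-mutex usage; error handling trimmed):

  #include <pthread.h>
  #include <errno.h>

  static pthread_mutex_t mtx;

  static void init_robust (void)
  {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init (&attr);
    pthread_mutexattr_setrobust (&attr, PTHREAD_MUTEX_ROBUST);
    pthread_mutex_init (&mtx, &attr);
    pthread_mutexattr_destroy (&attr);
  }

  static int lock_robustly (void)
  {
    int err = pthread_mutex_lock (&mtx);
    if (err == EOWNERDEAD)
      {
        /* The previous owner died while holding the lock; repair the
           protected state, then mark the mutex usable again.  If we
           unlock without this call, the mutex becomes unrecoverable
           and later lockers get ENOTRECOVERABLE.  */
        pthread_mutex_consistent (&mtx);
        err = 0;
      }
    return err;
  }
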
diff --git a/sysdeps/generic/pt-mutex.c b/sysdeps/generic/pt-mutex.c
new file mode 100644
index 0000000..c0eacca
--- /dev/null
+++ b/sysdeps/generic/pt-mutex.c
@@ -0,0 +1,536 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+   Contributed by Agustina Arzille <avarzi...@riseup.net>, 2016.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either
+   version 3 of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public
+   License along with this program; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <pt-internal.h>
+#include <hurdlock.h>
+
+static const pthread_mutexattr_t dfl_attr =
+{
+  .__prioceiling = 0,
+  .__protocol = PTHREAD_PRIO_NONE,
+  .__pshared = PTHREAD_PROCESS_PRIVATE,
+  .__mutex_type = __PTHREAD_MUTEX_TIMED
+};
+
+int pthread_mutexattr_init (pthread_mutexattr_t *attrp)
+{
+  *attrp = dfl_attr;
+  return (0);
+}
+
+int pthread_mutexattr_destroy (pthread_mutexattr_t *attrp)
+{
+  (void)attrp;
+  return (0);
+}
+
+int pthread_mutexattr_settype (pthread_mutexattr_t *attrp, int type)
+{
+  if (type < 0 || type > __PTHREAD_MUTEX_RECURSIVE)
+    return (EINVAL);
+
+  attrp->__mutex_type = type;
+  return (0);
+}
+
+int pthread_mutexattr_gettype (const pthread_mutexattr_t *attrp, int *outp)
+{
+  *outp = attrp->__mutex_type;
+  return (0);
+}
+
+int pthread_mutexattr_setpshared (pthread_mutexattr_t *attrp, int pshared)
+{
+  if (pshared != PTHREAD_PROCESS_PRIVATE &&
+      pshared != PTHREAD_PROCESS_SHARED)
+    return (EINVAL);
+
+  attrp->__pshared = pshared;
+  return (0);
+}
+
+int pthread_mutexattr_getpshared (const pthread_mutexattr_t *attrp, int *outp)
+{
+  *outp = attrp->__pshared;
+  return (0);
+}
+
+int pthread_mutexattr_setrobust (pthread_mutexattr_t *attrp, int robust)
+{
+  if (robust != PTHREAD_MUTEX_ROBUST &&
+      robust != PTHREAD_MUTEX_STALLED)
+    return (EINVAL);
+
+  attrp->__prioceiling = (attrp->__prioceiling & ~PTHREAD_MUTEX_ROBUST) | robust;
+  return (0);
+}
+
+weak_alias (pthread_mutexattr_setrobust, pthread_mutexattr_setrobust_np)
+
+int pthread_mutexattr_getrobust (const pthread_mutexattr_t *attrp, int *outp)
+{
+  *outp = (attrp->__prioceiling & PTHREAD_MUTEX_ROBUST) ?
+    PTHREAD_MUTEX_ROBUST : PTHREAD_MUTEX_STALLED;
+  return (0);
+}
+
+weak_alias (pthread_mutexattr_getrobust, pthread_mutexattr_getrobust_np)
+
+int pthread_mutexattr_setprioceiling (pthread_mutexattr_t *attrp, int cl)
+{
+  (void)attrp; (void)cl;
+  return (ENOSYS);
+}
+
+stub_warning (pthread_mutexattr_setprioceiling)
+
+int pthread_mutexattr_getprioceiling (const pthread_mutexattr_t *ap, int *clp)
+{
+  (void)ap; (void)clp;
+  return (ENOSYS);
+}
+
+stub_warning (pthread_mutexattr_getprioceiling)
+
+int pthread_mutexattr_setprotocol (pthread_mutexattr_t *attrp, int proto)
+{
+  (void)attrp;
+  return (proto == PTHREAD_PRIO_NONE ? 0 :
+    proto != PTHREAD_PRIO_INHERIT &&
+    proto != PTHREAD_PRIO_PROTECT ? EINVAL : ENOTSUP);
+}
+
+int pthread_mutexattr_getprotocol (const pthread_mutexattr_t *attrp, int *ptp)
+{
+  *ptp = attrp->__protocol;
+  return (0);
+}
+
+int _pthread_mutex_init (pthread_mutex_t *mtxp,
+  const pthread_mutexattr_t *attrp)
+{
+  if (attrp == NULL)
+    attrp = &dfl_attr;
+
+  mtxp->__flags = (attrp->__pshared == PTHREAD_PROCESS_SHARED ?
+    GSYNC_SHARED : 0) | ((attrp->__prioceiling & PTHREAD_MUTEX_ROBUST) ?
+      PTHREAD_MUTEX_ROBUST : 0);
+
+  mtxp->__type = attrp->__mutex_type +
+    (attrp->__mutex_type != __PTHREAD_MUTEX_TIMED);
+
+  mtxp->__owner_id = 0;
+  mtxp->__shpid = 0;
+  mtxp->__cnt = 0;
+  mtxp->__lock = 0;
+
+  return (0);
+}
+
+strong_alias (_pthread_mutex_init, pthread_mutex_init)
+
+/* Lock routines. */
+
+/* Special ID used to signal an unrecoverable robust mutex. */
+#define NOTRECOVERABLE_ID   (1U << 31)
+
+/* Common path for robust mutexes. Assumes the variable 'ret'
+ * is bound in the function this is called from. */
+#define ROBUST_LOCK(self, mtxp, cb, ...)   \
+  if (mtxp->__owner_id == NOTRECOVERABLE_ID)   \
+    return (ENOTRECOVERABLE);   \
+  else if (mtxp->__owner_id == self->thread &&   \
+      __getpid () == (int)(mtxp->__lock & LLL_OWNER_MASK))   \
+    {   \
+      if (mtxp->__type == PT_MTX_RECURSIVE)   \
+        {   \
+          if (__glibc_unlikely (mtxp->__cnt + 1 == 0))   \
+            return (EAGAIN);   \
+          \
+          ++mtxp->__cnt;   \
+          return (0);   \
+        }   \
+      else if (mtxp->__type == PT_MTX_ERRORCHECK)   \
+        return (EDEADLK);   \
+    }   \
+  \
+  ret = cb (&mtxp->__lock, ##__VA_ARGS__);   \
+  if (ret == 0 || ret == EOWNERDEAD)   \
+    {   \
+      if (mtxp->__owner_id == NOTRECOVERABLE_ID)   \
+        ret = ENOTRECOVERABLE;   \
+      else   \
+        {   \
+          mtxp->__owner_id = self->thread;   \
+          mtxp->__cnt = 1;   \
+          if (ret == EOWNERDEAD)   \
+            {   \
+              mtxp->__lock = mtxp->__lock | LLL_DEAD_OWNER;   \
+              atomic_write_barrier ();   \
+            }   \
+        }   \
+    }   \
+  (void)0
+
+/* Check that a thread owns the mutex. For non-robust, task-shared
+ * objects, we have to check the thread *and* process-id. */
+#define mtx_owned_p(mtx, pt, flags)   \
+  ((mtx)->__owner_id == (pt)->thread &&   \
+    (((flags) & GSYNC_SHARED) == 0 ||   \
+      (mtx)->__shpid == __getpid ()))
+
+/* Record a thread as the owner of the mutex. */
+#define mtx_set_owner(mtx, pt, flags)   \
+  (void)   \
+    ({   \
+       (mtx)->__owner_id = (pt)->thread;   \
+       if ((flags) & GSYNC_SHARED)   \
+         (mtx)->__shpid = __getpid ();   \
+     })
+
+/* Redefined mutex types. The +1 is for binary compatibility. */
+#define PT_MTX_NORMAL       __PTHREAD_MUTEX_TIMED
+#define PT_MTX_RECURSIVE    (__PTHREAD_MUTEX_RECURSIVE + 1)
+#define PT_MTX_ERRORCHECK   (__PTHREAD_MUTEX_ERRORCHECK + 1)
+
+/* Mutex type, including robustness. */
+#define MTX_TYPE(mtxp)   \
+  ((mtxp)->__type | ((mtxp)->__flags & PTHREAD_MUTEX_ROBUST))
+
+extern int __getpid (void) __attribute__ ((const));
+
+int __pthread_mutex_lock (pthread_mutex_t *mtxp)
+{
+  struct __pthread *self = _pthread_self ();
+  int flags = mtxp->__flags & GSYNC_SHARED;
+  int ret = 0;
+
+  switch (MTX_TYPE (mtxp))
+    {
+      case PT_MTX_NORMAL:
+        lll_lock (&mtxp->__lock, flags);
+        break;
+
+      case PT_MTX_RECURSIVE:
+        if (mtx_owned_p (mtxp, self, flags))
+          {
+            if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
+              return (EAGAIN);
+
+            ++mtxp->__cnt;
+            return (ret);
+          }
+
+        lll_lock (&mtxp->__lock, flags);
+        mtx_set_owner (mtxp, self, flags);
+        mtxp->__cnt = 1;
+        break;
+
+      case PT_MTX_ERRORCHECK:
+        if (mtx_owned_p (mtxp, self, flags))
+          return (EDEADLK);
+
+        lll_lock (&mtxp->__lock, flags);
+        mtx_set_owner (mtxp, self, flags);
+        break;
+
+      case PT_MTX_NORMAL     | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_RECURSIVE  | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
+        ROBUST_LOCK (self, mtxp, lll_robust_lock, flags);
+        break;
+
+      default:
+        ret = EINVAL;
+        break;
+    }
+
+  return (ret);
+}
+
+strong_alias (__pthread_mutex_lock, _pthread_mutex_lock)
+strong_alias (__pthread_mutex_lock, pthread_mutex_lock)
+
+int __pthread_mutex_trylock (pthread_mutex_t *mtxp)
+{
+  struct __pthread *self = _pthread_self ();
+  int ret;
+
+  switch (MTX_TYPE (mtxp))
+    {
+      case PT_MTX_NORMAL:
+        ret = lll_trylock (&mtxp->__lock);
+        break;
+
+      case PT_MTX_RECURSIVE:
+        if (mtx_owned_p (mtxp, self, mtxp->__flags))
+          {
+            if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
+              return (EAGAIN);
+
+            ++mtxp->__cnt;
+            ret = 0;
+          }
+        else if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+          {
+            mtx_set_owner (mtxp, self, mtxp->__flags);
+            mtxp->__cnt = 1;
+          }
+
+        break;
+
+      case PT_MTX_ERRORCHECK:
+        if ((ret = lll_trylock (&mtxp->__lock)) == 0)
+          mtx_set_owner (mtxp, self, mtxp->__flags);
+        break;
+
+      case PT_MTX_NORMAL     | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_RECURSIVE  | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
+        ROBUST_LOCK (self, mtxp, lll_robust_trylock);
+        break;
+
+      default:
+        ret = EINVAL;
+        break;
+    }
+
+  return (ret);
+}
+
+strong_alias (__pthread_mutex_trylock, _pthread_mutex_trylock)
+strong_alias (__pthread_mutex_trylock, pthread_mutex_trylock)
+
+int pthread_mutex_timedlock (pthread_mutex_t *mtxp,
+  const struct timespec *tsp)
+{
+  struct __pthread *self = _pthread_self ();
+  int ret, flags = mtxp->__flags & GSYNC_SHARED;
+
+  switch (MTX_TYPE (mtxp))
+    {
+      case PT_MTX_NORMAL:
+        ret = lll_abstimed_lock (&mtxp->__lock, tsp, flags);
+        break;
+
+      case PT_MTX_RECURSIVE:
+        if (mtx_owned_p (mtxp, self, flags))
+          {
+            if (__glibc_unlikely (mtxp->__cnt + 1 == 0))
+              return (EAGAIN);
+
+            ++mtxp->__cnt;
+            ret = 0;
+          }
+        else if ((ret = lll_abstimed_lock (&mtxp->__lock,
+            tsp, flags)) == 0)
+          {
+            mtx_set_owner (mtxp, self, flags);
+            mtxp->__cnt = 1;
+          }
+
+        break;
+
+      case PT_MTX_ERRORCHECK:
+        if (mtx_owned_p (mtxp, self, flags))
+          ret = EDEADLK;
+        else if ((ret = lll_abstimed_lock (&mtxp->__lock,
+            tsp, flags)) == 0)
+          mtx_set_owner (mtxp, self, flags);
+
+        break;
+
+      case PT_MTX_NORMAL     | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_RECURSIVE  | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
+        ROBUST_LOCK (self, mtxp, lll_robust_abstimed_lock, tsp, flags);
+        break;
+
+      default:
+        ret = EINVAL;
+        break;
+    }
+
+  return (ret);
+}
+
+int __pthread_mutex_unlock (pthread_mutex_t *mtxp)
+{
+  struct __pthread *self = _pthread_self ();
+  int ret = 0, flags = mtxp->__flags & GSYNC_SHARED;
+
+  switch (MTX_TYPE (mtxp))
+    {
+      case PT_MTX_NORMAL:
+        lll_unlock (&mtxp->__lock, flags);
+        break;
+
+      case PT_MTX_RECURSIVE:
+        if (!mtx_owned_p (mtxp, self, flags))
+          ret = EPERM;
+        else if (--mtxp->__cnt == 0)
+          {
+            mtxp->__owner_id = mtxp->__shpid = 0;
+            lll_unlock (&mtxp->__lock, flags);
+          }
+
+        break;
+
+      case PT_MTX_ERRORCHECK:
+        if (!mtx_owned_p (mtxp, self, flags))
+          ret = EPERM;
+        else
+          {
+            mtxp->__owner_id = mtxp->__shpid = 0;
+            lll_unlock (&mtxp->__lock, flags);
+          }
+
+        break;
+
+      case PT_MTX_NORMAL     | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_RECURSIVE  | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
+        if (mtxp->__owner_id == NOTRECOVERABLE_ID)
+          ;   /* Nothing to do. */
+        else if (mtxp->__owner_id != self->thread ||
+            (int)(mtxp->__lock & LLL_OWNER_MASK) != __getpid ())
+          ret = EPERM;
+        else if (--mtxp->__cnt == 0)
+          {
+            /* Release the lock. If it's in an inconsistent
+             * state, mark it as irrecoverable. */
+            mtxp->__owner_id = (mtxp->__lock & LLL_DEAD_OWNER) ?
+              NOTRECOVERABLE_ID : 0;
+            lll_robust_unlock (&mtxp->__lock, flags);
+          }
+
+        break;
+
+      default:
+        ret = EINVAL;
+        break;
+    }
+
+  return (ret);
+}
+
+strong_alias (__pthread_mutex_unlock, _pthread_mutex_unlock)
+strong_alias (__pthread_mutex_unlock, pthread_mutex_unlock)
+
+int pthread_mutex_consistent (pthread_mutex_t *mtxp)
+{
+  int ret = EINVAL;
+  unsigned int val = mtxp->__lock;
+
+  if ((mtxp->__flags & PTHREAD_MUTEX_ROBUST) != 0 &&
+      (val & LLL_DEAD_OWNER) != 0 &&
+      atomic_compare_and_exchange_bool_acq (&mtxp->__lock,
+        __getpid () | LLL_WAITERS, val) == 0)
+    {
+      /* The mutex is now ours, and it's consistent. */
+      mtxp->__owner_id = _pthread_self()->thread;
+      mtxp->__cnt = 1;
+      ret = 0;
+    }
+
+  return (ret);
+}
+
+weak_alias (pthread_mutex_consistent, pthread_mutex_consistent_np)
+
+int __pthread_mutex_transfer_np (pthread_mutex_t *mtxp, pthread_t th)
+{
+  struct __pthread *self = _pthread_self ();
+  struct __pthread *pt = __pthread_getid (th);
+
+  if (!pt)
+    return (ESRCH);
+  else if (pt == self)
+    return (0);
+
+  int ret = 0;
+  int flags = mtxp->__flags & GSYNC_SHARED;
+
+  switch (MTX_TYPE (mtxp))
+    {
+      case PT_MTX_NORMAL:
+        break;
+
+      case PT_MTX_RECURSIVE:
+      case PT_MTX_ERRORCHECK:
+        if (!mtx_owned_p (mtxp, self, flags))
+          ret = EPERM;
+        else
+          mtx_set_owner (mtxp, pt, flags);
+
+        break;
+
+      case PT_MTX_NORMAL     | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_RECURSIVE  | PTHREAD_MUTEX_ROBUST:
+      case PT_MTX_ERRORCHECK | PTHREAD_MUTEX_ROBUST:
+        /* Note that this can be used to transfer an inconsistent
+         * mutex as well. The new owner will still have the same
+         * flags as the original. */
+        if (mtxp->__owner_id != self->thread ||
+            (int)(mtxp->__lock & LLL_OWNER_MASK) != __getpid ())
+          ret = EPERM;
+        else
+          mtxp->__owner_id = pt->thread;
+
+        break;
+
+      default:
+        ret = EINVAL;
+    }
+
+  return (ret);
+}
+
+strong_alias (__pthread_mutex_transfer_np, pthread_mutex_transfer_np)
+
+int pthread_mutex_setprioceiling (pthread_mutex_t *mtxp, int cl, int *prp)
+{
+  (void)mtxp; (void)cl; (void)prp;
+  return (ENOSYS);
+}
+
+stub_warning (pthread_mutex_setprioceiling)
+
+int pthread_mutex_getprioceiling (const pthread_mutex_t *mtxp, int *clp)
+{
+  (void)mtxp; (void)clp;
+  return (ENOSYS);
+}
+
+stub_warning (pthread_mutex_getprioceiling)
+
+int _pthread_mutex_destroy (pthread_mutex_t *mtxp)
+{
+  atomic_read_barrier ();
+  if (*(volatile unsigned int *)&mtxp->__lock != 0)
+    return (EBUSY);
+
+  mtxp->__type = -1;
+  return (0);
+}
+
+strong_alias (_pthread_mutex_destroy, pthread_mutex_destroy)
+
diff --git a/sysdeps/pthread/bits/mutex.h b/sysdeps/pthread/bits/mutex.h
index 3120237..a52a2ad 100644
--- a/sysdeps/pthread/bits/mutex.h
+++ b/sysdeps/pthread/bits/mutex.h
@@ -1,6 +1,6 @@
 /* Mutex type.  Generic version.
 
-   Copyright (C) 2000, 2002, 2005, 2006, 2007, 2008, 2009
+   Copyright (C) 2000-2016
      Free Software Foundation, Inc.
 
    This file is part of the GNU C Library.
@@ -31,43 +31,32 @@
 #  undef __need_pthread_mutex
 #  define __pthread_mutex_defined
 
-#  include <bits/spin-lock.h>
 #  include <bits/mutex-attr.h>
 
 /* User visible part of a mutex.  */
 struct __pthread_mutex
-  {
-    __pthread_spinlock_t __held;
-    __pthread_spinlock_t __lock;
-    /* In cthreads, mutex_init does not initialized thre third
-       pointer, as such, we cannot rely on its value for anything.  */
-    char *__cthreadscompat1;
-    struct __pthread *__queue;
-    struct __pthread_mutexattr *__attr;
-    void *__data;
-    /*  Up to this point, we are completely compatible with cthreads
-	and what libc expects.  */
-    void *__owner;
-    unsigned __locks;
-    /* If NULL then the default attributes apply.  */
-  };
-
-/* Initializer for a mutex.  N.B.  this also happens to be compatible
-   with the cthread mutex initializer.  */
-#  define __PTHREAD_MUTEX_INITIALIZER \
-    { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0, 0, 0, 0, 0 }
-
-#  define __PTHREAD_ERRORCHECK_MUTEXATTR ((struct __pthread_mutexattr *) ((unsigned long) __PTHREAD_MUTEX_ERRORCHECK + 1))
-
-#  define __PTHREAD_ERRORCHECK_MUTEX_INITIALIZER \
-    { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0,	\
-	__PTHREAD_ERRORCHECK_MUTEXATTR, 0, 0, 0 }
-
-#  define __PTHREAD_RECURSIVE_MUTEXATTR ((struct __pthread_mutexattr *) ((unsigned long) __PTHREAD_MUTEX_RECURSIVE + 1))
-
-#  define __PTHREAD_RECURSIVE_MUTEX_INITIALIZER \
-    { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0,	\
-	__PTHREAD_RECURSIVE_MUTEXATTR, 0, 0, 0 }
+{
+  unsigned int __lock;
+  unsigned int __owner_id;
+  unsigned int __cnt;
+  int __shpid;
+  int __type;
+  int __flags;
+  unsigned int __reserved1;
+  unsigned int __reserved2;
+};
+
+/* Static mutex initializers. */
+#define __PTHREAD_MUTEX_INITIALIZER   \
+  { 0, 0, 0, 0, __PTHREAD_MUTEX_TIMED, 0, 0, 0 }
+
+/* The +1 is to maintain binary compatibility with the old
+ * libpthread implementation. */
+#define __PTHREAD_ERRORCHECK_MUTEX_INITIALIZER   \
+  { 0, 0, 0, 0, __PTHREAD_MUTEX_ERRORCHECK + 1, 0, 0, 0 }
+
+#define __PTHREAD_RECURSIVE_MUTEX_INITIALIZER   \
+  { 0, 0, 0, 0, __PTHREAD_MUTEX_RECURSIVE + 1, 0, 0, 0 }
 
 # endif
 #endif /* Not __pthread_mutex_defined.  */
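
A note on the '+1' stored-type encoding above: it is what keeps
zero-filled mutexes (old static initializers, or fresh shared memory)
behaving as plain timed mutexes. A sketch of the mapping, assuming the
usual enum order __PTHREAD_MUTEX_TIMED = 0, __PTHREAD_MUTEX_ERRORCHECK
= 1, __PTHREAD_MUTEX_RECURSIVE = 2 (illustration only):

  /* Mirrors _pthread_mutex_init and the PT_MTX_* macros in
     pt-mutex.c: 0 -> 0, 1 -> 2, 2 -> 3.  */
  static int
  stored_type (int user_type)
  {
    /* TIMED stays 0, so an all-zero mutex decodes as a plain timed
       mutex; the other types shift up by one so that no valid type
       collides with the zero-filled case.  */
    return user_type + (user_type != 0);
  }
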
* Makefile: Removed the individual rwlock objects and added pt-rwlock. Also
  added a compilation flag specific to pt-rwlock.
* sysdeps/generic/pt-rwlock.c: New file. Contains the implementation of POSIX
  read-write locks and their attributes.
* sysdeps/pthread/bits/rwlock.h: Redefined the rwlock type.
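
As with the mutexes, the calling conventions stay plain POSIX. A
minimal sketch of a timed write-lock under the new implementation
(illustration only; error handling elided):

  #include <pthread.h>
  #include <time.h>
  #include <errno.h>

  static pthread_rwlock_t rwl = PTHREAD_RWLOCK_INITIALIZER;

  /* Try to acquire the write side, giving up after two seconds.  */
  static int
  wrlock_with_timeout (void)
  {
    struct timespec ts;
    clock_gettime (CLOCK_REALTIME, &ts);
    ts.tv_sec += 2;

    int err = pthread_rwlock_timedwrlock (&rwl, &ts);
    if (err == ETIMEDOUT)
      ;   /* Still held by readers or another writer.  */
    return err;
  }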

---
diff --git a/Makefile b/Makefile
index bfdae7b..dd6bf94 100644
--- a/Makefile
+++ b/Makefile
@@ -94,15 +94,7 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate	    \
 	pt-mutex-transfer-np						    \
 	pt-mutex-getprioceiling pt-mutex-setprioceiling			    \
 									    \
-	pt-rwlock-attr							    \
-	pt-rwlockattr-init pt-rwlockattr-destroy			    \
-	pt-rwlockattr-getpshared pt-rwlockattr-setpshared		    \
-									    \
-	pt-rwlock-init pt-rwlock-destroy				    \
-	pt-rwlock-rdlock pt-rwlock-tryrdlock				    \
-	pt-rwlock-trywrlock pt-rwlock-wrlock				    \
-	pt-rwlock-timedrdlock pt-rwlock-timedwrlock			    \
-	pt-rwlock-unlock						    \
+	pt-rwlock   \
 									    \
 	pt-cond								    \
 	pt-condattr-init pt-condattr-destroy				    \
@@ -152,6 +144,9 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate	    \
 	cthreads-compat							    \
 	$(SYSDEPS)
 
+# Additional flags.
+CFLAGS-pt-rwlock.c = -msse2
+
 ifeq ($(IN_GLIBC),no)
 SRCS := $(addsuffix .c,$(libpthread-routines))
 OBJS = $(addsuffix .o,$(basename $(notdir $(SRCS))))
diff --git a/sysdeps/generic/pt-rwlock.c b/sysdeps/generic/pt-rwlock.c
new file mode 100644
index 0000000..4aac7d4
--- /dev/null
+++ b/sysdeps/generic/pt-rwlock.c
@@ -0,0 +1,399 @@
+/* Copyright (C) 2016 Free Software Foundation, Inc.
+   Contributed by Agustina Arzille <avarzi...@riseup.net>, 2016.
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License
+   as published by the Free Software Foundation; either
+   version 3 of the License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public
+   License along with this program; if not, see
+   <http://www.gnu.org/licenses/>.
+*/
+
+#include <pthread.h>
+#include <pt-internal.h>
+#include <bits/pt-atomic.h>
+#include <hurdlock.h>
+
+static const pthread_rwlockattr_t dfl_attr =
+{
+  .__pshared = PTHREAD_PROCESS_PRIVATE
+};
+
+int pthread_rwlockattr_setpshared (pthread_rwlockattr_t *attrp, int pshared)
+{
+  if (pshared != PTHREAD_PROCESS_PRIVATE &&
+      pshared != PTHREAD_PROCESS_SHARED)
+    return (EINVAL);
+
+  attrp->__pshared = pshared;
+  return (0);
+}
+
+int pthread_rwlockattr_getpshared (const pthread_rwlockattr_t *ap, int *outp)
+{
+  *outp = ap->__pshared;
+  return (0);
+}
+
+int pthread_rwlockattr_destroy (pthread_rwlockattr_t *attrp)
+{
+  (void)attrp;
+  return (0);
+}
+
+/* Read-write locks have the following memory layout:
+ * 0         4            8            12           16
+ * |__shpid__|__qwriters__|__owner_id__|__nreaders__|
+ *
+ * Whenever a thread wants to acquire either lock, it first has to check
+ * the OID field. It may be unowned, owned by readers, or owned by a
+ * particular writer. For reader ownership, we use a special OID that no
+ * thread can ever have.
+ *
+ * When it comes to waiting for the lock to change ownership, we need
+ * different wait queues for readers and writers. However, both of them
+ * have to monitor the OID field for changes. This is where 64-bit gsync
+ * comes into play: Readers will wait on the address of the OID, while
+ * writers will wait on the 64-bit address that starts at NWRITERS and
+ * extends to OID as well.
+ *
+ * This approach can cause some extra work on the writer side, but it's
+ * more efficient by virtue of being lockless. As long as we have 64-bit
+ * atomics, we can safely implement the POSIX read-write lock interface
+ * without using any internal locks. */
+
+#define __rwl_atp(ptr, idx)   (((unsigned int *)(ptr))[idx])
+
+/* Access the fields described above. */
+#define rwl_qwr(val)   __rwl_atp (&(val), -1)
+#define rwl_oid(val)   __rwl_atp (&(val), +0)
+#define rwl_nrd(val)   __rwl_atp (&(val), +1)
+
+/* Special IDs to represent unowned and readers-owned locks. */
+#define RWLOCK_UNOWNED   (0)
+#define RWLOCK_RO        (1U << 31)
+
+#define ID_MASK   ~RWLOCK_RO
+
+/* Access the owner's PID for task-shared rwlocks. */
+#define rwl_spid(rwl)   *(unsigned int *)&(rwl)->__shpid_qwr
+
+int _pthread_rwlock_init (pthread_rwlock_t *rwp,
+  const pthread_rwlockattr_t *attrp)
+{
+  if (!attrp)
+    attrp = &dfl_attr;
+
+  rwp->__shpid_qwr.qv = rwp->__oid_nrd.qv = 0;
+  rwp->__flags = attrp->__pshared == PTHREAD_PROCESS_SHARED ?
+    GSYNC_SHARED : 0;
+
+  return (0);
+}
+
+strong_alias (_pthread_rwlock_init, pthread_rwlock_init)
+
+/* We need a function, because we're using a macro that
+ * expands into a list of arguments. */
+static inline int
+catomic_casx_bool (unsigned long long *ptr, unsigned int elo,
+  unsigned int ehi, unsigned int nlo, unsigned int nhi)
+{
+  return (atomic_casx_bool (ptr, elo, ehi, nlo, nhi));
+}
+
+extern int __getpid (void) __attribute__ ((const));
+
+/* Test that a read-write lock is owned by a particular thread. */
+#define rwl_owned_p(rwp, tid, flags)   \
+  (rwl_oid ((rwp)->__oid_nrd) == (tid) &&   \
+    (((flags) & GSYNC_SHARED) == 0 ||   \
+     rwl_spid (rwp) == (unsigned int)__getpid ()))
+
+#define rwl_setown(rwp, flags)   \
+  do   \
+    {   \
+      if ((flags) & GSYNC_SHARED)   \
+        rwl_spid(rwp) = __getpid ();   \
+    }   \
+  while (0)
+
+int __pthread_rwlock_rdlock (pthread_rwlock_t *rwp)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+
+  /* Test that we don't own the write-lock already. */
+  if (rwl_owned_p (rwp, _pthread_self()->thread, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+      if ((rwl_oid (tmp) & ID_MASK) == 0)
+        {
+          /* The lock is either unowned, or the readers hold it. */
+          if (catomic_casx_bool (&rwp->__oid_nrd.qv,
+              hurd_xint_pair (tmp.lo, tmp.hi),
+              hurd_xint_pair (RWLOCK_RO, rwl_nrd (tmp) + 1)))
+            {
+              /* If we grabbed an unowned lock and there were readers
+               * queued, notify our fellows so they stop blocking. */
+              if (rwl_oid (tmp) != RWLOCK_RO && rwl_nrd (tmp) > 0)
+                lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+
+              return (0);
+            }
+        }
+      else
+        {
+          /* A writer holds the lock. Sleep. */
+          atomic_increment (&rwl_nrd(rwp->__oid_nrd));
+          lll_wait (&rwl_oid(rwp->__oid_nrd), rwl_oid (tmp), flags);
+          atomic_decrement (&rwl_nrd(rwp->__oid_nrd));
+        }
+    }
+}
+
+strong_alias (__pthread_rwlock_rdlock, pthread_rwlock_rdlock)
+
+int pthread_rwlock_tryrdlock (pthread_rwlock_t *rwp)
+{
+  if (rwl_owned_p (rwp, _pthread_self()->thread, rwp->__flags))
+    return (EDEADLK);
+
+  union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+  if ((rwl_oid (tmp) & ID_MASK) == 0 &&
+      catomic_casx_bool (&rwp->__oid_nrd.qv,
+        hurd_xint_pair (tmp.lo, tmp.hi),
+        hurd_xint_pair (RWLOCK_RO, rwl_nrd (tmp) + 1)))
+    {
+      if (rwl_oid (tmp) != RWLOCK_RO && rwl_nrd (tmp) > 0)
+        lll_wake (&rwl_oid(rwp->__oid_nrd), GSYNC_BROADCAST |
+          (rwp->__flags & GSYNC_SHARED));
+
+      return (0);
+    }
+
+  return (EBUSY);
+}
+
+int pthread_rwlock_timedrdlock (pthread_rwlock_t *rwp,
+  const struct timespec *abstime)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+
+  if (rwl_owned_p (rwp, _pthread_self()->thread, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+      if ((rwl_oid (tmp) & ID_MASK) == 0)
+        {
+          if (catomic_casx_bool (&rwp->__oid_nrd.qv,
+              hurd_xint_pair (tmp.lo, tmp.hi),
+              hurd_xint_pair (RWLOCK_RO, rwl_nrd (tmp) + 1)))
+            {
+              if (rwl_oid (tmp) != RWLOCK_RO && rwl_nrd (tmp) > 0)
+                lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+
+              return (0);
+            }
+        }
+      else
+        {
+          /* Validate the timeout only on the blocking path: POSIX
+           * says it need not be examined if the lock can be taken
+           * without blocking. The loop re-checks it on every retry. */
+
+          if (__glibc_unlikely (abstime->tv_nsec < 0 ||
+              abstime->tv_nsec >= 1000000000))
+            return (EINVAL);
+
+          atomic_increment (&rwl_nrd(rwp->__oid_nrd));
+          int ret = lll_abstimed_wait (&rwl_oid(rwp->__oid_nrd),
+            rwl_oid (tmp), abstime, flags);
+          atomic_decrement (&rwl_nrd(rwp->__oid_nrd));
+
+          if (ret == KERN_TIMEDOUT)
+            return (ETIMEDOUT);
+        }
+    }
+}
+
+int __pthread_rwlock_wrlock (pthread_rwlock_t *rwp)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+  unsigned int self_id = _pthread_self()->thread;
+
+  if (rwl_owned_p (rwp, self_id, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      unsigned int *ptr = &rwl_oid (rwp->__oid_nrd);
+      atomic_read_barrier ();
+      unsigned int owner = *ptr;
+
+      if (owner == RWLOCK_UNOWNED)
+        {
+          if (atomic_compare_and_exchange_bool_acq (ptr, self_id, owner) == 0)
+            {
+              rwl_setown (rwp, flags);
+              return (0);
+            }
+        }
+      else
+        {
+          /* Wait on the address. We are only interested in the value of
+           * the OID field, but we need a different queue for writers.
+           * As such, we use 64-bit values, with the high word being
+           * the owner id. */
+          unsigned int nw = atomic_exchange_and_add (--ptr, 1);
+          lll_xwait (ptr, nw + 1, owner, flags);
+          atomic_decrement (ptr);
+        }
+    }
+}
+
+strong_alias (__pthread_rwlock_wrlock, pthread_rwlock_wrlock)
+
+int pthread_rwlock_trywrlock (pthread_rwlock_t *rwp)
+{
+  unsigned int self_id = _pthread_self()->thread;
+  unsigned int *ptr = &rwl_oid (rwp->__oid_nrd);
+  atomic_read_barrier ();
+  unsigned int owner = *ptr;
+
+  if (rwl_owned_p (rwp, self_id, rwp->__flags))
+    return (EDEADLK);
+  else if (owner == RWLOCK_UNOWNED &&
+      atomic_compare_and_exchange_bool_acq (ptr, self_id, owner) == 0)
+    {
+      rwl_setown (rwp, rwp->__flags);
+      return (0);
+    }
+
+  return (EBUSY);
+}
+
+int pthread_rwlock_timedwrlock (pthread_rwlock_t *rwp,
+  const struct timespec *abstime)
+{
+  unsigned int self_id = _pthread_self()->thread;
+  int flags = rwp->__flags & GSYNC_SHARED;
+
+  if (rwl_owned_p (rwp, self_id, flags))
+    return (EDEADLK);
+
+  while (1)
+    {
+      unsigned int *ptr = &rwl_oid (rwp->__oid_nrd);
+      atomic_read_barrier ();
+      unsigned int owner = *ptr;
+
+      if (owner == RWLOCK_UNOWNED)
+        {
+          if (atomic_compare_and_exchange_bool_acq (ptr, self_id, owner) == 0)
+            {
+              rwl_setown (rwp, flags);
+              return (0);
+            }
+        }
+      else
+        {
+          if (__glibc_unlikely (abstime->tv_nsec < 0 ||
+              abstime->tv_nsec >= 1000000000))
+            return (EINVAL);
+
+          unsigned int nw = atomic_exchange_and_add (--ptr, 1);
+          int ret = lll_abstimed_xwait (ptr, nw + 1, owner, abstime, flags);
+          nw = atomic_exchange_and_add (ptr, -1);
+
+          if (ret == KERN_TIMEDOUT)
+            {
+              /* If we timed out while no other writers are pending,
+               * the lock is unowned, *and* readers are blocked, then
+               * a wakeup may have been meant for us but arrived too
+               * late. In that unlikely case, wake every reader in
+               * order to avoid a potential deadlock. */
+
+              union hurd_xint tmp = { atomic_loadx (&rwp->__oid_nrd.qv) };
+              if (__glibc_unlikely (nw == 1 && rwl_nrd (tmp) > 0 &&
+                  rwl_oid (tmp) == RWLOCK_UNOWNED))
+                lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+
+              /* We still return with an error. */
+              return (ETIMEDOUT);
+            }
+        }
+    }
+}
+
+int __pthread_rwlock_unlock (pthread_rwlock_t *rwp)
+{
+  int flags = rwp->__flags & GSYNC_SHARED;
+  atomic_read_barrier ();
+  unsigned int owner = rwl_oid (rwp->__oid_nrd);
+
+  if ((owner & ID_MASK) != 0)
+    {
+      /* A writer holds the lock. */
+      if (!rwl_owned_p (rwp, _pthread_self()->thread, flags))
+        /* ... But it isn't us. */
+        return (EPERM);
+
+      rwl_spid(rwp) = 0;
+      rwl_oid(rwp->__oid_nrd) = RWLOCK_UNOWNED;
+      atomic_write_barrier ();
+
+      /* The exclusive lock has been released. Now decide whether
+       * to wake a queued writer (preferred), or all the queued readers. */
+      if (rwl_qwr (rwp->__oid_nrd) > 0)
+        lll_wake (&rwl_qwr(rwp->__oid_nrd), flags);
+      else if (rwl_nrd (rwp->__oid_nrd) > 0)
+        lll_wake (&rwl_oid(rwp->__oid_nrd), flags | GSYNC_BROADCAST);
+    }
+  else if (rwl_nrd (rwp->__oid_nrd) == 0)
+    return (EPERM);
+  else
+    {
+      union hurd_xint tmp;
+      while (1)
+        {
+          tmp.qv = atomic_loadx (&rwp->__oid_nrd.qv);
+          if (catomic_casx_bool (&rwp->__oid_nrd.qv,
+              hurd_xint_pair (tmp.lo, tmp.hi),
+              hurd_xint_pair (rwl_nrd (tmp) == 1 ?
+                RWLOCK_UNOWNED : RWLOCK_RO, rwl_nrd (tmp) - 1)))
+            break;
+        }
+
+      /* As a reader, we only need to do a wakeup if both:
+       * - We were the last one.
+       * - There's at least one writer queued. */
+      if (rwl_nrd (tmp) == 1 && rwl_qwr (rwp->__oid_nrd) > 0)
+        lll_wake (&rwl_qwr(rwp->__oid_nrd), flags);
+    }
+
+  return (0);
+}
+
+strong_alias (__pthread_rwlock_unlock, pthread_rwlock_unlock)
+
+int _pthread_rwlock_destroy (pthread_rwlock_t *rwp)
+{
+  /* XXX: Maybe we could do some sanity checks. */
+  (void)rwp;
+  return (0);
+}
+
+strong_alias (_pthread_rwlock_destroy, pthread_rwlock_destroy)
+
diff --git a/sysdeps/pthread/bits/rwlock.h b/sysdeps/pthread/bits/rwlock.h
index bc27726..8226da7 100644
--- a/sysdeps/pthread/bits/rwlock.h
+++ b/sysdeps/pthread/bits/rwlock.h
@@ -1,5 +1,5 @@
 /* rwlock type.  Generic version.
-   Copyright (C) 2002, 2005, 2006, 2007, 2009 Free Software Foundation, Inc.
+   Copyright (C) 2002-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -20,27 +20,16 @@
 #ifndef _BITS_RWLOCK_H
 #define _BITS_RWLOCK_H
 
-#include <bits/spin-lock.h>
+#include <bits/xint.h>
 
-/* User visible part of a rwlock.  If __held is not held and readers
-   is 0, then the lock is unlocked.  If __held is held and readers is
-   0, then the lock is held by a writer.  If __held is held and
-   readers is greater than 0, then the lock is held by READERS
-   readers.  */
 struct __pthread_rwlock
-  {
-    __pthread_spinlock_t __held;
-    __pthread_spinlock_t __lock;
-    int __readers;
-    struct __pthread *__readerqueue;
-    struct __pthread *__writerqueue;
-    struct __pthread_rwlockattr *__attr;
-    void *__data;
-  };
-
-/* Initializer for a rwlock.  */
-#define __PTHREAD_RWLOCK_INITIALIZER \
-    { __PTHREAD_SPIN_LOCK_INITIALIZER, __PTHREAD_SPIN_LOCK_INITIALIZER, 0, 0, 0, 0, 0 }
-
+{
+  union hurd_xint __shpid_qwr;
+  union hurd_xint __oid_nrd;
+  int __flags;
+};
+
+/* Static initializer for read-write locks. */
+#define __PTHREAD_RWLOCK_INITIALIZER    { { 0 }, { 0 }, 0 }
 
 #endif /* bits/rwlock.h */
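
As a reading aid for the layout comment in pt-rwlock.c, here is a
self-contained model of the lock words (hypothetical struct for
illustration; the patch itself overlays them with the two 'union
hurd_xint' fields shown above):

  #include <stdint.h>

  /* 0         4            8            12           16
     |__shpid__|__qwriters__|__owner_id__|__nreaders__|

     Readers sleep on the 32-bit owner word; writers sleep on the
     64-bit span covering {qwriters, owner}; readers acquire with a
     64-bit CAS on the span covering {owner, nreaders}.  */
  struct rwlock_model
  {
    uint32_t shpid;      /* Owning writer's PID (task-shared only).  */
    uint32_t qwriters;   /* Writers currently queued.  */
    uint32_t owner;      /* 0 = unowned, 1U << 31 = readers, else a
                            thread ID.  */
    uint32_t nreaders;   /* Active plus queued readers.  */
  };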
