Hello, everyone. Here are the patches needed to get the ball rolling and finally introduce gsync-based synchronization objects into our libc. These first two patches modify the internal locks used by glibc and some bits in libpthread that will be necessary for more complex objects.
There are quite a few changes in glibc, so any input you may have will be very welcome and appreciated. Here's a list of changes:

[ glibc ]

* Low-level locks are introduced, replacing spin locks and cthreads mutexes. The interface is closely modelled after Linux's, but with our extensions. (A brief usage sketch follows this summary.)

* Glibc's internal locks are thus made to use low-level locks. There were also changes in libc's cleanup macros, which now use gcc's attribute of the same name (this will come in handy once libpthread uses forced stack unwinding for thread cancellation), and the libc per-thread key interface now uses pthread instead of cthreads. This is not just a cosmetic change, but rather a correctness issue: cthreads' key creation function doesn't allow us to specify a destructor, which led to memory leaks. Affected interfaces include dlerror and strsignal.

* The mach/spin-solid.c and mach/mutex-solid.c files are no longer necessary and have been removed. It's worth noting that glibc can now do efficient and cheap synchronization without having to pull in libpthread.

* The files hurd/hurdlock.* have been added, which provide a richer interface for locks, including timed acquisition and robust locks. This should come in handy once (if) we get around to implementing librt and things like System V semaphores and message queues.

* Additional errno codes were added: EOWNERDEAD and ENOTRECOVERABLE. They will be used by the new pthread mutex implementation.

* All but one instance of busy waiting with __swtch_pri have been removed. The last one will eventually be replaced as well, once libpthread is able to install callbacks for the dynamic linker.

[ libpthread ]

* The file pt-atomic.h has been rewritten to instead implement atomic operations on 64-bit values. These will be used extensively for many pthread objects. The previous atomic operations have been replaced by calls to glibc's atomic API.

* The TCB type is also brought in from glibc instead of being defined manually.

* As per Samuel's request, pthread spin locks now use gsync instead of spinning.

* An extended integer type has been added, which will be used in conjunction with the aforementioned 64-bit atomics.

=======================

That's all I have in my notes. I'm attaching the patches to this message. As always, I'm open to anything you guys want to say :)
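For reviewers who prefer to see the intent before the diffs: here is a minimal sketch of how libc-internal code is meant to use the new low-level locks from the mach/lowlevellock.h added below. The lll_lock/lll_unlock macros and LLL_INITIALIZER are taken from the patch; the function and variable names (example_lock, example_counter, example_bump) are made up purely for illustration and are not part of the patches.

/* Illustrative only: a libc-internal critical section built on the
   new low-level locks.  The lock word protocol is 0 = unlocked,
   1 = locked, 2 = locked with possible waiters; only the contended
   paths issue the gsync_wait/gsync_wake RPCs.  */

#include <lowlevellock.h>

static unsigned int example_lock = LLL_INITIALIZER;
static int example_counter;

int
example_bump (void)
{
  /* Fast path is a single compare-and-swap; no RPC is made
     unless the lock is contended.  */
  lll_lock (&example_lock, 0);
  int value = ++example_counter;

  /* Wakes a waiter (via gsync_wake) only if the lock word was
     marked contended, i.e. set to 2.  */
  lll_unlock (&example_lock, 0);
  return value;
}

The __libc_lock_* macros in the reworked bits/libc-lock.h expand to essentially this pattern, which is why plain libc no longer needs libpthread (or the old *-solid fallbacks) for its internal synchronization.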
diff --git a/hurd/Makefile b/hurd/Makefile index 40bfdd9..b7e2ed4 100644 --- a/hurd/Makefile +++ b/hurd/Makefile @@ -60,6 +60,7 @@ routines = hurdstartup hurdinit \ ports-get ports-set hurdports hurdmsg \ errno-loc \ sysvshm \ + hurdlock \ $(sig) $(dtable) $(inlines) port-cleanup report-wait xattr sig = hurdsig hurdfault siginfo hurd-raise preempt-sig \ trampoline longjmp-ts catch-exc exc2signal hurdkill sigunwind \ diff --git a/hurd/hurdlock.c b/hurd/hurdlock.c new file mode 100644 index 0000000..e19fa7f --- /dev/null +++ b/hurd/hurdlock.c @@ -0,0 +1,247 @@ +/* Copyright (C) 1999-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#include "hurdlock.h" +#include <hurd.h> +#include <time.h> +#include <errno.h> + +int lll_xwait (void *ptr, int lo, int hi, int flags) +{ + return (__gsync_wait (__mach_task_self (), + (vm_offset_t)ptr, lo, hi, 0, flags | GSYNC_QUAD)); +} + +int lll_timed_wait (void *ptr, int val, int mlsec, int flags) +{ + return (__gsync_wait (__mach_task_self (), + (vm_offset_t)ptr, val, 0, mlsec, flags | GSYNC_TIMED)); +} + +int lll_timed_xwait (void *ptr, int lo, + int hi, int mlsec, int flags) +{ + return (__gsync_wait (__mach_task_self (), (vm_offset_t)ptr, + lo, hi, mlsec, flags | GSYNC_TIMED | GSYNC_QUAD)); +} + +/* Convert an absolute timeout in nanoseconds to a relative + * timeout in milliseconds. */ +static inline int __attribute__ ((gnu_inline)) +compute_reltime (const struct timespec *abstime, clockid_t clk) +{ + struct timespec ts; + __clock_gettime (clk, &ts); + + ts.tv_sec = abstime->tv_sec - ts.tv_sec; + ts.tv_nsec = abstime->tv_nsec - ts.tv_nsec; + + if (ts.tv_nsec < 0) + { + --ts.tv_sec; + ts.tv_nsec += 1000000000; + } + + return (ts.tv_sec < 0 ? -1 : + (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000)); +} + +int __lll_abstimed_wait (void *ptr, int val, + const struct timespec *tsp, int flags, int clk) +{ + int mlsec = compute_reltime (tsp, clk); + return (mlsec < 0 ? KERN_TIMEDOUT : + lll_timed_wait (ptr, val, mlsec, flags)); +} + +int __lll_abstimed_xwait (void *ptr, int lo, int hi, + const struct timespec *tsp, int flags, int clk) +{ + int mlsec = compute_reltime (tsp, clk); + return (mlsec < 0 ? 
KERN_TIMEDOUT : + lll_timed_xwait (ptr, lo, hi, mlsec, flags)); +} + +int __lll_abstimed_lock (void *ptr, + const struct timespec *tsp, int flags, int clk) +{ + if (lll_trylock (ptr) == 0) + return (0); + + while (1) + { + if (atomic_exchange_acq ((int *)ptr, 2) == 0) + return (0); + else if (tsp->tv_nsec < 0 || tsp->tv_nsec >= 1000000000) + return (EINVAL); + + int mlsec = compute_reltime (tsp, clk); + if (mlsec < 0 || lll_timed_wait (ptr, + 2, mlsec, flags) == KERN_TIMEDOUT) + return (ETIMEDOUT); + } +} + +void lll_set_wake (void *ptr, int val, int flags) +{ + __gsync_wake (__mach_task_self (), + (vm_offset_t)ptr, val, flags | GSYNC_MUTATE); +} + +void lll_requeue (void *src, void *dst, int wake_one, int flags) +{ + __gsync_requeue (__mach_task_self (), (vm_offset_t)src, + (vm_offset_t)dst, (boolean_t)wake_one, flags); +} + +/* Robust locks. */ + +extern int __getpid (void) __attribute__ ((const)); +extern task_t __pid2task (int); + +/* Test if a given process id is still valid. */ +static inline int valid_pid (int pid) +{ + task_t task = __pid2task (pid); + if (task == MACH_PORT_NULL) + return (0); + + __mach_port_deallocate (__mach_task_self (), task); + return (1); +} + +/* Robust locks have currently no support from the kernel; they + * are simply implemented with periodic polling. When sleeping, the + * maximum blocking time is determined by this constant. */ +#define MAX_WAIT_TIME 1500 + +int lll_robust_lock (void *ptr, int flags) +{ + int *iptr = (int *)ptr; + int id = __getpid (); + int wait_time = 25; + unsigned int val; + + /* Try to set the lock word to our PID if it's clear. Otherwise, + * mark it as having waiters. */ + while (1) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (atomic_compare_and_exchange_bool_acq (iptr, + val | LLL_WAITERS, val) == 0) + break; + } + + for (id |= LLL_WAITERS ; ; ) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (val && !valid_pid (val & LLL_OWNER_MASK)) + { + if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0) + return (EOWNERDEAD); + } + else + { + lll_timed_wait (iptr, val, wait_time, flags); + if (wait_time < MAX_WAIT_TIME) + wait_time <<= 1; + } + } +} + +int __lll_robust_abstimed_lock (void *ptr, + const struct timespec *tsp, int flags, int clk) +{ + int *iptr = (int *)ptr; + int id = __getpid (); + int wait_time = 25; + unsigned int val; + + while (1) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (atomic_compare_and_exchange_bool_acq (iptr, + val | LLL_WAITERS, val) == 0) + break; + } + + for (id |= LLL_WAITERS ; ; ) + { + val = *iptr; + if (!val && atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + else if (val && !valid_pid (val & LLL_OWNER_MASK)) + { + if (atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0) + return (EOWNERDEAD); + } + else + { + int mlsec = compute_reltime (tsp, clk); + if (mlsec < 0) + return (ETIMEDOUT); + else if (mlsec > wait_time) + mlsec = wait_time; + + int res = lll_timed_wait (iptr, val, mlsec, flags); + if (res == KERN_TIMEDOUT) + return (ETIMEDOUT); + else if (wait_time < MAX_WAIT_TIME) + wait_time <<= 1; + } + } +} + +int lll_robust_trylock (void *ptr) +{ + int *iptr = (int *)ptr; + int id = __getpid (); + unsigned int val = *iptr; + + if (!val) + { + if (atomic_compare_and_exchange_bool_acq (iptr, id, 0) == 0) + return (0); + } + else if (!valid_pid (val & 
LLL_OWNER_MASK) && + atomic_compare_and_exchange_bool_acq (iptr, id, val) == 0) + return (EOWNERDEAD); + + return (EBUSY); +} + +void lll_robust_unlock (void *ptr, int flags) +{ + while (1) + { + unsigned int val = *(unsigned int *)ptr; + if (val & LLL_WAITERS) + { + lll_set_wake (ptr, 0, flags); + break; + } + else if (atomic_compare_and_exchange_bool_rel ((int *)ptr, 0, val) == 0) + break; + } +} + diff --git a/hurd/hurdlock.h b/hurd/hurdlock.h new file mode 100644 index 0000000..1333b55 --- /dev/null +++ b/hurd/hurdlock.h @@ -0,0 +1,117 @@ +/* Copyright (C) 1999-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef _HURD_LOCK_H +#define _HURD_LOCK_H 1 + +#include <mach/lowlevellock.h> + +struct timespec; + +/* Flags for robust locks. */ +#define LLL_WAITERS (1U << 31) +#define LLL_DEAD_OWNER (1U << 30) + +#define LLL_OWNER_MASK ~(LLL_WAITERS | LLL_DEAD_OWNER) + +/* Wait on 64-bit address PTR, without blocking if its contents + * are different from the pair <LO, HI>. */ +extern int lll_xwait (void *__ptr, int __lo, + int __hi, int __flags); + +/* Same as 'lll_wait', but only block for MLSEC milliseconds. */ +extern int lll_timed_wait (void *__ptr, int __val, + int __mlsec, int __flags); + +/* Same as 'lll_xwait', but only block for MLSEC milliseconds. */ +extern int lll_timed_xwait (void *__ptr, int __lo, + int __hi, int __mlsec, int __flags); + +/* Same as 'lll_wait', but only block until TSP elapses, + * using clock CLK. */ +extern int __lll_abstimed_wait (void *__ptr, int __val, + const struct timespec *__tsp, int __flags, int __clk); + +/* Same as 'lll_xwait', but only block until TSP elapses, + * using clock CLK. */ +extern int __lll_abstimed_xwait (void *__ptr, int __lo, int __hi, + const struct timespec *__tsp, int __flags, int __clk); + +/* Same as 'lll_lock', but return with an error if TSP elapses, + * using clock CLK. */ +extern int __lll_abstimed_lock (void *__ptr, + const struct timespec *__tsp, int __flags, int __clk); + +/* Acquire the lock at PTR, but return with an error if + * the process containing the owner thread dies. */ +extern int lll_robust_lock (void *__ptr, int __flags); + +/* Same as 'lll_robust_lock', but only block until TSP + * elapses, using clock CLK. */ +extern int __lll_robust_abstimed_lock (void *__ptr, + const struct timespec *__tsp, int __flags, int __clk); + +/* Same as 'lll_robust_lock', but return with an error + * if the lock cannot be acquired without blocking. */ +extern int lll_robust_trylock (void *__ptr); + +/* Wake one or more threads waiting on address PTR, + * setting its value to VAL before doing so. */ +extern void lll_set_wake (void *__ptr, int __val, int __flags); + +/* Release the robust lock at PTR. 
*/ +extern void lll_robust_unlock (void *__ptr, int __flags); + +/* Rearrange threads waiting on address SRC to instead wait on + * DST, waking one of them if WAIT_ONE is non-zero. */ +extern void lll_requeue (void *__src, void *__dst, + int __wake_one, int __flags); + +/* The following are hacks that allow us to simulate optional + * parameters in C, to avoid having to pass the clock id for + * every one of these calls, defaulting to CLOCK_REALTIME if + * no argument is passed. */ + +#define lll_abstimed_wait(ptr, val, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_abstimed_wait ((ptr), (val), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#define lll_abstimed_xwait(ptr, lo, hi, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_abstimed_xwait ((ptr), (lo), (hi), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#define lll_abstimed_lock(ptr, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_abstimed_lock ((ptr), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#define lll_robust_abstimed_lock(ptr, tsp, flags, ...) \ + ({ \ + const clockid_t __clk[] = { CLOCK_REALTIME, ##__VA_ARGS__ }; \ + __lll_robust_abstimed_lock ((ptr), (tsp), (flags), \ + __clk[sizeof (__clk) / sizeof (__clk[0]) - 1]); \ + }) + +#endif diff --git a/hurd/hurdpid.c b/hurd/hurdpid.c index 3fac897..859a774 100644 --- a/hurd/hurdpid.c +++ b/hurd/hurdpid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1991-2014 Free Software Foundation, Inc. +/* Copyright (C) 1991-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -16,6 +16,8 @@ <http://www.gnu.org/licenses/>. */ #include <hurd.h> +#include <lowlevellock.h> + pid_t _hurd_pid, _hurd_ppid, _hurd_pgrp; int _hurd_orphaned; @@ -66,6 +68,7 @@ _S_msg_proc_newids (mach_port_t me, /* Notify any waiting user threads that the id change as been completed. */ ++_hurd_pids_changed_stamp; + lll_wake (&_hurd_pids_changed_stamp, GSYNC_BROADCAST); return 0; } diff --git a/hurd/hurdsig.c b/hurd/hurdsig.c index 5b63a06..1b7aaed 100644 --- a/hurd/hurdsig.c +++ b/hurd/hurdsig.c @@ -1571,14 +1571,14 @@ reauth_proc (mach_port_t new) __mach_port_destroy (__mach_task_self (), ref); /* Set the owner of the process here too. */ - mutex_lock (&_hurd_id.lock); + __mutex_lock (&_hurd_id.lock); if (!_hurd_check_ids ()) HURD_PORT_USE (&_hurd_ports[INIT_PORT_PROC], __proc_setowner (port, (_hurd_id.gen.nuids ? _hurd_id.gen.uids[0] : 0), !_hurd_id.gen.nuids)); - mutex_unlock (&_hurd_id.lock); + __mutex_unlock (&_hurd_id.lock); (void) &reauth_proc; /* Silence compiler warning. */ } diff --git a/hurd/setauth.c b/hurd/setauth.c index 5af7ff1..be76e93 100644 --- a/hurd/setauth.c +++ b/hurd/setauth.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1991-2014 Free Software Foundation, Inc. +/* Copyright (C) 1991-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -18,14 +18,13 @@ #include <hurd.h> #include <hurd/port.h> #include <hurd/id.h> +#include <hurdlock.h> #include "set-hooks.h" /* Things in the library which want to be run when the auth port changes. 
*/ DEFINE_HOOK (_hurd_reauth_hook, (auth_t new_auth)); -#include <cthreads.h> -static struct mutex reauth_lock = MUTEX_INITIALIZER; - +static unsigned int reauth_lock = LLL_INITIALIZER; /* Set the auth port to NEW, and reauthenticate everything used by the library. */ diff --git a/hurd/sysvshm.c b/hurd/sysvshm.c index 5d538a6..f58fa38 100644 --- a/hurd/sysvshm.c +++ b/hurd/sysvshm.c @@ -1,5 +1,5 @@ /* SysV shared memory for Hurd. - Copyright (C) 2005-2015 Free Software Foundation, Inc. + Copyright (C) 2005-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -26,6 +26,7 @@ #include <dirent.h> #include <sys/stat.h> #include <sys/shm.h> +#include <hurdlock.h> /* Description of an shm attachment. */ @@ -45,7 +46,7 @@ struct sysvshm_attach static struct sysvshm_attach *sysvshm_list; /* A lock to protect the linked list of shared memory attachments. */ -static struct mutex sysvshm_lock = MUTEX_INITIALIZER; +static unsigned int sysvshm_lock = LLL_INITIALIZER; /* Adds a segment attachment. */ diff --git a/mach/Makefile b/mach/Makefile index 5131e26..0bf3df0 100644 --- a/mach/Makefile +++ b/mach/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) 1991-2014 Free Software Foundation, Inc. +# Copyright (C) 1991-2016 Free Software Foundation, Inc. # This file is part of the GNU C Library. # The GNU C Library is free software; you can redistribute it and/or @@ -26,8 +26,8 @@ include ../Makeconfig headers = mach_init.h mach.h mach_error.h mach-shortcuts.h mach/mach_traps.h \ $(interface-headers) mach/mach.h mach/mig_support.h mach/error.h \ $(lock-headers) machine-sp.h -lock = spin-solid spin-lock mutex-init mutex-solid -lock-headers = lock-intern.h machine-lock.h spin-lock.h +lock = spin-lock mutex-init +lock-headers = lock-intern.h spin-lock.h lowlevellock.h routines = $(mach-syscalls) $(mach-shortcuts) \ mach_init mig_strncpy msg \ mig-alloc mig-dealloc mig-reply \ diff --git a/mach/lock-intern.h b/mach/lock-intern.h index 6d315bb..4cf43cb 100644 --- a/mach/lock-intern.h +++ b/mach/lock-intern.h @@ -1,4 +1,4 @@ -/* Copyright (C) 1994-2014 Free Software Foundation, Inc. +/* Copyright (C) 1994-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -19,12 +19,17 @@ #define _LOCK_INTERN_H #include <sys/cdefs.h> -#include <machine-lock.h> +#include <lowlevellock.h> #ifndef _EXTERN_INLINE #define _EXTERN_INLINE __extern_inline #endif +/* The type of a spin lock variable. */ +typedef unsigned int __spin_lock_t; + +/* Static initializer for spinlocks. */ +#define __SPIN_LOCK_INITIALIZER 0 /* Initialize LOCK. */ @@ -34,14 +39,11 @@ void __spin_lock_init (__spin_lock_t *__lock); _EXTERN_INLINE void __spin_lock_init (__spin_lock_t *__lock) { - *__lock = __SPIN_LOCK_INITIALIZER; + *__lock = LLL_INITIALIZER; } #endif -/* Lock LOCK, blocking if we can't get it. */ -extern void __spin_lock_solid (__spin_lock_t *__lock); - /* Lock the spin lock LOCK. */ void __spin_lock (__spin_lock_t *__lock); @@ -50,31 +52,47 @@ void __spin_lock (__spin_lock_t *__lock); _EXTERN_INLINE void __spin_lock (__spin_lock_t *__lock) { - if (! __spin_try_lock (__lock)) - __spin_lock_solid (__lock); + lll_lock (__lock, 0); } #endif - -/* Name space-clean internal interface to mutex locks. - Code internal to the C library uses these functions to lock and unlock - mutex locks. These locks are of type `struct mutex', defined in - <cthreads.h>. 
The functions here are name space-clean. If the program - is linked with the cthreads library, `__mutex_lock_solid' and - `__mutex_unlock_solid' will invoke the corresponding cthreads functions - to implement real mutex locks. If not, simple stub versions just use - spin locks. */ +/* Unlock LOCK. */ +void __spin_unlock (__spin_lock_t *__lock); +#ifdef __USE_EXTERN_INLINES +_EXTERN_INLINE void +__spin_unlock (__spin_lock_t *__lock) +{ + lll_unlock (__lock, 0); +} +#endif -/* Initialize the newly allocated mutex lock LOCK for further use. */ -extern void __mutex_init (void *__lock); +/* Try to lock LOCK; return nonzero if we locked it, zero if another has. */ +int __spin_try_lock (__spin_lock_t *__lock); -/* Lock LOCK, blocking if we can't get it. */ -extern void __mutex_lock_solid (void *__lock); +#ifdef __USE_EXTERN_INLINES +_EXTERN_INLINE int +__spin_try_lock (__spin_lock_t *__lock) +{ + return (lll_trylock (__lock) == 0); +} +#endif + +/* Return nonzero if LOCK is locked. */ +int __spin_lock_locked (__spin_lock_t *__lock); + +#ifdef __USE_EXTERN_INLINES +_EXTERN_INLINE int +__spin_lock_locked (__spin_lock_t *__lock) +{ + return (*(volatile __spin_lock_t *)__lock != 0); +} +#endif + +/* Name space-clean internal interface to mutex locks. */ -/* Finish unlocking LOCK, after the spin lock LOCK->held has already been - unlocked. This function will wake up any thread waiting on LOCK. */ -extern void __mutex_unlock_solid (void *__lock); +/* Initialize the newly allocated mutex lock LOCK for further use. */ +extern void __mutex_init (void *__lock); /* Lock the mutex lock LOCK. */ @@ -84,8 +102,7 @@ void __mutex_lock (void *__lock); _EXTERN_INLINE void __mutex_lock (void *__lock) { - if (! __spin_try_lock ((__spin_lock_t *) __lock)) - __mutex_lock_solid (__lock); + __spin_lock ((__spin_lock_t *)__lock); } #endif @@ -97,8 +114,7 @@ void __mutex_unlock (void *__lock); _EXTERN_INLINE void __mutex_unlock (void *__lock) { - __spin_unlock ((__spin_lock_t *) __lock); - __mutex_unlock_solid (__lock); + __spin_unlock ((__spin_lock_t *)__lock); } #endif @@ -109,7 +125,7 @@ int __mutex_trylock (void *__lock); _EXTERN_INLINE int __mutex_trylock (void *__lock) { - return __spin_try_lock ((__spin_lock_t *) __lock); + return (__spin_try_lock ((__spin_lock_t *)__lock)); } #endif diff --git a/mach/lowlevellock.h b/mach/lowlevellock.h new file mode 100644 index 0000000..e60fe91 --- /dev/null +++ b/mach/lowlevellock.h @@ -0,0 +1,80 @@ +/* Copyright (C) 1994-2016 Free Software Foundation, Inc. + This file is part of the GNU C Library. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; if not, see + <http://www.gnu.org/licenses/>. */ + +#ifndef __MACH_LOWLEVELLOCK_H__ +#define __MACH_LOWLEVELLOCK_H__ 1 + +#include <mach/gnumach.h> +#include <atomic.h> + +/* Gsync flags. 
*/ +#ifndef GSYNC_SHARED + #define GSYNC_SHARED 0x01 + #define GSYNC_QUAD 0x02 + #define GSYNC_TIMED 0x04 + #define GSYNC_BROADCAST 0x08 + #define GSYNC_MUTATE 0x10 +#endif + +/* Static initializer for low-level locks. */ +#define LLL_INITIALIZER 0 + +/* Wait on address PTR, without blocking if its contents + * are different from VAL. */ +#define lll_wait(ptr, val, flags) \ + __gsync_wait (__mach_task_self (), \ + (vm_offset_t)(ptr), (val), 0, 0, (flags)) + +/* Wake one or more threads waiting on address PTR. */ +#define lll_wake(ptr, flags) \ + __gsync_wake (__mach_task_self (), (vm_offset_t)(ptr), 0, (flags)) + +/* Acquire the lock at PTR. */ +#define lll_lock(ptr, flags) \ + ({ \ + int *__iptr = (int *)(ptr); \ + int __flags = (flags); \ + if (*__iptr != 0 || \ + atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) != 0) \ + while (1) \ + { \ + if (atomic_exchange_acq (__iptr, 2) == 0) \ + break; \ + lll_wait (__iptr, 2, __flags); \ + } \ + (void)0; \ + }) + +/* Try to acquire the lock at PTR, without blocking. + * Evaluates to zero on success. */ +#define lll_trylock(ptr) \ + ({ \ + int *__iptr = (int *)(ptr); \ + *__iptr == 0 && \ + atomic_compare_and_exchange_bool_acq (__iptr, 1, 0) == 0 ? 0 : -1; \ + }) + +/* Release the lock at PTR. */ +#define lll_unlock(ptr, flags) \ + ({ \ + int *__iptr = (int *)(ptr); \ + if (atomic_exchange_rel (__iptr, 0) == 2) \ + lll_wake (__iptr, (flags)); \ + (void)0; \ + }) + +#endif diff --git a/mach/mutex-init.c b/mach/mutex-init.c index fc3a5e5..a2ede46 100644 --- a/mach/mutex-init.c +++ b/mach/mutex-init.c @@ -1,5 +1,5 @@ -/* Initialize a cthreads mutex structure. - Copyright (C) 1995-2014 Free Software Foundation, Inc. +/* Initialize a mutex. + Copyright (C) 1995-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -17,13 +17,10 @@ <http://www.gnu.org/licenses/>. */ #include <lock-intern.h> -#include <cthreads.h> +#include <lowlevellock.h> void __mutex_init (void *lock) { - /* This happens to be name space-safe because it is a macro. - It invokes only spin_lock_init, which is a macro for __spin_lock_init; - and cthread_queue_init, which is a macro for some simple code. */ - mutex_init ((struct mutex *) lock); + *(int *)lock = LLL_INITIALIZER; } diff --git a/mach/mutex-solid.c b/mach/mutex-solid.c deleted file mode 100644 index 70e8333..0000000 --- a/mach/mutex-solid.c +++ /dev/null @@ -1,36 +0,0 @@ -/* Stub versions of mutex_lock_solid/mutex_unlock_solid for no -lthreads. - Copyright (C) 1995-2014 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - <http://www.gnu.org/licenses/>. */ - -#include <lock-intern.h> -#include <cthreads.h> - -/* If cthreads is linked in, it will define these functions itself to do - real cthreads mutex locks. 
This file will only be linked in when - cthreads is not used, and `mutexes' are in fact just spin locks (and - some unused storage). */ - -void -__mutex_lock_solid (void *lock) -{ - __spin_lock_solid (lock); -} - -void -__mutex_unlock_solid (void *lock) -{ -} diff --git a/mach/spin-solid.c b/mach/spin-solid.c deleted file mode 100644 index e1e154b..0000000 --- a/mach/spin-solid.c +++ /dev/null @@ -1,28 +0,0 @@ -/* Copyright (C) 1994-2014 Free Software Foundation, Inc. - This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - <http://www.gnu.org/licenses/>. */ - -#include <spin-lock.h> -#include <mach/mach_traps.h> - -void -__spin_lock_solid (spin_lock_t *lock) -{ - while (__spin_lock_locked (lock) || ! __spin_try_lock (lock)) - /* Yield to another thread (system call). */ - __swtch_pri (0); -} -weak_alias (__spin_lock_solid, spin_lock_solid); diff --git a/manual/errno.texi b/manual/errno.texi index 6a691fc..70c574a 100644 --- a/manual/errno.texi +++ b/manual/errno.texi @@ -993,6 +993,20 @@ the normal result is for the operations affected to complete with this error; @pxref{Cancel AIO Operations}. @end deftypevr +@comment errno.h +@comment POSIX.1: Robust mutex owner died +@deftypevr Macro int EOWNERDEAD +@comment errno 120 +The owner of a POSIX robust mutex has died. +@end deftypevr + +@comment errno.h +@comment POSIX.1: Robust mutex irrecoverable +@deftypevr Macro int ENOTRECOVERABLE +@comment errno 121 +An inconsistent POSIX robust mutex has been unlocked before marking it +as consistent again. +@end deftypevr @emph{The following error codes are defined by the Linux/i386 kernel. They are not yet documented.} diff --git a/sysdeps/mach/Makefile b/sysdeps/mach/Makefile index 634ba80..68afa6d 100644 --- a/sysdeps/mach/Makefile +++ b/sysdeps/mach/Makefile @@ -1,4 +1,4 @@ -# Copyright (C) 1993-2014 Free Software Foundation, Inc. +# Copyright (C) 1993-2016 Free Software Foundation, Inc. # This file is part of the GNU C Library. # The GNU C Library is free software; you can redistribute it and/or @@ -48,4 +48,18 @@ $(patsubst mach%,m\%h%,$(mach-before-compile)): # Run only if doesn't exist. before-compile += $(mach-before-compile) endif +ifeq (crypt,$(subdir)) + LDLIBS-crypt.so += -lmachuser +else ifeq (dlfcn,$(subdir)) + LDLIBS-dl.so += -lmachuser +else ifeq (nis,$(subdir)) + LDLIBS-nsl.so += -lmachuser + LDLIBS-nss_nis.so += -lmachuser + LDLIBS-nss_nisplus.so += -lmachuser + LDLIBS-nss_compat.so += -lmachuser +else ifeq (nss,$(subdir)) + LDLIBS-nss.so += -lmachuser + LDLIBS-nss_files.so += -lmachuser +endif + endif # in-Makerules diff --git a/sysdeps/mach/bits/libc-lock.h b/sysdeps/mach/bits/libc-lock.h index 40b7f2b..f6f3e05 100644 --- a/sysdeps/mach/bits/libc-lock.h +++ b/sysdeps/mach/bits/libc-lock.h @@ -1,5 +1,5 @@ -/* libc-internal interface for mutex locks. Mach cthreads version. - Copyright (C) 1996-2014 Free Software Foundation, Inc. 
+/* libc-internal interface for mutex locks. Hurd version using gnumach gsync. + Copyright (C) 1996-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -20,15 +20,33 @@ #define _BITS_LIBC_LOCK_H 1 #ifdef _LIBC + +#include <tls.h> #include <cthreads.h> -#define __libc_lock_t struct mutex +#include <lowlevellock.h> + +/* The locking here is very inexpensive, even for inlining. */ +#define _IO_lock_inexpensive 1 + +typedef unsigned int __libc_lock_t; +typedef struct +{ + __libc_lock_t lock; + int cnt; + void *owner; +} __libc_lock_recursive_t; + +typedef __libc_lock_recursive_t __rtld_lock_recursive_t; + +extern char __libc_lock_self0[0]; +#define __libc_lock_owner_self() \ + (__LIBC_NO_TLS() ? (void *)&__libc_lock_self0 : THREAD_SELF) + #else typedef struct __libc_lock_opaque__ __libc_lock_t; +typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t; #endif -/* Type for key of thread specific data. */ -typedef cthread_key_t __libc_key_t; - /* Define a lock variable NAME with storage class CLASS. The lock must be initialized with __libc_lock_init before it can be used (or define it with __libc_lock_define_initialized, below). Use `extern' for CLASS to @@ -41,26 +59,92 @@ typedef cthread_key_t __libc_key_t; /* Define an initialized lock variable NAME with storage class CLASS. */ #define __libc_lock_define_initialized(CLASS,NAME) \ - CLASS __libc_lock_t NAME = MUTEX_INITIALIZER; + CLASS __libc_lock_t NAME = LLL_INITIALIZER; /* Initialize the named lock variable, leaving it in a consistent, unlocked state. */ -#define __libc_lock_init(NAME) __mutex_init (&(NAME)) +#define __libc_lock_init(NAME) (NAME) = LLL_INITIALIZER /* Finalize the named lock variable, which must be locked. It cannot be used again until __libc_lock_init is called again on it. This must be called on a lock variable before the containing storage is reused. */ -#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME)) +#define __libc_lock_fini __libc_lock_unlock +#define __libc_lock_fini_recursive __libc_lock_unlock_recursive +#define __rtld_lock_fini_recursive __rtld_lock_unlock_recursive /* Lock the named lock variable. */ -#define __libc_lock_lock(NAME) __mutex_lock (&(NAME)) +#define __libc_lock_lock(NAME) \ + ({ lll_lock (&(NAME), 0); 0; }) /* Lock the named lock variable. */ -#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME))) +#define __libc_lock_trylock(NAME) lll_trylock (&(NAME)) /* Unlock the named lock variable. 
*/ -#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME)) - +#define __libc_lock_unlock(NAME) \ + ({ lll_unlock (&(NAME), 0); 0; }) + +#define __libc_lock_define_recursive(CLASS,NAME) \ + CLASS __libc_lock_recursive_t NAME; + +#define _LIBC_LOCK_RECURSIVE_INITIALIZER { LLL_INITIALIZER, 0, 0 } + +#define __libc_lock_define_initialized_recursive(CLASS,NAME) \ + CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER; + +#define __rtld_lock_define_recursive(CLASS,NAME) \ + __libc_lock_define_recursive (CLASS, NAME) +#define _RTLD_LOCK_RECURSIVE_INITIALIZER \ + _LIBC_LOCK_RECURSIVE_INITIALIZER +#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \ + __libc_lock_define_initialized_recursive (CLASS, NAME) + +#define __libc_lock_init_recursive(NAME) \ + ((NAME) = (__libc_lock_recursive_t)_LIBC_LOCK_RECURSIVE_INITIALIZER, 0) + +#define __libc_lock_trylock_recursive(NAME) \ + ({ \ + __libc_lock_recursive_t *const __lock = &(NAME); \ + void *__self = __libc_lock_owner_self (); \ + int __r = 0; \ + if (__self == __lock->owner) \ + ++__lock->cnt; \ + else if ((__r = lll_trylock (&__lock->lock)) == 0) \ + __lock->owner = __self, __lock->cnt = 1; \ + __r; \ + }) + +#define __libc_lock_lock_recursive(NAME) \ + ({ \ + __libc_lock_recursive_t *const __lock = &(NAME); \ + void *__self = __libc_lock_owner_self (); \ + if (__self != __lock->owner) \ + { \ + lll_lock (&__lock->lock, 0); \ + __lock->owner = __self; \ + } \ + ++__lock->cnt; \ + (void)0; \ + }) + +#define __libc_lock_unlock_recursive(NAME) \ + ({ \ + __libc_lock_recursive_t *const __lock = &(NAME); \ + if (--__lock->cnt == 0) \ + { \ + __lock->owner = 0; \ + lll_unlock (&__lock->lock, 0); \ + } \ + }) + + +#define __rtld_lock_initialize(NAME) \ + (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER) +#define __rtld_lock_trylock_recursive(NAME) \ + __libc_lock_trylock_recursive (NAME) +#define __rtld_lock_lock_recursive(NAME) \ + __libc_lock_lock_recursive(NAME) +#define __rtld_lock_unlock_recursive(NAME) \ + __libc_lock_unlock_recursive (NAME) /* XXX for now */ #define __libc_rwlock_define __libc_lock_define @@ -73,25 +157,38 @@ typedef cthread_key_t __libc_key_t; #define __libc_rwlock_trywrlock __libc_lock_trylock #define __libc_rwlock_unlock __libc_lock_unlock +struct __libc_cleanup_frame +{ + void (*__fct) (void *); + void *__argp; + int __doit; +}; + +__extern_inline void +__libc_cleanup_fct (struct __libc_cleanup_frame *framep) +{ + if (framep->__doit) + framep->__fct (framep->__argp); +} /* Start a critical region with a cleanup function */ -#define __libc_cleanup_region_start(DOIT, FCT, ARG) \ -{ \ - typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0; \ - typeof (ARG) __save_ARG = ARG; \ - /* close brace is in __libc_cleanup_region_end below. */ - -/* End a critical region started with __libc_cleanup_region_start. */ -#define __libc_cleanup_region_end(DOIT) \ - if ((DOIT) && __save_FCT != 0) \ - (*__save_FCT)(__save_ARG); \ -} +#define __libc_cleanup_region_start(DOIT, FCT, ARG) \ + do \ + { \ + struct __libc_cleanup_frame __cleanup \ + __attribute__ ((__cleanup__ (__libc_cleanup_fct))) = \ + { .__fct = (FCT), .__argp = (ARG), .__doit = (DOIT) }; + +/* This one closes the brace above. */ +#define __libc_cleanup_region_end(DOIT) \ + __cleanup.__doit = (DOIT); \ + } \ + while (0) -/* Sometimes we have to exit the block in the middle. 
*/ -#define __libc_cleanup_end(DOIT) \ - if ((DOIT) && __save_FCT != 0) \ - (*__save_FCT)(__save_ARG); \ +#define __libc_cleanup_end(DOIT) __cleanup.__doit = (DOIT); +#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg) +#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute) /* Use mutexes as once control variables. */ @@ -102,8 +199,7 @@ struct __libc_once }; #define __libc_once_define(CLASS,NAME) \ - CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 } - + CLASS struct __libc_once NAME = { LLL_INITIALIZER, 0 } /* Call handler iff the first call. */ #define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \ @@ -121,25 +217,11 @@ struct __libc_once #ifdef _LIBC /* We need portable names for some functions. E.g., when they are used as argument to __libc_cleanup_region_start. */ -#define __libc_mutex_unlock __mutex_unlock -#endif +#define __libc_mutex_unlock __libc_lock_unlock -#define __libc_key_create(KEY,DEST) cthread_keycreate (KEY) -#define __libc_setspecific(KEY,VAL) cthread_setspecific (KEY, VAL) -void *__libc_getspecific (__libc_key_t key); - -/* XXX until cthreads supports recursive locks */ -#define __libc_lock_define_initialized_recursive __libc_lock_define_initialized -#define __libc_lock_init_recursive __libc_lock_init -#define __libc_lock_fini_recursive __libc_lock_fini -#define __libc_lock_trylock_recursive __libc_lock_trylock -#define __libc_lock_unlock_recursive __libc_lock_unlock -#define __libc_lock_lock_recursive __libc_lock_lock - -#define __rtld_lock_define_initialized_recursive __libc_lock_define_initialized -#define __rtld_lock_fini_recursive __libc_lock_fini -#define __rtld_lock_trylock_recursive __libc_lock_trylock -#define __rtld_lock_unlock_recursive __libc_lock_unlock -#define __rtld_lock_lock_recursive __libc_lock_lock +/* Hide the definitions which are only supposed to be used inside libc in + a separate file. This file is not present in the installation! */ +# include <bits/libc-lockP.h> +#endif #endif /* bits/libc-lock.h */ diff --git a/sysdeps/mach/hurd/bits/errno.h b/sysdeps/mach/hurd/bits/errno.h index d20ffe6..c5db66e 100644 --- a/sysdeps/mach/hurd/bits/errno.h +++ b/sysdeps/mach/hurd/bits/errno.h @@ -222,6 +222,10 @@ enum __error_t_codes #define ETIME _HURD_ERRNO (117)/* Timer expired */ ECANCELED = _HURD_ERRNO (119), #define ECANCELED _HURD_ERRNO (119)/* Operation canceled */ + EOWNERDEAD = _HURD_ERRNO (120), +#define EOWNERDEAD _HURD_ERRNO (120)/* Robust mutex owner died */ + ENOTRECOVERABLE = _HURD_ERRNO (121), +#define ENOTRECOVERABLE _HURD_ERRNO (121)/* Robust mutex irrecoverable */ /* Errors from <mach/message.h>. */ EMACH_SEND_IN_PROGRESS = 0x10000001, @@ -278,6 +282,8 @@ enum __error_t_codes EKERN_MEMORY_PRESENT = 23, EKERN_WRITE_PROTECTION_FAILURE = 24, EKERN_TERMINATED = 26, + EKERN_TIMEDOUT = 27, + EKERN_INTERRUPTED = 28, /* Errors from <mach/mig_errors.h>. */ EMIG_TYPE_ERROR = -300 /* client type check failure */, @@ -305,7 +311,7 @@ enum __error_t_codes }; -#define _HURD_ERRNOS 120 +#define _HURD_ERRNOS 122 /* User-visible type of error codes. It is ok to use `int' or `kern_return_t' for these, but with `error_t' the debugger prints diff --git a/sysdeps/mach/hurd/bits/libc-lock.h b/sysdeps/mach/hurd/bits/libc-lock.h deleted file mode 100644 index c9872c6..0000000 --- a/sysdeps/mach/hurd/bits/libc-lock.h +++ /dev/null @@ -1,215 +0,0 @@ -/* libc-internal interface for mutex locks. Hurd version using Mach cthreads. - Copyright (C) 1996-2014 Free Software Foundation, Inc. 
- This file is part of the GNU C Library. - - The GNU C Library is free software; you can redistribute it and/or - modify it under the terms of the GNU Lesser General Public - License as published by the Free Software Foundation; either - version 2.1 of the License, or (at your option) any later version. - - The GNU C Library is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - Lesser General Public License for more details. - - You should have received a copy of the GNU Lesser General Public - License along with the GNU C Library; if not, see - <http://www.gnu.org/licenses/>. */ - -#ifndef _BITS_LIBC_LOCK_H -#define _BITS_LIBC_LOCK_H 1 - -#if (_LIBC - 0) || (_CTHREADS_ - 0) -# if (_LIBC - 0) -# include <tls.h> -# endif -#include <cthreads.h> - -typedef struct mutex __libc_lock_t; -typedef struct -{ - struct mutex mutex; - void *owner; - int count; -} __libc_lock_recursive_t; -typedef __libc_lock_recursive_t __rtld_lock_recursive_t; - -extern char __libc_lock_self0[0]; -#define __libc_lock_owner_self() (__LIBC_NO_TLS() ? &__libc_lock_self0 : THREAD_SELF) - -#else -typedef struct __libc_lock_opaque__ __libc_lock_t; -typedef struct __libc_lock_recursive_opaque__ __libc_lock_recursive_t; -#endif - -/* Define a lock variable NAME with storage class CLASS. The lock must be - initialized with __libc_lock_init before it can be used (or define it - with __libc_lock_define_initialized, below). Use `extern' for CLASS to - declare a lock defined in another module. In public structure - definitions you must use a pointer to the lock structure (i.e., NAME - begins with a `*'), because its storage size will not be known outside - of libc. */ -#define __libc_lock_define(CLASS,NAME) \ - CLASS __libc_lock_t NAME; - -/* Define an initialized lock variable NAME with storage class CLASS. */ -#define __libc_lock_define_initialized(CLASS,NAME) \ - CLASS __libc_lock_t NAME = MUTEX_INITIALIZER; - -/* Initialize the named lock variable, leaving it in a consistent, unlocked - state. */ -#define __libc_lock_init(NAME) __mutex_init (&(NAME)) - -/* Finalize the named lock variable, which must be locked. It cannot be - used again until __libc_lock_init is called again on it. This must be - called on a lock variable before the containing storage is reused. */ -#define __libc_lock_fini(NAME) __mutex_unlock (&(NAME)) -#define __libc_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex) -#define __rtld_lock_fini_recursive(NAME) __mutex_unlock (&(NAME).mutex) - - -/* Lock the named lock variable. */ -#define __libc_lock_lock(NAME) __mutex_lock (&(NAME)) - -/* Lock the named lock variable. */ -#define __libc_lock_trylock(NAME) (!__mutex_trylock (&(NAME))) - -/* Unlock the named lock variable. 
*/ -#define __libc_lock_unlock(NAME) __mutex_unlock (&(NAME)) - - -#define __libc_lock_define_recursive(CLASS,NAME) \ - CLASS __libc_lock_recursive_t NAME; -#define _LIBC_LOCK_RECURSIVE_INITIALIZER { MUTEX_INITIALIZER, 0, 0 } -#define __libc_lock_define_initialized_recursive(CLASS,NAME) \ - CLASS __libc_lock_recursive_t NAME = _LIBC_LOCK_RECURSIVE_INITIALIZER; - -#define __rtld_lock_define_recursive(CLASS,NAME) \ - __libc_lock_define_recursive (CLASS, NAME) -#define _RTLD_LOCK_RECURSIVE_INITIALIZER \ - _LIBC_LOCK_RECURSIVE_INITIALIZER -#define __rtld_lock_define_initialized_recursive(CLASS,NAME) \ - __libc_lock_define_initialized_recursive (CLASS, NAME) - -#define __libc_lock_init_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - __lock->owner = 0; mutex_init (&__lock->mutex); }) - -#define __libc_lock_trylock_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - void *__self = __libc_lock_owner_self (); \ - __mutex_trylock (&__lock->mutex) \ - ? (__lock->owner = __self, __lock->count = 1, 0) \ - : __lock->owner == __self ? (++__lock->count, 0) : 1; }) - -#define __libc_lock_lock_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - void *__self = __libc_lock_owner_self (); \ - if (__mutex_trylock (&__lock->mutex) \ - || (__lock->owner != __self \ - && (__mutex_lock (&__lock->mutex), 1))) \ - __lock->owner = __self, __lock->count = 1; \ - else \ - ++__lock->count; \ - }) -#define __libc_lock_unlock_recursive(NAME) \ - ({ __libc_lock_recursive_t *const __lock = &(NAME); \ - if (--__lock->count == 0) \ - { \ - __lock->owner = 0; \ - __mutex_unlock (&__lock->mutex); \ - } \ - }) - - -#define __rtld_lock_initialize(NAME) \ - (void) ((NAME) = (__rtld_lock_recursive_t) _RTLD_LOCK_RECURSIVE_INITIALIZER) -#define __rtld_lock_trylock_recursive(NAME) \ - __libc_lock_trylock_recursive (NAME) -#define __rtld_lock_lock_recursive(NAME) \ - __libc_lock_lock_recursive(NAME) -#define __rtld_lock_unlock_recursive(NAME) \ - __libc_lock_unlock_recursive (NAME) - - -/* XXX for now */ -#define __libc_rwlock_define __libc_lock_define -#define __libc_rwlock_define_initialized __libc_lock_define_initialized -#define __libc_rwlock_init __libc_lock_init -#define __libc_rwlock_fini __libc_lock_fini -#define __libc_rwlock_rdlock __libc_lock_lock -#define __libc_rwlock_wrlock __libc_lock_lock -#define __libc_rwlock_tryrdlock __libc_lock_trylock -#define __libc_rwlock_trywrlock __libc_lock_trylock -#define __libc_rwlock_unlock __libc_lock_unlock - - -/* Start a critical region with a cleanup function */ -#define __libc_cleanup_region_start(DOIT, FCT, ARG) \ -{ \ - typeof (***(FCT)) *__save_FCT = (DOIT) ? (FCT) : 0; \ - typeof (ARG) __save_ARG = ARG; \ - /* close brace is in __libc_cleanup_region_end below. */ - -/* End a critical region started with __libc_cleanup_region_start. */ -#define __libc_cleanup_region_end(DOIT) \ - if ((DOIT) && __save_FCT != 0) \ - (*__save_FCT)(__save_ARG); \ -} - -/* Sometimes we have to exit the block in the middle. */ -#define __libc_cleanup_end(DOIT) \ - if ((DOIT) && __save_FCT != 0) \ - (*__save_FCT)(__save_ARG); \ - -#define __libc_cleanup_push(fct, arg) __libc_cleanup_region_start (1, fct, arg) -#define __libc_cleanup_pop(execute) __libc_cleanup_region_end (execute) - -#if (_CTHREADS_ - 0) - -/* Use mutexes as once control variables. 
*/ - -struct __libc_once - { - __libc_lock_t lock; - int done; - }; - -#define __libc_once_define(CLASS,NAME) \ - CLASS struct __libc_once NAME = { MUTEX_INITIALIZER, 0 } - -/* Call handler iff the first call. */ -#define __libc_once(ONCE_CONTROL, INIT_FUNCTION) \ - do { \ - __libc_lock_lock (ONCE_CONTROL.lock); \ - if (!ONCE_CONTROL.done) \ - (INIT_FUNCTION) (); \ - ONCE_CONTROL.done = 1; \ - __libc_lock_unlock (ONCE_CONTROL.lock); \ - } while (0) - -/* Get once control variable. */ -#define __libc_once_get(ONCE_CONTROL) ((ONCE_CONTROL).done != 0) - -#ifdef _LIBC -/* We need portable names for some functions. E.g., when they are - used as argument to __libc_cleanup_region_start. */ -#define __libc_mutex_unlock __mutex_unlock -#endif - -/* Type for key of thread specific data. */ -typedef cthread_key_t __libc_key_t; - -#define __libc_key_create(KEY,DEST) cthread_keycreate (KEY) -#define __libc_setspecific(KEY,VAL) cthread_setspecific (KEY, VAL) -void *__libc_getspecific (__libc_key_t key); - -#endif /* _CTHREADS_ */ - -/* Hide the definitions which are only supposed to be used inside libc in - a separate file. This file is not present in the installation! */ -#ifdef _LIBC -# include <bits/libc-lockP.h> -#endif - -#endif /* bits/libc-lock.h */ diff --git a/sysdeps/mach/hurd/bits/posix_opt.h b/sysdeps/mach/hurd/bits/posix_opt.h index 9ab9c9f..8ad8858 100644 --- a/sysdeps/mach/hurd/bits/posix_opt.h +++ b/sysdeps/mach/hurd/bits/posix_opt.h @@ -159,8 +159,8 @@ /* POSIX message queues could be available in future. */ #define _POSIX_MESSAGE_PASSING 0 -/* Thread process-shared synchronization is not supported. */ -#define _POSIX_THREAD_PROCESS_SHARED -1 +/* Thread process-shared synchronization is supported. */ +#define _POSIX_THREAD_PROCESS_SHARED 200809L /* The monotonic clock might be available. */ #define _POSIX_MONOTONIC_CLOCK 0 diff --git a/sysdeps/mach/hurd/cthreads.c b/sysdeps/mach/hurd/cthreads.c index e25fcd0..2b65d36 100644 --- a/sysdeps/mach/hurd/cthreads.c +++ b/sysdeps/mach/hurd/cthreads.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1997-2014 Free Software Foundation, Inc. +/* Copyright (C) 1997-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -55,13 +55,3 @@ cthread_setspecific (key, val) return -1; } -/* Call cthread_getspecific which gets a pointer to the return value instead - of just returning it. */ -void * -__libc_getspecific (key) - cthread_key_t key; -{ - void *val; - cthread_getspecific (key, &val); - return val; -} diff --git a/sysdeps/mach/hurd/malloc-machine.h b/sysdeps/mach/hurd/malloc-machine.h index cd86642..577aed6 100644 --- a/sysdeps/mach/hurd/malloc-machine.h +++ b/sysdeps/mach/hurd/malloc-machine.h @@ -1,6 +1,6 @@ /* Basic platform-independent macro definitions for mutexes, thread-specific data and parameters for malloc. - Copyright (C) 2003-2014 Free Software Foundation, Inc. + Copyright (C) 2003-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -22,24 +22,23 @@ #undef thread_atfork_static -#include <atomic.h> #include <bits/libc-lock.h> +#include <mach/lock-intern.h> -/* Assume hurd, with cthreads */ - -/* Cthreads `mutex_t' is a pointer to a mutex, and malloc wants just the - mutex itself. 
*/ #undef mutex_t -#define mutex_t struct mutex +#define mutex_t unsigned int + +#undef MUTEX_INITIALIZER +#define MUTEX_INITIALIZER LLL_INITIALIZER #undef mutex_init -#define mutex_init(m) (__mutex_init(m), 0) +#define mutex_init(m) ({ __mutex_init(m); 0; }) #undef mutex_lock -#define mutex_lock(m) (__mutex_lock(m), 0) +#define mutex_lock(m) ({ __mutex_lock(m); 0; }) #undef mutex_unlock -#define mutex_unlock(m) (__mutex_unlock(m), 0) +#define mutex_unlock(m) ({ __mutex_unlock(m); 0; }) #define mutex_trylock(m) (!__mutex_trylock(m)) diff --git a/sysdeps/mach/hurd/setpgid.c b/sysdeps/mach/hurd/setpgid.c index 6662d3d..655bc3c 100644 --- a/sysdeps/mach/hurd/setpgid.c +++ b/sysdeps/mach/hurd/setpgid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1993-2014 Free Software Foundation, Inc. +/* Copyright (C) 1993-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -19,6 +19,7 @@ #include <unistd.h> #include <hurd.h> #include <hurd/port.h> +#include <lowlevellock.h> /* Set the process group ID of the process matching PID to PGID. If PID is zero, the current process's process group ID is set. @@ -40,14 +41,7 @@ __setpgid (pid, pgid) /* Synchronize with the signal thread to make sure we have received and processed proc_newids before returning to the user. */ while (_hurd_pids_changed_stamp == stamp) - { -#ifdef noteven - /* XXX we have no need for a mutex, but cthreads demands one. */ - __condition_wait (&_hurd_pids_changed_sync, NULL); -#else - __swtch_pri(0); -#endif - } + lll_wait (&_hurd_pids_changed_stamp, stamp, 0); return 0; diff --git a/sysdeps/mach/hurd/setsid.c b/sysdeps/mach/hurd/setsid.c index 36ec3d8..cf6140c 100644 --- a/sysdeps/mach/hurd/setsid.c +++ b/sysdeps/mach/hurd/setsid.c @@ -1,4 +1,4 @@ -/* Copyright (C) 1993-2014 Free Software Foundation, Inc. +/* Copyright (C) 1993-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -20,6 +20,8 @@ #include <hurd.h> #include <hurd/port.h> #include <hurd/fd.h> +#include <hurd/ioctl.h> +#include <lowlevellock.h> /* Create a new session with the calling process as its leader. The process group IDs of the session and the calling process @@ -54,14 +56,7 @@ __setsid (void) returned by `getpgrp ()' in other threads) has been updated before we return. */ while (_hurd_pids_changed_stamp == stamp) - { -#ifdef noteven - /* XXX we have no need for a mutex, but cthreads demands one. */ - __condition_wait (&_hurd_pids_changed_sync, NULL); -#else - __swtch_pri (0); -#endif - } + lll_wait (&_hurd_pids_changed_stamp, stamp, 0); } HURD_CRITICAL_END;
diff --git a/Makefile b/Makefile index bfdae7b..736be38 100644 --- a/Makefile +++ b/Makefile @@ -132,7 +132,6 @@ libpthread-routines := pt-attr pt-attr-destroy pt-attr-getdetachstate \ pt-sysdep \ pt-setup \ pt-machdep \ - pt-spin \ \ pt-sigstate-init \ pt-sigstate-destroy \ @@ -184,7 +183,8 @@ headers := \ bits/mutex-attr.h \ bits/rwlock.h \ bits/rwlock-attr.h \ - bits/semaphore.h + bits/semaphore.h \ + bits/xint.h ifeq ($(IN_GLIBC),yes) distribute := diff --git a/forward.c b/forward.c index 771b3ca..733af27 100644 --- a/forward.c +++ b/forward.c @@ -151,8 +151,7 @@ struct atfork { struct atfork *next; }; -/* TODO: better locking */ -static struct mutex atfork_lock; +static unsigned int atfork_lock = LLL_INITIALIZER; static struct atfork *fork_handlers, *fork_last_handler; static void diff --git a/pthread/cthreads-compat.c b/pthread/cthreads-compat.c index 1a0971b..1e82a8f 100644 --- a/pthread/cthreads-compat.c +++ b/pthread/cthreads-compat.c @@ -85,17 +85,3 @@ cthread_setspecific (cthread_key_t key, void *val) return err; } -void -__mutex_lock_solid (void *lock) -{ - __pthread_mutex_lock (lock); -} - -void -__mutex_unlock_solid (void *lock) -{ - if (__pthread_spin_trylock (lock) != 0) - /* Somebody already got the lock, that one will manage waking up others */ - return; - __pthread_mutex_unlock (lock); -} diff --git a/pthread/pt-create.c b/pthread/pt-create.c index 84044dc..7a27b29 100644 --- a/pthread/pt-create.c +++ b/pthread/pt-create.c @@ -1,5 +1,5 @@ /* Thread creation. - Copyright (C) 2000, 2002, 2005, 2007 Free Software Foundation, Inc. + Copyright (C) 2000-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -23,7 +23,7 @@ #include <signal.h> #include <resolv.h> -#include <bits/pt-atomic.h> +#include <atomic.h> #include <hurd/resource.h> #include <pt-internal.h> @@ -38,7 +38,7 @@ /* The total number of pthreads currently active. This is defined here since it would be really stupid to have a threads-using program that doesn't call `pthread_create'. */ -__atomic_t __pthread_total; +int __pthread_total; /* The entry-point for new threads. */ @@ -196,7 +196,7 @@ __pthread_create_internal (struct __pthread **thread, the number of threads from within the new thread isn't an option since this thread might return and call `pthread_exit' before the new thread runs. */ - __atomic_inc (&__pthread_total); + atomic_increment (&__pthread_total); /* Store a pointer to this thread in the thread ID lookup table. We could use __thread_setid, however, we only lock for reading as no @@ -227,7 +227,7 @@ __pthread_create_internal (struct __pthread **thread, __pthread_dealloc (pthread); __pthread_setid (pthread->thread, NULL); - __atomic_dec (&__pthread_total); + atomic_decrement (&__pthread_total); failed_sigstate: __pthread_sigstate_destroy (pthread); failed_setup: diff --git a/pthread/pt-dealloc.c b/pthread/pt-dealloc.c index e324800..327d0dc 100644 --- a/pthread/pt-dealloc.c +++ b/pthread/pt-dealloc.c @@ -1,5 +1,5 @@ /* Deallocate a thread structure. - Copyright (C) 2000, 2008 Free Software Foundation, Inc. + Copyright (C) 2000-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -20,10 +20,8 @@ #include <assert.h> #include <pthread.h> #include <stdlib.h> - #include <pt-internal.h> - -#include <bits/pt-atomic.h> +#include <atomic.h> /* List of thread structures corresponding to free thread IDs. 
*/ extern struct __pthread *__pthread_free_threads; @@ -36,7 +34,7 @@ __pthread_dealloc (struct __pthread *pthread) { assert (pthread->state != PTHREAD_TERMINATED); - if (! __atomic_dec_and_test (&pthread->nr_refs)) + if (!atomic_decrement_and_test (&pthread->nr_refs)) return; /* Withdraw this thread from the thread ID lookup table. */ diff --git a/pthread/pt-exit.c b/pthread/pt-exit.c index 3427de5..e7f89f2 100644 --- a/pthread/pt-exit.c +++ b/pthread/pt-exit.c @@ -21,11 +21,8 @@ #include <errno.h> #include <pthread.h> #include <stdlib.h> - #include <pt-internal.h> - -#include <bits/pt-atomic.h> - +#include <atomic.h> /* Terminate the current thread and make STATUS available to any thread that might join it. */ @@ -50,7 +47,7 @@ __pthread_exit (void *status) /* Decrease the number of threads. We use an atomic operation to make sure that only the last thread calls `exit'. */ - if (__atomic_dec_and_test (&__pthread_total)) + if (atomic_decrement_and_test (&__pthread_total)) /* We are the last thread. */ exit (0); diff --git a/pthread/pt-internal.h b/pthread/pt-internal.h index ec8daed..3028775 100644 --- a/pthread/pt-internal.h +++ b/pthread/pt-internal.h @@ -1,5 +1,5 @@ /* Internal defenitions for pthreads library. - Copyright (C) 2000, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. + Copyright (C) 2000-2016 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or @@ -28,8 +28,6 @@ #define __need_res_state #include <resolv.h> -#include <bits/pt-atomic.h> - #include <pt-key.h> #include <pt-sysdep.h> @@ -39,6 +37,8 @@ # include <ldsodefs.h> #endif +#include <tls.h> + /* Thread state. */ enum pthread_state { @@ -60,25 +60,13 @@ enum pthread_state # define PTHREAD_SYSDEP_MEMBERS #endif -#ifndef IS_IN_libpthread -#ifdef ENABLE_TLS -/* Type of the TCB. */ -typedef struct -{ - void *tcb; /* Points to this structure. */ - void *dtv; /* Vector of pointers to TLS data. */ - thread_t self; /* This thread's control port. */ -} tcbhead_t; -#endif /* ENABLE_TLS */ -#endif /* IS_IN_libpthread */ - /* This structure describes a POSIX thread. */ struct __pthread { /* Thread ID. */ pthread_t thread; - __atomic_t nr_refs; /* Detached threads have a self reference only, + unsigned int nr_refs; /* Detached threads have a self reference only, while joinable threads have two references. These are used to keep the structure valid at thread destruction. Detaching/joining a thread @@ -173,7 +161,7 @@ __pthread_dequeue (struct __pthread *thread) ) /* The total number of threads currently active. */ -extern __atomic_t __pthread_total; +extern int __pthread_total; /* The total number of thread IDs currently in use, or on the list of available thread IDs. */ @@ -208,6 +196,17 @@ extern struct __pthread *_pthread_self (void); #endif +/* Start and end cancellation points. */ +#define __pthread_cancelpoint_begin() \ + ({ \ + int __prev; \ + __pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &__prev); \ + __prev; \ + }) + +#define __pthread_cancelpoint_end(prev) \ + __pthread_setcanceltype ((prev), 0) + /* Initialize the pthreads library. */ extern void ___pthread_init (void); diff --git a/sysdeps/i386/bits/pt-atomic.h b/sysdeps/i386/bits/pt-atomic.h index 0dfc1f6..6acd513 100644 --- a/sysdeps/i386/bits/pt-atomic.h +++ b/sysdeps/i386/bits/pt-atomic.h @@ -1,10 +1,10 @@ -/* Atomic operations. i386 version. - Copyright (C) 2000 Free Software Foundation, Inc. +/* Additional atomic operations. i386 version. 
diff --git a/sysdeps/i386/bits/pt-atomic.h b/sysdeps/i386/bits/pt-atomic.h
index 0dfc1f6..6acd513 100644
--- a/sysdeps/i386/bits/pt-atomic.h
+++ b/sysdeps/i386/bits/pt-atomic.h
@@ -1,10 +1,10 @@
-/* Atomic operations.  i386 version.
-   Copyright (C) 2000 Free Software Foundation, Inc.
+/* Additional atomic operations.  i386 version.
+   Copyright (C) 2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Library General Public License as
-   published by the Free Software Foundation; either version 2 of the
+   published by the Free Software Foundation; either version 3 of the
    License, or (at your option) any later version.
 
    The GNU C Library is distributed in the hope that it will be useful,
@@ -20,47 +20,145 @@
 #ifndef _BITS_ATOMIC_H
 #define _BITS_ATOMIC_H 1
 
-typedef __volatile int __atomic_t;
-
-static inline void
-__atomic_inc (__atomic_t *__var)
-{
-  __asm__ __volatile ("lock; incl %0" : "=m" (*__var) : "m" (*__var));
-}
-
-static inline void
-__atomic_dec (__atomic_t *__var)
-{
-  __asm__ __volatile ("lock; decl %0" : "=m" (*__var) : "m" (*__var));
-}
-
-static inline int
-__atomic_dec_and_test (__atomic_t *__var)
-{
-  unsigned char __ret;
-
-  __asm__ __volatile ("lock; decl %0; sete %1"
-                      : "=m" (*__var), "=qm" (__ret) : "m" (*__var));
-  return __ret != 0;
-}
-
-/* We assume that an __atomicptr_t is only used for pointers to
-   word-aligned objects, and use the lowest bit for a simple lock.  */
-typedef __volatile int * __atomicptr_t;
-
-/* Actually we don't implement that yet, and assume that we run on
-   something that has the i486 instruction set.  */
-static inline int
-__atomicptr_compare_and_swap (__atomicptr_t *__ptr, void *__oldval,
-                              void * __newval)
-{
-  char __ret;
-  int __dummy;
-
-  __asm__ __volatile ("lock; cmpxchgl %3, %1; sete %0"
-                      : "=q" (__ret), "=m" (*__ptr), "=a" (__dummy)
-                      : "r" (__newval), "m" (*__ptr), "a" (__oldval));
-  return __ret;
-}
+#include <atomic.h>
+
+#ifndef LOCK_PREFIX
+# define LOCK_PREFIX "lock;"
+#endif
+
+#if defined (__PIC__) && __GNUC__ < 5
+
+  /* When PIC is turned on, the %ebx register is used to save
+   * the value of the GOT pointer.  As such, we must preserve
+   * %ebx across calls.  */
+
+  /* The above doesn't apply anymore for GCC 5+.  */
+
+# ifdef __OPTIMIZE__
+
+#   define atomic_casx_bool(ptr, elo, ehi, nlo, nhi)   \
+  ({   \
+     long __s, __v = (nlo);   \
+     char __r;   \
+     __asm__ __volatile__   \
+     (   \
+       "movl %%ebx, %2\n\t"   \
+       "leal %0, %%edi\n\t"   \
+       "movl %7, %%ebx\n\t"   \
+       LOCK_PREFIX "cmpxchg8b (%%edi)\n\t"   \
+       "movl %2, %%ebx\n\t"   \
+       "setz %1"   \
+       : "=m" (*ptr), "=a" (__r), "=m" (__s)   \
+       : "m" (*ptr), "d" (ehi), "a" (elo),   \
+         "c" (nhi), "m" (__v)   \
+       : "%edi", "memory"   \
+     );   \
+     (int)__r;   \
+   })
+
+# else
+
+  /* When using -O0, we have to preserve %edi, otherwise gcc
+   * fails while looking for a register in class 'GENERAL_REGS'.  */
+
+#   define atomic_casx_bool(ptr, elo, ehi, nlo, nhi)   \
+  ({   \
+     long __d, __s, __v = (nlo);   \
+     char __r;   \
+     __asm__ __volatile__   \
+     (   \
+       "movl %%edi, %3\n\t"   \
+       "movl %%ebx, %2\n\t"   \
+       "leal %0, %%edi\n\t"   \
+       "movl %8, %%ebx\n\t"   \
+       LOCK_PREFIX "cmpxchg8b (%%edi)\n\t"   \
+       "movl %2, %%ebx\n\t"   \
+       "movl %3, %%edi\n\t"   \
+       "setz %1"   \
+       : "=m" (*ptr), "=a" (__r), "=m" (__s), "=m" (__d)   \
+       : "m" (*ptr), "d" (ehi), "a" (elo),   \
+         "c" (nhi), "m" (__v)   \
+       : "memory"   \
+     );   \
+     (int)__r;   \
+   })
+
+# endif
+
+#else
+
+  /* In non-PIC mode, or with a recent-enough gcc, we
+   * can use %ebx to load the lower word in NLO.  */
+
+#   define atomic_casx_bool(ptr, elo, ehi, nlo, nhi)   \
+  ({   \
+     char __r;   \
+     __asm__ __volatile__   \
+     (   \
+       LOCK_PREFIX "cmpxchg8b %0\n\t"   \
+       "setz %1"   \
+       : "+m" (*ptr), "=a" (__r)   \
+       : "a" (elo), "d" (ehi),   \
+         "b" (nlo), "c" (nhi)   \
+       : "memory"   \
+     );   \
+     (int)__r;   \
+   })
+
+#endif /* defined (__PIC__) && __GNUC__ < 5 */
+
+/* Loads and stores for 64-bit values.  */
+
+/* Note that loading a 64-bit integer may be implemented in
+ * a non-atomic way.  That's fine since in the vast majority
+ * of cases, we'll do a CAS right away.  */
+
+#if __SSE2__
+
+  /* When SSE is enabled, we can use one of its registers
+   * to move around unaligned 64-bit values.  */
+
+# define atomic_loadx(ptr)   \
+  ({   \
+     typeof (*ptr) __r;   \
+     __asm__ __volatile__   \
+     (   \
+       "movq %1, %0"   \
+       : "=x" (__r) : "m" (*ptr) : "memory"   \
+     );   \
+     __r;   \
+   })
+
+# define atomic_storex(dst, src)   \
+  ({   \
+     __asm__ __volatile__   \
+     (   \
+       "movq %1, %0"   \
+       : "=m" (*dst) : "x" (src) : "memory"   \
+     );   \
+     (void)0;   \
+   })
+
+#else
+
+# define atomic_loadx(ptr)   ((typeof (*ptr)) *(ptr))
+
+  /* For stores, we do want atomicity.  */
+
+# define atomic_storex(dst, src)   \
+  ({   \
+     union hurd_xint __tmp = { (src) };   \
+     typeof (dst) __p = (dst);   \
+     while (1)   \
+       {   \
+         union hurd_xint __val = { *__p };   \
+         if (atomic_casx_bool (__p, __val.lo,   \
+             __val.hi, __tmp.lo, __tmp.hi))   \
+           break;   \
+       }   \
+     (void)0;   \
+   })
+
+#endif /* __SSE2__ */
 
 #endif
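For reviewers, this is the kind of retry loop the new 64-bit primitives are meant to support.  A sketch under my assumptions, not code from this series; the helper name is invented, and union hurd_xint is the type added by the new bits/xint.h at the end of this patch:

  /* Sketch, not part of the patch: replace both halves of a 64-bit
     word at once, retrying until the CAS lands.  */
  static inline void
  xint_set_pair (union hurd_xint *xp, unsigned int lo, unsigned int hi)
  {
    while (1)
      {
        /* The plain load may be torn (see the comment in the header);
           the CAS is what guarantees we only replace the value we saw.  */
        union hurd_xint old = { atomic_loadx (&xp->qv) };
        if (atomic_casx_bool (&xp->qv, old.lo, old.hi, lo, hi))
          break;
      }
  }

The PIC contortions in the header exist only because cmpxchg8b wants the low word of the new value in %ebx, which PIC builds with older gcc reserve for the GOT pointer.
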
diff --git a/sysdeps/mach/bits/spin-lock-inline.h b/sysdeps/mach/bits/spin-lock-inline.h
index f9f7c29..b9646e9 100644
--- a/sysdeps/mach/bits/spin-lock-inline.h
+++ b/sysdeps/mach/bits/spin-lock-inline.h
@@ -1,5 +1,5 @@
 /* Definitions of user-visible names for spin locks.
-   Copyright (C) 1994, 1997, 2002, 2008, 2009 Free Software Foundation, Inc.
+   Copyright (C) 1994-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -22,7 +22,7 @@
 
 #include <features.h>
 #include <bits/spin-lock.h>
-#include <machine-lock.h>  /* This does all the work.  */
+#include <mach/lock-intern.h>  /* This does all the work.  */
 
 __BEGIN_DECLS
@@ -63,14 +63,12 @@ __pthread_spin_trylock (__pthread_spinlock_t *__lock)
   return __spin_try_lock (__lock) ? 0 : __EBUSY;
 }
 
-__extern_inline int __pthread_spin_lock (__pthread_spinlock_t *__lock);
-extern int _pthread_spin_lock (__pthread_spinlock_t *__lock);
+__PT_SPIN_INLINE int __pthread_spin_lock (__pthread_spinlock_t *__lock);
 
 __extern_inline int
 __pthread_spin_lock (__pthread_spinlock_t *__lock)
 {
-  if (__pthread_spin_trylock (__lock))
-    return _pthread_spin_lock (__lock);
+  __spin_lock (__lock);
   return 0;
 }
 
diff --git a/sysdeps/mach/i386/bits/spin-lock-inline.h b/sysdeps/mach/i386/bits/spin-lock-inline.h
index e5ed3de..b9a18e4 100644
--- a/sysdeps/mach/i386/bits/spin-lock-inline.h
+++ b/sysdeps/mach/i386/bits/spin-lock-inline.h
@@ -1,5 +1,5 @@
 /* Machine-specific definitions for spin locks.  i386 version.
-   Copyright (C) 2000, 2005, 2008, 2009 Free Software Foundation, Inc.
+   Copyright (C) 2000-2016 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -26,6 +26,7 @@
 
 #include <features.h>
 #include <bits/spin-lock.h>
+#include <mach/lowlevellock.h>
 
 __BEGIN_DECLS
 
@@ -45,7 +46,7 @@ __PT_SPIN_INLINE int __pthread_spin_destroy (__pthread_spinlock_t *__lock);
 
 __PT_SPIN_INLINE int
 __pthread_spin_destroy (__pthread_spinlock_t *__lock)
 {
-  return 0;
+  return (0);
 }
 
@@ -54,8 +55,8 @@ __PT_SPIN_INLINE int __pthread_spin_init (__pthread_spinlock_t *__lock,
 
 __PT_SPIN_INLINE int
 __pthread_spin_init (__pthread_spinlock_t *__lock, int __pshared)
 {
-  *__lock = __PTHREAD_SPIN_LOCK_INITIALIZER;
-  return 0;
+  *__lock = LLL_INITIALIZER;
+  return (0);
 }
 
@@ -63,21 +64,16 @@ __PT_SPIN_INLINE int __pthread_spin_trylock (__pthread_spinlock_t *__lock);
 
 __PT_SPIN_INLINE int
 __pthread_spin_trylock (__pthread_spinlock_t *__lock)
 {
-  int __locked;
-  __asm__ __volatile ("xchgl %0, %1"
-                      : "=&r" (__locked), "=m" (*__lock) : "0" (1) : "memory");
-  return __locked ? __EBUSY : 0;
+  return (lll_trylock (__lock));
 }
 
-__extern_inline int __pthread_spin_lock (__pthread_spinlock_t *__lock);
-extern int _pthread_spin_lock (__pthread_spinlock_t *__lock);
+__PT_SPIN_INLINE int __pthread_spin_lock (__pthread_spinlock_t *__lock);
 
-__extern_inline int
+__PT_SPIN_INLINE int
 __pthread_spin_lock (__pthread_spinlock_t *__lock)
 {
-  if (__pthread_spin_trylock (__lock))
-    return _pthread_spin_lock (__lock);
-  return 0;
+  lll_lock (__lock, 0);
+  return (0);
 }
 
@@ -85,10 +81,8 @@ __PT_SPIN_INLINE int __pthread_spin_unlock (__pthread_spinlock_t *__lock);
 
 __PT_SPIN_INLINE int
 __pthread_spin_unlock (__pthread_spinlock_t *__lock)
 {
-  int __unlocked;
-  __asm__ __volatile ("xchgl %0, %1"
-                      : "=&r" (__unlocked), "=m" (*__lock) : "0" (0) : "memory");
-  return 0;
+  lll_unlock (__lock, 0);
+  return (0);
 }
 
 #endif /* Use extern inlines or force inlines.  */
diff --git a/sysdeps/pthread/bits/libc-lockP.h b/sysdeps/pthread/bits/libc-lockP.h
index 65878f2..4d2e2b2 100644
--- a/sysdeps/pthread/bits/libc-lockP.h
+++ b/sysdeps/pthread/bits/libc-lockP.h
@@ -51,6 +51,18 @@ FUNC ARGS
 
 #endif
 
+/* Type for key to thread-specific data.  */
+typedef pthread_key_t __libc_key_t;
+
+#define __libc_key_create(KEY, DESTR)   \
+  __libc_maybe_call (__pthread_key_create, (KEY, DESTR), 1)
+
+#define __libc_getspecific(KEY)   \
+  __libc_maybe_call (__pthread_getspecific, (KEY), 0)
+
+#define __libc_setspecific(KEY, VAL)   \
+  __libc_maybe_call (__pthread_setspecific, (KEY, VAL), 0)
+
 /* Functions that are used by this file and are internal to the GNU C
    library.  */
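Since these macros are what give libc's per-thread keys a real destructor, here is roughly how a module keeping a per-thread buffer would use them.  Sketch only; every name below is invented for illustration, and it presumes <stdlib.h> for malloc/free:

  /* Sketch, not part of the patch: the destructor frees the buffer at
     thread exit instead of leaking it, which is the whole point of
     switching away from the cthreads key interface.  */
  static __libc_key_t buffer_key;

  static void
  free_buffer (void *ptr)
  {
    free (ptr);
  }

  static char *
  get_buffer (void)
  {
    char *buf = __libc_getspecific (buffer_key);
    if (buf == NULL)
      {
        buf = malloc (100);
        __libc_setspecific (buffer_key, buf);
      }
    return buf;
  }

  /* During the module's one-time initialization:
       __libc_key_create (&buffer_key, free_buffer);  */
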
diff --git a/sysdeps/pthread/bits/xint.h b/sysdeps/pthread/bits/xint.h
new file mode 100644
index 0000000..4731548
--- /dev/null
+++ b/sysdeps/pthread/bits/xint.h
@@ -0,0 +1,41 @@
+/* Extended integer type.  Generic version.
+   Copyright (C) 2016 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Library General Public License as
+   published by the Free Software Foundation; either version 3 of the
+   License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Library General Public License for more details.
+
+   You should have received a copy of the GNU Library General Public
+   License along with the GNU C Library; see the file COPYING.LIB.  If not,
+   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+   Boston, MA 02111-1307, USA.  */
+
+#ifndef _BITS_XINT_H
+#define _BITS_XINT_H 1
+
+union hurd_xint
+{
+  unsigned long long qv;
+  struct
+    {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+      unsigned int hi;
+      unsigned int lo;
+#  define hurd_xint_pair(lo, hi)   hi, lo
+#else
+#  define hurd_xint_pair(lo, hi)   lo, hi
+      unsigned int lo;
+      unsigned int hi;
+#endif
+    };
+} __attribute__ ((aligned (8)));
+
+#endif
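One last illustration of the extended integer type: the lo/hi halves and the 64-bit qv member overlay the same aligned 8 bytes, so two related 32-bit fields can be read, written, and waited on as a single unit.  Sketch under my assumptions, not code from this series (it relies on bits/xint.h plus the pt-atomic.h macros above):

  static void
  xint_example (void)
  {
    union hurd_xint v;

    v.lo = 1;    /* e.g. a lock word */
    v.hi = 42;   /* e.g. an owner id or waiter count next to it */

    /* Both halves can be snapshotted and stored back in one shot; a
       64-bit gsync wait (GSYNC_QUAD) covers the same 8 bytes.  */
    unsigned long long snapshot = atomic_loadx (&v.qv);
    atomic_storex (&v.qv, snapshot);
  }
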