author    | upstream source tree <ports@midipix.org> | 2015-03-15 20:14:05 -0400
committer | upstream source tree <ports@midipix.org> | 2015-03-15 20:14:05 -0400
commit    | 554fd8c5195424bdbcabf5de30fdc183aba391bd (patch)
tree      | 976dc5ab7fddf506dadce60ae936f43f58787092 /libstdc++-v3/include/std/mutex
download  | cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.bz2, cbb-gcc-4.6.4-554fd8c5195424bdbcabf5de30fdc183aba391bd.tar.xz
obtained gcc-4.6.4.tar.bz2 from upstream website;
verified gcc-4.6.4.tar.bz2.sig;
imported gcc-4.6.4 source tree from verified upstream tarball.
downloading a git-generated archive based on the 'upstream' tag
should provide you with a source tree that is binary identical
to the one extracted from the above tarball.
if you have obtained the source via the command 'git clone',
however, do note that line-endings of files in your working
directory might differ from line-endings of the respective
files in the upstream repository.
Diffstat (limited to 'libstdc++-v3/include/std/mutex')
-rw-r--r-- | libstdc++-v3/include/std/mutex | 830 |
1 file changed, 830 insertions, 0 deletions
diff --git a/libstdc++-v3/include/std/mutex b/libstdc++-v3/include/std/mutex new file mode 100644 index 000000000..ed530e4f0 --- /dev/null +++ b/libstdc++-v3/include/std/mutex @@ -0,0 +1,830 @@ +// <mutex> -*- C++ -*- + +// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 +// Free Software Foundation, Inc. +// +// This file is part of the GNU ISO C++ Library. This library is free +// software; you can redistribute it and/or modify it under the +// terms of the GNU General Public License as published by the +// Free Software Foundation; either version 3, or (at your option) +// any later version. + +// This library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// Under Section 7 of GPL version 3, you are granted additional +// permissions described in the GCC Runtime Library Exception, version +// 3.1, as published by the Free Software Foundation. + +// You should have received a copy of the GNU General Public License and +// a copy of the GCC Runtime Library Exception along with this program; +// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see +// <http://www.gnu.org/licenses/>. + +/** @file include/mutex + * This is a Standard C++ Library header. + */ + +#ifndef _GLIBCXX_MUTEX +#define _GLIBCXX_MUTEX 1 + +#pragma GCC system_header + +#ifndef __GXX_EXPERIMENTAL_CXX0X__ +# include <bits/c++0x_warning.h> +#else + +#include <tuple> +#include <chrono> +#include <exception> +#include <type_traits> +#include <functional> +#include <system_error> +#include <bits/functexcept.h> +#include <bits/gthr.h> +#include <bits/move.h> // for std::swap + +#if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1) + +namespace std _GLIBCXX_VISIBILITY(default) +{ +_GLIBCXX_BEGIN_NAMESPACE_VERSION + + /** + * @defgroup mutexes Mutexes + * @ingroup concurrency + * + * Classes for mutex support. 
+ * @{ + */ + + /// mutex + class mutex + { + typedef __gthread_mutex_t __native_type; + __native_type _M_mutex; + + public: + typedef __native_type* native_handle_type; + +#ifdef __GTHREAD_MUTEX_INIT + constexpr mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { } +#else + mutex() + { + // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may) + __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex); + } + + ~mutex() { __gthread_mutex_destroy(&_M_mutex); } +#endif + + mutex(const mutex&) = delete; + mutex& operator=(const mutex&) = delete; + + void + lock() + { + int __e = __gthread_mutex_lock(&_M_mutex); + + // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may) + if (__e) + __throw_system_error(__e); + } + + bool + try_lock() + { + // XXX EINVAL, EAGAIN, EBUSY + return !__gthread_mutex_trylock(&_M_mutex); + } + + void + unlock() + { + // XXX EINVAL, EAGAIN, EPERM + __gthread_mutex_unlock(&_M_mutex); + } + + native_handle_type + native_handle() + { return &_M_mutex; } + }; + +#ifndef __GTHREAD_RECURSIVE_MUTEX_INIT + // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy + // so we need to obtain a __gthread_mutex_t to destroy + class __destroy_recursive_mutex + { + template<typename _Mx, typename _Rm> + static void + _S_destroy_win32(_Mx* __mx, _Rm const* __rmx) + { + __mx->counter = __rmx->counter; + __mx->sema = __rmx->sema; + __gthread_mutex_destroy(__mx); + } + + public: + // matches a gthr-win32.h recursive mutex + template<typename _Rm> + static typename enable_if<(bool)sizeof(&_Rm::sema), void>::type + _S_destroy(_Rm* __mx) + { + __gthread_mutex_t __tmp; + _S_destroy_win32(&__tmp, __mx); + } + + // matches a recursive mutex with a member 'actual' + template<typename _Rm> + static typename enable_if<(bool)sizeof(&_Rm::actual), void>::type + _S_destroy(_Rm* __mx) + { __gthread_mutex_destroy(&__mx->actual); } + + // matches when there's only one mutex type + template<typename _Rm> + static + typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type + _S_destroy(_Rm* __mx) + { __gthread_mutex_destroy(__mx); } + }; +#endif + + /// recursive_mutex + class recursive_mutex + { + typedef __gthread_recursive_mutex_t __native_type; + __native_type _M_mutex; + + public: + typedef __native_type* native_handle_type; + +#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT + recursive_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { } +#else + recursive_mutex() + { + // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may) + __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex); + } + + ~recursive_mutex() + { __destroy_recursive_mutex::_S_destroy(&_M_mutex); } +#endif + + recursive_mutex(const recursive_mutex&) = delete; + recursive_mutex& operator=(const recursive_mutex&) = delete; + + void + lock() + { + int __e = __gthread_recursive_mutex_lock(&_M_mutex); + + // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may) + if (__e) + __throw_system_error(__e); + } + + bool + try_lock() + { + // XXX EINVAL, EAGAIN, EBUSY + return !__gthread_recursive_mutex_trylock(&_M_mutex); + } + + void + unlock() + { + // XXX EINVAL, EAGAIN, EBUSY + __gthread_recursive_mutex_unlock(&_M_mutex); + } + + native_handle_type + native_handle() + { return &_M_mutex; } + }; + + /// timed_mutex + class timed_mutex + { + typedef __gthread_mutex_t __native_type; + +#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC + typedef chrono::monotonic_clock __clock_t; +#else + typedef chrono::high_resolution_clock __clock_t; +#endif + + __native_type _M_mutex; + + public: + typedef __native_type* native_handle_type; + +#ifdef __GTHREAD_MUTEX_INIT + timed_mutex() : 
_M_mutex(__GTHREAD_MUTEX_INIT) { } +#else + timed_mutex() + { + __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex); + } + + ~timed_mutex() { __gthread_mutex_destroy(&_M_mutex); } +#endif + + timed_mutex(const timed_mutex&) = delete; + timed_mutex& operator=(const timed_mutex&) = delete; + + void + lock() + { + int __e = __gthread_mutex_lock(&_M_mutex); + + // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may) + if (__e) + __throw_system_error(__e); + } + + bool + try_lock() + { + // XXX EINVAL, EAGAIN, EBUSY + return !__gthread_mutex_trylock(&_M_mutex); + } + + template <class _Rep, class _Period> + bool + try_lock_for(const chrono::duration<_Rep, _Period>& __rtime) + { return __try_lock_for_impl(__rtime); } + + template <class _Clock, class _Duration> + bool + try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime) + { + chrono::time_point<_Clock, chrono::seconds> __s = + chrono::time_point_cast<chrono::seconds>(__atime); + + chrono::nanoseconds __ns = + chrono::duration_cast<chrono::nanoseconds>(__atime - __s); + + __gthread_time_t __ts = { + static_cast<std::time_t>(__s.time_since_epoch().count()), + static_cast<long>(__ns.count()) + }; + + return !__gthread_mutex_timedlock(&_M_mutex, &__ts); + } + + void + unlock() + { + // XXX EINVAL, EAGAIN, EBUSY + __gthread_mutex_unlock(&_M_mutex); + } + + native_handle_type + native_handle() + { return &_M_mutex; } + + private: + template<typename _Rep, typename _Period> + typename enable_if< + ratio_less_equal<__clock_t::period, _Period>::value, bool>::type + __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime) + { + __clock_t::time_point __atime = __clock_t::now() + + chrono::duration_cast<__clock_t::duration>(__rtime); + + return try_lock_until(__atime); + } + + template <typename _Rep, typename _Period> + typename enable_if< + !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type + __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime) + { + __clock_t::time_point __atime = __clock_t::now() + + ++chrono::duration_cast<__clock_t::duration>(__rtime); + + return try_lock_until(__atime); + } + }; + + /// recursive_timed_mutex + class recursive_timed_mutex + { + typedef __gthread_recursive_mutex_t __native_type; + +#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC + typedef chrono::monotonic_clock __clock_t; +#else + typedef chrono::high_resolution_clock __clock_t; +#endif + + __native_type _M_mutex; + + public: + typedef __native_type* native_handle_type; + +#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT + recursive_timed_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { } +#else + recursive_timed_mutex() + { + // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may) + __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex); + } + + ~recursive_timed_mutex() + { __destroy_recursive_mutex::_S_destroy(&_M_mutex); } +#endif + + recursive_timed_mutex(const recursive_timed_mutex&) = delete; + recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete; + + void + lock() + { + int __e = __gthread_recursive_mutex_lock(&_M_mutex); + + // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may) + if (__e) + __throw_system_error(__e); + } + + bool + try_lock() + { + // XXX EINVAL, EAGAIN, EBUSY + return !__gthread_recursive_mutex_trylock(&_M_mutex); + } + + template <class _Rep, class _Period> + bool + try_lock_for(const chrono::duration<_Rep, _Period>& __rtime) + { return __try_lock_for_impl(__rtime); } + + template <class _Clock, class _Duration> + bool + try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime) + { + 
chrono::time_point<_Clock, chrono::seconds> __s = + chrono::time_point_cast<chrono::seconds>(__atime); + + chrono::nanoseconds __ns = + chrono::duration_cast<chrono::nanoseconds>(__atime - __s); + + __gthread_time_t __ts = { + static_cast<std::time_t>(__s.time_since_epoch().count()), + static_cast<long>(__ns.count()) + }; + + return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts); + } + + void + unlock() + { + // XXX EINVAL, EAGAIN, EBUSY + __gthread_recursive_mutex_unlock(&_M_mutex); + } + + native_handle_type + native_handle() + { return &_M_mutex; } + + private: + template<typename _Rep, typename _Period> + typename enable_if< + ratio_less_equal<__clock_t::period, _Period>::value, bool>::type + __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime) + { + __clock_t::time_point __atime = __clock_t::now() + + chrono::duration_cast<__clock_t::duration>(__rtime); + + return try_lock_until(__atime); + } + + template <typename _Rep, typename _Period> + typename enable_if< + !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type + __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime) + { + __clock_t::time_point __atime = __clock_t::now() + + ++chrono::duration_cast<__clock_t::duration>(__rtime); + + return try_lock_until(__atime); + } + }; + + /// Do not acquire ownership of the mutex. + struct defer_lock_t { }; + + /// Try to acquire ownership of the mutex without blocking. + struct try_to_lock_t { }; + + /// Assume the calling thread has already obtained mutex ownership + /// and manage it. + struct adopt_lock_t { }; + + constexpr defer_lock_t defer_lock { }; + constexpr try_to_lock_t try_to_lock { }; + constexpr adopt_lock_t adopt_lock { }; + + /// @brief Scoped lock idiom. + // Acquire the mutex here with a constructor call, then release with + // the destructor call in accordance with RAII style. 
+ template<typename _Mutex> + class lock_guard + { + public: + typedef _Mutex mutex_type; + + explicit lock_guard(mutex_type& __m) : _M_device(__m) + { _M_device.lock(); } + + lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m) + { } // calling thread owns mutex + + ~lock_guard() + { _M_device.unlock(); } + + lock_guard(const lock_guard&) = delete; + lock_guard& operator=(const lock_guard&) = delete; + + private: + mutex_type& _M_device; + }; + + /// unique_lock + template<typename _Mutex> + class unique_lock + { + public: + typedef _Mutex mutex_type; + + unique_lock() + : _M_device(0), _M_owns(false) + { } + + explicit unique_lock(mutex_type& __m) + : _M_device(&__m), _M_owns(false) + { + lock(); + _M_owns = true; + } + + unique_lock(mutex_type& __m, defer_lock_t) + : _M_device(&__m), _M_owns(false) + { } + + unique_lock(mutex_type& __m, try_to_lock_t) + : _M_device(&__m), _M_owns(_M_device->try_lock()) + { } + + unique_lock(mutex_type& __m, adopt_lock_t) + : _M_device(&__m), _M_owns(true) + { + // XXX calling thread owns mutex + } + + template<typename _Clock, typename _Duration> + unique_lock(mutex_type& __m, + const chrono::time_point<_Clock, _Duration>& __atime) + : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime)) + { } + + template<typename _Rep, typename _Period> + unique_lock(mutex_type& __m, + const chrono::duration<_Rep, _Period>& __rtime) + : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime)) + { } + + ~unique_lock() + { + if (_M_owns) + unlock(); + } + + unique_lock(const unique_lock&) = delete; + unique_lock& operator=(const unique_lock&) = delete; + + unique_lock(unique_lock&& __u) + : _M_device(__u._M_device), _M_owns(__u._M_owns) + { + __u._M_device = 0; + __u._M_owns = false; + } + + unique_lock& operator=(unique_lock&& __u) + { + if(_M_owns) + unlock(); + + unique_lock(std::move(__u)).swap(*this); + + __u._M_device = 0; + __u._M_owns = false; + + return *this; + } + + void + lock() + { + if (!_M_device) + __throw_system_error(int(errc::operation_not_permitted)); + else if (_M_owns) + __throw_system_error(int(errc::resource_deadlock_would_occur)); + else + { + _M_device->lock(); + _M_owns = true; + } + } + + bool + try_lock() + { + if (!_M_device) + __throw_system_error(int(errc::operation_not_permitted)); + else if (_M_owns) + __throw_system_error(int(errc::resource_deadlock_would_occur)); + else + { + _M_owns = _M_device->try_lock(); + return _M_owns; + } + } + + template<typename _Clock, typename _Duration> + bool + try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime) + { + if (!_M_device) + __throw_system_error(int(errc::operation_not_permitted)); + else if (_M_owns) + __throw_system_error(int(errc::resource_deadlock_would_occur)); + else + { + _M_owns = _M_device->try_lock_until(__atime); + return _M_owns; + } + } + + template<typename _Rep, typename _Period> + bool + try_lock_for(const chrono::duration<_Rep, _Period>& __rtime) + { + if (!_M_device) + __throw_system_error(int(errc::operation_not_permitted)); + else if (_M_owns) + __throw_system_error(int(errc::resource_deadlock_would_occur)); + else + { + _M_owns = _M_device->try_lock_for(__rtime); + return _M_owns; + } + } + + void + unlock() + { + if (!_M_owns) + __throw_system_error(int(errc::operation_not_permitted)); + else if (_M_device) + { + _M_device->unlock(); + _M_owns = false; + } + } + + void + swap(unique_lock& __u) + { + std::swap(_M_device, __u._M_device); + std::swap(_M_owns, __u._M_owns); + } + + mutex_type* + release() + { + mutex_type* __ret = _M_device; + 
_M_device = 0; + _M_owns = false; + return __ret; + } + + bool + owns_lock() const + { return _M_owns; } + + explicit operator bool() const + { return owns_lock(); } + + mutex_type* + mutex() const + { return _M_device; } + + private: + mutex_type* _M_device; + bool _M_owns; // XXX use atomic_bool + }; + + template<typename _Mutex> + inline void + swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) + { __x.swap(__y); } + + template<int _Idx> + struct __unlock_impl + { + template<typename... _Lock> + static void + __do_unlock(tuple<_Lock&...>& __locks) + { + std::get<_Idx>(__locks).unlock(); + __unlock_impl<_Idx - 1>::__do_unlock(__locks); + } + }; + + template<> + struct __unlock_impl<-1> + { + template<typename... _Lock> + static void + __do_unlock(tuple<_Lock&...>&) + { } + }; + + template<typename _Lock> + unique_lock<_Lock> + __try_to_lock(_Lock& __l) + { return unique_lock<_Lock>(__l, try_to_lock); } + + template<int _Idx, bool _Continue = true> + struct __try_lock_impl + { + template<typename... _Lock> + static void + __do_try_lock(tuple<_Lock&...>& __locks, int& __idx) + { + __idx = _Idx; + auto __lock = __try_to_lock(std::get<_Idx>(__locks)); + if (__lock.owns_lock()) + { + __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>:: + __do_try_lock(__locks, __idx); + if (__idx == -1) + __lock.release(); + } + } + }; + + template<int _Idx> + struct __try_lock_impl<_Idx, false> + { + template<typename... _Lock> + static void + __do_try_lock(tuple<_Lock&...>& __locks, int& __idx) + { + __idx = _Idx; + auto __lock = __try_to_lock(std::get<_Idx>(__locks)); + if (__lock.owns_lock()) + { + __idx = -1; + __lock.release(); + } + } + }; + + /** @brief Generic try_lock. + * @param __l1 Meets Mutex requirements (try_lock() may throw). + * @param __l2 Meets Mutex requirements (try_lock() may throw). + * @param __l3 Meets Mutex requirements (try_lock() may throw). + * @return Returns -1 if all try_lock() calls return true. Otherwise returns + * a 0-based index corresponding to the argument that returned false. + * @post Either all arguments are locked, or none will be. + * + * Sequentially calls try_lock() on each argument. + */ + template<typename _Lock1, typename _Lock2, typename... _Lock3> + int + try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3) + { + int __idx; + auto __locks = std::tie(__l1, __l2, __l3...); + __try + { __try_lock_impl<0>::__do_try_lock(__locks, __idx); } + __catch(...) + { } + return __idx; + } + + /** @brief Generic lock. + * @param __l1 Meets Mutex requirements (try_lock() may throw). + * @param __l2 Meets Mutex requirements (try_lock() may throw). + * @param __l3 Meets Mutex requirements (try_lock() may throw). + * @throw An exception thrown by an argument's lock() or try_lock() member. + * @post All arguments are locked. + * + * All arguments are locked via a sequence of calls to lock(), try_lock() + * and unlock(). If the call exits via an exception any locks that were + * obtained will be released. + */ + template<typename _L1, typename _L2, typename ..._L3> + void + lock(_L1& __l1, _L2& __l2, _L3&... 
__l3) + { + while (true) + { + unique_lock<_L1> __first(__l1); + int __idx; + auto __locks = std::tie(__l2, __l3...); + __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx); + if (__idx == -1) + { + __first.release(); + return; + } + } + } + + /// once_flag + struct once_flag + { + private: + typedef __gthread_once_t __native_type; + __native_type _M_once; + + public: + constexpr once_flag() : _M_once(__GTHREAD_ONCE_INIT) { } + + once_flag(const once_flag&) = delete; + once_flag& operator=(const once_flag&) = delete; + + template<typename _Callable, typename... _Args> + friend void + call_once(once_flag& __once, _Callable&& __f, _Args&&... __args); + }; + +#ifdef _GLIBCXX_HAVE_TLS + extern __thread void* __once_callable; + extern __thread void (*__once_call)(); + + template<typename _Callable> + inline void + __once_call_impl() + { + (*(_Callable*)__once_callable)(); + } +#else + extern function<void()> __once_functor; + + extern void + __set_once_functor_lock_ptr(unique_lock<mutex>*); + + extern mutex& + __get_once_mutex(); +#endif + + extern "C" void __once_proxy(); + + /// call_once + template<typename _Callable, typename... _Args> + void + call_once(once_flag& __once, _Callable&& __f, _Args&&... __args) + { +#ifdef _GLIBCXX_HAVE_TLS + auto __bound_functor = std::bind<void>(std::forward<_Callable>(__f), + std::forward<_Args>(__args)...); + __once_callable = &__bound_functor; + __once_call = &__once_call_impl<decltype(__bound_functor)>; +#else + unique_lock<mutex> __functor_lock(__get_once_mutex()); + __once_functor = std::bind<void>(std::forward<_Callable>(__f), + std::forward<_Args>(__args)...); + __set_once_functor_lock_ptr(&__functor_lock); +#endif + + int __e = __gthread_once(&(__once._M_once), &__once_proxy); + +#ifndef _GLIBCXX_HAVE_TLS + if (__functor_lock) + __set_once_functor_lock_ptr(0); +#endif + + if (__e) + __throw_system_error(__e); + } + + // @} group mutexes +_GLIBCXX_END_NAMESPACE_VERSION +} // namespace + +#endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1 + +#endif // __GXX_EXPERIMENTAL_CXX0X__ + +#endif // _GLIBCXX_MUTEX |
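
As a quick orientation for the new header: a minimal usage sketch (not part of the imported tree) of std::mutex protected by lock_guard. The Counter type and the thread counts are invented for illustration; on GCC 4.6 this would be built with -std=c++0x -pthread.

```cpp
// Illustrative usage only -- not part of the imported gcc-4.6.4 tree.
// A counter shared by several threads, protected by std::mutex.
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

struct Counter                      // hypothetical example type
{
    Counter() : value(0) { }

    void increment()
    {
        // lock_guard locks in its constructor and unlocks in its
        // destructor, so the mutex is released on every exit path.
        std::lock_guard<std::mutex> guard(m);
        ++value;
    }

    int value;
    std::mutex m;
};

int main()
{
    Counter c;
    std::vector<std::thread> threads;
    for (int i = 0; i != 4; ++i)
        threads.push_back(std::thread([&c] {
            for (int j = 0; j != 1000; ++j)
                c.increment();
        }));
    for (auto& t : threads)
        t.join();
    std::cout << c.value << std::endl;   // expected: 4000
    return 0;
}
```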
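The generic std::lock() and std::try_lock() algorithms near the end of the header acquire several lockables without deadlocking. A sketch of how they might be combined with adopt_lock, assuming the two Account objects passed in are distinct (the Account type is invented):

```cpp
// Illustrative usage only -- deadlock-free locking of two mutexes.
#include <mutex>

struct Account                      // hypothetical example type
{
    explicit Account(double b) : balance(b) { }
    double balance;
    std::mutex m;
};

// Precondition: &from != &to.
void transfer(Account& from, Account& to, double amount)
{
    // std::lock() acquires both mutexes via lock/try_lock/unlock retries,
    // so concurrent transfer(a, b) and transfer(b, a) calls cannot
    // deadlock on lock ordering.
    std::lock(from.m, to.m);

    // adopt_lock: the guards take over the already-held mutexes and
    // release them when the function returns.
    std::lock_guard<std::mutex> lock_from(from.m, std::adopt_lock);
    std::lock_guard<std::mutex> lock_to(to.m, std::adopt_lock);

    from.balance -= amount;
    to.balance += amount;
}
```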
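timed_mutex and recursive_timed_mutex add try_lock_for()/try_lock_until(), and unique_lock exposes them through its duration and time_point constructors. A sketch with an arbitrary 100 ms budget; the worker function is a placeholder:

```cpp
// Illustrative usage only -- bounded wait on a timed_mutex.
#include <chrono>
#include <iostream>
#include <mutex>

std::timed_mutex tm;

void worker()
{
    // The duration constructor of unique_lock calls try_lock_for(),
    // so ownership must be checked before touching shared state.
    std::unique_lock<std::timed_mutex> lk(tm, std::chrono::milliseconds(100));
    if (lk.owns_lock())
    {
        // ... critical section; released when lk goes out of scope ...
    }
    else
    {
        std::cout << "gave up after 100 ms" << std::endl;
    }
}
```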
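once_flag and call_once() run an initializer exactly once even when several threads race to it. A minimal sketch in which init_resource and get_resource are placeholder names:

```cpp
// Illustrative usage only -- one-time initialization with call_once.
#include <mutex>

std::once_flag resource_flag;       // constexpr-constructed, safe as a global

void init_resource()                // hypothetical initializer
{
    // expensive setup; runs exactly once even if several threads
    // reach get_resource() at the same time
}

void get_resource()
{
    std::call_once(resource_flag, init_resource);
    // ... use the initialized resource ...
}
```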