stl_threads.h Source File

00001 // Threading support -*- C++ -*-
00002 
00003 // Copyright (C) 2001 Free Software Foundation, Inc.
00004 //
00005 // This file is part of the GNU ISO C++ Library.  This library is free
00006 // software; you can redistribute it and/or modify it under the
00007 // terms of the GNU General Public License as published by the
00008 // Free Software Foundation; either version 2, or (at your option)
00009 // any later version.
00010 
00011 // This library is distributed in the hope that it will be useful,
00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of
00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
00014 // GNU General Public License for more details.
00015 
00016 // You should have received a copy of the GNU General Public License along
00017 // with this library; see the file COPYING.  If not, write to the Free
00018 // Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
00019 // USA.
00020 
00021 // As a special exception, you may use this file as part of a free software
00022 // library without restriction.  Specifically, if other files instantiate
00023 // templates or use macros or inline functions from this file, or you compile
00024 // this file and link it with other files to produce an executable, this
00025 // file does not by itself cause the resulting executable to be covered by
00026 // the GNU General Public License.  This exception does not however
00027 // invalidate any other reasons why the executable file might be covered by
00028 // the GNU General Public License.
00029 
00030 /*
00031  * Copyright (c) 1997-1999
00032  * Silicon Graphics Computer Systems, Inc.
00033  *
00034  * Permission to use, copy, modify, distribute and sell this software
00035  * and its documentation for any purpose is hereby granted without fee,
00036  * provided that the above copyright notice appear in all copies and
00037  * that both that copyright notice and this permission notice appear
00038  * in supporting documentation.  Silicon Graphics makes no
00039  * representations about the suitability of this software for any
00040  * purpose.  It is provided "as is" without express or implied warranty.
00041  */
00042 
00043 // WARNING: This is an internal header file, included by other C++
00044 // standard library headers.  You should not attempt to use this header
00045 // file directly.
00046 // stl_config.h should be included before this file.
00047 
00048 #ifndef __SGI_STL_INTERNAL_THREADS_H
00049 #define __SGI_STL_INTERNAL_THREADS_H
00050 
00051 // Supported threading models are native SGI, pthreads, uithreads
00052 // (similar to pthreads, but based on an earlier draft of the Posix
00053 // threads standard), and Win32 threads.  Uithread support by Jochen
00054 // Schlick, 1999.
00055 
00056 // GCC extension begin
00057 // In order to present a stable threading configuration, in all cases,
00058 // gcc looks for its own abstraction layer before all others.  All
00059 // modifications to this file are marked to allow easier importation of
00060 // STL upgrades.
00061 #if defined(__STL_GTHREADS)
00062 #include "bits/gthr.h"
00063 #else
00064 // GCC extension end
00065 #if defined(__STL_SGI_THREADS)
00066 #include <mutex.h>
00067 #include <time.h>
00068 #elif defined(__STL_PTHREADS)
00069 #include <pthread.h>
00070 #elif defined(__STL_UITHREADS)
00071 #include <thread.h>
00072 #include <synch.h>
00073 #elif defined(__STL_WIN32THREADS)
00074 #include <windows.h>
00075 #endif
00076 // GCC extension begin
00077 #endif
00078 // GCC extension end
00079 
00080 namespace std
00081 {
00082 
00083 // Class _Refcount_Base provides a type, _RC_t, a data member,
00084 // _M_ref_count, and member functions _M_incr and _M_decr, which perform
00085 // atomic preincrement/predecrement.  The constructor initializes 
00086 // _M_ref_count.
00087 
00088 // Hack for SGI o32 compilers.
00089 #if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
00090     (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
00091 #  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)  
00092 #  define __test_and_set(__l,__v)  test_and_set(__l,__v)
00093 #endif /* o32 */
00094 
00095 struct _Refcount_Base
00096 {
00097   // The type _RC_t
00098 # ifdef __STL_WIN32THREADS
00099   typedef long _RC_t;
00100 # else
00101   typedef size_t _RC_t;
00102 #endif
00103   
00104   // The data member _M_ref_count
00105   volatile _RC_t _M_ref_count;
00106 
00107   // Constructor
00108 // GCC extension begin
00109 #ifdef __STL_GTHREADS
00110   __gthread_mutex_t _M_ref_count_lock;
00111   _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
00112     {
00113 #ifdef __GTHREAD_MUTEX_INIT
00114       __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
00115       _M_ref_count_lock = __tmp;
00116 #elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00117       __GTHREAD_MUTEX_INIT_FUNCTION (&_M_ref_count_lock);
00118 #else
00119 #error __GTHREAD_MUTEX_INIT or __GTHREAD_MUTEX_INIT_FUNCTION should be defined by gthr.h abstraction layer, report problem to libstdc++@gcc.gnu.org.
00120 #endif
00121     }
00122 #else
00123 // GCC extension end
00124 # ifdef __STL_PTHREADS
00125   pthread_mutex_t _M_ref_count_lock;
00126   _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
00127     { pthread_mutex_init(&_M_ref_count_lock, 0); }
00128 # elif defined(__STL_UITHREADS)
00129   mutex_t         _M_ref_count_lock;
00130   _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
00131     { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
00132 # else
00133   _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
00134 # endif
00135 // GCC extension begin
00136 #endif
00137 // GCC extension end
00138 
00139 // GCC extension begin
00140 #ifdef __STL_GTHREADS
00141   void _M_incr() {
00142     __gthread_mutex_lock(&_M_ref_count_lock);
00143     ++_M_ref_count;
00144     __gthread_mutex_unlock(&_M_ref_count_lock);
00145   }
00146   _RC_t _M_decr() {
00147     __gthread_mutex_lock(&_M_ref_count_lock);
00148     volatile _RC_t __tmp = --_M_ref_count;
00149     __gthread_mutex_unlock(&_M_ref_count_lock);
00150     return __tmp;
00151   }
00152 #else
00153 // GCC extension end
00154   // _M_incr and _M_decr
00155 # ifdef __STL_SGI_THREADS
00156   void _M_incr() {  __add_and_fetch(&_M_ref_count, 1); }
00157   _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
00158 # elif defined (__STL_WIN32THREADS)
00159   void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
00160   _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
00161 # elif defined(__STL_PTHREADS)
00162   void _M_incr() {
00163     pthread_mutex_lock(&_M_ref_count_lock);
00164     ++_M_ref_count;
00165     pthread_mutex_unlock(&_M_ref_count_lock);
00166   }
00167   _RC_t _M_decr() {
00168     pthread_mutex_lock(&_M_ref_count_lock);
00169     volatile _RC_t __tmp = --_M_ref_count;
00170     pthread_mutex_unlock(&_M_ref_count_lock);
00171     return __tmp;
00172   }
00173 # elif defined(__STL_UITHREADS)
00174   void _M_incr() {
00175     mutex_lock(&_M_ref_count_lock);
00176     ++_M_ref_count;
00177     mutex_unlock(&_M_ref_count_lock);
00178   }
00179   _RC_t _M_decr() {
00180     mutex_lock(&_M_ref_count_lock);
00181     /*volatile*/ _RC_t __tmp = --_M_ref_count;
00182     mutex_unlock(&_M_ref_count_lock);
00183     return __tmp;
00184   }
00185 # else  /* No threads */
00186   void _M_incr() { ++_M_ref_count; }
00187   _RC_t _M_decr() { return --_M_ref_count; }
00188 # endif
00189 // GCC extension begin
00190 #endif
00191 // GCC extension end
00192 };
00193 
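// Illustrative sketch, not part of the original header: one way a
// reference-counted object could build on _Refcount_Base as described above.
// The names _Demo_shared_buf, _M_data, _S_acquire and _S_release are
// hypothetical; only _RC_t, _M_incr() and _M_decr() come from the class itself.
struct _Demo_shared_buf : public _Refcount_Base {
  char* _M_data;
  // A freshly created buffer starts with a single owner.
  _Demo_shared_buf(char* __p) : _Refcount_Base(1), _M_data(__p) {}

  static void _S_acquire(_Demo_shared_buf* __b) { __b->_M_incr(); }
  static void _S_release(_Demo_shared_buf* __b) {
    // _M_decr() returns the decremented count; 0 means this was the last owner.
    if (__b->_M_decr() == 0) {
      delete[] __b->_M_data;
      delete __b;
    }
  }
};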
00194 // Atomic swap on unsigned long
00195 // This is guaranteed to behave as though it were atomic only if all
00196 // possibly concurrent updates use _Atomic_swap.
00197 // In some cases the operation is emulated with a lock.
00198 // GCC extension begin
00199 #ifdef __STL_GTHREADS
00200 // We don't provide an _Atomic_swap in this configuration.  This only
00201 // affects the use of ext/rope with threads.  Someone could add this
00202 // later, if required.  You can start by cloning the __STL_PTHREADS
00203 // path while making the obvious changes.  Later it could be optimized
00204 // to use the atomicity.h abstraction layer from libstdc++-v3.
00205 #else
00206 // GCC extension end
00207 # ifdef __STL_SGI_THREADS
00208     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00209 #       if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
00210             return test_and_set(__p, __q);
00211 #       else
00212             return __test_and_set(__p, (unsigned long)__q);
00213 #       endif
00214     }
00215 # elif defined(__STL_WIN32THREADS)
00216     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00217         return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
00218     }
00219 # elif defined(__STL_PTHREADS)
00220     // We use a template here only to get a unique initialized instance.
00221     template<int __dummy>
00222     struct _Swap_lock_struct {
00223         static pthread_mutex_t _S_swap_lock;
00224     };
00225 
00226     template<int __dummy>
00227     pthread_mutex_t
00228     _Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;
00229 
00230     // This should be portable, but performance is expected
00231     // to be quite awful.  This really needs platform specific
00232     // code.
00233     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00234         pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
00235         unsigned long __result = *__p;
00236         *__p = __q;
00237         pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
00238         return __result;
00239     }
00240 # elif defined(__STL_UITHREADS)
00241     // We use a template here only to get a unique initialized instance.
00242     template<int __dummy>
00243     struct _Swap_lock_struct {
00244         static mutex_t _S_swap_lock;
00245     };
00246 
00247     template<int __dummy>
00248     mutex_t
00249     _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
00250 
00251     // This should be portable, but performance is expected
00252     // to be quite awful.  This really needs platform specific
00253     // code.
00254     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00255         mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
00256         unsigned long __result = *__p;
00257         *__p = __q;
00258         mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
00259         return __result;
00260     }
00261 # elif defined (__STL_SOLARIS_THREADS)
00262     // any better solutions ?
00263     // We use a template here only to get a unique initialized instance.
00264     template<int __dummy>
00265     struct _Swap_lock_struct {
00266         static mutex_t _S_swap_lock;
00267     };
00268 
00269 # if ( __STL_STATIC_TEMPLATE_DATA > 0 )
00270     template<int __dummy>
00271     mutex_t
00272     _Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
00273 #  else
00274     __DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock, 
00275                        =DEFAULTMUTEX);
00276 # endif /* ( __STL_STATIC_TEMPLATE_DATA > 0 ) */
00277 
00278     // This should be portable, but performance is expected
00279     // to be quite awful.  This really needs platform specific
00280     // code.
00281     inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00282         mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
00283         unsigned long __result = *__p;
00284         *__p = __q;
00285         mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
00286         return __result;
00287     }
00288 # else
00289     static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
00290         unsigned long __result = *__p;
00291         *__p = __q;
00292         return __result;
00293     }
00294 # endif
00295 // GCC extension begin
00296 #endif
00297 // GCC extension end
00298 
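// Illustrative sketch, not part of the original header, for configurations
// that provide _Atomic_swap (it is absent under __STL_GTHREADS, as noted
// above).  Per the contract above, atomicity holds only if every concurrent
// writer of the shared word also goes through _Atomic_swap.  The function
// _Demo_claim_flag is a hypothetical name.
inline bool _Demo_claim_flag(unsigned long* __flag) {
  // The first caller sees the old value 0 and thereby claims the flag;
  // every later caller swaps 1 for 1 and gets false.
  return _Atomic_swap(__flag, 1) == 0;
}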
00299 // Locking class.  Note that this class *does not have a constructor*.
00300 // It must be initialized either statically, with __STL_MUTEX_INITIALIZER,
00301 // or dynamically, by explicitly calling the _M_initialize member function.
00302 // (This is similar to the ways that a pthreads mutex can be initialized.)
00303 // There are explicit member functions for acquiring and releasing the lock.
00304 
00305 // There is no constructor because static initialization is essential for
00306 // some uses, and only a class aggregate (see section 8.5.1 of the C++
00307 // standard) can be initialized that way.  That means we must have no
00308 // constructors, no base classes, no virtual functions, and no private or
00309 // protected members.
00310 
00311 // Helper struct.  This is a workaround for various compilers that don't
00312 // handle static variables in inline functions properly.
00313 template <int __inst>
00314 struct _STL_mutex_spin {
00315   enum { __low_max = 30, __high_max = 1000 };
00316   // Low if we suspect uniprocessor, high for multiprocessor.
00317 
00318   static unsigned __max;
00319   static unsigned __last;
00320 };
00321 
00322 template <int __inst>
00323 unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;
00324 
00325 template <int __inst>
00326 unsigned _STL_mutex_spin<__inst>::__last = 0;
00327 
00328 // GCC extension begin
00329 #if defined(__STL_GTHREADS)
00330 #if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00331 extern __gthread_mutex_t _GLIBCPP_mutex;
00332 extern __gthread_mutex_t *_GLIBCPP_mutex_address;
00333 extern __gthread_once_t _GLIBCPP_once;
00334 extern void _GLIBCPP_mutex_init (void);
00335 extern void _GLIBCPP_mutex_address_init (void);
00336 #endif
00337 #endif
00338 // GCC extension end
00339 
00340 struct _STL_mutex_lock
00341 {
00342 // GCC extension begin
00343 #if defined(__STL_GTHREADS)
00344   // The class must be statically initialized with __STL_MUTEX_INITIALIZER.
00345 #if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00346   volatile int _M_init_flag;
00347   __gthread_once_t _M_once;
00348 #endif
00349   __gthread_mutex_t _M_lock;
00350   void _M_initialize() {
00351 #ifdef __GTHREAD_MUTEX_INIT
00352     // There should be no code in this path given the usage rules above.
00353 #elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00354     if (_M_init_flag) return;
00355     if (__gthread_once (&_GLIBCPP_once, _GLIBCPP_mutex_init) != 0
00356         && __gthread_active_p ())
00357       abort ();
00358     __gthread_mutex_lock (&_GLIBCPP_mutex);
00359     if (!_M_init_flag) {
00360       // Even though we have a global lock, we use __gthread_once to be
00361       // absolutely certain the _M_lock mutex is only initialized once on
00362       // multiprocessor systems.
00363       _GLIBCPP_mutex_address = &_M_lock;
00364       if (__gthread_once (&_M_once, _GLIBCPP_mutex_address_init) != 0
00365           && __gthread_active_p ())
00366         abort ();
00367       _M_init_flag = 1;
00368     }
00369     __gthread_mutex_unlock (&_GLIBCPP_mutex);
00370 #endif
00371   }
00372   void _M_acquire_lock() {
00373 #if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00374     if (!_M_init_flag) _M_initialize();
00375 #endif
00376     __gthread_mutex_lock(&_M_lock);
00377   }
00378   void _M_release_lock() {
00379 #if !defined(__GTHREAD_MUTEX_INIT) && defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00380     if (!_M_init_flag) _M_initialize();
00381 #endif
00382     __gthread_mutex_unlock(&_M_lock);
00383   }
00384 #else
00385 // GCC extension end
00386 #if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
00387   // It should be relatively easy to get this to work on any modern Unix.
00388   volatile unsigned long _M_lock;
00389   void _M_initialize() { _M_lock = 0; }
00390   static void _S_nsec_sleep(int __log_nsec) {
00391 #     ifdef __STL_SGI_THREADS
00392           struct timespec __ts;
00393           /* Max sleep is 2**27 nsec ~ 134 msec   */
00394           __ts.tv_sec = 0;
00395           __ts.tv_nsec = 1L << __log_nsec;
00396           nanosleep(&__ts, 0);
00397 #     elif defined(__STL_WIN32THREADS)
00398           if (__log_nsec <= 20) {
00399               Sleep(0);
00400           } else {
00401               Sleep(1 << (__log_nsec - 20));
00402           }
00403 #     else
00404 #       error unimplemented
00405 #     endif
00406   }
00407   void _M_acquire_lock() {
00408     volatile unsigned long* __lock = &this->_M_lock;
00409 
00410     if (!_Atomic_swap((unsigned long*)__lock, 1)) {
00411       return;
00412     }
00413     unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
00414     unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
00415     volatile unsigned __junk = 17;      // Value doesn't matter.
00416     unsigned __i;
00417     for (__i = 0; __i < __my_spin_max; __i++) {
00418       if (__i < __my_last_spins/2 || *__lock) {
00419         __junk *= __junk; __junk *= __junk;
00420         __junk *= __junk; __junk *= __junk;
00421         continue;
00422       }
00423       if (!_Atomic_swap((unsigned long*)__lock, 1)) {
00424         // got it!
00425         // Spinning worked.  Thus we're probably not being scheduled
00426         // against the other process with which we were contending.
00427         // Thus it makes sense to spin longer the next time.
00428         _STL_mutex_spin<0>::__last = __i;
00429         _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
00430         return;
00431       }
00432     }
00433     // We are probably being scheduled against the other process.  Sleep.
00434     _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
00435     for (__i = 0 ;; ++__i) {
00436       int __log_nsec = __i + 6;
00437 
00438       if (__log_nsec > 27) __log_nsec = 27;
00439       if (!_Atomic_swap((unsigned long *)__lock, 1)) {
00440         return;
00441       }
00442       _S_nsec_sleep(__log_nsec);
00443     }
00444   }
00445   void _M_release_lock() {
00446     volatile unsigned long* __lock = &_M_lock;
00447 #   if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
00448         asm("sync");
00449         *__lock = 0;
00450 #   elif defined(__STL_SGI_THREADS) && __mips >= 3 \
00451          && (defined (_ABIN32) || defined(_ABI64))
00452         __lock_release(__lock);
00453 #   else 
00454         *__lock = 0;
00455         // This is not sufficient on many multiprocessors, since
00456         // writes to protected variables and the lock may be reordered.
00457 #   endif
00458   }
00459 
00460 // We no longer use win32 critical sections.
00461 // They appear to be slower in the contention-free case,
00462 // and they appear difficult to initialize without introducing a race.
00463 
00464 #elif defined(__STL_PTHREADS)
00465   pthread_mutex_t _M_lock;
00466   void _M_initialize()   { pthread_mutex_init(&_M_lock, NULL); }
00467   void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
00468   void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
00469 #elif defined(__STL_UITHREADS)
00470   mutex_t _M_lock;
00471   void _M_initialize()   { mutex_init(&_M_lock, USYNC_THREAD, 0); }
00472   void _M_acquire_lock() { mutex_lock(&_M_lock); }
00473   void _M_release_lock() { mutex_unlock(&_M_lock); }
00474 #else /* No threads */
00475   void _M_initialize()   {}
00476   void _M_acquire_lock() {}
00477   void _M_release_lock() {}
00478 #endif
00479 // GCC extension begin
00480 #endif
00481 // GCC extension end
00482 };
00483 
00484 // GCC extension begin
00485 #if defined(__STL_GTHREADS)
00486 #ifdef __GTHREAD_MUTEX_INIT
00487 #define __STL_MUTEX_INITIALIZER = { __GTHREAD_MUTEX_INIT }
00488 #elif defined(__GTHREAD_MUTEX_INIT_FUNCTION)
00489 #ifdef __GTHREAD_MUTEX_INIT_DEFAULT
00490 #define __STL_MUTEX_INITIALIZER \
00491   = { 0, __GTHREAD_ONCE_INIT, __GTHREAD_MUTEX_INIT_DEFAULT }
00492 #else
00493 #define __STL_MUTEX_INITIALIZER = { 0, __GTHREAD_ONCE_INIT }
00494 #endif
00495 #endif
00496 #else
00497 // GCC extension end
00498 #ifdef __STL_PTHREADS
00499 // Pthreads locks must be statically initialized to something other than
00500 // the default value of zero.
00501 #   define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
00502 #elif defined(__STL_UITHREADS)
00503 // UIthreads locks must be statically initialized to something other than
00504 // the default value of zero.
00505 #   define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
00506 #elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
00507 #   define __STL_MUTEX_INITIALIZER = { 0 }
00508 #else
00509 #   define __STL_MUTEX_INITIALIZER
00510 #endif
00511 // GCC extension begin
00512 #endif
00513 // GCC extension end
00514 
00515 
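// Illustrative sketch, not part of the original header: the two
// initialization styles described above, using the class and the
// __STL_MUTEX_INITIALIZER macro defined in this file.  The names
// _S_demo_lock, _S_demo_count and _Demo_locked_incr are hypothetical.
static _STL_mutex_lock _S_demo_lock __STL_MUTEX_INITIALIZER;  // static initialization
static int _S_demo_count = 0;

inline void _Demo_locked_incr() {
  _S_demo_lock._M_acquire_lock();   // explicit acquire; the class has no constructor
  ++_S_demo_count;                  // the shared update is protected here
  _S_demo_lock._M_release_lock();   // explicit release
}
// A lock embedded in a heap-allocated object cannot use the static form and
// must instead call _M_initialize() once before its first _M_acquire_lock().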
00516 // A locking class that uses _STL_mutex_lock.  The constructor takes a
00517 // reference to an _STL_mutex_lock, and acquires a lock.  The
00518 // destructor releases the lock.  It's not clear that this is exactly
00519 // the right functionality.  It will probably change in the future.
00520 
00521 struct _STL_auto_lock
00522 {
00523   _STL_mutex_lock& _M_lock;
00524   
00525   _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
00526     { _M_lock._M_acquire_lock(); }
00527   ~_STL_auto_lock() { _M_lock._M_release_lock(); }
00528 
00529 private:
00530   void operator=(const _STL_auto_lock&);
00531   _STL_auto_lock(const _STL_auto_lock&);
00532 };
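// Illustrative sketch, not part of the original header: the scope-guard use
// the class above is intended for.  _Demo_guarded_incr, __lock and __counter
// are hypothetical names.
inline void _Demo_guarded_incr(_STL_mutex_lock& __lock, int& __counter) {
  _STL_auto_lock __guard(__lock);   // constructor acquires the lock
  ++__counter;                      // protected region
}                                   // destructor releases on every exit path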
00533 
00534 } // namespace std
00535 
00536 #endif /* __SGI_STL_INTERNAL_THREADS_H */
00537 
00538 // Local Variables:
00539 // mode:C++
00540 // End:
00541 

Generated on Mon Apr 8 03:11:44 2002 for libstdc++-v3 Source by doxygen 1.2.15