/*
 * Copyright (c) 1997-1998
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

// WARNING: This is an internal header file, included by other C++
// standard library headers.  You should not attempt to use this header
// file directly.

#ifndef __SGI_STL_INTERNAL_THREADS_H
#define __SGI_STL_INTERNAL_THREADS_H

// Supported threading models are native SGI, pthreads, uithreads
// (similar to pthreads, but based on an earlier draft of the pthreads
// standard), and Win32 threads.

#if defined(__STL_SGI_THREADS)
#include <mutex.h>
#include <time.h>
#elif defined(__STL_PTHREADS)
#include <pthread.h>
#elif defined(__STL_UITHREADS)
#include <thread.h>
#include <synch.h>
#elif defined(__STL_WIN32THREADS)
#include <windows.h>
#endif

__STL_BEGIN_NAMESPACE

// Class _Refcount_Base provides a type, _RC_t, a data member,
// _M_ref_count, and member functions _M_incr and _M_decr, which perform
// atomic preincrement/predecrement.  The constructor initializes
// _M_ref_count.

// Hack for SGI o32 compilers.
#if defined(__STL_SGI_THREADS) && !defined(__add_and_fetch) && \
    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
#endif

struct _Refcount_Base
{
  // The type _RC_t: long on Win32 so that it matches the Interlocked*
  // API; size_t elsewhere.
# ifdef __STL_WIN32THREADS
  typedef long _RC_t;
# else
  typedef size_t _RC_t;
# endif

  // The data member _M_ref_count
  volatile _RC_t _M_ref_count;

  // Constructor: initializes _M_ref_count, and on pthreads/uithreads
  // also initializes the mutex that guards it.
# ifdef __STL_PTHREADS
  pthread_mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { pthread_mutex_init(&_M_ref_count_lock, 0); }
# elif defined(__STL_UITHREADS)
  mutex_t _M_ref_count_lock;
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n)
    { mutex_init(&_M_ref_count_lock, USYNC_THREAD, 0); }
# else
  _Refcount_Base(_RC_t __n) : _M_ref_count(__n) {}
# endif

  // _M_incr and _M_decr: atomic increment/decrement of _M_ref_count.
  // _M_decr returns the new value so callers can detect the last release.
# ifdef __STL_SGI_THREADS
  void _M_incr() { __add_and_fetch(&_M_ref_count, 1); }
  _RC_t _M_decr() { return __add_and_fetch(&_M_ref_count, (size_t) -1); }
# elif defined (__STL_WIN32THREADS)
  void _M_incr() { InterlockedIncrement((_RC_t*)&_M_ref_count); }
  _RC_t _M_decr() { return InterlockedDecrement((_RC_t*)&_M_ref_count); }
# elif defined(__STL_PTHREADS)
  void _M_incr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    pthread_mutex_lock(&_M_ref_count_lock);
    volatile _RC_t __tmp = --_M_ref_count;
    pthread_mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# elif defined(__STL_UITHREADS)
  void _M_incr() {
    mutex_lock(&_M_ref_count_lock);
    ++_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
  }
  _RC_t _M_decr() {
    mutex_lock(&_M_ref_count_lock);
    _RC_t __tmp = --_M_ref_count;
    mutex_unlock(&_M_ref_count_lock);
    return __tmp;
  }
# else
  // Single-threaded build: plain increment/decrement suffice.
  void _M_incr() { ++_M_ref_count; }
  _RC_t _M_decr() { return --_M_ref_count; }
# endif
};
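
// Illustrative only (not part of the original header): a minimal sketch of
// how a reference-counted handle might use _Refcount_Base.  _My_rep and
// _My_handle are hypothetical names invented for this example.
//
//   struct _My_rep : public _Refcount_Base {
//     _My_rep() : _Refcount_Base(1) {}       // start with one owner
//   };
//   struct _My_handle {
//     _My_rep* _M_rep;
//     _My_handle() : _M_rep(new _My_rep) {}
//     _My_handle(const _My_handle& __h) : _M_rep(__h._M_rep)
//       { _M_rep->_M_incr(); }               // share: bump the count
//     ~_My_handle()
//       { if (_M_rep->_M_decr() == 0) delete _M_rep; }  // last owner frees
//   };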

// Atomic swap on unsigned long.
// This is guaranteed to behave as though it were atomic only if all
// possibly concurrent updates use _Atomic_swap.  In some cases the
// operation is emulated with a lock.
# ifdef __STL_SGI_THREADS
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
#  if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
  return test_and_set(__p, __q);
#  else
  return __test_and_set(__p, (unsigned long)__q);
#  endif
}
# elif defined(__STL_WIN32THREADS)
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  return (unsigned long) InterlockedExchange((LPLONG)__p, (LONG)__q);
}
# elif defined(__STL_PTHREADS)
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static pthread_mutex_t _S_swap_lock;
};

template<int __dummy>
pthread_mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = PTHREAD_MUTEX_INITIALIZER;

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  pthread_mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  pthread_mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# elif defined(__STL_UITHREADS)
// We use a template here only to get a unique initialized instance.
template<int __dummy>
struct _Swap_lock_struct {
  static mutex_t _S_swap_lock;
};

template<int __dummy>
mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# elif defined (__STL_SOLARIS_THREADS)
// Solaris threads: the same lock-based emulation as the uithreads case,
// with the static member handled for compilers without template data.
template<int __dummy>
struct _Swap_lock_struct {
  static mutex_t _S_swap_lock;
};

#  if ( __STL_STATIC_TEMPLATE_DATA > 0 )
template<int __dummy>
mutex_t
_Swap_lock_struct<__dummy>::_S_swap_lock = DEFAULTMUTEX;
#  else
__DECLARE_INSTANCE(mutex_t, _Swap_lock_struct<__dummy>::_S_swap_lock,
                   =DEFAULTMUTEX);
#  endif

// This should be portable, but performance is expected
// to be quite awful.  This really needs platform specific
// code.
inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  mutex_lock(&_Swap_lock_struct<0>::_S_swap_lock);
  unsigned long __result = *__p;
  *__p = __q;
  mutex_unlock(&_Swap_lock_struct<0>::_S_swap_lock);
  return __result;
}
# else
// No threads: a plain, non-atomic swap is sufficient.
static inline unsigned long _Atomic_swap(unsigned long * __p, unsigned long __q) {
  unsigned long __result = *__p;
  *__p = __q;
  return __result;
}
# endif
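
// Illustrative only (not part of the original header): _Atomic_swap is
// enough to build a test-and-set spin lock, which is exactly how
// _STL_mutex_lock below uses it on the SGI and Win32 paths.  __l is a
// hypothetical lock word invented for this sketch.
//
//   unsigned long __l = 0;
//   while (_Atomic_swap(&__l, 1)) { /* spin: someone else holds it */ }
//   /* ... critical section ... */
//   __l = 0;   /* release (see the memory-ordering caveat below) */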

// Locking class.  Note that this class *does not have a constructor*.
// It must be initialized either statically, with __STL_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way.  That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// Helper struct.  The template is used only so that the static data
// members can be defined in this header.
template <int __inst>
struct _STL_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.

  static unsigned __max;
  static unsigned __last;
};

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__max = _STL_mutex_spin<__inst>::__low_max;

template <int __inst>
unsigned _STL_mutex_spin<__inst>::__last = 0;

struct _STL_mutex_lock
{
#if defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile unsigned long _M_lock;
  void _M_initialize() { _M_lock = 0; }
  static void _S_nsec_sleep(int __log_nsec) {
#   ifdef __STL_SGI_THREADS
    struct timespec __ts;
    /* Max sleep is 2**27nsec ~ 130msec */
    __ts.tv_sec  = 0;
    __ts.tv_nsec = 1 << __log_nsec;
    nanosleep(&__ts, 0);
#   elif defined(__STL_WIN32THREADS)
    if (__log_nsec <= 20) {
      Sleep(0);
    } else {
      Sleep(1 << (__log_nsec - 20));
    }
#   else
#     error unimplemented
#   endif
  }
  void _M_acquire_lock() {
    volatile unsigned long* __lock = &this->_M_lock;

    if (!_Atomic_swap((unsigned long*)__lock, 1)) {
      return;
    }
    unsigned __my_spin_max = _STL_mutex_spin<0>::__max;
    unsigned __my_last_spins = _STL_mutex_spin<0>::__last;
    volatile unsigned __junk = 17;      // Value doesn't matter.
    unsigned __i;
    for (__i = 0; __i < __my_spin_max; __i++) {
      if (__i < __my_last_spins/2 || *__lock) {
        __junk *= __junk;  __junk *= __junk;
        __junk *= __junk;  __junk *= __junk;
        continue;
      }
      if (!_Atomic_swap((unsigned long*)__lock, 1)) {
        // got it!
        // Spinning worked.  Thus we're probably not being scheduled
        // against the other process with which we were contending.
        // Thus it makes sense to spin longer the next time.
        _STL_mutex_spin<0>::__last = __i;
        _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__high_max;
        return;
      }
    }
    // We are probably being scheduled against the other process.  Sleep.
    _STL_mutex_spin<0>::__max = _STL_mutex_spin<0>::__low_max;
    for (__i = 0 ;; ++__i) {
      int __log_nsec = __i + 6;
      // Exponential backoff: sleep 2**__log_nsec nsec, capped at
      // 2**27 nsec (~134 msec).
      if (__log_nsec > 27) __log_nsec = 27;
      if (!_Atomic_swap((unsigned long *)__lock, 1)) {
        return;
      }
      _S_nsec_sleep(__log_nsec);
    }
  }
  void _M_release_lock() {
    volatile unsigned long* __lock = &_M_lock;
#   if defined(__STL_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    asm("sync");
    *__lock = 0;
#   elif defined(__STL_SGI_THREADS) && __mips >= 3 \
         && (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
#   else
    *__lock = 0;
    // This is not sufficient on many multiprocessors, since
    // writes to protected variables and the lock may be reordered.
#   endif
  }

#elif defined(__STL_PTHREADS)
  pthread_mutex_t _M_lock;
  void _M_initialize()   { pthread_mutex_init(&_M_lock, NULL); }
  void _M_acquire_lock() { pthread_mutex_lock(&_M_lock); }
  void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#elif defined(__STL_UITHREADS)
  mutex_t _M_lock;
  void _M_initialize()   { mutex_init(&_M_lock, USYNC_THREAD, 0); }
  void _M_acquire_lock() { mutex_lock(&_M_lock); }
  void _M_release_lock() { mutex_unlock(&_M_lock); }
#else
  void _M_initialize() {}
  void _M_acquire_lock() {}
  void _M_release_lock() {}
#endif
};

#ifdef __STL_PTHREADS
// Pthreads locks must be statically initialized to something other than
// the default value of zero.
#   define __STL_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#elif defined(__STL_UITHREADS)
// UIthreads locks must be statically initialized to something other than
// the default value of zero.
#   define __STL_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#elif defined(__STL_SGI_THREADS) || defined(__STL_WIN32THREADS)
#   define __STL_MUTEX_INITIALIZER = { 0 }
#else
#   define __STL_MUTEX_INITIALIZER
#endif
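
// Illustrative only (not part of the original header): because
// _STL_mutex_lock is an aggregate, a lock at namespace scope is initialized
// with the macro rather than a constructor.  __my_lock is a hypothetical
// name invented for this sketch.
//
//   static _STL_mutex_lock __my_lock __STL_MUTEX_INITIALIZER;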

// A locking class that uses _STL_mutex_lock.  The constructor takes a
// reference to an _STL_mutex_lock, and acquires a lock.  The destructor
// releases the lock.  It's not clear that this is exactly the right
// functionality.  It will probably change in the future.

struct _STL_auto_lock
{
  _STL_mutex_lock& _M_lock;

  _STL_auto_lock(_STL_mutex_lock& __lock) : _M_lock(__lock)
    { _M_lock._M_acquire_lock(); }
  ~_STL_auto_lock() { _M_lock._M_release_lock(); }

private:
  void operator=(const _STL_auto_lock&);
  _STL_auto_lock(const _STL_auto_lock&);
};
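
// Illustrative only (not part of the original header): scoped use of
// _STL_auto_lock.  __my_lock and __f are hypothetical names invented for
// this sketch.
//
//   static _STL_mutex_lock __my_lock __STL_MUTEX_INITIALIZER;
//   void __f() {
//     _STL_auto_lock __guard(__my_lock);  // acquired here
//     /* ... critical section ... */
//   }                                     // released by the destructor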

__STL_END_NAMESPACE

#endif /* __SGI_STL_INTERNAL_THREADS_H */

// Local Variables:
// mode:C++
// End: