atomic_2.h

// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : private __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    atomic_flag(bool __i) { _M_i = __i; } // XXX deleted copy ctor != agg

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
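
  // Illustrative usage sketch (not part of the original header): test_and_set
  // and clear give atomic_flag the shape of a minimal spin lock.  The names
  // __my_lock and __my_critical_section below are hypothetical.
  //
  //   __atomic2::atomic_flag __my_lock(false);
  //
  //   void __my_critical_section()
  //   {
  //     while (__my_lock.test_and_set(memory_order_acquire))
  //       { }                            // spin until the previous owner clears
  //     // ... exclusive work here ...
  //     __my_lock.clear(memory_order_release);
  //   }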


  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return true; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __d); }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) // XXX volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };
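
  // Illustrative usage sketch (not part of the original header): installing a
  // pointer exactly once with compare_exchange_strong.  The names __my_slot
  // and __my_install below are hypothetical.
  //
  //   __atomic2::atomic_address __my_slot(0);
  //
  //   bool __my_install(void* __p)
  //   {
  //     void* __expected = 0;
  //     // Succeeds only for the first caller; later callers find the
  //     // already-installed pointer left in __expected.
  //     return __my_slot.compare_exchange_strong(__expected, __p);
  //   }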

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp  __integral_type;

      __integral_type   _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      // Requires __integral_type convertible to _M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) // XXX volatile
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __integral_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __integral_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __integral_type __i1o = __i1;
        __integral_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
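
  // Illustrative usage sketch (not part of the original header): the atomic
  // integral types listed above wrap __atomic_base<T> the same way atomic_bool
  // below wraps __atomic_base<bool>, so fetch_add and the compound operators
  // map directly onto the __sync builtins.  The names __my_hits and
  // __my_record_hit below are hypothetical.
  //
  //   __atomic2::__atomic_base<unsigned long> __my_hits(0);
  //
  //   void __my_record_hit()
  //   {
  //     // Atomic read-modify-write; the return value (ignored here) is the
  //     // counter's value just before the increment.
  //     __my_hits.fetch_add(1);
  //   }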


  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i) // XXX volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
} // namespace __atomic2

// _GLIBCXX_END_NAMESPACE

#endif
