 * Copyright (C) 2006 iptelorg GmbH
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 * \brief SIP-router core :: Atomic operations and memory barriers
 *
 * \page atomicops Atomic operations and memory barriers
 *
 * WARNING: atomic ops do not include memory barriers
 *  void membar()         - memory barrier (load & store)
 *  void membar_read()    - load (read) memory barrier
 *  void membar_write()   - store (write) memory barrier
 *  void membar_depends() - read depends memory barrier, needed before using
 *                          the contents of a pointer (for now it is needed
 *                          only on Alpha, so on all other CPUs it will be a
 *                          no-op). For more info see:
 *                          http://lse.sourceforge.net/locking/wmbdd.html
 *                          http://www.linuxjournal.com/article/8212
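 *                          Illustrative sketch (not code from this file;
 *                          "shared_ptr", "struct item" and "x" are made-up
 *                          names): a reader that dereferences a pointer
 *                          published by another CPU should issue
 *                          membar_depends() between loading the pointer and
 *                          reading what it points to:
 *                              struct item* p;
 *                              p = shared_ptr;    // load the published pointer
 *                              membar_depends();  // order the dependent reads
 *                              x = p->value;      // now safe to use the contents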
 *  void membar_enter_lock() - memory barrier function that should be
 *                             called after a lock operation (where lock is
 *                             an asm inline function that uses an atomic store
 *                             operation on the lock var.). It is at most
 *                             a StoreStore|StoreLoad barrier, but could also
 *                             be empty if an atomic op implies a memory
 *                             barrier on the specific architecture.
 *                             Example: raw_lock(l); membar_enter_lock(); ...
 *  void membar_leave_lock() - memory barrier function that should be called
 *                             before an unlock operation (where unlock is an
 *                             asm inline function that uses at least an atomic
 *                             store on the lock var.). It is at most a
 *                             LoadStore|StoreStore barrier (but could also be
 *                             empty).
 *                             Example: raw_lock(l); membar_enter_lock(); ...
 *                                      ... critical section ...
 *                                      membar_leave_lock(); raw_unlock(l);
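 *  A hedged sketch of how such a lock could be built on top of the atomic ops
 *  documented below (my_lock()/my_unlock() are hypothetical helpers, not part
 *  of this API; ser's own locks already include the needed barriers):
 *      void my_lock(volatile int* l)
 *      {
 *          while (atomic_get_and_set_int(l, 1))  // spin until the old value
 *              ;                                 // was 0 (lock was free)
 *          membar_enter_lock();                  // nothing from the critical
 *      }                                         // section may move before this
 *
 *      void my_unlock(volatile int* l)
 *      {
 *          membar_leave_lock();                  // critical-section stores must
 *          atomic_set_int(l, 0);                 // be visible before the release
 *      }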
 *  void membar_atomic_op() - memory barrier that should be called if a memory
 *                            barrier is needed immediately after or
 *                            immediately before an atomic operation
 *                            (for example: atomic_inc(&i); membar_atomic_op()
 *                            instead of atomic_inc(&i); membar()).
 *                            atomic_op means every atomic operation except get
 *                            and set (for them use membar_atomic_setget()).
 *                            Using membar_atomic_op() instead of membar() in
 *                            these cases will generate faster code on some
 *                            architectures (for now x86 and x86_64), where
 *                            atomic operations also act as memory barriers.
 *                            Note that mb_atomic_<OP>(...) is equivalent to
 *                            membar_atomic_op(); atomic_<OP>(...) and in this
 *                            case the first form is preferred.
 *  void membar_atomic_setget() - same as above, but for atomic_set and
 *                                atomic_get (and not for any other atomic op.,
 *                                including atomic_get_and_set; for them use
 *                                membar_atomic_op()).
 *                                Note that mb_atomic_{get,set}(&i) is
 *                                equivalent to, and preferred over,
 *                                membar_atomic_setget(); atomic_{get,set}(&i)
 *                                (it will generate faster code on x86 and
 *                                x86_64).
 *  void membar_read_atomic_op()      - like membar_atomic_op(), but acts only
 *                                      as a read barrier.
 *  void membar_read_atomic_setget()  - like membar_atomic_setget(), but acts
 *                                      only as a read barrier.
 *  void membar_write_atomic_op()     - like membar_atomic_op(), but acts only
 *                                      as a write barrier.
 *  void membar_write_atomic_setget() - like membar_atomic_setget(), but acts
 *                                      only as a write barrier.
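 *  Illustrative sketch of the membar_atomic_op() pattern ("jobs_done" and
 *  "ready" are made-up atomic_t variables, not part of this API):
 *      atomic_inc(&jobs_done);   // atomic op immediately followed by ...
 *      membar_atomic_op();       // ... the cheaper barrier form; on x86/x86_64
 *      atomic_set(&ready, 1);    // this expands to nothing, because atomic_inc
 *                                // already acts as a full barrier there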
 * Note: - properly using memory barriers is tricky; in general try not to
 *         depend on them. Locks include memory barriers, so you don't need
 *         them for writes/loads already protected by locks.
 *       - membar_enter_lock() and membar_leave_lock() are needed only if
 *         you implement your own locks using atomic ops (ser locks have the
 *         needed memory barriers included).
 * atomic operations (on the atomic_t type), not including memory barriers:
 *
 *  void atomic_set(atomic_t* v, int i)      - v->val=i
 *  int  atomic_get(atomic_t* v)             - return v->val
 *  int  atomic_get_and_set(atomic_t* v, i)  - return old v->val, v->val=i
 *  void atomic_inc(atomic_t* v)
 *  void atomic_dec(atomic_t* v)
 *  int  atomic_inc_and_test(atomic_t* v)    - returns 1 if the result is 0
 *  int  atomic_dec_and_test(atomic_t* v)    - returns 1 if the result is 0
 *  void atomic_or (atomic_t* v, int mask)   - v->val|=mask
 *  void atomic_and(atomic_t* v, int mask)   - v->val&=mask
 *  int  atomic_add(atomic_t* v, int i)      - v->val+=i; return v->val
 *  int  atomic_cmpxchg(atomic_t* v, o, n)   - r=v->val; if (r==o) v->val=n;
 *                                             return r (old value)
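 *  Typical use: a shared reference counter (illustrative sketch; "refcnt",
 *  "obj" and free_obj() are made-up names, not part of this API):
 *      atomic_t refcnt;
 *
 *      atomic_set(&refcnt, 1);            // creator holds the first reference
 *      atomic_inc(&refcnt);               // every new user takes a reference
 *      ...
 *      if (atomic_dec_and_test(&refcnt))  // returns 1 when the counter
 *          free_obj(obj);                 // reaches 0: the last user frees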
 * same ops, but with built-in memory barriers:
 *
 *  void mb_atomic_set(atomic_t* v, int i)      - v->val=i
 *  int  mb_atomic_get(atomic_t* v)             - return v->val
 *  int  mb_atomic_get_and_set(atomic_t* v, i)  - return old v->val, v->val=i
 *  void mb_atomic_inc(atomic_t* v)
 *  void mb_atomic_dec(atomic_t* v)
 *  int  mb_atomic_inc_and_test(atomic_t* v)    - returns 1 if the result is 0
 *  int  mb_atomic_dec_and_test(atomic_t* v)    - returns 1 if the result is 0
 *  void mb_atomic_or(atomic_t* v, int mask)    - v->val|=mask
 *  void mb_atomic_and(atomic_t* v, int mask)   - v->val&=mask
 *  int  mb_atomic_add(atomic_t* v, int i)      - v->val+=i; return v->val
 *  int  mb_atomic_cmpxchg(atomic_t* v, o, n)   - r=v->val; if (r==o) v->val=n;
 *                                                return r (old value)
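 *  Illustrative sketch of a flag handoff between two processes ("data",
 *  "done", sleep_a_bit() and use() are made-up names); the built-in barriers
 *  make separate membar_*() calls unnecessary here:
 *      // writer
 *      data->count = n;               // fill in the shared data first
 *      mb_atomic_set(&done, 1);       // then publish the flag (barrier included)
 *
 *      // reader
 *      while (!mb_atomic_get(&done))  // barrier included: once the flag is
 *          sleep_a_bit();             // seen, the data writes are visible too
 *      use(data);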
 * The same operations are available for int and long. The functions are named
 * according to the following rules:
 *  - an int or long suffix is added to the corresponding atomic function name
 *  - volatile int* or volatile long* replaces atomic_t* in the function
 *    declarations
 *  - long and int replace the parameter type (if the function has an extra
 *    parameter) and the return value
 *  Examples:
 *    long atomic_get_long(volatile long* v)
 *    int  atomic_get_int (volatile int* v)
 *    long atomic_get_and_set_long(volatile long* v, long l)
 *    int  atomic_get_and_set_int (volatile int* v, int i)
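 *  Illustrative sketch using the int variants on a plain shared counter
 *  ("active_workers" and notify_idle() are made-up names, not part of this
 *  API):
 *      volatile int active_workers = 0;
 *
 *      atomic_inc_int(&active_workers);               // a worker starts
 *      ...
 *      if (atomic_dec_and_test_int(&active_workers))  // last worker finished
 *          notify_idle();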
 * Config defines:
 *   CC_GCC_LIKE_ASM - the compiler supports gcc-style inline asm
 *   NOSMP           - the code will be a little faster, but not SMP safe
 *   __CPU_i386, __CPU_x86_64, X86_OOSTORE        - see atomic/atomic_x86.h
 *   __CPU_mips, __CPU_mips2, __CPU_mips64, MIPS_HAS_LLSC
 *                                                - see atomic/atomic_mips2.h
 *   __CPU_ppc, __CPU_ppc64                       - see atomic/atomic_ppc.h
 *   __CPU_sparc                                  - see atomic/atomic_sparc.h
 *   __CPU_sparc64, SPARC64_MODE                  - see atomic/atomic_sparc64.h
 *   __CPU_arm, __CPU_arm6                        - see atomic/atomic_arm.h
 *   __CPU_alpha                                  - see atomic/atomic_alpha.h
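 *  A simplified sketch of the kind of compile-time selection these defines
 *  drive (the real logic lives in atomic/atomic_native.h and may differ in
 *  detail):
 *      #ifdef CC_GCC_LIKE_ASM
 *      #  if defined __CPU_i386 || defined __CPU_x86_64
 *      #    include "atomic/atomic_x86.h"      // native atomic ops + membars
 *      #  elif defined __CPU_ppc || defined __CPU_ppc64
 *      #    include "atomic/atomic_ppc.h"
 *      #  endif
 *      #endif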
 * History:
 * --------
 *  2006-03-08  created by andrei
 *  2007-05-13  moved some of the decl. and includes into atomic_common.h and
 *              atomic_native.h (andrei)
#include "atomic/atomic_common.h"

#include "atomic/atomic_native.h"

/*! \brief if no native operations, emulate them using locks */
#if ! defined HAVE_ASM_INLINE_ATOMIC_OPS || ! defined HAVE_ASM_INLINE_MEMBAR

#include "atomic/atomic_unknown.h"
#endif /* ! HAVE_ASM_INLINE_ATOMIC_OPS || ! HAVE_ASM_INLINE_MEMBAR */
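/* Conceptually, the lock-based fallback in atomic/atomic_unknown.h turns each
 * atomic op into "lock; plain C operation; unlock". Illustrative sketch of the
 * idea only (names and details are made up, not the actual implementation):
 *
 *     static gen_lock_t* _atomic_lock;    // one global lock (hypothetical)
 *
 *     int emu_atomic_add(atomic_t* v, int i)
 *     {
 *         int r;
 *         lock_get(_atomic_lock);         // serialize all "atomic" ops
 *         v->val += i;
 *         r = v->val;
 *         lock_release(_atomic_lock);
 *         return r;
 *     }
 */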