#include <linux/types.h>
#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
#define ATOMIC_INIT(i) { (i) }
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
-
#endif
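The four smp_mb__{before,after}_atomic_{dec,inc}() macros removed above are
superseded by the generic smp_mb__before_atomic()/smp_mb__after_atomic()
pair, which is why <asm/barrier.h> is now pulled in at the top of the
header. As a rough sketch (assuming the asm-generic fallbacks; an
architecture that needs something weaker or stronger can override them in
its own asm/barrier.h):

	/* include/asm-generic/barrier.h fallback definitions (sketch) */
	#ifndef smp_mb__before_atomic
	#define smp_mb__before_atomic()	smp_mb()
	#endif

	#ifndef smp_mb__after_atomic
	#define smp_mb__after_atomic()	smp_mb()
	#endif

Callers convert one-for-one; the object and field names here are
illustrative only:

	atomic_dec(&obj->refcount);
	smp_mb__after_atomic();		/* was: smp_mb__after_atomic_dec() */

The next hunk makes the matching change in the bitops header.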
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
+#include <asm/barrier.h>
#ifdef __KERNEL__
-#define smp_mb__before_clear_bit() barrier()
-#define smp_mb__after_clear_bit() barrier()
-
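smp_mb__{before,after}_clear_bit() is retired the same way: callers now
pair clear_bit() and friends with the generic smp_mb__before_atomic() and
smp_mb__after_atomic(). A minimal caller-side sketch, with a made-up bit
name and flag word:

	/* release-style clear of a "pending" flag (illustrative names) */
	smp_mb__before_atomic();	/* was: smp_mb__before_clear_bit() */
	clear_bit(MY_PENDING_BIT, &my_flags);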
/*
* The offset calculations for these are based on BITS_PER_LONG == 32
* (i.e. I get to shift by #5-2 (32 bits per long, 4 bytes per access),