Skip to content

Commit 0e86283

Browse files
alobakin authored and YuryNorov committed
bitops: unify non-atomic bitops prototypes across architectures
Currently, there is a mess with the prototypes of the non-atomic bitops across the different architectures:

  ret   bool, int, unsigned long
  nr    int, long, unsigned int, unsigned long
  addr  volatile unsigned long *, volatile void *

Thankfully, it doesn't provoke any bugs, but can sometimes make the compiler angry when it's not handy at all.

Adjust all the prototypes to the following standard:

  ret   bool                      retval can be only 0 or 1
  nr    unsigned long             native; signed makes no sense
  addr  volatile unsigned long *  bitmaps are arrays of ulongs

Next, some architectures don't define 'arch_' versions as they don't support instrumentation, others do. To make sure there is always the same set of callables present and to ease any potential future changes, make them all follow the rule:

 * architecture-specific files define only 'arch_' versions;
 * non-prefixed versions can be defined only in asm-generic files;

and place the non-prefixed definitions into a new file in asm-generic to be included by non-instrumented architectures.

Finally, add some static assertions in order to prevent people from making a mess in this room again.

I also used the __always_inline attribute consistently, so that they always get resolved to the actual operations.

Suggested-by: Andy Shevchenko <[email protected]> Signed-off-by: Alexander Lobakin <[email protected]> Acked-by: Mark Rutland <[email protected]> Reviewed-by: Yury Norov <[email protected]> Reviewed-by: Andy Shevchenko <[email protected]> Signed-off-by: Yury Norov <[email protected]>
1 parent 21bb8af commit 0e86283

File tree

13 files changed

+229
-150
lines changed

13 files changed

+229
-150
lines changed

arch/alpha/include/asm/bitops.h

Lines changed: 17 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -46,8 +46,8 @@ set_bit(unsigned long nr, volatile void * addr)
4646
/*
4747
* WARNING: non atomic version.
4848
*/
49-
static inline void
50-
__set_bit(unsigned long nr, volatile void * addr)
49+
static __always_inline void
50+
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
5151
{
5252
int *m = ((int *) addr) + (nr >> 5);
5353

@@ -82,8 +82,8 @@ clear_bit_unlock(unsigned long nr, volatile void * addr)
8282
/*
8383
* WARNING: non atomic version.
8484
*/
85-
static __inline__ void
86-
__clear_bit(unsigned long nr, volatile void * addr)
85+
static __always_inline void
86+
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
8787
{
8888
int *m = ((int *) addr) + (nr >> 5);
8989

@@ -94,7 +94,7 @@ static inline void
9494
__clear_bit_unlock(unsigned long nr, volatile void * addr)
9595
{
9696
smp_mb();
97-
__clear_bit(nr, addr);
97+
arch___clear_bit(nr, addr);
9898
}
9999

100100
static inline void
@@ -118,8 +118,8 @@ change_bit(unsigned long nr, volatile void * addr)
118118
/*
119119
* WARNING: non atomic version.
120120
*/
121-
static __inline__ void
122-
__change_bit(unsigned long nr, volatile void * addr)
121+
static __always_inline void
122+
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
123123
{
124124
int *m = ((int *) addr) + (nr >> 5);
125125

@@ -186,8 +186,8 @@ test_and_set_bit_lock(unsigned long nr, volatile void *addr)
186186
/*
187187
* WARNING: non atomic version.
188188
*/
189-
static inline int
190-
__test_and_set_bit(unsigned long nr, volatile void * addr)
189+
static __always_inline bool
190+
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
191191
{
192192
unsigned long mask = 1 << (nr & 0x1f);
193193
int *m = ((int *) addr) + (nr >> 5);
@@ -230,8 +230,8 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
230230
/*
231231
* WARNING: non atomic version.
232232
*/
233-
static inline int
234-
__test_and_clear_bit(unsigned long nr, volatile void * addr)
233+
static __always_inline bool
234+
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
235235
{
236236
unsigned long mask = 1 << (nr & 0x1f);
237237
int *m = ((int *) addr) + (nr >> 5);
@@ -272,8 +272,8 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
272272
/*
273273
* WARNING: non atomic version.
274274
*/
275-
static __inline__ int
276-
__test_and_change_bit(unsigned long nr, volatile void * addr)
275+
static __always_inline bool
276+
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
277277
{
278278
unsigned long mask = 1 << (nr & 0x1f);
279279
int *m = ((int *) addr) + (nr >> 5);
@@ -283,8 +283,8 @@ __test_and_change_bit(unsigned long nr, volatile void * addr)
283283
return (old & mask) != 0;
284284
}
285285

286-
static inline int
287-
test_bit(int nr, const volatile void * addr)
286+
static __always_inline bool
287+
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
288288
{
289289
return (1UL & (((const int *) addr)[nr >> 5] >> (nr & 31))) != 0UL;
290290
}
@@ -450,6 +450,8 @@ sched_find_first_bit(const unsigned long b[2])
450450
return __ffs(tmp) + ofs;
451451
}
452452

453+
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
454+
453455
#include <asm-generic/bitops/le.h>
454456

455457
#include <asm-generic/bitops/ext2-atomic-setbit.h>

arch/hexagon/include/asm/bitops.h

Lines changed: 15 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -127,38 +127,45 @@ static inline void change_bit(int nr, volatile void *addr)
127127
* be atomic, particularly for things like slab_lock and slab_unlock.
128128
*
129129
*/
130-
static inline void __clear_bit(int nr, volatile unsigned long *addr)
130+
static __always_inline void
131+
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
131132
{
132133
test_and_clear_bit(nr, addr);
133134
}
134135

135-
static inline void __set_bit(int nr, volatile unsigned long *addr)
136+
static __always_inline void
137+
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
136138
{
137139
test_and_set_bit(nr, addr);
138140
}
139141

140-
static inline void __change_bit(int nr, volatile unsigned long *addr)
142+
static __always_inline void
143+
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
141144
{
142145
test_and_change_bit(nr, addr);
143146
}
144147

145148
/* Apparently, at least some of these are allowed to be non-atomic */
146-
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
149+
static __always_inline bool
150+
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
147151
{
148152
return test_and_clear_bit(nr, addr);
149153
}
150154

151-
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
155+
static __always_inline bool
156+
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
152157
{
153158
return test_and_set_bit(nr, addr);
154159
}
155160

156-
static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
161+
static __always_inline bool
162+
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
157163
{
158164
return test_and_change_bit(nr, addr);
159165
}
160166

161-
static inline int __test_bit(int nr, const volatile unsigned long *addr)
167+
static __always_inline bool
168+
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
162169
{
163170
int retval;
164171

@@ -172,8 +179,6 @@ static inline int __test_bit(int nr, const volatile unsigned long *addr)
172179
return retval;
173180
}
174181

175-
#define test_bit(nr, addr) __test_bit(nr, addr)
176-
177182
/*
178183
* ffz - find first zero in word.
179184
* @word: The word to search
@@ -271,6 +276,7 @@ static inline unsigned long __fls(unsigned long word)
271276
}
272277

273278
#include <asm-generic/bitops/lock.h>
279+
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
274280

275281
#include <asm-generic/bitops/fls64.h>
276282
#include <asm-generic/bitops/sched.h>

arch/ia64/include/asm/bitops.h

Lines changed: 22 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -53,16 +53,16 @@ set_bit (int nr, volatile void *addr)
5353
}
5454

5555
/**
56-
* __set_bit - Set a bit in memory
56+
* arch___set_bit - Set a bit in memory
5757
* @nr: the bit to set
5858
* @addr: the address to start counting from
5959
*
6060
* Unlike set_bit(), this function is non-atomic and may be reordered.
6161
* If it's called on the same region of memory simultaneously, the effect
6262
* may be that only one operation succeeds.
6363
*/
64-
static __inline__ void
65-
__set_bit (int nr, volatile void *addr)
64+
static __always_inline void
65+
arch___set_bit(unsigned long nr, volatile unsigned long *addr)
6666
{
6767
*((__u32 *) addr + (nr >> 5)) |= (1 << (nr & 31));
6868
}
@@ -135,16 +135,16 @@ __clear_bit_unlock(int nr, void *addr)
135135
}
136136

137137
/**
138-
* __clear_bit - Clears a bit in memory (non-atomic version)
138+
* arch___clear_bit - Clears a bit in memory (non-atomic version)
139139
* @nr: the bit to clear
140140
* @addr: the address to start counting from
141141
*
142142
* Unlike clear_bit(), this function is non-atomic and may be reordered.
143143
* If it's called on the same region of memory simultaneously, the effect
144144
* may be that only one operation succeeds.
145145
*/
146-
static __inline__ void
147-
__clear_bit (int nr, volatile void *addr)
146+
static __always_inline void
147+
arch___clear_bit(unsigned long nr, volatile unsigned long *addr)
148148
{
149149
*((__u32 *) addr + (nr >> 5)) &= ~(1 << (nr & 31));
150150
}
@@ -175,16 +175,16 @@ change_bit (int nr, volatile void *addr)
175175
}
176176

177177
/**
178-
* __change_bit - Toggle a bit in memory
178+
* arch___change_bit - Toggle a bit in memory
179179
* @nr: the bit to toggle
180180
* @addr: the address to start counting from
181181
*
182182
* Unlike change_bit(), this function is non-atomic and may be reordered.
183183
* If it's called on the same region of memory simultaneously, the effect
184184
* may be that only one operation succeeds.
185185
*/
186-
static __inline__ void
187-
__change_bit (int nr, volatile void *addr)
186+
static __always_inline void
187+
arch___change_bit(unsigned long nr, volatile unsigned long *addr)
188188
{
189189
*((__u32 *) addr + (nr >> 5)) ^= (1 << (nr & 31));
190190
}
@@ -224,16 +224,16 @@ test_and_set_bit (int nr, volatile void *addr)
224224
#define test_and_set_bit_lock test_and_set_bit
225225

226226
/**
227-
* __test_and_set_bit - Set a bit and return its old value
227+
* arch___test_and_set_bit - Set a bit and return its old value
228228
* @nr: Bit to set
229229
* @addr: Address to count from
230230
*
231231
* This operation is non-atomic and can be reordered.
232232
* If two examples of this operation race, one can appear to succeed
233233
* but actually fail. You must protect multiple accesses with a lock.
234234
*/
235-
static __inline__ int
236-
__test_and_set_bit (int nr, volatile void *addr)
235+
static __always_inline bool
236+
arch___test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
237237
{
238238
__u32 *p = (__u32 *) addr + (nr >> 5);
239239
__u32 m = 1 << (nr & 31);
@@ -269,16 +269,16 @@ test_and_clear_bit (int nr, volatile void *addr)
269269
}
270270

271271
/**
272-
* __test_and_clear_bit - Clear a bit and return its old value
272+
* arch___test_and_clear_bit - Clear a bit and return its old value
273273
* @nr: Bit to clear
274274
* @addr: Address to count from
275275
*
276276
* This operation is non-atomic and can be reordered.
277277
* If two examples of this operation race, one can appear to succeed
278278
* but actually fail. You must protect multiple accesses with a lock.
279279
*/
280-
static __inline__ int
281-
__test_and_clear_bit(int nr, volatile void * addr)
280+
static __always_inline bool
281+
arch___test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
282282
{
283283
__u32 *p = (__u32 *) addr + (nr >> 5);
284284
__u32 m = 1 << (nr & 31);
@@ -314,14 +314,14 @@ test_and_change_bit (int nr, volatile void *addr)
314314
}
315315

316316
/**
317-
* __test_and_change_bit - Change a bit and return its old value
317+
* arch___test_and_change_bit - Change a bit and return its old value
318318
* @nr: Bit to change
319319
* @addr: Address to count from
320320
*
321321
* This operation is non-atomic and can be reordered.
322322
*/
323-
static __inline__ int
324-
__test_and_change_bit (int nr, void *addr)
323+
static __always_inline bool
324+
arch___test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
325325
{
326326
__u32 old, bit = (1 << (nr & 31));
327327
__u32 *m = (__u32 *) addr + (nr >> 5);
@@ -331,8 +331,8 @@ __test_and_change_bit (int nr, void *addr)
331331
return (old & bit) != 0;
332332
}
333333

334-
static __inline__ int
335-
test_bit (int nr, const volatile void *addr)
334+
static __always_inline bool
335+
arch_test_bit(unsigned long nr, const volatile unsigned long *addr)
336336
{
337337
return 1 & (((const volatile __u32 *) addr)[nr >> 5] >> (nr & 31));
338338
}
@@ -443,6 +443,8 @@ static __inline__ unsigned long __arch_hweight64(unsigned long x)
443443

444444
#ifdef __KERNEL__
445445

446+
#include <asm-generic/bitops/non-instrumented-non-atomic.h>
447+
446448
#include <asm-generic/bitops/le.h>
447449

448450
#include <asm-generic/bitops/ext2-atomic-setbit.h>

0 commit comments

Comments (0)