#ifndef _I386_BITOPS_H
#define _I386_BITOPS_H

/*
 * Copyright 1992, Linus Torvalds.
 */

/*
 * These have to be done with inline assembly: that way the bit-setting
 * is guaranteed to be atomic. All bit operations return 0 if the bit
 * was cleared before the operation and != 0 if it was not.
 *
 * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
 */

#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>

#ifdef CONFIG_SMP
#define LOCK_PREFIX "lock ; "
#else
#define LOCK_PREFIX ""
#endif

#define ADDR (*(volatile long *) addr)

/**
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered. See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void * addr)
{
	__asm__(
		"btsl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}
#define PLATFORM__SET_BIT
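
/*
 * Usage sketch (illustrative only, not part of the original header):
 * set_bit() when the bitmap is shared between CPUs, __set_bit() when the
 * caller already holds a lock or no one else can see the bitmap yet.
 * The bitmap name and size below are hypothetical.
 */
#if 0
static unsigned long irq_pending[2];	/* 64 hypothetical flag bits */

static void mark_pending(int irq)
{
	set_bit(irq, irq_pending);	/* atomic: safe against other CPUs */
}

static void mark_pending_early(int irq)
{
	__set_bit(irq, irq_pending);	/* non-atomic: single-threaded init only */
}
#endif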

/**
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered. However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btrl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
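
/*
 * Usage sketch (illustrative only, not part of the original header):
 * releasing a bit-based lock. Because clear_bit() is not a memory
 * barrier by itself, the barrier macro orders the critical section's
 * stores before the release. Names are hypothetical.
 */
#if 0
static unsigned long lock_word;
#define MY_LOCK_BIT 0

static void my_bit_unlock(void)
{
	smp_mb__before_clear_bit();	/* make prior stores visible first */
	clear_bit(MY_LOCK_BIT, &lock_word);
}
#endif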

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__(
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void * addr)
{
	__asm__ __volatile__( LOCK_PREFIX
		"btcl %1,%0"
		:"=m" (ADDR)
		:"Ir" (nr));
}

/**
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}
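
/*
 * Usage sketch (illustrative only, not part of the original header):
 * a trylock built on test_and_set_bit(). The returned old value tells
 * the caller whether it won the race. Names are hypothetical.
 */
#if 0
static unsigned long lock_word;
#define MY_LOCK_BIT 0

static int my_bit_trylock(void)
{
	/* old value 0 => we set the bit first and own the lock */
	return !test_and_set_bit(MY_LOCK_BIT, &lock_word);
}
#endif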

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btsl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/**
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__(
		"btrl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr));
	return oldbit;
}

/* WARNING: non atomic and it can be reordered! */
static __inline__ int __test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

/**
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__( LOCK_PREFIX
		"btcl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit),"=m" (ADDR)
		:"Ir" (nr) : "memory");
	return oldbit;
}

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static int test_bit(int nr, const volatile void * addr);
#endif

static __inline__ int constant_test_bit(int nr, const volatile void * addr)
{
	return ((1UL << (nr & 31)) & (((const volatile unsigned int *) addr)[nr >> 5])) != 0;
}

static __inline__ int variable_test_bit(int nr, volatile void * addr)
{
	int oldbit;

	__asm__ __volatile__(
		"btl %2,%1\n\tsbbl %0,%0"
		:"=r" (oldbit)
		:"m" (ADDR),"Ir" (nr));
	return oldbit;
}

#define test_bit(nr,addr) \
(__builtin_constant_p(nr) ? \
 constant_test_bit((nr),(addr)) : \
 variable_test_bit((nr),(addr)))
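
/*
 * Usage sketch (illustrative only, not part of the original header):
 * test_bit() picks the C version when @nr is a compile-time constant,
 * letting gcc fold the shift and mask, and falls back to the btl path
 * otherwise. Names are hypothetical.
 */
#if 0
static unsigned long feature_flags;
#define FEATURE_FOO 3

static int foo_enabled(void)
{
	return test_bit(FEATURE_FOO, &feature_flags);	/* constant: C path */
}

static int nth_enabled(int n)
{
	return test_bit(n, &feature_flags);		/* variable: btl path */
}
#endif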

/**
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
	int d0, d1, d2;
	int res;

	if (!size)
		return 0;
	/* This looks at memory. Mark it volatile to tell gcc not to move it around */
	__asm__ __volatile__(
		"movl $-1,%%eax\n\t"
		"xorl %%edx,%%edx\n\t"
		"repe; scasl\n\t"
		"je 1f\n\t"
		"xorl -4(%%edi),%%eax\n\t"
		"subl $4,%%edi\n\t"
		"bsfl %%eax,%%edx\n"
		"1:\tsubl %%ebx,%%edi\n\t"
		"shll $3,%%edi\n\t"
		"addl %%edi,%%edx"
		:"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
		:"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
	return res;
}
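
/*
 * Usage sketch (illustrative only, not part of the original header):
 * claiming the first free slot in a bitmap. find_first_zero_bit() is
 * only a search; to claim the slot atomically, retry the search until
 * test_and_set_bit() confirms the bit was still clear. Names and sizes
 * are hypothetical.
 */
#if 0
#define NR_SLOTS 128
static unsigned long slot_map[NR_SLOTS / 32];

static int alloc_slot(void)
{
	int slot;

	do {
		slot = find_first_zero_bit(slot_map, NR_SLOTS);
		if (slot >= NR_SLOTS)
			return -1;	/* bitmap full */
	} while (test_and_set_bit(slot, slot_map));	/* lost the race: retry */
	return slot;
}
#endif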

/**
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @size: The maximum size to search
 * @offset: The bit number to start searching at
 */
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
	unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;

	if (bit) {
		/*
		 * Look for zero in the first 32-bit word
		 */
		__asm__("bsfl %1,%0\n\t"
			"jne 1f\n\t"
			"movl $32, %0\n"
			"1:"
			: "=r" (set)
			: "r" (~(*p >> bit)));
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}
	/*
	 * No zero yet, search the remaining full words for a zero
	 */
	res = find_first_zero_bit (p, size - 32 * (p - (unsigned long *) addr));
	return (offset + set + res);
}
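
/*
 * Usage sketch (illustrative only, not part of the original header):
 * walking every zero bit in a bitmap by restarting the search just past
 * the previous hit. Names are hypothetical.
 */
#if 0
static void report_free_slots(unsigned long *map, int nbits)
{
	int bit = find_first_zero_bit(map, nbits);

	while (bit < nbits) {
		/* ... use the free bit number ... */
		bit = find_next_zero_bit(map, nbits, bit + 1);
	}
}
#endif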

/**
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	__asm__("bsfl %1,%0"
		:"=r" (word)
		:"r" (~word));
	return word;
}
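
/*
 * Illustration (not part of the original header): ffz(0x0000ffff) is 16,
 * since bits 0..15 are set and bit 16 is the first zero. A word of all
 * ones has no zero bit, hence the ~0UL check the comment above asks for:
 */
#if 0
static int first_free(unsigned long mask)
{
	if (mask == ~0UL)
		return -1;	/* no zero bit to find */
	return ffz(mask);
}
#endif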

#ifdef __KERNEL__

/**
 * __ffs - find first set bit in word
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	__asm__("rep; bsf %1,%0"
		: "=r" (word)
		: "rm" (word));
	return word;
}

/**
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static __inline__ int ffs(int x)
{
	int r;

	__asm__("bsfl %1,%0\n\t"
		"jnz 1f\n\t"
		"movl $-1,%0\n"
		"1:" : "=r" (r) : "rm" (x));
	return r+1;
}
#define PLATFORM_FFS
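
/*
 * Illustration (not part of the original header): ffs() is 1-based and
 * defined at zero, unlike __ffs(): ffs(0) == 0, ffs(1) == 1, and
 * ffs(0x8) == 4, while __ffs(0x8) == 3 and __ffs(0) is undefined. A
 * hypothetical caller converting back to a 0-based bit number:
 */
#if 0
static int lowest_pending(int pending)
{
	return pending ? ffs(pending) - 1 : -1;
}
#endif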

static inline int __ilog2(unsigned int x)
{
	return generic_fls(x) - 1;
}

/**
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
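
/*
 * Illustration (not part of the original header): hweight8(0xf0) == 4
 * and hweight32(0x80000001) == 2; each macro counts the set bits in a
 * value of the corresponding width.
 */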

#endif /* __KERNEL__ */

#ifdef __KERNEL__

#define ext2_set_bit			__test_and_set_bit
#define ext2_clear_bit			__test_and_clear_bit
#define ext2_test_bit			test_bit
#define ext2_find_first_zero_bit	find_first_zero_bit
#define ext2_find_next_zero_bit		find_next_zero_bit

/* Bitmap functions for the minix filesystem. */
#define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) __set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _I386_BITOPS_H */