/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */

#ifdef __KERNEL__

#include <asm/sgidefs.h>
#include <asm/system.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/__ffs.h>
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

/*
 * Only disable interrupts for kernel mode stuff to keep usermode stuff
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_cli()			__cli()
#define __bi_save_flags(x)		__save_flags(x)
#define __bi_save_and_cli(x)		__save_and_cli(x)
#define __bi_restore_flags(x)		__restore_flags(x)

#else

#define __bi_flags
#define __bi_cli()
#define __bi_save_flags(x)
#define __bi_save_and_cli(x)
#define __bi_restore_flags(x)

#endif /* __KERNEL__ */
#ifdef CONFIG_CPU_HAS_LLSC

#include <asm/mipsregs.h>

/*
 * These functions for MIPS ISA > 1 are SMP safe and interrupt friendly.
 */
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# set_bit\n\t"
		"or\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);

	*m |= 1UL << (nr & 31);
}
#define PLATFORM__SET_BIT
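
/*
 * Usage sketch (illustrative, not part of the original header): set_bit()
 * may be used when several CPUs or interrupt handlers touch the same word,
 * while __set_bit() is only safe when the caller already serialises access.
 * The bitmap name below is an assumption made for the example.
 */
#if 0	/* example only, never compiled */
static unsigned long example_map[2];		/* 64-bit example bitmap */

static void example_mark_busy(int nr)
{
	set_bit(nr, example_map);		/* atomic read-modify-write */
}

static void example_init(void)
{
	/* Single-threaded init path: the cheaper non-atomic variant is fine. */
	__set_bit(0, example_map);
}
#endif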
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void
clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# clear_bit\n\t"
		"and\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b\n\t"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & 0x1f))), "m" (*m));
}
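
/*
 * Usage sketch (illustrative, not part of the original header): because
 * clear_bit() carries no memory barrier, an unlock-style use has to add one
 * explicitly, as the comment above describes.  The names below are
 * assumptions made for the example.
 */
#if 0	/* example only, never compiled */
static unsigned long example_lock_word;

static void example_unlock(void)
{
	smp_mb__before_clear_bit();	/* order prior stores before the release */
	clear_bit(0, &example_lock_word);
}
#endif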
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void
change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp;

	__asm__ __volatile__(
		"1:\tll\t%0, %1\t\t# change_bit\n\t"
		"xor\t%0, %2\n\t"
		"sc\t%0, %1\n\t"
		"beqz\t%0, 1b"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & 0x1f)), "m" (*m));
}
/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_set_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_set_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
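
/*
 * Usage sketch (illustrative, not part of the original header): the returned
 * old value makes test_and_set_bit() usable as a simple "claim" primitive.
 * The flag word and bit number below are assumptions made for the example.
 */
#if 0	/* example only, never compiled */
static unsigned long example_flags;
#define EXAMPLE_BUSY	0		/* hypothetical flag bit */

static int example_try_claim(void)
{
	if (test_and_set_bit(EXAMPLE_BUSY, &example_flags))
		return 0;		/* bit was already set: someone else owns it */
	return 1;			/* we set it first: claim succeeded */
}
#endif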
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_clear_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_clear_bit\n"
		"1:\tll\t%0, %1\n\t"
		"or\t%2, %0, %3\n\t"
		"xor\t%2, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int
test_and_change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);
	unsigned long temp, res;

	__asm__ __volatile__(
		".set\tnoreorder\t\t# test_and_change_bit\n"
		"1:\tll\t%0, %1\n\t"
		"xor\t%2, %0, %3\n\t"
		"sc\t%2, %1\n\t"
		"beqz\t%2, 1b\n\t"
		" and\t%2, %0, %3\n\t"
		".set\treorder"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & 0x1f)), "m" (*m)
		: "memory");

	return res != 0;
}
/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}
#else /* MIPS I */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a |= mask;
	__bi_restore_flags(flags);
}
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	*a |= mask;
}
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static __inline__ void clear_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a &= ~mask;
	__bi_restore_flags(flags);
}
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static __inline__ void change_bit(int nr, volatile void *addr)
{
	int mask;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	*a ^= mask;
	__bi_restore_flags(flags);
}
/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to toggle
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static __inline__ void __change_bit(int nr, volatile void *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> 5);

	*m ^= 1UL << (nr & 31);
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	__bi_restore_flags(flags);

	return retval;
}
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	__bi_restore_flags(flags);

	return retval;
}
/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a &= ~mask;

	return retval;
}
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static __inline__ int test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;
	__bi_flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	__bi_save_and_cli(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	__bi_restore_flags(flags);

	return retval;
}
/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static __inline__ int __test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile int *a = addr;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	retval = (mask & *a) != 0;
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_cli
#undef __bi_save_flags
#undef __bi_restore_flags

#endif /* MIPS I */
/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static __inline__ int test_bit(int nr, const volatile void *addr)
{
	return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
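
/*
 * Usage sketch (illustrative, not part of the original header): test_bit()
 * only reads the word containing bit @nr and gives no ordering guarantees.
 * The bitmap name below is an assumption made for the example.
 */
#if 0	/* example only, never compiled */
static unsigned long example_status[4];		/* 128-bit example bitmap */

static int example_is_online(int nr)
{
	return test_bit(nr, example_status);
}
#endif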
#ifndef __MIPSEB__

/* Little endian versions. */

/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static __inline__ int find_first_zero_bit(void *addr, unsigned size)
{
	unsigned long dummy;
	int res;

	if (!size)
		return 0;

	__asm__(".set\tnoreorder\n\t"
		".set\tnoat\n"
		"1:\tsubu\t$1,%6,%0\n\t"
		"blez\t$1,2f\n\t"
		"lw\t$1,(%5)\n\t"
		"addiu\t%5,4\n\t"
#if (_MIPS_ISA == _MIPS_ISA_MIPS2 ) || (_MIPS_ISA == _MIPS_ISA_MIPS3 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS4 ) || (_MIPS_ISA == _MIPS_ISA_MIPS5 ) || \
    (_MIPS_ISA == _MIPS_ISA_MIPS32) || (_MIPS_ISA == _MIPS_ISA_MIPS64)
		"beql\t%1,$1,1b\n\t"
		"addiu\t%0,32\n\t"
#else
		"addiu\t%0,32\n\t"
		"beq\t%1,$1,1b\n\t"
		"nop\n\t"
		"subu\t%0,32\n\t"
#endif
#ifdef __MIPSEB__
#error "Fix this for big endian"
#endif /* __MIPSEB__ */
		"li\t%1,1\n"
		"1:\tand\t%2,$1,%1\n\t"
		"beqz\t%2,2f\n\t"
		"sll\t%1,%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"add\t%0,%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:"
		: "=r" (res), "=r" (dummy), "=r" (addr)
		: "0" ((signed int) 0), "1" ((unsigned int) 0xffffffff),
		  "2" (addr), "r" (size)
		: "$1");

	return res;
}
/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned int *p = ((unsigned int *) addr) + (offset >> 5);
	int set = 0, bit = offset & 31, res;
	unsigned long dummy;

	if (bit) {
		/*
		 * Look for a zero in the first word.
		 */
#ifdef __MIPSEB__
#error "Fix this for big endian byte order"
#endif
		__asm__(".set\tnoreorder\n\t"
			".set\tnoat\n"
			"1:\tand\t$1,%4,%1\n\t"
			"beqz\t$1,1f\n\t"
			"sll\t%1,%1,1\n\t"
			"bnez\t%1,1b\n\t"
			"addiu\t%0,1\n\t"
			".set\tat\n\t"
			".set\treorder\n"
			"1:"
			: "=r" (set), "=r" (dummy)
			: "0" (0), "1" (1 << bit), "r" (*p)
			: "$1");
		if (set < (32 - bit))
			return set + offset;
		set = 32 - bit;
		p++;
	}

	/*
	 * No zero yet, search the remaining full words for a zero.
	 */
	res = find_first_zero_bit(p, size - 32 * (p - (unsigned int *) addr));

	return offset + set + res;
}

#endif /* !(__MIPSEB__) */
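
/*
 * Usage sketch (illustrative, not part of the original header): a typical
 * allocator walks a bitmap with find_first_zero_bit() and then claims the
 * slot, rechecking because the search itself is not atomic.  The bitmap
 * name and size below are assumptions made for the example.
 */
#if 0	/* example only, never compiled */
#define EXAMPLE_MAP_BITS	128		/* hypothetical bitmap size */
static unsigned long example_id_map[EXAMPLE_MAP_BITS / 32];

static int example_alloc_id(void)
{
	int id;

	do {
		id = find_first_zero_bit(example_id_map, EXAMPLE_MAP_BITS);
		if (id >= EXAMPLE_MAP_BITS)
			return -1;		/* bitmap full */
		/* Retry if another path claimed the bit between search and set. */
	} while (test_and_set_bit(id, example_id_map));

	return id;
}
#endif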
/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static __inline__ unsigned long ffz(unsigned long word)
{
	unsigned int __res;
	unsigned int mask = 1;

	__asm__(".set\tnoreorder\n\t"
		".set\tnoat\n\t"
		"move\t%0,$0\n"
		"1:\tand\t$1,%2,%1\n\t"
		"beqz\t$1,2f\n\t"
		"sll\t%1,1\n\t"
		"bnez\t%1,1b\n\t"
		"addiu\t%0,1\n\t"
		".set\tat\n\t"
		".set\treorder\n"
		"2:\n\t"
		: "=&r" (__res), "=r" (mask)
		: "r" (word), "1" (mask)
		: "$1");

	return __res;
}
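
/*
 * Usage sketch (illustrative, not part of the original header): per the
 * comment above, callers must rule out the all-ones case before calling
 * ffz().  The helper below is an assumption made for the example.
 */
#if 0	/* example only, never compiled */
static int example_first_free(unsigned long mask)
{
	if (mask == ~0UL)
		return -1;		/* no zero bit: ffz() would be undefined */
	return ffz(mask);		/* index of the lowest clear bit */
}
#endif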
#ifdef __KERNEL__

/*
 * hweightN - returns the hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming Weight of a number is the total number of bits set in it.
 */
#define hweight32(x)	generic_hweight32(x)
#define hweight16(x)	generic_hweight16(x)
#define hweight8(x)	generic_hweight8(x)

#endif /* __KERNEL__ */
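
/*
 * Usage sketch (illustrative, not part of the original header): hweight32()
 * simply expands to the generic population-count helper, e.g. to count how
 * many bits are set in a 32-bit mask.  The names below are assumptions.
 */
#if 0	/* example only, never compiled */
static int example_count_pending(unsigned int pending_mask)
{
	return hweight32(pending_mask);	/* number of set bits in the mask */
}
#endif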
#ifdef __MIPSEB__

/*
 * find_next_zero_bit - find the next zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static __inline__ int find_next_zero_bit(void *addr, int size, int offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (32 - offset);
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
found_middle:
	return result + ffz(tmp);
}

/* Linus sez that gcc can optimize the following correctly, we'll see if this
 * holds on the Sparc as it does for the ALPHA.
 */

#if 0 /* Fool kernel-doc since it doesn't do macros yet */
/*
 * find_first_zero_bit - find the first zero bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first zero bit, not the number of the byte
 * containing a bit.
 */
static int find_first_zero_bit(void *addr, unsigned size);
#endif

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

#endif /* (__MIPSEB__) */
/* Now for the ext2 filesystem bit operations and helper routines. */

#ifdef __MIPSEB__

static __inline__ int ext2_set_bit(int nr, void *addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;
	restore_flags(flags);

	return retval;
}

static __inline__ int ext2_clear_bit(int nr, void *addr)
{
	int mask, retval, flags;
	unsigned char *ADDR = (unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	save_and_cli(flags);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;
	restore_flags(flags);

	return retval;
}

static __inline__ int ext2_test_bit(int nr, const void *addr)
{
	int mask;
	const unsigned char *ADDR = (const unsigned char *) addr;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}
#define ext2_find_first_zero_bit(addr, size) \
	ext2_find_next_zero_bit((addr), (size), 0)

static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> 5);
	unsigned long result = offset & ~31UL;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= 31UL;
	if (offset) {
		/* We hold the little endian value in tmp, but then the
		 * shift is illegal.  So we could keep a big endian value
		 * in tmp, like this:
		 *
		 *	tmp = __swab32(*(p++));
		 *	tmp |= ~0UL >> (32-offset);
		 *
		 * but this would decrease performance, so we change the
		 * shift:
		 */
		tmp = *(p++);
		tmp |= __swab32(~0UL >> (32-offset));
		if (size < 32)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= 32;
		result += 32;
	}
	while (size & ~31UL) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += 32;
		size -= 32;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	/* tmp is little endian, so we would have to swab the shift,
	 * see above.  But then we have to swab tmp below for ffz, so
	 * we might as well do this here.
	 */
	return result + ffz(__swab32(tmp) | (~0UL << size));
found_middle:
	return result + ffz(__swab32(tmp));
}
#else /* !(__MIPSEB__) */

/* Native ext2 byte ordering, just collapse using defines. */
#define ext2_set_bit(nr, addr)		test_and_set_bit((nr), (addr))
#define ext2_clear_bit(nr, addr)	test_and_clear_bit((nr), (addr))
#define ext2_test_bit(nr, addr)		test_bit((nr), (addr))
#define ext2_find_first_zero_bit(addr, size)	find_first_zero_bit((addr), (size))
#define ext2_find_next_zero_bit(addr, size, offset) \
	find_next_zero_bit((addr), (size), (offset))

#endif /* !(__MIPSEB__) */
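
/*
 * Usage sketch (illustrative, not part of the original header): the ext2_*
 * helpers operate on the filesystem's little-endian on-disk bitmaps, so a
 * free-block search looks the same on either byte order.  The bitmap name
 * and group size below are assumptions made for the example.
 */
#if 0	/* example only, never compiled */
#define EXAMPLE_BLOCKS_PER_GROUP	8192	/* hypothetical group size */

static int example_find_free_block(void *block_bitmap)
{
	unsigned long blk;

	blk = ext2_find_first_zero_bit(block_bitmap, EXAMPLE_BLOCKS_PER_GROUP);
	if (blk >= EXAMPLE_BLOCKS_PER_GROUP)
		return -1;			/* no free block in this group */
	ext2_set_bit(blk, block_bitmap);	/* mark it used */
	return blk;
}
#endif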
/*
 * Bitmap functions for the minix filesystem.
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This limits the Minix filesystem's value for data exchange very much.
 */
#define minix_test_and_set_bit(nr, addr)	test_and_set_bit(nr, addr)
#define minix_set_bit(nr, addr)			set_bit(nr, addr)
#define minix_test_and_clear_bit(nr, addr)	test_and_clear_bit(nr, addr)
#define minix_test_bit(nr, addr)		test_bit(nr, addr)
#define minix_find_first_zero_bit(addr, size)	find_first_zero_bit(addr, size)

#endif /* _ASM_BITOPS_H */