cache.S

/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <version.h>
#include <asm/macro.h>
#include <linux/linkage.h>

/*
 * void __asm_flush_dcache_level(level)
 *
 * clean and invalidate one level of data cache.
 *
 * x0: cache level
 * x1~x9: clobbered
 */
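/*
 * csselr_el1 takes the cache level in bits [3:1] and the InD bit in
 * bit [0] (0 selects the data or unified cache), hence the "level << 1"
 * below. After an isb, ccsidr_el1 reports the geometry of the cache
 * selected by csselr_el1.
 */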
ENTRY(__asm_flush_dcache_level)
	lsl	x1, x0, #1
	msr	csselr_el1, x1		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- number of cache ways - 1 */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- number of cache sets - 1 */
	/* x1 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */
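	/*
	 * Operand layout for "dc cisw" (ARMv8 ARM set/way format):
	 *   bits [31:32-A] : way, A = log2(#ways) rounded up
	 *   bits [B-1:L]   : set, L = log2(line size in bytes),
	 *                    B = L + log2(#sets) rounded up
	 *   bits [3:1]     : cache level (x1 already holds level << 1)
	 * Worked example, assuming a 4-way, 64-byte-line cache: A = 2,
	 * so x5 = clz(3) = 30 and the way goes in bits [31:30]; L = 6,
	 * so the set is shifted left by x2 = 6.
	 */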
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x1, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	dc	cisw, x9		/* clean & invalidate by set/way */
	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)

/*
 * void __asm_flush_dcache_all(void)
 *
 * clean and invalidate all data caches by SET/WAY.
 */
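/*
 * clidr_el1.LoC (bits [26:24]) is the Level of Coherence: the last
 * cache level that has to be cleaned so that data reaches the Point
 * of Coherence. The loop below walks levels 0 .. LoC-1.
 */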
ENTRY(__asm_flush_dcache_all)
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr			/* save lr, bl below clobbers it */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */
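	/*
	 * Each clidr_el1 Ctype field is 3 bits wide (hence the tripled
	 * level used as the shift below) and encodes:
	 *   0 - no cache, 1 - icache only, 2 - dcache only,
	 *   3 - separate icache and dcache, 4 - unified cache
	 * Only levels with a data or unified cache (Ctype >= 2) need a
	 * set/way flush.
	 */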
loop_level:
	lsl	x1, x0, #1
	add	x1, x1, x0		/* x1 <- tripled cache level */
	lsr	x1, x10, x1
	and	x1, x1, #7		/* x1 <- cache type */
	cmp	x1, #2
	b.lt	skip			/* skip if no cache or icache only */
	bl	__asm_flush_dcache_level
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level
	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15			/* restore lr */
finished:
	ret
ENDPROC(__asm_flush_dcache_all)

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address (exclusive)
 */
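/*
 * "dc civac" operates by virtual address and cleans & invalidates to
 * the Point of Coherency. The start address is aligned down to the
 * smallest line size so that a partial first line is still covered.
 */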
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- ctr_el0.DminLine */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size: 4 << DminLine */
	/* x2 <- minimal cache line size in cache system */
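	/*
	 * ctr_el0.DminLine (bits [19:16]) is log2 of the number of
	 * 4-byte words in the smallest data cache line, so the line
	 * size in bytes is 4 << DminLine, e.g. DminLine = 4 gives
	 * x2 = 64.
	 */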
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start down to line size */
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all instruction cache entries.
 */
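/*
 * "ic ialluis" invalidates all instruction caches to the Point of
 * Unification, Inner Shareable, so the invalidation is broadcast to
 * the other cores in the Inner Shareable domain.
 */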
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)