cache_v8.c

/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
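
/*
 * Write one block ("section") descriptor into the page table at
 * gd->arch.tlb_addr, identity-mapping one SECTION_SIZE block per index:
 * PMD_TYPE_SECT marks the entry as a block mapping, PMD_SECT_AF sets
 * the Access Flag so the first access does not fault, and
 * PMD_ATTRINDX() selects the MAIR attribute index for the memory type.
 */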
static void set_pgtable_section(u64 section, u64 memory_type)
{
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	u64 value;

	value = (section << SECTION_SHIFT) | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	page_table[section] = value;
}

/* To activate the MMU we need to set up virtual memory */
static void mmu_setup(void)
{
	int i, j, el;
	bd_t *bd = gd->bd;

	/* Set up an identity mapping for the whole address space */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++)
		set_pgtable_section(i, MT_DEVICE_NGNRNE);
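
	/*
	 * MT_DEVICE_NGNRNE (device memory: non-Gathering, non-Reordering,
	 * no Early write acknowledgement) is the most restrictive memory
	 * type, so it is the safe default for everything not known to be
	 * RAM.  PGTABLE_SIZE >> 3 is the number of 64-bit entries in the
	 * table.
	 */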

	/* Set up an identity mapping for all RAM space */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;

		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(j, MT_NORMAL);
		}
	}
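
	/*
	 * Note: the shifts above truncate, so the bank boundaries from
	 * bd->bi_dram[] are effectively assumed to be SECTION_SIZE
	 * aligned; a partial section at the end of a bank stays mapped
	 * as device memory.
	 */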

	/* load TTBR0 */
	el = current_el();
	if (el == 1)
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" (gd->arch.tlb_addr) : "memory");
	else if (el == 2)
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" (gd->arch.tlb_addr) : "memory");
	else
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" (gd->arch.tlb_addr) : "memory");
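
	/*
	 * TTBR0 is banked per exception level, so the same table is
	 * installed at whichever EL U-Boot happens to be running at
	 * (EL3 being the fallback case above).
	 */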

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}
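
/*
 * Note: only SCTLR.M (CR_M) is set here; the C and I bits are managed
 * separately by dcache_enable()/icache_enable() below.  The MAIR and
 * TCR translation controls are presumably programmed elsewhere (e.g.
 * in the early assembly init) before this table is used.
 */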

/*
 * Performs an invalidation of the entire data cache at all levels.
 *
 * Note: this is currently implemented with __asm_flush_dcache_all(),
 * i.e. the lines are cleaned as well as invalidated, which is a safe
 * superset of a pure invalidate.
 */
void invalidate_dcache_all(void)
{
	__asm_flush_dcache_all();
}

/*
 * Performs a clean & invalidate of the entire data cache at all levels
 */
void flush_dcache_all(void)
{
	__asm_flush_dcache_all();
}

/*
 * Invalidates the given range in all levels of D-cache/unified cache.
 *
 * Note: like invalidate_dcache_all(), this is implemented with the
 * flush (clean & invalidate) primitive, which is a safe superset.
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush (clean & invalidate) the given range from all levels of
 * D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
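
/*
 * A typical (hypothetical) DMA buffer sequence using these helpers,
 * assuming `buf`/`len` describe a cache-line-aligned buffer:
 *
 *	flush_dcache_range((ulong)buf, (ulong)buf + len);      // device will read buf
 *	... start DMA and wait for completion ...
 *	invalidate_dcache_range((ulong)buf, (ulong)buf + len); // CPU will read buf
 */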

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}
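
/*
 * Disabling runs in the reverse order: SCTLR.C and SCTLR.M are cleared
 * first so that no new lines are allocated while the final clean &
 * invalidate and TLB invalidation run.
 */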
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

#else /* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

#endif /* CONFIG_SYS_DCACHE_OFF */

#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}
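
/*
 * Note: invalidate_icache_all() is typically paired with a D-cache
 * flush after new code has been written to memory (e.g. after image
 * loading or relocation), so that instruction fetches see the
 * up-to-date contents.
 */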

#else /* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* CONFIG_SYS_ICACHE_OFF */

/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void enable_caches(void)
{
	icache_enable();
	dcache_enable();
}

/*
 * Flush range from all levels of d-cache/unified-cache
 */
void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}