cpu.c

/*
 * (C) Copyright 2014 - 2015 Xilinx, Inc.
 * Michal Simek <michal.simek@xilinx.com>
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <asm/io.h>
#define ZYNQMP_SILICON_VER_MASK		0xF000
#define ZYNQMP_SILICON_VER_SHIFT	12

DECLARE_GLOBAL_DATA_PTR;
/* Read the silicon version from the CSU version register (EL3 only) */
static unsigned int zynqmp_get_silicon_version_secure(void)
{
	u32 ver;

	ver = readl(&csu_base->version);
	ver &= ZYNQMP_SILICON_VER_MASK;
	ver >>= ZYNQMP_SILICON_VER_SHIFT;

	return ver;
}
unsigned int zynqmp_get_silicon_version(void)
{
	if (current_el() == 3)
		return zynqmp_get_silicon_version_secure();

	gd->cpu_clk = get_tbclk();

	switch (gd->cpu_clk) {
	case 0 ... 1000000:
		return ZYNQMP_CSU_VERSION_VELOCE;
	case 50000000:
		return ZYNQMP_CSU_VERSION_QEMU;
	}

	return ZYNQMP_CSU_VERSION_EP108;
}
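
/*
 * Below EL3 the CSU version register is not accessible, so
 * zynqmp_get_silicon_version() above falls back to the generic timer
 * frequency reported by get_tbclk() to tell the platforms apart:
 *
 *	<= 1 MHz  -> Veloce emulation (very slow timer clock)
 *	   50 MHz -> QEMU model
 *	otherwise -> EP108 silicon
 */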
#ifndef CONFIG_SYS_DCACHE_OFF
#include <asm/armv8/mmu.h>

#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)

#define TCR_TG1_4K		(1UL << 31)
#define TCR_EPD1_DISABLE	(1 << 23)
#define ZYNQMP_VA_BITS		40
#define ZYNQMP_TCR	(TCR_TG1_4K | \
			 TCR_EPD1_DISABLE | \
			 TCR_SHARED_OUTER | \
			 TCR_SHARED_INNER | \
			 TCR_IRGN_WBWA | \
			 TCR_ORGN_WBWA | \
			 TCR_T0SZ(ZYNQMP_VA_BITS))

#define MEMORY_ATTR	(PMD_SECT_AF | PMD_SECT_INNER_SHARE | \
			 PMD_ATTRINDX(MT_NORMAL) | \
			 PMD_TYPE_SECT)
#define DEVICE_ATTR	(PMD_SECT_AF | PMD_SECT_PXN | \
			 PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_NGNRNE) | \
			 PMD_TYPE_SECT)
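
/*
 * MEMORY_ATTR describes a normal, inner-shareable memory block with the
 * access flag set; DEVICE_ATTR describes a device-nGnRnE block that is
 * never executable (PXN and UXN set).
 */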
/* 4K size is required to place 512 entries in each level */
#define TLB_TABLE_SIZE	0x1000
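
/*
 * Resulting layout at gd->arch.tlb_addr (one 4K table per level and
 * region, 0x7000 bytes in total), as built by zynqmp_mmu_setup() below:
 *
 *	+0x0000	level0_table	two 512GB entries
 *	+0x1000	level1_table_0	0 .. 512GB, 1GB per entry
 *	+0x2000	level1_table_1	512GB .. 1TB, 1GB per entry
 *	+0x3000	level2_table_0	0 .. 1GB, 2MB per entry
 *	+0x4000	level2_table_1	1GB .. 2GB, 2MB per entry
 *	+0x5000	level2_table_2	2GB .. 3GB, 2MB per entry
 *	+0x6000	level2_table_3	3GB .. 4GB, 2MB per entry
 */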
/* Run-length attribute encoding: 'num' consecutive entries get 'attr' */
struct attr_tbl {
	u32 num;
	u64 attr;
};

static struct attr_tbl attr_tbll1t0[4] = { {16, 0x0},
					   {8, DEVICE_ATTR},
					   {32, MEMORY_ATTR},
					   {456, DEVICE_ATTR}
					 };
static struct attr_tbl attr_tbll2t3[4] = { {0x180, DEVICE_ATTR},
					   {0x40, 0x0},
					   {0x3F, DEVICE_ATTR},
					   {0x1, MEMORY_ATTR}
					 };
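
/*
 * Decoded, the run-length pairs above translate to:
 *
 * attr_tbll1t0 (1GB level 1 entries, 16 + 8 + 32 + 456 = 512):
 *	0GB  .. 16GB	invalid (the first four entries are later
 *			replaced by level 2 table pointers)
 *	16GB .. 24GB	device
 *	24GB .. 56GB	normal memory
 *	56GB .. 512GB	device
 *
 * attr_tbll2t3 (2MB level 2 entries covering 3GB .. 4GB):
 *	0xC0000000 .. 0xF0000000	device
 *	0xF0000000 .. 0xF8000000	invalid
 *	0xF8000000 .. 0xFFE00000	device
 *	0xFFE00000 .. 0xFFFFFFFF	normal memory (the 2MB block that
 *					contains the on-chip memory, OCM)
 */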
/*
 * The MMU table is laid out as follows.
 * The level 0 table contains two 512GB entries: one points to Level1
 * Table 0, the other to Level1 Table 1.
 * Level1 Table 0 holds one entry for each 1GB from 0 to 512GB.
 * Level1 Table 1 holds one entry for each 1GB from 512GB to 1TB.
 * Level2 Table 0, 1, 2 and 3 hold one entry for each 2MB, starting at
 * 0GB, 1GB, 2GB and 3GB respectively.
 */
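/*
 * With the 4K granule and 40-bit VA configured in ZYNQMP_TCR, an
 * address decodes as:
 *
 *	l0 = (va >> 39) & 0x1;		selects one 512GB half
 *	l1 = (va >> 30) & 0x1ff;	1GB per level 1 entry
 *	l2 = (va >> 21) & 0x1ff;	2MB per level 2 block
 *
 * Example: va = 0xF8000000 gives l0 = 0, l1 = 3, l2 = 448, i.e. entry
 * 448 of level2_table_3, which attr_tbll2t3 above marks as device
 * memory.
 */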
static void zynqmp_mmu_setup(void)
{
	int el;
	u32 index_attr;
	u64 i, section_l1t0, section_l1t1;
	u64 section_l2t0, section_l2t1, section_l2t2, section_l2t3;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + TLB_TABLE_SIZE);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + (2 * TLB_TABLE_SIZE));
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + (3 * TLB_TABLE_SIZE));
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + (4 * TLB_TABLE_SIZE));
	u64 *level2_table_2 = (u64 *)(gd->arch.tlb_addr + (5 * TLB_TABLE_SIZE));
	u64 *level2_table_3 = (u64 *)(gd->arch.tlb_addr + (6 * TLB_TABLE_SIZE));
	level0_table[0] = (u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] = (u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0, covering 0 to 512GB
	 * set level 1 table 1, covering 512GB to 1TB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;

	index_attr = 0;
	for (i = 0; i < 512; i++) {
		level1_table_0[i] = section_l1t0;
		level1_table_0[i] |= attr_tbll1t0[index_attr].attr;
		attr_tbll1t0[index_attr].num--;
		if (attr_tbll1t0[index_attr].num == 0)
			index_attr++;
		level1_table_1[i] = section_l1t1;
		level1_table_1[i] |= DEVICE_ATTR;
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
	}
	/* Replace the first four 1GB entries with level 2 table pointers */
	level1_table_0[0] = (u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[1] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	level1_table_0[2] = (u64)level2_table_2 | PMD_TYPE_TABLE;
	level1_table_0[3] = (u64)level2_table_3 | PMD_TYPE_TABLE;

	section_l2t0 = 0;
	section_l2t1 = section_l2t0 + BLOCK_SIZE_L1; /* 1GB */
	section_l2t2 = section_l2t1 + BLOCK_SIZE_L1; /* 2GB */
	section_l2t3 = section_l2t2 + BLOCK_SIZE_L1; /* 3GB */

	index_attr = 0;
	for (i = 0; i < 512; i++) {
		level2_table_0[i] = section_l2t0 | MEMORY_ATTR;
		level2_table_1[i] = section_l2t1 | MEMORY_ATTR;
		level2_table_2[i] = section_l2t2 | DEVICE_ATTR;
		level2_table_3[i] = section_l2t3 |
				    attr_tbll2t3[index_attr].attr;
		attr_tbll2t3[index_attr].num--;
		if (attr_tbll2t3[index_attr].num == 0)
			index_attr++;
		section_l2t0 += BLOCK_SIZE_L2;
		section_l2t1 += BLOCK_SIZE_L2;
		section_l2t2 += BLOCK_SIZE_L2;
		section_l2t3 += BLOCK_SIZE_L2;
	}
	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, ZYNQMP_TCR,
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
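
/*
 * Early CPU setup: the I-cache may be enabled while the MMU is still
 * off, but the D-cache and TLBs are only invalidated here so that no
 * stale entries are hit once the MMU and D-cache are switched on later
 * via enable_caches().
 */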
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();

	return 0;
}
/*
 * This function is called from lib/board.c.
 * It recreates the MMU table in main memory. The MMU and d-cache were
 * enabled earlier, so there is no need to disable the d-cache for this
 * operation.
 */
void enable_caches(void)
{
	/* The data cache is not active unless the MMU is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		zynqmp_mmu_setup();
	}

	puts("Enabling Caches...\n");

	set_sctlr(get_sctlr() | CR_C);
}
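
/*
 * Generic ARMv8 cache code (e.g. mmu_set_region_dcache_behaviour())
 * uses this hook to locate the table it patches when changing
 * per-region attributes. The 0x3000 offset is 3 * TLB_TABLE_SIZE,
 * i.e. level2_table_0, the first table with 2MB granularity.
 */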
u64 *arch_get_page_table(void)
{
	return (u64 *)(gd->arch.tlb_addr + 0x3000);
}
#endif