  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * (C) Copyright 2014 - 2015 Xilinx, Inc.
  4. * Michal Simek <michal.simek@xilinx.com>
  5. */
  6. #include <common.h>
  7. #include <asm/arch/hardware.h>
  8. #include <asm/arch/sys_proto.h>
  9. #include <asm/armv8/mmu.h>
  10. #include <asm/io.h>
  11. #define ZYNQ_SILICON_VER_MASK 0xF000
  12. #define ZYNQ_SILICON_VER_SHIFT 12
DECLARE_GLOBAL_DATA_PTR;

/*
 * Number of filled static entries and also the first empty
 * slot in zynqmp_mem_map.
 */
#define ZYNQMP_MEM_MAP_USED 4

/* DDR entries are only reserved when the platform actually has DDR */
#if !defined(CONFIG_ZYNQMP_NO_DDR)
#define DRAM_BANKS CONFIG_NR_DRAM_BANKS
#else
#define DRAM_BANKS 0
#endif

/* One extra slot when the TCM/OCM region gets its own mapping */
#if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
#define TCM_MAP 1
#else
#define TCM_MAP 0
#endif

/* +1 is end of list which needs to be empty */
#define ZYNQMP_MEM_MAP_MAX (ZYNQMP_MEM_MAP_USED + DRAM_BANKS + TCM_MAP + 1)
/*
 * Static part of the MMU region map: the device windows, all mapped
 * strongly-ordered (nGnRnE), non-shareable and never-execute.
 * Run-time regions (TCM/OCM, DDR banks) are appended by mem_map_fill().
 */
static struct mm_region zynqmp_mem_map[ZYNQMP_MEM_MAP_MAX] = {
	{
		/* 0x0_8000_0000 - 0x0_efff_ffff: device window (PL/peripheral
		 * space per the SoC address map - confirm against UG1085) */
		.virt = 0x80000000UL,
		.phys = 0x80000000UL,
		.size = 0x70000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0x0_f800_0000 - 0x0_ffdf_ffff: SoC peripheral registers */
		.virt = 0xf8000000UL,
		.phys = 0xf8000000UL,
		.size = 0x07e00000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0x4_0000_0000 - 0x7_ffff_ffff: high device window
		 * (presumably PCIe/PL high space - confirm) */
		.virt = 0x400000000UL,
		.phys = 0x400000000UL,
		.size = 0x400000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}, {
		/* 0x10_0000_0000 upwards: remainder of the address space */
		.virt = 0x1000000000UL,
		.phys = 0x1000000000UL,
		.size = 0xf000000000UL,
		.attrs = PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
			 PTE_BLOCK_NON_SHARE |
			 PTE_BLOCK_PXN | PTE_BLOCK_UXN
	}
};
  62. void mem_map_fill(void)
  63. {
  64. int banks = ZYNQMP_MEM_MAP_USED;
  65. #if defined(CONFIG_DEFINE_TCM_OCM_MMAP)
  66. zynqmp_mem_map[banks].virt = 0xffe00000UL;
  67. zynqmp_mem_map[banks].phys = 0xffe00000UL;
  68. zynqmp_mem_map[banks].size = 0x00200000UL;
  69. zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
  70. PTE_BLOCK_INNER_SHARE;
  71. banks = banks + 1;
  72. #endif
  73. #if !defined(CONFIG_ZYNQMP_NO_DDR)
  74. for (int i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
  75. /* Zero size means no more DDR that's this is end */
  76. if (!gd->bd->bi_dram[i].size)
  77. break;
  78. zynqmp_mem_map[banks].virt = gd->bd->bi_dram[i].start;
  79. zynqmp_mem_map[banks].phys = gd->bd->bi_dram[i].start;
  80. zynqmp_mem_map[banks].size = gd->bd->bi_dram[i].size;
  81. zynqmp_mem_map[banks].attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) |
  82. PTE_BLOCK_INNER_SHARE;
  83. banks = banks + 1;
  84. }
  85. #endif
  86. }
/* Active region table consumed by the generic armv8 MMU setup code */
struct mm_region *mem_map = zynqmp_mem_map;
  88. u64 get_page_table_size(void)
  89. {
  90. return 0x14000;
  91. }
#if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU) || defined(CONFIG_DEFINE_TCM_OCM_MMAP)
/*
 * Bring up the R5 TCM in the requested mode and zero it.
 * Destructive: any previous TCM contents are lost, hence the warning.
 *
 * @mode: TCM configuration mode passed through to initialize_tcm()
 */
void tcm_init(u8 mode)
{
	puts("WARNING: Initializing TCM overwrites TCM content\n");
	initialize_tcm(mode);
	/* Clear the whole TCM so it starts from a known state */
	memset((void *)ZYNQMP_TCM_BASE_ADDR, 0, ZYNQMP_TCM_SIZE);
}
#endif
#ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
/*
 * Place the MMU translation tables in TCM instead of DDR by pointing
 * gd->arch.tlb_addr/tlb_size at the (freshly cleared) TCM region.
 * Returns 0 on success.
 */
int reserve_mmu(void)
{
	/* TCM_LOCK: presumably lock-step TCM mode - confirm against sys_proto.h */
	tcm_init(TCM_LOCK);
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->arch.tlb_addr = ZYNQMP_TCM_BASE_ADDR;
	return 0;
}
#endif
/*
 * Read the silicon version field from the CSU version register.
 * Only usable at EL3, since the CSU is a secure-world peripheral.
 */
static unsigned int zynqmp_get_silicon_version_secure(void)
{
	u32 ver;

	ver = readl(&csu_base->version);
	/* Version is in bits [15:12] */
	ver &= ZYNQMP_SILICON_VER_MASK;
	ver >>= ZYNQMP_SILICON_VER_SHIFT;

	return ver;
}
  117. unsigned int zynqmp_get_silicon_version(void)
  118. {
  119. if (current_el() == 3)
  120. return zynqmp_get_silicon_version_secure();
  121. gd->cpu_clk = get_tbclk();
  122. switch (gd->cpu_clk) {
  123. case 50000000:
  124. return ZYNQMP_CSU_VERSION_QEMU;
  125. }
  126. return ZYNQMP_CSU_VERSION_SILICON;
  127. }
/* SiP service call function identifiers for MMIO access via firmware */
#define ZYNQMP_MMIO_READ 0xC2000014
#define ZYNQMP_MMIO_WRITE 0xC2000013

/*
 * Issue an SMC to the secure firmware. Pairs of 32-bit arguments are
 * packed into 64-bit registers x1/x2 (low word first); the function id
 * goes in x0. On return the firmware status is in the low word of x0;
 * if @ret_payload is non-NULL the five 32-bit result words are unpacked
 * into it from x0-x2.
 *
 * Returns the (truncated-to-int) value of x0 after the call.
 */
int __maybe_unused invoke_smc(u32 pm_api_id, u32 arg0, u32 arg1, u32 arg2,
			      u32 arg3, u32 *ret_payload)
{
	/*
	 * Added SIP service call Function Identifier
	 * Make sure to stay in x0 register
	 */
	struct pt_regs regs;

	regs.regs[0] = pm_api_id;
	regs.regs[1] = ((u64)arg1 << 32) | arg0;
	regs.regs[2] = ((u64)arg3 << 32) | arg2;

	smc_call(&regs);

	if (ret_payload != NULL) {
		ret_payload[0] = (u32)regs.regs[0];
		ret_payload[1] = upper_32_bits(regs.regs[0]);
		ret_payload[2] = (u32)regs.regs[1];
		ret_payload[3] = upper_32_bits(regs.regs[1]);
		ret_payload[4] = (u32)regs.regs[2];
	}

	return regs.regs[0];
}
  151. #if defined(CONFIG_CLK_ZYNQMP)
  152. unsigned int zynqmp_pmufw_version(void)
  153. {
  154. int ret;
  155. u32 ret_payload[PAYLOAD_ARG_CNT];
  156. static u32 pm_api_version = ZYNQMP_PM_VERSION_INVALID;
  157. /*
  158. * Get PMU version only once and later
  159. * just return stored values instead of
  160. * asking PMUFW again.
  161. */
  162. if (pm_api_version == ZYNQMP_PM_VERSION_INVALID) {
  163. ret = invoke_smc(ZYNQMP_SIP_SVC_GET_API_VERSION, 0, 0, 0, 0,
  164. ret_payload);
  165. pm_api_version = ret_payload[1];
  166. if (ret)
  167. panic("PMUFW is not found - Please load it!\n");
  168. }
  169. return pm_api_version;
  170. }
  171. #endif
  172. static int zynqmp_mmio_rawwrite(const u32 address,
  173. const u32 mask,
  174. const u32 value)
  175. {
  176. u32 data;
  177. u32 value_local = value;
  178. int ret;
  179. ret = zynqmp_mmio_read(address, &data);
  180. if (ret)
  181. return ret;
  182. data &= ~mask;
  183. value_local &= mask;
  184. value_local |= data;
  185. writel(value_local, (ulong)address);
  186. return 0;
  187. }
/*
 * Direct register read into @value. Always succeeds (returns 0);
 * the int return exists to mirror the SMC-based read path.
 */
static int zynqmp_mmio_rawread(const u32 address, u32 *value)
{
	*value = readl((ulong)address);
	return 0;
}
  193. int zynqmp_mmio_write(const u32 address,
  194. const u32 mask,
  195. const u32 value)
  196. {
  197. if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3)
  198. return zynqmp_mmio_rawwrite(address, mask, value);
  199. else
  200. return invoke_smc(ZYNQMP_MMIO_WRITE, address, mask,
  201. value, 0, NULL);
  202. return -EINVAL;
  203. }
  204. int zynqmp_mmio_read(const u32 address, u32 *value)
  205. {
  206. u32 ret_payload[PAYLOAD_ARG_CNT];
  207. u32 ret;
  208. if (!value)
  209. return -EINVAL;
  210. if (IS_ENABLED(CONFIG_SPL_BUILD) || current_el() == 3) {
  211. ret = zynqmp_mmio_rawread(address, value);
  212. } else {
  213. ret = invoke_smc(ZYNQMP_MMIO_READ, address, 0, 0,
  214. 0, ret_payload);
  215. *value = ret_payload[1];
  216. }
  217. return ret;
  218. }