psci-mx7.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015-2016 Freescale Semiconductor, Inc.
 * Copyright 2017 NXP
 */

#include <asm/io.h>
#include <asm/psci.h>
#include <asm/secure.h>
#include <asm/arch/imx-regs.h>
#include <asm/armv7.h>
#include <asm/gic.h>
#include <linux/bitops.h>
#include <common.h>
#include <fsl_wdog.h>

#define GPC_LPCR_A7_BSC		0x0
#define GPC_LPCR_A7_AD		0x4
#define GPC_SLPCR		0x14
#define GPC_PGC_ACK_SEL_A7	0x24
#define GPC_IMR1_CORE0		0x30
#define GPC_SLOT0_CFG		0xb0
#define GPC_CPU_PGC_SW_PUP_REQ	0xf0
#define GPC_CPU_PGC_SW_PDN_REQ	0xfc
#define GPC_PGC_C0		0x800
#define GPC_PGC_C1		0x840
#define GPC_PGC_SCU		0x880

#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM	0x4000
#define BM_LPCR_A7_BSC_LPM1		0xc
#define BM_LPCR_A7_BSC_LPM0		0x3
#define BP_LPCR_A7_BSC_LPM0		0

#define BM_SLPCR_EN_DSM			0x80000000
#define BM_SLPCR_RBC_EN			0x40000000
#define BM_SLPCR_REG_BYPASS_COUNT	0x3f000000
#define BM_SLPCR_VSTBY			0x4
#define BM_SLPCR_SBYOS			0x2
#define BM_SLPCR_BYPASS_PMIC_READY	0x1

#define BM_LPCR_A7_AD_L2PGE		0x10000
#define BM_LPCR_A7_AD_EN_C1_PUP		0x800
#define BM_LPCR_A7_AD_EN_C0_PUP		0x200
#define BM_LPCR_A7_AD_EN_PLAT_PDN	0x10
#define BM_LPCR_A7_AD_EN_C1_PDN		0x8
#define BM_LPCR_A7_AD_EN_C0_PDN		0x2

#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7	0x1
#define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7	0x2

#define BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK	0x8000
#define BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK	0x80000000

#define MAX_SLOT_NUMBER		10
#define A7_LPM_WAIT		0x5
#define A7_LPM_STOP		0xa

#define BM_SYS_COUNTER_CNTCR_FCR1	0x200
#define BM_SYS_COUNTER_CNTCR_FCR0	0x100

#define REG_SET			0x4
#define REG_CLR			0x8

#define ANADIG_ARM_PLL		0x60
#define ANADIG_DDR_PLL		0x70
#define ANADIG_SYS_PLL		0xb0
#define ANADIG_ENET_PLL		0xe0
#define ANADIG_AUDIO_PLL	0xf0
#define ANADIG_VIDEO_PLL	0x130

#define BM_ANATOP_ARM_PLL_OVERRIDE	BIT(20)
#define BM_ANATOP_DDR_PLL_OVERRIDE	BIT(19)
#define BM_ANATOP_SYS_PLL_OVERRIDE	(0x1ff << 17)
#define BM_ANATOP_ENET_PLL_OVERRIDE	BIT(13)
#define BM_ANATOP_AUDIO_PLL_OVERRIDE	BIT(24)
#define BM_ANATOP_VIDEO_PLL_OVERRIDE	BIT(24)

#define DDRC_STAT		0x4
#define DDRC_PWRCTL		0x30
#define DDRC_PSTAT		0x3fc

#define SRC_GPR1_MX7D		0x074
#define SRC_GPR2_MX7D		0x078
#define SRC_A7RCR0		0x004
#define SRC_A7RCR1		0x008
#define BP_SRC_A7RCR0_A7_CORE_RESET0	0
#define BP_SRC_A7RCR1_A7_CORE1_ENABLE	1

#define SNVS_LPCR		0x38
#define BP_SNVS_LPCR_DP_EN	0x20
#define BP_SNVS_LPCR_TOP	0x40

#define CCM_CCGR_SNVS		0x4250
#define CCM_ROOT_WDOG		0xbb80
#define CCM_CCGR_WDOG1		0x49c0

#define MPIDR_AFF0		GENMASK(7, 0)

#define IMX7D_PSCI_NR_CPUS	2
#if IMX7D_PSCI_NR_CPUS > CONFIG_ARMV7_PSCI_NR_CPUS
#error "invalid value for CONFIG_ARMV7_PSCI_NR_CPUS"
#endif

#define imx_cpu_gpr_entry_offset(cpu) \
	(SRC_BASE_ADDR + SRC_GPR1_MX7D + cpu * 8)
#define imx_cpu_gpr_para_offset(cpu) \
	(imx_cpu_gpr_entry_offset(cpu) + 4)

#define IMX_CPU_SYNC_OFF	~0
#define IMX_CPU_SYNC_ON		0
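
/*
 * Per-CPU PSCI affinity state, kept in secure data so it is accessible from
 * the secure monitor: core 0 boots up, core 1 starts powered off.
 */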
u8 psci_state[IMX7D_PSCI_NR_CPUS] __secure_data = {
	PSCI_AFFINITY_LEVEL_ON,
	PSCI_AFFINITY_LEVEL_OFF};

enum imx_gpc_slot {
	CORE0_A7,
	CORE1_A7,
	SCU_A7,
	FAST_MEGA_MIX,
	MIPI_PHY,
	PCIE_PHY,
	USB_OTG1_PHY,
	USB_OTG2_PHY,
	USB_HSIC_PHY,
	CORE0_M4,
};

enum mxc_cpu_pwr_mode {
	RUN,
	WAIT,
	STOP,
};

extern void psci_system_resume(void);

static inline void psci_set_state(int cpu, u8 state)
{
	psci_state[cpu] = state;
	dsb();
	isb();
}

static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
{
	writel(enable, GPC_IPS_BASE_ADDR + offset);
}
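
/*
 * Request the GPC (General Power Controller) to power a core's PGC domain
 * up (pdn == true) or down (pdn == false), and wait until the hardware
 * clears the request bit.
 */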
__secure void imx_gpcv2_set_core_power(int cpu, bool pdn)
{
	u32 reg = pdn ? GPC_CPU_PGC_SW_PUP_REQ : GPC_CPU_PGC_SW_PDN_REQ;
	u32 pgc = cpu ? GPC_PGC_C1 : GPC_PGC_C0;
	u32 pdn_pup_req = cpu ? BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 :
		BM_CPU_PGC_SW_PDN_PUP_REQ_CORE0_A7;
	u32 val;

	imx_gpcv2_set_m_core_pgc(true, pgc);

	val = readl(GPC_IPS_BASE_ADDR + reg);
	val |= pdn_pup_req;
	writel(val, GPC_IPS_BASE_ADDR + reg);

	while ((readl(GPC_IPS_BASE_ADDR + reg) & pdn_pup_req) != 0)
		;

	imx_gpcv2_set_m_core_pgc(false, pgc);
}
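
/*
 * Enable or disable a Cortex-A7 core via its core-enable bit in the
 * SRC (System Reset Controller) A7RCR1 register.
 */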
__secure void imx_enable_cpu_ca7(int cpu, bool enable)
{
	u32 mask, val;

	mask = 1 << (BP_SRC_A7RCR1_A7_CORE1_ENABLE + cpu - 1);
	val = readl(SRC_BASE_ADDR + SRC_A7RCR1);
	val = enable ? val | mask : val & ~mask;
	writel(val, SRC_BASE_ADDR + SRC_A7RCR1);
}

__secure void psci_arch_cpu_entry(void)
{
	u32 cpu = psci_get_cpu_id();

	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON);
}
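
/*
 * PSCI CPU_ON: validate the target MPIDR, record the non-secure entry point
 * and context, point the core's SRC GPR at psci_cpu_entry, then power up
 * and release the core.
 */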
__secure s32 psci_cpu_on(u32 __always_unused function_id, u32 mpidr, u32 ep,
			 u32 context_id)
{
	u32 cpu = mpidr & MPIDR_AFF0;

	if (mpidr & ~MPIDR_AFF0)
		return ARM_PSCI_RET_INVAL;

	if (cpu >= IMX7D_PSCI_NR_CPUS)
		return ARM_PSCI_RET_INVAL;

	if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON)
		return ARM_PSCI_RET_ALREADY_ON;

	if (psci_state[cpu] == PSCI_AFFINITY_LEVEL_ON_PENDING)
		return ARM_PSCI_RET_ON_PENDING;

	psci_save(cpu, ep, context_id);

	writel((u32)psci_cpu_entry, imx_cpu_gpr_entry_offset(cpu));

	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_ON_PENDING);

	imx_gpcv2_set_core_power(cpu, true);
	imx_enable_cpu_ca7(cpu, true);

	return ARM_PSCI_RET_SUCCESS;
}
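
/*
 * PSCI CPU_OFF: tear down the calling core, mark it off, then park in WFI
 * until psci_affinity_info() running on CPU0 finishes powering it down.
 */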
__secure s32 psci_cpu_off(void)
{
	int cpu;

	cpu = psci_get_cpu_id();

	psci_cpu_off_common();
	psci_set_state(cpu, PSCI_AFFINITY_LEVEL_OFF);

	imx_enable_cpu_ca7(cpu, false);
	imx_gpcv2_set_core_power(cpu, false);

	/*
	 * We use the cpu jumping argument register to sync with
	 * psci_affinity_info() which is running on cpu0 to kill the cpu.
	 */
	writel(IMX_CPU_SYNC_OFF, imx_cpu_gpr_para_offset(cpu));

	while (1)
		wfi();
}
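
/*
 * PSCI SYSTEM_RESET: ungate the WDOG1 clocks, trigger a watchdog reset and
 * wait for it to take effect.
 */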
__secure void psci_system_reset(void)
{
	struct wdog_regs *wdog = (struct wdog_regs *)WDOG1_BASE_ADDR;

	/* make sure WDOG1 clock is enabled */
	writel(0x1 << 28, CCM_BASE_ADDR + CCM_ROOT_WDOG);
	writel(0x3, CCM_BASE_ADDR + CCM_CCGR_WDOG1);
	writew(WCR_WDE, &wdog->wcr);

	while (1)
		wfi();
}
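
/*
 * PSCI SYSTEM_OFF: enable the SNVS clock and request power-off through the
 * SNVS low-power control register (DP_EN + TOP).
 */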
__secure void psci_system_off(void)
{
	u32 val;

	/* make sure SNVS clock is enabled */
	writel(0x3, CCM_BASE_ADDR + CCM_CCGR_SNVS);

	val = readl(SNVS_BASE_ADDR + SNVS_LPCR);
	val |= BP_SNVS_LPCR_DP_EN | BP_SNVS_LPCR_TOP;
	writel(val, SNVS_BASE_ADDR + SNVS_LPCR);

	while (1)
		wfi();
}

__secure u32 psci_version(void)
{
	return ARM_PSCI_VER_1_0;
}

__secure s32 psci_cpu_suspend(u32 __always_unused function_id, u32 power_state,
			      u32 entry_point_address,
			      u32 context_id)
{
	return ARM_PSCI_RET_INVAL;
}
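
/*
 * PSCI AFFINITY_INFO: report the state of a single core. This is also where
 * a core that called CPU_OFF is finally powered down: once the sync word in
 * the core's SRC GPR shows it is parked, CPU0 completes the power-down on
 * its behalf.
 */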
__secure s32 psci_affinity_info(u32 __always_unused function_id,
				u32 target_affinity,
				u32 lowest_affinity_level)
{
	u32 cpu = target_affinity & MPIDR_AFF0;

	if (lowest_affinity_level > 0)
		return ARM_PSCI_RET_INVAL;

	if (target_affinity & ~MPIDR_AFF0)
		return ARM_PSCI_RET_INVAL;

	if (cpu >= IMX7D_PSCI_NR_CPUS)
		return ARM_PSCI_RET_INVAL;

	/* CPU is waiting to be killed */
	if (readl(imx_cpu_gpr_para_offset(cpu)) == IMX_CPU_SYNC_OFF) {
		imx_enable_cpu_ca7(cpu, false);
		imx_gpcv2_set_core_power(cpu, false);
		writel(IMX_CPU_SYNC_ON, imx_cpu_gpr_para_offset(cpu));
	}

	return psci_state[cpu];
}

__secure s32 psci_migrate_info_type(u32 function_id)
{
	/* Trusted OS is either not present or does not require migration */
	return 2;
}

__secure s32 psci_features(u32 __always_unused function_id, u32 psci_fid)
{
	switch (psci_fid) {
	case ARM_PSCI_0_2_FN_PSCI_VERSION:
	case ARM_PSCI_0_2_FN_CPU_OFF:
	case ARM_PSCI_0_2_FN_CPU_ON:
	case ARM_PSCI_0_2_FN_AFFINITY_INFO:
	case ARM_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case ARM_PSCI_0_2_FN_SYSTEM_OFF:
	case ARM_PSCI_0_2_FN_SYSTEM_RESET:
	case ARM_PSCI_1_0_FN_PSCI_FEATURES:
	case ARM_PSCI_1_0_FN_SYSTEM_SUSPEND:
		return 0x0;
	}

	return ARM_PSCI_RET_NI;
}
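
/*
 * Program the GPC low-power mode for the A7 platform (RUN, WAIT or STOP),
 * including the SLPCR standby/DSM options and the IRQ #32 mask used by the
 * low-power sequence workaround described below.
 */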
static __secure void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode)
{
	u32 val1, val2, val3;

	val1 = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
	val2 = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* all cores' LPM settings must be the same */
	val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1);
	val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;

	val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
		  BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY);

	/*
	 * GPC: When an improper low-power sequence is used,
	 * the SoC enters low power mode before the ARM core executes WFI.
	 *
	 * Software workaround:
	 * 1) Software should trigger IRQ #32 (IOMUX) to be always pending
	 *    by setting IOMUX_GPR1_IRQ.
	 * 2) Software should then unmask IRQ #32 in GPC before setting GPC
	 *    Low-Power mode.
	 * 3) Software should mask IRQ #32 right after GPC Low-Power mode
	 *    is set.
	 */
	switch (mode) {
	case RUN:
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 &= ~0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	case WAIT:
		val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0;
		val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 &= ~0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	case STOP:
		val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0;
		val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM;
		val2 |= BM_SLPCR_EN_DSM;
		val2 |= BM_SLPCR_SBYOS;
		val2 |= BM_SLPCR_VSTBY;
		val2 |= BM_SLPCR_BYPASS_PMIC_READY;
		val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		val3 |= 0x1;
		writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0);
		break;
	default:
		return;
	}

	writel(val1, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
	writel(val2, GPC_IPS_BASE_ADDR + GPC_SLPCR);
}
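
/*
 * Allow (or forbid) the GPC to power down the A7 platform and L2 cache
 * when the SoC enters a low-power mode.
 */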
static __secure void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn)
{
	u32 val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);

	val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE);
	if (pdn)
		val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE;

	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}

static __secure void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn)
{
	u32 val;

	val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
	if (cpu == 0) {
		if (pdn)
			val |= BM_LPCR_A7_AD_EN_C0_PDN |
				BM_LPCR_A7_AD_EN_C0_PUP;
		else
			val &= ~(BM_LPCR_A7_AD_EN_C0_PDN |
				 BM_LPCR_A7_AD_EN_C0_PUP);
	}

	if (cpu == 1) {
		if (pdn)
			val |= BM_LPCR_A7_AD_EN_C1_PDN |
				BM_LPCR_A7_AD_EN_C1_PUP;
		else
			val &= ~(BM_LPCR_A7_AD_EN_C1_PDN |
				 BM_LPCR_A7_AD_EN_C1_PUP);
	}

	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD);
}
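
/*
 * Program one entry of the GPC power-sequencing slot table: each slot holds
 * a 2-bit request field per power domain (mode false = power down, true =
 * power up). When 'ack' is set, the domain's own handshake replaces the
 * dummy acknowledge, so the sequence waits for that domain to finish.
 */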
static __secure void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core,
					    bool mode, bool ack)
{
	u32 val;

	if (index >= MAX_SLOT_NUMBER)
		return;

	/* set slot */
	writel(readl(GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4) |
	       ((mode + 1) << (m_core * 2)),
	       GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4);

	if (ack) {
		/* set ack */
		val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
		/* clear dummy ack */
		val &= ~(mode ? BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK :
			 BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK);
		val |= 1 << (m_core + (mode ? 16 : 0));
		writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
	}
}
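
/*
 * Switch the system counter to its alternate low-frequency clock (FCR1) for
 * suspend and back to the base clock (FCR0) on resume, so time keeps
 * advancing while the high-frequency clocks are off.
 */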
static __secure void imx_system_counter_resume(void)
{
	u32 val;

	val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
	val &= ~BM_SYS_COUNTER_CNTCR_FCR1;
	val |= BM_SYS_COUNTER_CNTCR_FCR0;
	writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}

static __secure void imx_system_counter_suspend(void)
{
	u32 val;

	val = readl(SYSCNT_CTRL_IPS_BASE_ADDR);
	val &= ~BM_SYS_COUNTER_CNTCR_FCR0;
	val |= BM_SYS_COUNTER_CNTCR_FCR1;
	writel(val, SYSCNT_CTRL_IPS_BASE_ADDR);
}
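
/*
 * Minimal GIC distributor re-initialization used on the resume path, after
 * the distributor has been disabled for suspend.
 */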
static __secure void gic_resume(void)
{
	u32 itlinesnr, i;
	u32 gic_dist_addr = GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET;

	/* enable the GIC distributor */
	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
	       gic_dist_addr + GICD_CTLR);

	/* TYPER[4:0] contains an encoded number of available interrupts */
	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;

	/*
	 * Set all bits in the GIC group registers to one to allow access
	 * from non-secure state. The first 32 interrupts are private per
	 * CPU and will be set later when enabling the GIC for each core.
	 */
	for (i = 1; i <= itlinesnr; i++)
		writel((u32)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);
}
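
/*
 * Hand the Anatop PLLs over to low-power-mode control for suspend by setting
 * their override bits, and release the override again on resume.
 */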
static inline void imx_pll_suspend(void)
{
	writel(BM_ANATOP_ARM_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_SET);
	writel(BM_ANATOP_DDR_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_SET);
	writel(BM_ANATOP_SYS_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_SET);
	writel(BM_ANATOP_ENET_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_SET);
	writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_SET);
	writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_SET);
}

static inline void imx_pll_resume(void)
{
	writel(BM_ANATOP_ARM_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_CLR);
	writel(BM_ANATOP_DDR_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_CLR);
	writel(BM_ANATOP_SYS_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_CLR);
	writel(BM_ANATOP_ENET_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_CLR);
	writel(BM_ANATOP_AUDIO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_CLR);
	writel(BM_ANATOP_VIDEO_PLL_OVERRIDE,
	       ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_CLR);
}
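
/*
 * Busy-wait for roughly 'usec' microseconds using the ARM generic timer:
 * CNTFRQ gives the counter frequency, CNTPCT the current count.
 */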
static inline void imx_udelay(u32 usec)
{
	u32 freq;
	u64 start, end;

	asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (freq));
	asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (start));
	do {
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (end));
		if ((end - start) > usec * (freq / 1000000))
			break;
	} while (1);
}
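
/*
 * DDR controller self-refresh entry: wait for the AXI ports to go idle
 * (DDRC_PSTAT), request self-refresh via DDRC_PWRCTL, then poll DDRC_STAT
 * until the controller reports self-refresh mode. Exit reverses the
 * sequence.
 */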
static inline void imx_ddrc_enter_self_refresh(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while (readl(DDRC_IPS_BASE_ADDR + DDRC_PSTAT) & 0x10001)
		;

	writel(0x20, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x23) != 0x23)
		;

	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x8,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

static inline void imx_ddrc_exit_self_refresh(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x3) == 0x3)
		;

	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x1,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}
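
/*
 * Resume path after deep sleep: the wakeup vector stored in SRC_GPR1 leads
 * here via psci_system_resume(). Bring DDR out of self-refresh, restore the
 * system counter and GPC configuration, re-initialize the GIC distributor
 * and non-secure state, then return to the saved CPU0 entry point via
 * psci_cpu_entry().
 */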
__secure void imx_system_resume(void)
{
	unsigned int i, val, imr[4], entry;

	entry = psci_get_target_pc(0);
	imx_ddrc_exit_self_refresh();
	imx_system_counter_resume();
	imx_gpcv2_set_lpm_mode(RUN);
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, false);
	imx_gpcv2_set_plat_power_gate_by_lpm(false);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU);

	/*
	 * need to mask all interrupts in GPC before
	 * changing the RBC configuration
	 */
	for (i = 0; i < 4; i++) {
		imr[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
		writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
	}

	/* configure RBC enable bit */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~BM_SLPCR_RBC_EN;
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* configure RBC count */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~BM_SLPCR_REG_BYPASS_COUNT;
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/*
	 * need to delay at least 2 cycles of CKIL (32K)
	 * due to hardware design requirement, which is
	 * ~61us; here we use 65us to be safe
	 */
	imx_udelay(65);

	/* restore GPC interrupt mask settings */
	for (i = 0; i < 4; i++)
		writel(imr[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/* initialize gic distributor */
	gic_resume();
	_nonsec_init();

	/* save cpu0 entry */
	psci_save(0, entry, 0);
	psci_cpu_entry();
}
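
/*
 * PSCI SYSTEM_SUSPEND: put the SoC into deep sleep. Hand the PLLs and
 * system counter over to low-power control, program GPC STOP mode and the
 * power-sequencing slots for core0/SCU, flush the caches, put DDR into
 * self-refresh, apply the e10133 RBC workaround, save the resume vector and
 * stack pointer in the SRC GPRs, and finally execute WFI.
 */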
__secure void psci_system_suspend(u32 __always_unused function_id,
				  u32 ep, u32 context_id)
{
	u32 gpc_mask[4];
	u32 i, val;

	psci_save(0, ep, context_id);

	/* overwrite PLL to be controlled by low power mode */
	imx_pll_suspend();
	imx_system_counter_suspend();

	/* set CA7 platform to enter STOP mode */
	imx_gpcv2_set_lpm_mode(STOP);

	/* enable core0/scu power down/up with low power mode */
	imx_gpcv2_set_cpu_power_gate_by_lpm(0, true);
	imx_gpcv2_set_plat_power_gate_by_lpm(true);

	/* time slot settings for core0 and scu */
	imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false);
	imx_gpcv2_set_slot_ack(1, SCU_A7, false, true);
	imx_gpcv2_set_slot_ack(5, SCU_A7, true, false);
	imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true);

	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0);
	imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU);

	psci_v7_flush_dcache_all();

	imx_ddrc_enter_self_refresh();

	/*
	 * e10133: ARM: Boot failure after A7 enters into
	 * low-power idle mode
	 *
	 * Workaround:
	 * If both CPU0/CPU1 are IDLE, the last IDLE CPU should
	 * disable the GIC first, then use REG_BYPASS_COUNTER to
	 * mask wakeup INT, and then execute "wfi" to bring the
	 * system into power down processing safely.
	 * The counter must be enabled as close to the "wfi" state
	 * as possible. The following equation can be used to
	 * determine the RBC counter value:
	 * RBC_COUNT * (1/32K RTC frequency) >=
	 * (46 + PDNSCR_SW + PDNSCR_SW2ISO) * (1/IPG_CLK frequency).
	 */

	/* disable GIC distributor */
	writel(0, GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET);

	for (i = 0; i < 4; i++)
		gpc_mask[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/*
	 * enable the RBC bypass counter here
	 * to hold off the interrupts. RBC counter
	 * = 8 (240us). With this setting, the latency
	 * from wakeup interrupt to ARM power up
	 * is ~250us.
	 */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val &= ~(0x3f << 24);
	val |= (0x8 << 24);
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* enable the counter. */
	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
	val |= (1 << 30);
	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);

	/* unmask all the GPC interrupts. */
	for (i = 0; i < 4; i++)
		writel(gpc_mask[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);

	/*
	 * now delay for a short while (3usec)
	 * ARM is at 1GHz at this point
	 * so a short loop should be enough.
	 * this delay is required to ensure that
	 * the RBC counter can start counting in
	 * case an interrupt is already pending
	 * or in case an interrupt arrives just
	 * as ARM is about to assert DSM_request.
	 */
	imx_udelay(3);

	/* save resume entry and sp in CPU0 GPR registers */
	asm volatile("mov %0, sp" : "=r" (val));
	writel((u32)psci_system_resume, SRC_BASE_ADDR + SRC_GPR1_MX7D);
	writel(val, SRC_BASE_ADDR + SRC_GPR2_MX7D);

	/* sleep */
	while (1)
		wfi();
}