cache.c

/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/cache.h>

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)
#define CACHE_VER_NUM_MASK	0xF

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_INV_IC	0x3

/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200
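
/*
 * For reference: these values correspond to bits 0 (DIS), 6 (IM), 8 (BUSY)
 * and 9 (RGN_OP invalidate) of the SLC_CTRL register; bit 9 is the low bit
 * of the CTRL.RGN_OP[11..9] field described in __slc_rgn_op() below.
 */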

/*
 * By default these variables would fall into the .bss section.
 * But the .bss section is not relocated, so they would be initialized
 * before relocation yet only used after .bss is zeroed, losing their
 * values. Hence they are explicitly placed in .data.
 */
int l1_line_sz __section(".data");
bool dcache_exists __section(".data") = false;
bool icache_exists __section(".data") = false;

#define CACHE_LINE_MASK		(~(l1_line_sz - 1))
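
/*
 * Illustrative example (assuming a 64-byte L1 line, i.e. l1_line_sz == 64):
 * CACHE_LINE_MASK == ~0x3F, so 0x80001234 & CACHE_LINE_MASK == 0x80001200,
 * i.e. an address is rounded down to the start of its cache line.
 */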

#ifdef CONFIG_ISA_ARCV2
int slc_line_sz __section(".data");
bool slc_exists __section(".data") = false;
bool ioc_exists __section(".data") = false;
bool pae_exists __section(".data") = false;

/* To force enable IOC set ioc_enable to 'true' */
bool ioc_enable __section(".data") = false;

void read_decode_mmu_bcr(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	u32 tmp;

	tmp = read_aux_reg(ARC_AUX_MMU_BCR);

	struct bcr_mmu_4 {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int ver:8, sasid:1, sz1:4, sz0:4, res:2, pae:1,
		     n_ways:2, n_entry:2, n_super:2, u_itlb:3, u_dtlb:3;
#else
	/*           DTLB      ITLB      JES        JE         JA      */
	unsigned int u_dtlb:3, u_itlb:3, n_super:2, n_entry:2, n_ways:2,
		     pae:1, res:2, sz0:4, sz1:4, sasid:1, ver:8;
#endif /* CONFIG_CPU_BIG_ENDIAN */
	} *mmu4;

	mmu4 = (struct bcr_mmu_4 *)&tmp;

	pae_exists = !!mmu4->pae;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */
}

static void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
	unsigned int ctrl;
	unsigned long end;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 * - b'000 (default) is Flush,
	 * - b'001 is Invalidate if CTRL.IM == 0
	 * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (slc_line_sz - 1) to sz
	 */
	end = paddr + sz + slc_line_sz - 1;
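
	/*
	 * For example, with a 128-byte SLC line (slc_line_sz == 128),
	 * paddr == 0x80000000 and sz == 1 give end == 0x80000080, which
	 * lies in the next line and thus differs from START as required.
	 */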

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only setup lower ones
	 * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

#endif /* CONFIG_ISA_ARCV2 */

#ifdef CONFIG_ISA_ARCV2
static void read_decode_cache_bcr_arcv2(void)
{
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, way:2, lsz:2, sz:4;
#else
			unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
		} fields;
		unsigned int word;
	} slc_cfg;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, ver:8;
#else
			unsigned int ver:8, pad:24;
#endif
		} fields;
		unsigned int word;
	} sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = true;
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	union {
		struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
		} fields;
		unsigned int word;
	} cbcr;

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c && ioc_enable)
		ioc_exists = true;
}
#endif

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
		} fields;
		unsigned int word;
	} ibcr, dbcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		icache_exists = true;
		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		dcache_exists = true;
		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
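
	/*
	 * Example of the line-length encoding above: a 64-byte line is
	 * reported as line_len == 3 in the I$ BCR (8 << 3 == 64) and as
	 * line_len == 2 in the D$ BCR (16 << 2 == 64).
	 */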

	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
		panic("Instruction and data cache line lengths differ\n");
}

void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be power of 2 and at least 4 KiB");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
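		/*
		 * Worked example: for a 512 MiB aperture ap_size / 1024 == 0x80000
		 * (2 ^ 19), order_base_2() returns 19 and the register is
		 * programmed with 19 - 2 == 17 == 0x11, matching the note above.
		 */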
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");
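
		/*
		 * AP0_BASE is programmed in 4 KiB units (hence the shift by 12);
		 * e.g. an assumed DDR base of 0x80000000 would be written as 0x80000.
		 */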
		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}

	read_decode_mmu_bcr();

	/*
	 * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
	 * only if PAE exists in current HW. So we have to check pae_exists
	 * before using them.
	 */
	if (slc_exists && pae_exists)
		slc_upper_region_init();
#endif /* CONFIG_ISA_ARCV2 */
}

int icache_status(void)
{
	if (!icache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void icache_enable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
			      IC_CTRL_CACHE_DISABLE);
}

void invalidate_icache_all(void)
{
	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	if (icache_status()) {
		write_aux_reg(ARC_AUX_IC_IVIC, 1);

		/*
		 * As per ARC HS databook (see chapter 5.3.3.2)
		 * it is required to add 3 NOPs after each write to IC_IVIC.
		 */
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();

		read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
	}

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}

int dcache_status(void)
{
	if (!dcache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void dcache_enable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long sz,
				     const int cacheop)
{
	unsigned int aux_cmd;
#if (CONFIG_ARC_MMU_VER == 3)
	unsigned int aux_tag;
#endif
	int num_lines;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_AUX_IC_IVIL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_DC_PTAG;
#endif
	}

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);
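
	/*
	 * Example (64-byte lines): paddr == 0x80000010, sz == 0x70 becomes
	 * paddr == 0x80000000, sz == 0x80 above, so num_lines == 2 and the
	 * loop below touches lines 0x80000000 and 0x80000040, fully covering
	 * the original range.
	 */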

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(aux_tag, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}

static unsigned int __before_dc_op(const int op)
{
	unsigned int reg;

	/* Always read DC_CTRL so the returned value is never uninitialized */
	reg = read_aux_reg(ARC_AUX_DC_CTRL);

	if (op == OP_INV) {
		/*
		 * IM is set by default and implies Flush-n-inv
		 * Clear it here for vanilla inv
		 */
		write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);

	/* Switch back to default Invalidate mode */
	if (op == OP_INV)
		write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
}

static inline void __dc_entire_op(const int cacheop)
{
	int aux;
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	__cache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop, ctrl_reg);
}
#else
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, sz, cacheop)
#endif /* !CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_INV);
#endif
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_rgn_op(start, end - start, OP_FLUSH);
#endif
}
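
/*
 * Illustrative usage sketch (not part of this file): a driver would
 * typically flush a buffer before an outbound DMA transfer and invalidate
 * it before reading data written by a device, e.g.
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * where 'buf' and 'len' are a hypothetical DMA buffer and its length.
 */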

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

void invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_FLUSH);
#endif
}