
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * Data cache (L1 D$ or SL$) entire invalidate operation or data cache disable
 * operation may result in unexpected behavior and data loss even if we flush
 * data cache right before invalidation. That may happen if we store any context
 * on stack (like we store the BLINK register on stack before a function call).
 * BLINK register is the register where the return address is automatically
 * saved when we do a function call with instructions like 'bl'.
 *
 * Here is a real example:
 * We may hang in the code below because we store the BLINK register on stack
 * in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack)              ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$]                    ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all function:
 *         // we saved it to stack and invalidated L1 D$ after that!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // we don't have this data in L1 dcache as we invalidated it in [point 3]
 *     // so we get it from the next memory level (for example DDR memory)
 *     // but in memory we have the value which we saved in [point 1], which
 *     // is the return address of the flush_dcache_all function (instead of
 *     // the address of the current invalidate_dcache_all function which we
 *     // saved in [point 2] !)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points to invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we may fix that by doing flush & invalidation of D$ with a
 * single instruction (instead of a flush and invalidation instruction pair)
 * and by forcing function inlining with the '__attribute__((always_inline))'
 * gcc attribute to avoid any function call (and BLINK store) between cache
 * flush and disable.
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, since version 3.0 HS
 * supports SL$ (L2 system level cache) disable) but we don't support them in SW.
 * Configuration 1:
 *        ______________________
 *       |                      |
 *       |       ARC CPU        |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                       |
 *       |      main memory      |
 *       |_______________________|
 *
 * Configuration 2:
 *        ______________________
 *       |                      |
 *       |       ARC CPU        |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off         on/off
 *        ___|______________|____
 *       |                       |
 *       |       L2 (SL$)        |
 *       |_______________________|
 *          always must be on
 *        ___|______________|____
 *       |                       |
 *       |      main memory      |
 *       |_______________________|
 *
 * Configuration 3:
 *        ______________________
 *       |                      |
 *       |       ARC CPU        |
 *       |______________________|
 *        ___|___        ___|___
 *       |       |      |       |
 *       | L1 I$ |      | L1 D$ |
 *       |_______|      |_______|
 *        on/off       must be on
 *        ___|______________|____      _______
 *       |                       |    |       |
 *       |       L2 (SL$)        |----|  IOC  |
 *       |_______________________|    |_______|
 *          always must be on          on/off
 *        ___|______________|____
 *       |                       |
 *       |      main memory      |
 *       |_______________________|
 */
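
/*
 * The safe pattern described in [ NOTE 1 ] boils down to the sketch below.
 * This is only a simplified illustration of what dcache_disable() and
 * __dc_entire_op() further down in this file already do, not an additional
 * API: request flush and invalidate as one operation and force-inline every
 * helper, so no BLINK value is spilled to the stack between the flush and
 * the moment the cache contents become invalid.
 *
 *   static inlined_cachefunc void __dc_entire_op(const int cacheop);
 *
 *   void dcache_disable(void)
 *   {
 *       __dc_entire_op(OP_FLUSH_N_INV); // one flush-n-inv, not flush + inv
 *       // ... disable the cache immediately, no function calls in between
 *   }
 */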

DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE   BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE   BIT(0)
#define DC_CTRL_INV_MODE_FLUSH  BIT(6)
#define DC_CTRL_FLUSH_STATUS    BIT(8)

#define OP_INV                  BIT(0)
#define OP_FLUSH                BIT(1)
#define OP_FLUSH_N_INV          (OP_FLUSH | OP_INV)

/* Bit values in SLC_CONTROL */
#define SLC_CTRL_DIS            0x001
#define SLC_CTRL_IM             0x040
#define SLC_CTRL_BUSY           0x100
#define SLC_CTRL_RGN_OP_INV     0x200

#define CACHE_LINE_MASK         (~(gd->arch.l1_line_sz - 1))
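
/*
 * For illustration (assuming a 64-byte L1 line, i.e. l1_line_sz == 64):
 * CACHE_LINE_MASK == ~0x3f, so 'addr & CACHE_LINE_MASK' rounds an address
 * down to the start of its cache line and 'addr & ~CACHE_LINE_MASK' gives
 * the offset within that line (this is how __dcache_line_loop() below
 * extends 'sz' to cover whole lines).
 */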

/*
 * We don't want to use '__always_inline' macro here as it can be redefined
 * to simple 'inline' in some cases which breaks stuff. See [ NOTE 1 ] for more
 * details about the reasons we need to use always_inline functions.
 */
#define inlined_cachefunc       inline __attribute__((always_inline))

static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);

static inline bool pae_exists(void)
{
        /* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
        union bcr_mmu_4 mmu4;

        mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

        if (mmu4.fields.pae)
                return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

        return false;
}

static inlined_cachefunc bool icache_exists(void)
{
        union bcr_di_cache ibcr;

        ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
        return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
        if (!icache_exists())
                return false;

        return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
        union bcr_di_cache dbcr;

        dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
        return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
        if (!dcache_exists())
                return false;

        return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
        if (is_isa_arcv2()) {
                union bcr_generic sbcr;

                sbcr.word = read_aux_reg(ARC_BCR_SLC);
                return !!sbcr.fields.ver;
        }

        return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
        /*
         * If L1 data cache is disabled, SL$ is bypassed and all load/store
         * requests are sent directly to main memory.
         */
        return !dcache_enabled();
}

static inline bool ioc_exists(void)
{
        if (is_isa_arcv2()) {
                union bcr_clust_cfg cbcr;

                cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
                return cbcr.fields.c;
        }

        return false;
}

static inline bool ioc_enabled(void)
{
        /*
         * We only check the CONFIG option instead of checking the IOC HW
         * state, as IOC must be disabled by default.
         */
        if (is_ioc_enabled())
                return ioc_exists();

        return false;
}

static inlined_cachefunc void __slc_entire_op(const int op)
{
        unsigned int ctrl;

        if (!slc_exists())
                return;

        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

        if (!(op & OP_FLUSH))           /* i.e. OP_INV */
                ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
        else
                ctrl |= SLC_CTRL_IM;

        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

        if (op & OP_INV)        /* Inv or flush-n-inv use same cmd reg */
                write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
        else
                write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

        /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(ARC_AUX_SLC_CTRL);

        /* Important to wait for flush to complete */
        while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}

static void slc_upper_region_init(void)
{
        /*
         * ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers exist
         * only if PAE exists in the current HW, so we have to check
         * pae_exists() before using them.
         */
        if (!pae_exists())
                return;

        /*
         * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
         * as we don't use PAE40.
         */
        write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
        write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}

static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2

        unsigned int ctrl;
        unsigned long end;

        if (!slc_exists())
                return;

        /*
         * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
         * - b'000 (default) is Flush,
         * - b'001 is Invalidate if CTRL.IM == 0
         * - b'001 is Flush-n-Invalidate if CTRL.IM == 1
         */
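        /*
         * E.g. for op == OP_FLUSH_N_INV the code below ends up setting both
         * IM and RGN_OP[9] (SLC_CTRL_IM | SLC_CTRL_RGN_OP_INV), which the
         * table above decodes as Flush-n-Invalidate.
         */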
        ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

        /* Don't rely on default value of IM bit */
        if (!(op & OP_FLUSH))           /* i.e. OP_INV */
                ctrl &= ~SLC_CTRL_IM;   /* clear IM: Disable flush before Inv */
        else
                ctrl |= SLC_CTRL_IM;

        if (op & OP_INV)
                ctrl |= SLC_CTRL_RGN_OP_INV;    /* Inv or flush-n-inv */
        else
                ctrl &= ~SLC_CTRL_RGN_OP_INV;

        write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

        /*
         * Lower bits are ignored, no need to clip
         * END needs to be setup before START (latter triggers the operation)
         * END can't be same as START, so add (slc_line_sz - 1) to sz
         */
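        /*
         * For example (hypothetical values, assuming a 128-byte SLC line):
         * paddr = 0x80000000, sz = 0x40 -> end = 0x800000bf. The lower bits
         * are ignored by HW, so RGN_END selects a different line than
         * RGN_START even for a region smaller than one line.
         */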
        end = paddr + sz + gd->arch.slc_line_sz - 1;

        /*
         * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
         * are always == 0 as we don't use PAE40, so we only setup lower ones
         * (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START)
         */
        write_aux_reg(ARC_AUX_SLC_RGN_END, end);
        write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

        /* Make sure "busy" bit reports correct status, see STAR 9001165532 */
        read_aux_reg(ARC_AUX_SLC_CTRL);

        while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);

#endif /* CONFIG_ISA_ARCV2 */
}

static void arc_ioc_setup(void)
{
        /* IOC Aperture start is equal to DDR start */
        unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
        /* IOC Aperture size is equal to DDR size */
        long ap_size = CONFIG_SYS_SDRAM_SIZE;

        /* Unsupported configuration. See [ NOTE 2 ] for more details. */
        if (!slc_exists())
                panic("Try to enable IOC but SLC is not present");

        /* Unsupported configuration. See [ NOTE 2 ] for more details. */
        if (!dcache_enabled())
                panic("Try to enable IOC but L1 D$ is disabled");

        if (!is_power_of_2(ap_size) || ap_size < 4096)
                panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

        /* IOC Aperture start must be aligned to the size of the aperture */
        if (ap_base % ap_size != 0)
                panic("IOC Aperture start must be aligned to the size of the aperture");

        flush_n_invalidate_dcache_all();

        /*
         * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
         * so setting 0x11 implies 512M, 0x12 implies 1G...
         */
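        /*
         * Worked example (hypothetical 512 MiB of DDR): ap_size / 1024 is
         * 524288 = 2^19, order_base_2() of that is 19, and 19 - 2 = 17 = 0x11,
         * which per the encoding above selects a 512M aperture.
         */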
        write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
                      order_base_2(ap_size / 1024) - 2);

        write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
        write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
        write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}

static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2

        union bcr_slc_cfg slc_cfg;

        if (slc_exists()) {
                slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
                gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

                /*
                 * We don't support configuration where L1 I$ or L1 D$ is
                 * absent but SL$ exists. See [ NOTE 2 ] for more details.
                 */
                if (!icache_exists() || !dcache_exists())
                        panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
        }

#endif /* CONFIG_ISA_ARCV2 */
}

void read_decode_cache_bcr(void)
{
        int dc_line_sz = 0, ic_line_sz = 0;
        union bcr_di_cache ibcr, dbcr;

        ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
        if (ibcr.fields.ver) {
                gd->arch.l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
                if (!ic_line_sz)
                        panic("Instruction cache exists but line length is 0\n");
        }

        dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
        if (dbcr.fields.ver) {
                gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
                if (!dc_line_sz)
                        panic("Data cache exists but line length is 0\n");
        }

        if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
                panic("Instruction and data cache line lengths differ\n");
}

void cache_init(void)
{
        read_decode_cache_bcr();

        if (is_isa_arcv2())
                read_decode_cache_bcr_arcv2();

        if (is_isa_arcv2() && ioc_enabled())
                arc_ioc_setup();

        if (is_isa_arcv2() && slc_exists())
                slc_upper_region_init();
}

int icache_status(void)
{
        return icache_enabled();
}

void icache_enable(void)
{
        if (icache_exists())
                write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
                              ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
        if (!icache_exists())
                return;

        __ic_entire_invalidate();

        write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
                      IC_CTRL_CACHE_DISABLE);
}

/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
        if (!icache_enabled())
                return;

        /* Any write to IC_IVIC register triggers invalidation of entire I$ */
        write_aux_reg(ARC_AUX_IC_IVIC, 1);

        /*
         * As per ARC HS databook (see chapter 5.3.3.2)
         * it is required to add 3 NOPs after each write to IC_IVIC.
         */
        __builtin_arc_nop();
        __builtin_arc_nop();
        __builtin_arc_nop();

        read_aux_reg(ARC_AUX_IC_CTRL);  /* blocks */
}

void invalidate_icache_all(void)
{
        __ic_entire_invalidate();

        /*
         * If SL$ is bypassed for data, it is used only for instructions,
         * so we need to invalidate it too.
         * TODO: HS 3.0 supports SLC disable so we need to check slc
         * enable/disable status here.
         */
        if (is_isa_arcv2() && slc_data_bypass())
                __slc_entire_op(OP_INV);
}

int dcache_status(void)
{
        return dcache_enabled();
}

void dcache_enable(void)
{
        if (!dcache_exists())
                return;

        write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
                      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
        if (!dcache_exists())
                return;

        __dc_entire_op(OP_FLUSH_N_INV);

        /*
         * As SLC will be bypassed for data after L1 D$ is disabled, we need
         * to flush it before disabling L1 D$. We also invalidate SLC to
         * avoid any inconsistent data problems after enabling L1 D$ again
         * with the dcache_enable() function.
         */
        if (is_isa_arcv2())
                __slc_entire_op(OP_FLUSH_N_INV);

        write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
                      DC_CTRL_CACHE_DISABLE);
}

/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
                                      const int cacheop)
{
        unsigned int aux_cmd;
        int num_lines;

        /* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
        aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

        sz += paddr & ~CACHE_LINE_MASK;
        paddr &= CACHE_LINE_MASK;

        num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);
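
        /*
         * Worked example (hypothetical values, assuming 64-byte L1 lines):
         * paddr = 0x80000010, sz = 0x100 -> paddr is rounded down to
         * 0x80000000, sz grows to 0x110 and num_lines = DIV_ROUND_UP(0x110, 64)
         * = 5, i.e. every line touched by the original [paddr, paddr + sz) range.
         */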
        while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
                write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
                write_aux_reg(aux_cmd, paddr);
                paddr += gd->arch.l1_line_sz;
        }
}

static inlined_cachefunc void __before_dc_op(const int op)
{
        unsigned int ctrl;

        ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

        /* IM bit implies flush-n-inv, instead of vanilla inv */
        if (op == OP_INV)
                ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
        else
                ctrl |= DC_CTRL_INV_MODE_FLUSH;

        write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
        if (op & OP_FLUSH)      /* flush / flush-n-inv both wait */
                while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}

static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
        int aux;

        if (!dcache_enabled())
                return;

        __before_dc_op(cacheop);

        if (cacheop & OP_INV)   /* Inv or flush-n-inv use same cmd reg */
                aux = ARC_AUX_DC_IVDC;
        else
                aux = ARC_AUX_DC_FLSH;

        write_aux_reg(aux, 0x1);

        __after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
                                const int cacheop)
{
        if (!dcache_enabled())
                return;

        __before_dc_op(cacheop);
        __dcache_line_loop(paddr, sz, cacheop);
        __after_dc_op(cacheop);
}

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        /*
         * ARCv1                                 -> call __dc_line_op
         * ARCv2 && L1 D$ disabled               -> nothing
         * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
         * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
         */
        if (!is_isa_arcv2() || !ioc_enabled())
                __dc_line_op(start, end - start, OP_INV);

        if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
                __slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
        if (start >= end)
                return;

        /*
         * ARCv1                                 -> call __dc_line_op
         * ARCv2 && L1 D$ disabled               -> nothing
         * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
         * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
         */
        if (!is_isa_arcv2() || !ioc_enabled())
                __dc_line_op(start, end - start, OP_FLUSH);

        if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
                __slc_rgn_op(start, end - start, OP_FLUSH);
}

void flush_cache(unsigned long start, unsigned long size)
{
        flush_dcache_range(start, start + size);
}

/*
 * As invalidate_dcache_all() is not used in generic U-Boot code and we don't
 * need a pure invalidate (without flush) in arch/arc code either, we implement
 * flush_n_invalidate_dcache_all() (flush and invalidate in one operation)
 * instead, because it's much safer. See [ NOTE 1 ] for more details.
 */
void flush_n_invalidate_dcache_all(void)
{
        __dc_entire_op(OP_FLUSH_N_INV);

        if (is_isa_arcv2() && !slc_data_bypass())
                __slc_entire_op(OP_FLUSH_N_INV);
}

void flush_dcache_all(void)
{
        __dc_entire_op(OP_FLUSH);

        if (is_isa_arcv2() && !slc_data_bypass())
                __slc_entire_op(OP_FLUSH);
}

/*
 * This function cleans up all caches (and therefore syncs I$ and D$) and can
 * be used for cleanup before a Linux launch or to sync caches during
 * relocation.
 */
void sync_n_cleanup_cache_all(void)
{
        __dc_entire_op(OP_FLUSH_N_INV);

        /*
         * If SL$ is bypassed for data, it is used only for instructions,
         * and we shouldn't flush it. So invalidate it instead of flush_n_inv.
         */
        if (is_isa_arcv2()) {
                if (slc_data_bypass())
                        __slc_entire_op(OP_INV);
                else
                        __slc_entire_op(OP_FLUSH_N_INV);
        }

        __ic_entire_invalidate();
}