// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/arc-bcr.h>
#include <asm/cache.h>

/*
 * [ NOTE 1 ]:
 * An entire invalidate operation or a disable operation on the data cache
 * (L1 D$ or SL$) may result in unexpected behavior and data loss even if
 * we flush the data cache right before invalidation. That may happen if we
 * store any context on the stack (as we store the BLINK register on the
 * stack before a function call). BLINK is the register where the return
 * address is automatically saved when we do a function call with
 * instructions like 'bl'.
 *
 * Here is a real example:
 * we may hang in the code below, as the BLINK register is stored on the
 * stack in the invalidate_dcache_all() function.
 *
 * void flush_dcache_all() {
 *     __dc_entire_op(OP_FLUSH);
 *     // Other code //
 * }
 *
 * void invalidate_dcache_all() {
 *     __dc_entire_op(OP_INV);
 *     // Other code //
 * }
 *
 * void foo(void) {
 *     flush_dcache_all();
 *     invalidate_dcache_all();
 * }
 *
 * Now let's see what really happens during that code execution:
 *
 * foo()
 *   |->> call flush_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack) ![point 1]
 *     |->> call __dc_entire_op(OP_FLUSH)
 *         [return address is saved to BLINK register]
 *         [flush L1 D$]
 *         return [jump to BLINK]
 *     <<------
 *     [other flush_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     return [jump to BLINK]
 *   <<------
 *   |->> call invalidate_dcache_all
 *     [return address is saved to BLINK register]
 *     [push BLINK] (save to stack) ![point 2]
 *     |->> call __dc_entire_op(OP_INV)
 *         [return address is saved to BLINK register]
 *         [invalidate L1 D$] ![point 3]
 *         // Oops!!!
 *         // We lose the return address of the invalidate_dcache_all
 *         // function: we saved it to the stack and invalidated the
 *         // L1 D$ right after that!
 *         return [jump to BLINK]
 *     <<------
 *     [other invalidate_dcache_all code]
 *     [pop BLINK] (get from stack)
 *     // We don't have this data in the L1 dcache as we invalidated it in
 *     // [point 3], so we get it from the next memory level (for example
 *     // DDR memory). But in memory we have the value saved at [point 1],
 *     // which is the return address of flush_dcache_all (instead of the
 *     // return address of the current invalidate_dcache_all function,
 *     // which we saved at [point 2]!)
 *     return [jump to BLINK]
 *   <<------
 *   // As BLINK points into invalidate_dcache_all, we call it again and
 *   // loop forever.
 *
 * Fortunately we may fix that by doing flush & invalidation of the D$ with
 * a single instruction (instead of a flush/invalidate instruction pair) and
 * by forcing function inlining with the '__attribute__((always_inline))'
 * gcc attribute, so that no function call (and hence no BLINK store) occurs
 * between cache flush and disable.
 *
 *
 * [ NOTE 2 ]:
 * As of today we only support the following cache configurations on ARC.
 * Other configurations may exist in HW (for example, since version 3.0 HS
 * supports SL$ (L2 system level cache) disable) but we don't support them
 * in SW.
 * Configuration 1:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|___
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 *
 * Configuration 2:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off         on/off
 *          ___|______________|___
 *         |                      |
 *         |       L2 (SL$)       |
 *         |______________________|
 *           always must be on
 *          ___|______________|___
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 *
 * Configuration 3:
 *          ______________________
 *         |                      |
 *         |       ARC CPU        |
 *         |______________________|
 *          ___|___        ___|___
 *         |       |      |       |
 *         | L1 I$ |      | L1 D$ |
 *         |_______|      |_______|
 *          on/off       must be on
 *          ___|______________|___     _______
 *         |                      |   |       |
 *         |       L2 (SL$)       |---|  IOC  |
 *         |______________________|   |_______|
 *           always must be on         on/off
 *          ___|______________|___
 *         |                      |
 *         |     main memory      |
 *         |______________________|
 */
DECLARE_GLOBAL_DATA_PTR;

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE	BIT(0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE	BIT(0)
#define DC_CTRL_INV_MODE_FLUSH	BIT(6)
#define DC_CTRL_FLUSH_STATUS	BIT(8)

#define OP_INV			BIT(0)
#define OP_FLUSH		BIT(1)
#define OP_FLUSH_N_INV		(OP_FLUSH | OP_INV)
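/*
 * Note: the OP_* values form a bitmask, so an OP_FLUSH_N_INV request
 * satisfies both the (op & OP_INV) and (op & OP_FLUSH) checks in the
 * helpers below; a single combined request drives both halves of the
 * dispatch.
 */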
/* Bit values in SLC_CTRL */
#define SLC_CTRL_DIS		0x001
#define SLC_CTRL_IM		0x040
#define SLC_CTRL_BUSY		0x100
#define SLC_CTRL_RGN_OP_INV	0x200

#define CACHE_LINE_MASK		(~(gd->arch.l1_line_sz - 1))
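/*
 * For illustration (line size assumed): with a 64-byte L1 line,
 * CACHE_LINE_MASK == ~0x3f, so an address such as 0x80001234 rounds down
 * to its line base 0x80001200, and the 0x34-byte in-line offset gets added
 * to the size in __dcache_line_loop() below.
 */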
/*
 * We don't want to use the '__always_inline' macro here as it can be
 * redefined to plain 'inline' in some cases, which breaks stuff. See
 * [ NOTE 1 ] for more details about the reasons we need always-inlined
 * functions here.
 */
#define inlined_cachefunc	inline __attribute__((always_inline))
static inlined_cachefunc void __ic_entire_invalidate(void);
static inlined_cachefunc void __dc_entire_op(const int cacheop);

static inline bool pae_exists(void)
{
	/* TODO: should we compare mmu version from BCR and from CONFIG? */
#if (CONFIG_ARC_MMU_VER >= 4)
	union bcr_mmu_4 mmu4;

	mmu4.word = read_aux_reg(ARC_AUX_MMU_BCR);

	if (mmu4.fields.pae)
		return true;
#endif /* (CONFIG_ARC_MMU_VER >= 4) */

	return false;
}

static inlined_cachefunc bool icache_exists(void)
{
	union bcr_di_cache ibcr;

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	return !!ibcr.fields.ver;
}

static inlined_cachefunc bool icache_enabled(void)
{
	if (!icache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool dcache_exists(void)
{
	union bcr_di_cache dbcr;

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	return !!dbcr.fields.ver;
}

static inlined_cachefunc bool dcache_enabled(void)
{
	if (!dcache_exists())
		return false;

	return !(read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE);
}

static inlined_cachefunc bool slc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_generic sbcr;

		sbcr.word = read_aux_reg(ARC_BCR_SLC);
		return !!sbcr.fields.ver;
	}

	return false;
}

static inlined_cachefunc bool slc_data_bypass(void)
{
	/*
	 * If the L1 data cache is disabled, the SL$ is bypassed and all
	 * load/store requests are sent directly to main memory.
	 */
	return !dcache_enabled();
}
static inline bool ioc_exists(void)
{
	if (is_isa_arcv2()) {
		union bcr_clust_cfg cbcr;

		cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
		return cbcr.fields.c;
	}

	return false;
}

static inline bool ioc_enabled(void)
{
	/*
	 * We only check the CONFIG option instead of the IOC HW state, as
	 * IOC is disabled by default.
	 */
	if (is_ioc_enabled())
		return ioc_exists();

	return false;
}
static inlined_cachefunc void __slc_entire_op(const int op)
{
	unsigned int ctrl;

	if (!slc_exists())
		return;

	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_AUX_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_AUX_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Important to wait for flush to complete */
	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
}
static void slc_upper_region_init(void)
{
	/*
	 * The ARC_AUX_SLC_RGN_START1 and ARC_AUX_SLC_RGN_END1 registers
	 * exist only if PAE exists in the current HW, so we have to check
	 * pae_exists() before using them.
	 */
	if (!pae_exists())
		return;

	/*
	 * ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1 are always == 0
	 * as we don't use PAE40.
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END1, 0);
	write_aux_reg(ARC_AUX_SLC_RGN_START1, 0);
}
static void __slc_rgn_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	unsigned int ctrl;
	unsigned long end;

	if (!slc_exists())
		return;

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_AUX_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_AUX_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip.
	 * END needs to be set up before START (the latter triggers the
	 * operation). END can't be the same as START, so add
	 * (slc_line_sz - 1) to sz.
	 */
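	/*
	 * Worked example (values assumed for illustration): with
	 * paddr == 0x80000000, sz == 0x100 and a 64-byte SLC line,
	 * end == 0x8000013f, i.e. the last byte of the last cache line
	 * touched by the region operation.
	 */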
	end = paddr + sz + gd->arch.slc_line_sz - 1;

	/*
	 * Upper addresses (ARC_AUX_SLC_RGN_END1 and ARC_AUX_SLC_RGN_START1)
	 * are always == 0 as we don't use PAE40, so we only set up the lower
	 * ones (ARC_AUX_SLC_RGN_END and ARC_AUX_SLC_RGN_START).
	 */
	write_aux_reg(ARC_AUX_SLC_RGN_END, end);
	write_aux_reg(ARC_AUX_SLC_RGN_START, paddr);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_AUX_SLC_CTRL);

	while (read_aux_reg(ARC_AUX_SLC_CTRL) & SLC_CTRL_BUSY);
#endif /* CONFIG_ISA_ARCV2 */
}
static void arc_ioc_setup(void)
{
	/* IOC Aperture start is equal to DDR start */
	unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
	/* IOC Aperture size is equal to DDR size */
	long ap_size = CONFIG_SYS_SDRAM_SIZE;

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!slc_exists())
		panic("Trying to enable IOC but SLC is not present");

	/* Unsupported configuration. See [ NOTE 2 ] for more details. */
	if (!dcache_enabled())
		panic("Trying to enable IOC but L1 D$ is disabled");

	if (!is_power_of_2(ap_size) || ap_size < 4096)
		panic("IOC Aperture size must be a power of 2 and bigger than 4 KiB");

	/* IOC Aperture start must be aligned to the size of the aperture */
	if (ap_base % ap_size != 0)
		panic("IOC Aperture start must be aligned to the size of the aperture");

	flush_n_invalidate_dcache_all();

	/*
	 * IOC Aperture size is decoded as 2 ^ (SIZE + 2) KB,
	 * so setting 0x11 implies 512M, 0x12 implies 1G...
	 */
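	/*
	 * Worked example: for ap_size == 512 MiB, ap_size / 1024 is 2^19 KB,
	 * order_base_2(2^19) == 19 and 19 - 2 == 17 == 0x11, which matches
	 * the "0x11 implies 512M" encoding noted above.
	 */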
	write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
		      order_base_2(ap_size / 1024) - 2);

	write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
	write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
	write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
}
static void read_decode_cache_bcr_arcv2(void)
{
#ifdef CONFIG_ISA_ARCV2
	union bcr_slc_cfg slc_cfg;

	if (slc_exists()) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		gd->arch.slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;

		/*
		 * We don't support a configuration where the L1 I$ or L1 D$
		 * is absent but the SL$ exists. See [ NOTE 2 ] for more
		 * details.
		 */
		if (!icache_exists() || !dcache_exists())
			panic("Unsupported cache configuration: SLC exists but one of L1 caches is absent");
	}
#endif /* CONFIG_ISA_ARCV2 */
}
void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;
	union bcr_di_cache ibcr, dbcr;

	/*
	 * We don't care much about the I$ line length, as there are no
	 * per-line ops on the I$; instead we only do full invalidation of
	 * it on relocation and right before jumping to the OS. Still, we
	 * check for the insane config of a zero-encoded line length in the
	 * presence of a version field in the I$ BCR. Just in case.
	 */
	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		gd->arch.l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
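		/*
		 * line_len is a shift count: e.g. a (hypothetical) value of
		 * line_len == 2 would yield a 16 << 2 == 64-byte D$ line
		 * and an 8 << 2 == 32-byte I$ line.
		 */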
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}
}
void cache_init(void)
{
	read_decode_cache_bcr();

	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2();

	if (is_isa_arcv2() && ioc_enabled())
		arc_ioc_setup();

	if (is_isa_arcv2() && slc_exists())
		slc_upper_region_init();
}
int icache_status(void)
{
	return icache_enabled();
}

void icache_enable(void)
{
	if (icache_exists())
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (!icache_exists())
		return;

	__ic_entire_invalidate();

	write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
		      IC_CTRL_CACHE_DISABLE);
}
/* IC supports only invalidation */
static inlined_cachefunc void __ic_entire_invalidate(void)
{
	if (!icache_enabled())
		return;

	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	write_aux_reg(ARC_AUX_IC_IVIC, 1);

	/*
	 * As per ARC HS databook (see chapter 5.3.3.2)
	 * it is required to add 3 NOPs after each write to IC_IVIC.
	 */
	__builtin_arc_nop();
	__builtin_arc_nop();
	__builtin_arc_nop();

	read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
}
void invalidate_icache_all(void)
{
	__ic_entire_invalidate();

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * so we need to invalidate it too.
	 * TODO: HS 3.0 supports SLC disable, so we need to check the SLC
	 * enable/disable status here.
	 */
	if (is_isa_arcv2() && slc_data_bypass())
		__slc_entire_op(OP_INV);
}
int dcache_status(void)
{
	return dcache_enabled();
}

void dcache_enable(void)
{
	if (!dcache_exists())
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}
void dcache_disable(void)
{
	if (!dcache_exists())
		return;

	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * As the SLC is bypassed for data once the L1 D$ is disabled, we
	 * need to flush it before disabling the L1 D$. We also invalidate
	 * the SLC to avoid any inconsistent-data problems after enabling
	 * the L1 D$ again with the dcache_enable() function.
	 */
	if (is_isa_arcv2())
		__slc_entire_op(OP_FLUSH_N_INV);

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}
/* Common Helper for Line Operations on D-cache */
static inline void __dcache_line_loop(unsigned long paddr, unsigned long sz,
				      const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;

	/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
	aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, gd->arch.l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(ARC_AUX_DC_PTAG, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += gd->arch.l1_line_sz;
	}
}
static inlined_cachefunc void __before_dc_op(const int op)
{
	unsigned int ctrl;

	ctrl = read_aux_reg(ARC_AUX_DC_CTRL);

	/* IM bit implies flush-n-inv, instead of vanilla inv */
	if (op == OP_INV)
		ctrl &= ~DC_CTRL_INV_MODE_FLUSH;
	else
		ctrl |= DC_CTRL_INV_MODE_FLUSH;

	write_aux_reg(ARC_AUX_DC_CTRL, ctrl);
}

static inlined_cachefunc void __after_dc_op(const int op)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
}
static inlined_cachefunc void __dc_entire_op(const int cacheop)
{
	int aux;

	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	if (!dcache_enabled())
		return;

	__before_dc_op(cacheop);
	__dcache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop);
}
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_INV);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_INV);
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	/*
	 * ARCv1                                 -> call __dc_line_op
	 * ARCv2 && L1 D$ disabled               -> nothing
	 * ARCv2 && L1 D$ enabled && IOC enabled -> nothing
	 * ARCv2 && L1 D$ enabled && no IOC      -> call __dc_line_op; call __slc_rgn_op
	 */
	if (!is_isa_arcv2() || !ioc_enabled())
		__dc_line_op(start, end - start, OP_FLUSH);

	if (is_isa_arcv2() && !ioc_enabled() && !slc_data_bypass())
		__slc_rgn_op(start, end - start, OP_FLUSH);
}
void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}
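/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * handing a buffer to a DMA engine would typically flush the range before
 * the device reads it, and invalidate the range before the CPU reads data
 * the device wrote. The dma_start_hypothetical() call below is a
 * placeholder, not a real U-Boot API:
 *
 *	void dma_example(void *buf, unsigned long len)
 *	{
 *		unsigned long addr = (unsigned long)buf;
 *
 *		flush_dcache_range(addr, addr + len);      // CPU -> device
 *		dma_start_hypothetical(buf, len);          // device does DMA
 *		invalidate_dcache_range(addr, addr + len); // device -> CPU
 *	}
 */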
/*
 * As invalidate_dcache_all() is not used in generic U-Boot code, and as we
 * don't need a bare invalidate (without flush) in arch/arc code either, we
 * implement flush_n_invalidate_dcache_all() (flush and invalidate in one
 * operation) instead, because it's much safer. See [ NOTE 1 ] for more
 * details.
 */
void flush_n_invalidate_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH_N_INV);
}
void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

	if (is_isa_arcv2() && !slc_data_bypass())
		__slc_entire_op(OP_FLUSH);
}
/*
 * This function cleans up all caches (and thereby syncs the I/D caches).
 * It can be used for cleanup before launching Linux or to sync caches
 * during relocation.
 */
void sync_n_cleanup_cache_all(void)
{
	__dc_entire_op(OP_FLUSH_N_INV);

	/*
	 * If SL$ is bypassed for data it is used only for instructions,
	 * and we shouldn't flush it. So invalidate it instead of
	 * flush-n-inv.
	 */
	if (is_isa_arcv2()) {
		if (slc_data_bypass())
			__slc_entire_op(OP_INV);
		else
			__slc_entire_op(OP_FLUSH_N_INV);
	}

	__ic_entire_invalidate();
}