start.S

/*
 * Copyright (C) 1998 Dan Malek <dmalek@jlc.net>
 * Copyright (C) 1999 Magnus Damm <kieraypc01.p.y.kie.era.ericsson.se>
 * Copyright (C) 2000-2009 Wolfgang Denk <wd@denx.de>
 * Copyright Freescale Semiconductor, Inc. 2004, 2006.
 *
 * SPDX-License-Identifier: GPL-2.0+
 *
 * Based on the MPC83xx code.
 */

/*
 * U-Boot - Startup Code for MPC512x based Embedded Boards
 */

#include <asm-offsets.h>
#include <config.h>
#ifndef CONFIG_IDENT_STRING
#define CONFIG_IDENT_STRING "MPC512X"
#endif
#include <version.h>

#define CONFIG_521X	1	/* needed for Linux kernel header files */

#include <asm/immap_512x.h>
#include "asm-offsets.h"

#include <ppc_asm.tmpl>
#include <ppc_defs.h>
#include <asm/cache.h>
#include <asm/mmu.h>
#include <asm/u-boot.h>

/*
 * Floating Point enable, Machine Check and Recoverable Interrupt.
 */
#undef	MSR_KERNEL
#ifdef DEBUG
#define MSR_KERNEL (MSR_FP|MSR_RI)
#else
#define MSR_KERNEL (MSR_FP|MSR_ME|MSR_RI)
#endif

/* Macros for manipulating CSx_START/STOP */
#define START_REG(start)	((start) >> 16)
#define STOP_REG(start, size)	(((start) + (size) - 1) >> 16)
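/*
 * A worked example (values purely hypothetical): with CONFIG_SYS_FLASH_BASE
 * 0xFC000000 and CONFIG_SYS_FLASH_SIZE 0x04000000 (64 MB), START_REG yields
 * 0xFC00 and STOP_REG yields 0xFFFF; the lis/ori pair used below then packs
 * them into the high and low halfwords of the LPCS0AW window register.
 */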

/*
 * Set up GOT: Global Offset Table
 *
 * Use r12 to access the GOT
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)
	GOT_ENTRY(_start)
	GOT_ENTRY(_start_of_vectors)
	GOT_ENTRY(_end_of_vectors)
	GOT_ENTRY(transfer_to_handler)
	GOT_ENTRY(__init_end)
	GOT_ENTRY(__bss_end)
	GOT_ENTRY(__bss_start)
	END_GOT
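/*
 * GET_GOT (used further down) loads the run-time address of this table into
 * r12; the GOT(name) accessor then resolves to an r12-relative offset, which
 * is what keeps this code position-independent until relocate_code has run.
 */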

/*
 * Magic number and version string
 */
	.long	0x27051956		/* U-Boot Magic Number */
	.globl	version_string
version_string:
	.ascii	U_BOOT_VERSION_STRING, "\0"

/*
 * Vector Table
 */
	.text
	. = EXC_OFF_SYS_RESET

	.globl	_start
	/* Start from here after reset/power on */
_start:
	b	boot_cold

	.globl	_start_of_vectors
_start_of_vectors:

/* Machine check */
	STD_EXCEPTION(0x200, MachineCheck, MachineCheckException)

/* Data Storage exception. */
	STD_EXCEPTION(0x300, DataStorage, UnknownException)

/* Instruction Storage exception. */
	STD_EXCEPTION(0x400, InstStorage, UnknownException)

/* External Interrupt exception. */
	STD_EXCEPTION(0x500, ExtInterrupt, UnknownException)

/* Alignment exception. */
	. = 0x600
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(Alignment, AlignmentException, MSR_KERNEL, COPY_EE)

/* Program check exception */
	. = 0x700
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

/* Floating Point Unit unavailable exception */
	STD_EXCEPTION(0x800, FPUnavailable, UnknownException)

/* Decrementer */
	STD_EXCEPTION(0x900, Decrementer, timer_interrupt)

/* Critical interrupt */
	STD_EXCEPTION(0xa00, Critical, UnknownException)

/* System Call */
	STD_EXCEPTION(0xc00, SystemCall, UnknownException)

/* Trace interrupt */
	STD_EXCEPTION(0xd00, Trace, UnknownException)

/* Performance Monitor interrupt */
	STD_EXCEPTION(0xf00, PerfMon, UnknownException)

/* Instruction Translation Miss */
	STD_EXCEPTION(0x1000, InstructionTLBMiss, UnknownException)

/* Data Load Translation Miss */
	STD_EXCEPTION(0x1100, DataLoadTLBMiss, UnknownException)

/* Data Store Translation Miss */
	STD_EXCEPTION(0x1200, DataStoreTLBMiss, UnknownException)

/* Instruction Address Breakpoint */
	STD_EXCEPTION(0x1300, InstructionAddrBreakpoint, DebugException)

/* System Management interrupt */
	STD_EXCEPTION(0x1400, SystemMgmtInterrupt, UnknownException)

	.globl	_end_of_vectors
_end_of_vectors:
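
/*
 * Each STD_EXCEPTION() invocation above expands (in ppc_asm.tmpl) to roughly
 * the same prolog + EXC_XFER_TEMPLATE sequence that is written out by hand
 * for Alignment and ProgramCheck, just without capturing DAR/DSISR.
 */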

	. = 0x3000
boot_cold:
	/* Save msr contents */
	mfmsr	r5

	/* Set IMMR area to our preferred location */
	lis	r4, CONFIG_DEFAULT_IMMR@h
	lis	r3, CONFIG_SYS_IMMR@h
	ori	r3, r3, CONFIG_SYS_IMMR@l
	stw	r3, IMMRBAR(r4)
	mtspr	MBAR, r3	/* IMMRBAR is mirrored into the MBAR SPR (311) */

	/* Initialise the machine */
	bl	cpu_early_init

	/*
	 * Set up Local Access Windows:
	 *
	 * 1) Boot/CS0 (boot FLASH)
	 * 2) On-chip SRAM (initial stack purposes)
	 */

	/* Boot CS/CS0 window range */
	lis	r3, CONFIG_SYS_IMMR@h
	ori	r3, r3, CONFIG_SYS_IMMR@l
	lis	r4, START_REG(CONFIG_SYS_FLASH_BASE)
	ori	r4, r4, STOP_REG(CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FLASH_SIZE)
	stw	r4, LPCS0AW(r3)

	/*
	 * The SRAM window has a fixed size (256K), so only the start address
	 * is necessary
	 */
	lis	r4, START_REG(CONFIG_SYS_SRAM_BASE) & 0xff00
	stw	r4, SRAMBAR(r3)

	/*
	 * According to MPC5121e RM, configuring local access windows should
	 * be followed by a dummy read of the config register that was
	 * modified last and an isync
	 */
	lwz	r4, SRAMBAR(r3)
	isync

	/*
	 * Set configuration of the Boot/CS0, the SRAM window does not have a
	 * config register so no params can be set for it
	 */
	lis	r3, (CONFIG_SYS_IMMR + LPC_OFFSET)@h
	ori	r3, r3, (CONFIG_SYS_IMMR + LPC_OFFSET)@l
	lis	r4, CONFIG_SYS_CS0_CFG@h
	ori	r4, r4, CONFIG_SYS_CS0_CFG@l
	stw	r4, CS0_CONFIG(r3)

	/* Master enable all CS's */
	lis	r4, CS_CTRL_ME@h
	ori	r4, r4, CS_CTRL_ME@l
	stw	r4, CS_CTRL(r3)

	lis	r4, (CONFIG_SYS_MONITOR_BASE)@h
	ori	r4, r4, (CONFIG_SYS_MONITOR_BASE)@l
	addi	r5, r4, in_flash - _start + EXC_OFF_SYS_RESET
	mtlr	r5
	blr
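
/*
 * The sequence above computes the absolute flash address of in_flash
 * (CONFIG_SYS_MONITOR_BASE plus its offset from _start, plus
 * EXC_OFF_SYS_RESET because _start itself is placed at that offset) and
 * branches there, so that the remaining start-up code runs from the
 * monitor's link address rather than the address the reset vector was
 * fetched from.
 */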
in_flash:
	lis	r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@h
	ori	r1, r1, (CONFIG_SYS_INIT_RAM_ADDR + CONFIG_SYS_GBL_DATA_OFFSET)@l

	li	r0, 0		/* Make room for stack frame header and */
	stwu	r0, -4(r1)	/* clear final stack frame so that	 */
	stwu	r0, -4(r1)	/* stack backtraces terminate cleanly	 */

	/* let the C-code set up the rest			 */
	/*							 */
	/* Be careful to keep code relocatable & stack humble	 */
	/*-------------------------------------------------------*/
	GET_GOT			/* initialize GOT access */

	/* r3: IMMR */
	lis	r3, CONFIG_SYS_IMMR@h

	/* run low-level CPU init code (in Flash) */
	bl	cpu_init_f

	/* run 1st part of board init code (in Flash) */
	bl	board_init_f

	/* NOTREACHED - board_init_f() does not return */

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception.
 * Register r21 points into the trap frame, r1 holds the new stack pointer.
 */
	.globl	transfer_to_handler
transfer_to_handler:
	stw	r22,_NIP(r21)
	lis	r22,MSR_POW@h
	andc	r23,r23,r22
	stw	r23,_MSR(r21)
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)
	mflr	r23
	andi.	r24,r23,0x3f00		/* get vector offset */
	stw	r24,TRAP(r21)
	li	r22,0
	stw	r22,RESULT(r21)
	lwz	r24,0(r23)		/* virtual address of handler */
	lwz	r23,4(r23)		/* where to go when done */
	mtspr	SRR0,r24
	mtspr	SRR1,r20
	mtlr	r23
	SYNC
	rfi				/* jump to handler, enable MMU */
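
/*
 * Note: LR here is presumed to point at the two-word pair emitted after the
 * "bl transfer_to_handler" in each vector (by EXC_XFER_TEMPLATE): word 0 is
 * the handler address and word 1 the return address (int_return), which is
 * exactly what the two lwz instructions above pick up.
 */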
int_return:
	mfmsr	r28		/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4
	SYNC			/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)	/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2
	mtspr	SRR1,r0
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)
	SYNC
	rfi

/*
 * This code initialises the machine; it expects the original MSR contents
 * in r5.
 */
cpu_early_init:
	/* Initialize machine status; enable machine check interrupt */
	/*-----------------------------------------------------------*/

	li	r3, MSR_KERNEL			/* Set ME and RI flags */
	rlwimi	r3, r5, 0, 25, 25		/* preserve IP bit */
#ifdef DEBUG
	rlwimi	r3, r5, 0, 21, 22		/* debugger might set SE, BE bits */
#endif
	mtmsr	r3
	SYNC
	mtspr	SRR1, r3			/* Mirror current MSR state in SRR1 */

	lis	r3, CONFIG_SYS_IMMR@h

#if defined(CONFIG_WATCHDOG)
	/* Initialise the watchdog and reset it */
	/*--------------------------------------*/
	lis	r4, CONFIG_SYS_WATCHDOG_VALUE
	ori	r4, r4, (SWCRR_SWEN | SWCRR_SWRI | SWCRR_SWPR)
	stw	r4, SWCRR(r3)

	/* reset */
	li	r4, 0x556C
	sth	r4, SWSRR@l(r3)
	li	r4, 0x0
	ori	r4, r4, 0xAA39
	sth	r4, SWSRR@l(r3)
#else
	/* Disable the watchdog */
	/*----------------------*/
	lwz	r4, SWCRR(r3)
	/*
	 * Check whether the watchdog is enabled before disabling it:
	 * once disabled by software it cannot be re-enabled
	 */
	andi.	r4, r4, 0x4
	beq	1f
	xor	r4, r4, r4
	stw	r4, SWCRR(r3)
1:
#endif /* CONFIG_WATCHDOG */

	/* Initialize the Hardware Implementation-dependent Registers */
	/* HID0 also contains cache control */
	/*------------------------------------------------------*/
	lis	r3, CONFIG_SYS_HID0_INIT@h
	ori	r3, r3, CONFIG_SYS_HID0_INIT@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CONFIG_SYS_HID0_FINAL@h
	ori	r3, r3, CONFIG_SYS_HID0_FINAL@l
	SYNC
	mtspr	HID0, r3

	lis	r3, CONFIG_SYS_HID2@h
	ori	r3, r3, CONFIG_SYS_HID2@l
	SYNC
	mtspr	HID2, r3
	sync
	blr
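
/*
 * The two-stage HID0 write (CONFIG_SYS_HID0_INIT, then CONFIG_SYS_HID0_FINAL)
 * presumably lets a board configuration perform one-shot actions, such as a
 * cache flash-invalidate, in the INIT value before settling on the FINAL
 * state.
 */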

/* Cache functions.
 *
 * Note: requires that all cache bits in
 * HID0 are in the low half word.
 */
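/*
 * That requirement exists because the routines below build their HID0 masks
 * with ori/li immediates, which are only 16 bits wide, so HID0_ICE, HID0_DCE
 * and the lock/flash-invalidate bits must all live in the low halfword.
 */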

	.globl	icache_enable
icache_enable:
	mfspr	r3, HID0
	ori	r3, r3, HID0_ICE
	lis	r4, 0
	ori	r4, r4, HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4	/* sets enable and invalidate, clears lock */
	isync
	mtspr	HID0, r3	/* clears invalidate */
	blr

	.globl	icache_disable
icache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_ICE|HID0_ILOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_ICFI
	isync
	mtspr	HID0, r4	/* sets invalidate, clears enable and lock */
	isync
	mtspr	HID0, r3	/* clears invalidate */
	blr

	.globl	icache_status
icache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, (31 - HID0_ICE_SHIFT + 1), 31, 31
	blr
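
/*
 * The rlwinm in icache_status (and dcache_status below) rotates HID0 so that
 * the ICE/DCE bit lands in bit 31 and masks everything else, so the routine
 * returns 0 or 1 in r3.
 */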

	.globl	dcache_enable
dcache_enable:
	mfspr	r3, HID0
	li	r5, HID0_DCFI|HID0_DLOCK
	andc	r3, r3, r5
	mtspr	HID0, r3	/* no invalidate, unlock */
	ori	r3, r3, HID0_DCE
	ori	r5, r3, HID0_DCFI
	mtspr	HID0, r5	/* enable + invalidate */
	mtspr	HID0, r3	/* enable */
	sync
	blr

	.globl	dcache_disable
dcache_disable:
	mfspr	r3, HID0
	lis	r4, 0
	ori	r4, r4, HID0_DCE|HID0_DLOCK
	andc	r3, r3, r4
	ori	r4, r3, HID0_DCI
	sync
	mtspr	HID0, r4	/* sets invalidate, clears enable and lock */
	sync
	mtspr	HID0, r3	/* clears invalidate */
	blr

	.globl	dcache_status
dcache_status:
	mfspr	r3, HID0
	rlwinm	r3, r3, (31 - HID0_DCE_SHIFT + 1), 31, 31
	blr

	.globl	get_pvr
get_pvr:
	mfspr	r3, PVR
	blr

/*-------------------------------------------------------------------*/

/*
 * void relocate_code (addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * Register usage in the copy loop below (set up from the arguments):
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 */
	.globl	relocate_code
relocate_code:
	mr	r1, r3		/* Set new stack pointer */
	mr	r9, r4		/* Save copy of Global Data pointer */
	mr	r10, r5		/* Save copy of Destination Address */

	GET_GOT
	mr	r3, r5				/* Destination Address */
	lis	r4, CONFIG_SYS_MONITOR_BASE@h	/* Source Address */
	ori	r4, r4, CONFIG_SYS_MONITOR_BASE@l
	lwz	r5, GOT(__init_end)
	sub	r5, r5, r4
	li	r6, CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size */

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_SYS_MONITOR_BASE)
	 *		 + Destination Address
	 *
	 * Offset:
	 */
	sub	r15, r10, r4

	/* First our own GOT */
	add	r12, r12, r15
	/* then the one used by the C code */
	add	r30, r30, r15
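
	/*
	 * A worked example with purely hypothetical addresses: if the monitor
	 * was linked at CONFIG_SYS_MONITOR_BASE = 0xFFF00000 and is being
	 * copied to 0x07FA0000, then r15 = 0x07FA0000 - 0xFFF00000, and
	 * adding r15 (modulo 2^32) to a link-time pointer yields its run-time
	 * address in RAM.
	 */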

	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4
	addi	r0,r5,3
	srwi.	r0,r0,2
	beq	cr1,4f		/* In place copy is not necessary */
	beq	7f		/* Protect against 0 count	  */
	mtctr	r0
	bge	cr1,2f
	la	r8,-4(r4)
	la	r7,-4(r3)

	/* copy */
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b

	addi	r0,r5,3
	srwi.	r0,r0,2
	mtctr	r0
	la	r8,-4(r4)
	la	r7,-4(r3)

	/* and compare */
20:	lwzu	r20,4(r8)
	lwzu	r21,4(r7)
	xor.	r22, r20, r21
	bne	30f
	bdnz	20b
	b	4f

	/* compare failed */
30:	li	r3, 0
	blr
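
	/*
	 * If the word-by-word verification above fails, the routine bails out
	 * with r3 = 0 instead of continuing on to the relocated copy.
	 */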

	/* Copy in reverse order when the destination is above the source,
	 * so an overlapping copy does not clobber data before it is read. */
2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0
	mr	r4,r3
5:	dcbst	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus */
	isync
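
	/*
	 * dcbst pushes the freshly copied code out of the data cache to
	 * memory, and icbi then invalidates any stale instruction-cache
	 * lines covering the destination, so the CPU fetches the relocated
	 * code rather than old cache contents.
	 */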

	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */
	addi	r0, r10, in_ram - _start + EXC_OFF_SYS_RESET
	mtlr	r0
	blr

in_ram:
	/*
	 * Relocation Function, r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)
	lwz	r11,GOT(_GOT2_TABLE_)
	mtctr	r0
	sub	r11,r3,r11
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)
	lwzux	r0,r4,r11
	cmpwi	r0,0
	add	r0,r0,r11
	stw	r4,0(r3)
	beq-	5f
	stw	r0,0(r4)
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end)

	cmplw	0, r3, r4
	beq	6f

	li	r0, 0
5:
	stw	r0, 0(r3)
	addi	r3, r3, 4
	cmplw	0, r3, r4
	bne	5b
6:
	mr	r3, r9		/* Global Data pointer */
	mr	r4, r10		/* Destination Address */
	bl	board_init_r

/*
 * Copy exception vector code to low memory
 *
 * r3: dest_addr
 * r7: source address, r8: end address, r9: target address
 */
	.globl	trap_init
trap_init:
	mflr	r4		/* save link register */
	GET_GOT
	lwz	r7, GOT(_start)
	lwz	r8, GOT(_end_of_vectors)

	li	r9, 0x100	/* reset vector at 0x100 */

	cmplw	0, r7, r8
	bgelr			/* return if r7 >= r8 - just in case */
1:
	lwz	r0, 0(r7)
	stw	r0, 0(r9)
	addi	r7, r7, 4
	addi	r9, r9, 4
	cmplw	0, r7, r8
	bne	1b

	/*
	 * relocate `hdlr' and `int_return' entries
	 */
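	/*
	 * Each .L_* label below presumably marks the hdlr/int_return word
	 * pair emitted by EXC_XFER_TEMPLATE for the corresponding vector;
	 * trap_reloc, called with that entry's low-memory address in r7,
	 * patches those words in the copy made above.
	 */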
	li	r7, .L_MachineCheck - _start + EXC_OFF_SYS_RESET
	li	r8, Alignment - _start + EXC_OFF_SYS_RESET
2:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector */
	cmplw	0, r7, r8
	blt	2b

	li	r7, .L_Alignment - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_ProgramCheck - _start + EXC_OFF_SYS_RESET
	bl	trap_reloc

	li	r7, .L_FPUnavailable - _start + EXC_OFF_SYS_RESET
	li	r8, SystemCall - _start + EXC_OFF_SYS_RESET
3:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector */
	cmplw	0, r7, r8
	blt	3b

	li	r7, .L_Trace - _start + EXC_OFF_SYS_RESET
	li	r8, _end_of_vectors - _start + EXC_OFF_SYS_RESET
4:
	bl	trap_reloc
	addi	r7, r7, 0x100		/* next exception vector */
	cmplw	0, r7, r8
	blt	4b

	mfmsr	r3			/* now that the vectors have	*/
	lis	r7, MSR_IP@h		/* relocated into low memory	*/
	ori	r7, r7, MSR_IP@l	/* MSR[IP] can be turned off	*/
	andc	r3, r3, r7		/* (if it was on)		*/

	SYNC				/* Some chip revs need this... */
	mtmsr	r3
	SYNC

	mtlr	r4			/* restore link register */
	blr