pcie_layerscape.c

/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/arch/fsl_serdes.h>
#include <pci.h>
#include <asm/io.h>
#include <errno.h>
#include <malloc.h>
#ifndef CONFIG_LS102XA
#include <asm/arch/fdt.h>
#include <asm/arch/soc.h>
#endif

#ifndef CONFIG_SYS_PCI_MEMORY_BUS
#define CONFIG_SYS_PCI_MEMORY_BUS	CONFIG_SYS_SDRAM_BASE
#endif

#ifndef CONFIG_SYS_PCI_MEMORY_PHYS
#define CONFIG_SYS_PCI_MEMORY_PHYS	CONFIG_SYS_SDRAM_BASE
#endif

#ifndef CONFIG_SYS_PCI_MEMORY_SIZE
#define CONFIG_SYS_PCI_MEMORY_SIZE	(2 * 1024 * 1024 * 1024UL) /* 2G */
#endif

#ifndef CONFIG_SYS_PCI_EP_MEMORY_BASE
#define CONFIG_SYS_PCI_EP_MEMORY_BASE	CONFIG_SYS_LOAD_ADDR
#endif

/* iATU registers */
#define PCIE_ATU_VIEWPORT		0x900
#define PCIE_ATU_REGION_INBOUND		(0x1 << 31)
#define PCIE_ATU_REGION_OUTBOUND	(0x0 << 31)
#define PCIE_ATU_REGION_INDEX0		(0x0 << 0)
#define PCIE_ATU_REGION_INDEX1		(0x1 << 0)
#define PCIE_ATU_REGION_INDEX2		(0x2 << 0)
#define PCIE_ATU_REGION_INDEX3		(0x3 << 0)
#define PCIE_ATU_CR1			0x904
#define PCIE_ATU_TYPE_MEM		(0x0 << 0)
#define PCIE_ATU_TYPE_IO		(0x2 << 0)
#define PCIE_ATU_TYPE_CFG0		(0x4 << 0)
#define PCIE_ATU_TYPE_CFG1		(0x5 << 0)
#define PCIE_ATU_CR2			0x908
#define PCIE_ATU_ENABLE			(0x1 << 31)
#define PCIE_ATU_BAR_MODE_ENABLE	(0x1 << 30)
#define PCIE_ATU_BAR_NUM(bar)		((bar) << 8)
#define PCIE_ATU_LOWER_BASE		0x90C
#define PCIE_ATU_UPPER_BASE		0x910
#define PCIE_ATU_LIMIT			0x914
#define PCIE_ATU_LOWER_TARGET		0x918
#define PCIE_ATU_BUS(x)			(((x) & 0xff) << 24)
#define PCIE_ATU_DEV(x)			(((x) & 0x1f) << 19)
#define PCIE_ATU_FUNC(x)		(((x) & 0x7) << 16)
#define PCIE_ATU_UPPER_TARGET		0x91C

#define PCIE_DBI_RO_WR_EN	0x8bc

#define PCIE_LINK_CAP		0x7c
#define PCIE_LINK_SPEED_MASK	0xf
#define PCIE_LINK_STA		0x82

#define LTSSM_STATE_MASK	0x3f
#define LTSSM_PCIE_L0		0x11 /* L0 state */

#define PCIE_DBI_SIZE		0x100000 /* 1M */

#define PCIE_LCTRL0_CFG2_ENABLE	(1 << 31)
#define PCIE_LCTRL0_VF(vf)	((vf) << 22)
#define PCIE_LCTRL0_PF(pf)	((pf) << 16)
#define PCIE_LCTRL0_VF_ACTIVE	(1 << 21)
#define PCIE_LCTRL0_VAL(pf, vf)	(PCIE_LCTRL0_PF(pf) | \
				 PCIE_LCTRL0_VF(vf) | \
				 ((vf) == 0 ? 0 : PCIE_LCTRL0_VF_ACTIVE) | \
				 PCIE_LCTRL0_CFG2_ENABLE)

#define PCIE_NO_SRIOV_BAR_BASE	0x1000

#define PCIE_PF_NUM		2
#define PCIE_VF_NUM		64

#define PCIE_BAR0_SIZE		(4 * 1024)		/* 4K */
#define PCIE_BAR1_SIZE		(8 * 1024)		/* 8K for MSIX */
#define PCIE_BAR2_SIZE		(4 * 1024)		/* 4K */
#define PCIE_BAR4_SIZE		(1 * 1024 * 1024)	/* 1M */

struct ls_pcie {
	int idx;
	void __iomem *dbi;
	void __iomem *va_cfg0;
	void __iomem *va_cfg1;
	struct pci_controller hose;
};

struct ls_pcie_info {
	unsigned long regs;
	int pci_num;
	u64 phys_base;
	u64 cfg0_phys;
	u64 cfg0_size;
	u64 cfg1_phys;
	u64 cfg1_size;
	u64 mem_bus;
	u64 mem_phys;
	u64 mem_size;
	u64 io_bus;
	u64 io_phys;
	u64 io_size;
};

#define SET_LS_PCIE_INFO(x, num)				\
{								\
	x.regs = CONFIG_SYS_PCIE##num##_ADDR;			\
	x.phys_base = CONFIG_SYS_PCIE##num##_PHYS_ADDR;		\
	x.cfg0_phys = CONFIG_SYS_PCIE_CFG0_PHYS_OFF +		\
		      CONFIG_SYS_PCIE##num##_PHYS_ADDR;		\
	x.cfg0_size = CONFIG_SYS_PCIE_CFG0_SIZE;		\
	x.cfg1_phys = CONFIG_SYS_PCIE_CFG1_PHYS_OFF +		\
		      CONFIG_SYS_PCIE##num##_PHYS_ADDR;		\
	x.cfg1_size = CONFIG_SYS_PCIE_CFG1_SIZE;		\
	x.mem_bus = CONFIG_SYS_PCIE_MEM_BUS;			\
	x.mem_phys = CONFIG_SYS_PCIE_MEM_PHYS_OFF +		\
		     CONFIG_SYS_PCIE##num##_PHYS_ADDR;		\
	x.mem_size = CONFIG_SYS_PCIE_MEM_SIZE;			\
	x.io_bus = CONFIG_SYS_PCIE_IO_BUS;			\
	x.io_phys = CONFIG_SYS_PCIE_IO_PHYS_OFF +		\
		    CONFIG_SYS_PCIE##num##_PHYS_ADDR;		\
	x.io_size = CONFIG_SYS_PCIE_IO_SIZE;			\
	x.pci_num = num;					\
}

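/*
 * Illustrative only: SET_LS_PCIE_INFO() expects the board configuration to
 * provide the controller base addresses and the shared window layout.  The
 * values below are placeholders, not taken from any real board header, and
 * are shown purely as a sketch of the expected symbols for controller 1:
 *
 *	#define CONFIG_SYS_PCIE1_ADDR		0x03400000
 *	#define CONFIG_SYS_PCIE1_PHYS_ADDR	0x2000000000ULL
 *	#define CONFIG_SYS_PCIE_CFG0_PHYS_OFF	0x00000000
 *	#define CONFIG_SYS_PCIE_CFG0_SIZE	0x00001000
 *	#define CONFIG_SYS_PCIE_CFG1_PHYS_OFF	0x00001000
 *	#define CONFIG_SYS_PCIE_CFG1_SIZE	0x00001000
 *	#define CONFIG_SYS_PCIE_MEM_BUS		0x08000000
 *	#define CONFIG_SYS_PCIE_MEM_PHYS_OFF	0x08000000
 *	#define CONFIG_SYS_PCIE_MEM_SIZE	0x18000000
 *	#define CONFIG_SYS_PCIE_IO_BUS		0x00000000
 *	#define CONFIG_SYS_PCIE_IO_PHYS_OFF	0x00010000
 *	#define CONFIG_SYS_PCIE_IO_SIZE		0x00010000
 */
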
#ifdef CONFIG_LS102XA
#include <asm/arch/immap_ls102xa.h>

/* PEX1/2 Misc Ports Status Register */
#define LTSSM_STATE_SHIFT	20

static int ls_pcie_link_state(struct ls_pcie *pcie)
{
	u32 state;
	struct ccsr_scfg *scfg = (struct ccsr_scfg *)CONFIG_SYS_FSL_SCFG_ADDR;

	state = in_be32(&scfg->pexmscportsr[pcie->idx]);
	state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
	if (state < LTSSM_PCIE_L0) {
		debug("....PCIe link error. LTSSM=0x%02x.\n", state);
		return 0;
	}

	return 1;
}
#else
static int ls_pcie_link_state(struct ls_pcie *pcie)
{
	u32 state;

	state = pex_lut_in32(pcie->dbi + PCIE_LUT_BASE + PCIE_LUT_DBG) &
		LTSSM_STATE_MASK;
	if (state < LTSSM_PCIE_L0) {
		debug("....PCIe link error. LTSSM=0x%02x.\n", state);
		return 0;
	}

	return 1;
}
#endif

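/*
 * Report whether the link has trained to L0.  If it has not, cap the link
 * speed at gen1, wait, and check once more before restoring the original
 * link capability value.
 */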
static int ls_pcie_link_up(struct ls_pcie *pcie)
{
	int state;
	u32 cap;

	state = ls_pcie_link_state(pcie);
	if (state)
		return state;

	/* Try to downgrade the link speed to gen1 */
	cap = readl(pcie->dbi + PCIE_LINK_CAP);
	writel((cap & (~PCIE_LINK_SPEED_MASK)) | 1, pcie->dbi + PCIE_LINK_CAP);
	/*
	 * Notice: the following delay has a critical impact on link training;
	 * if it is too short (< 30 ms) the link does not come up.
	 */
	mdelay(100);
	state = ls_pcie_link_state(pcie);
	if (state)
		return state;

	writel(cap, pcie->dbi + PCIE_LINK_CAP);

	return 0;
}

static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	writel(PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
	       pcie->dbi + PCIE_ATU_VIEWPORT);
	writel(busdev, pcie->dbi + PCIE_ATU_LOWER_TARGET);
}

static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
{
	writel(PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
	       pcie->dbi + PCIE_ATU_VIEWPORT);
	writel(busdev, pcie->dbi + PCIE_ATU_LOWER_TARGET);
}

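/*
 * The iATU is programmed through a viewport: the region index and direction
 * are selected in PCIE_ATU_VIEWPORT, after which that region's base, limit,
 * target and type registers are accessible at fixed offsets, and the window
 * is armed by setting PCIE_ATU_ENABLE in PCIE_ATU_CR2.
 */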
static void ls_pcie_iatu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				      u64 phys, u64 bus_addr, pci_size_t size)
{
	writel(PCIE_ATU_REGION_OUTBOUND | idx, pcie->dbi + PCIE_ATU_VIEWPORT);
	writel((u32)phys, pcie->dbi + PCIE_ATU_LOWER_BASE);
	writel(phys >> 32, pcie->dbi + PCIE_ATU_UPPER_BASE);
	writel(phys + size - 1, pcie->dbi + PCIE_ATU_LIMIT);
	writel((u32)bus_addr, pcie->dbi + PCIE_ATU_LOWER_TARGET);
	writel(bus_addr >> 32, pcie->dbi + PCIE_ATU_UPPER_TARGET);
	writel(type, pcie->dbi + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE, pcie->dbi + PCIE_ATU_CR2);
}

/* Use BAR match mode and MEM type as default */
static void ls_pcie_iatu_inbound_set(struct ls_pcie *pcie, int idx,
				     int bar, u64 phys)
{
	writel(PCIE_ATU_REGION_INBOUND | idx, pcie->dbi + PCIE_ATU_VIEWPORT);
	writel((u32)phys, pcie->dbi + PCIE_ATU_LOWER_TARGET);
	writel(phys >> 32, pcie->dbi + PCIE_ATU_UPPER_TARGET);
	writel(PCIE_ATU_TYPE_MEM, pcie->dbi + PCIE_ATU_CR1);
	writel(PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
	       PCIE_ATU_BAR_NUM(bar), pcie->dbi + PCIE_ATU_CR2);
}

static void ls_pcie_setup_atu(struct ls_pcie *pcie, struct ls_pcie_info *info)
{
#ifdef DEBUG
	int i;
#endif

	/* ATU 0 : OUTBOUND : CFG0 */
	ls_pcie_iatu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_CFG0,
				  info->cfg0_phys,
				  0,
				  info->cfg0_size);
	/* ATU 1 : OUTBOUND : CFG1 */
	ls_pcie_iatu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
				  PCIE_ATU_TYPE_CFG1,
				  info->cfg1_phys,
				  0,
				  info->cfg1_size);
	/* ATU 2 : OUTBOUND : MEM */
	ls_pcie_iatu_outbound_set(pcie, PCIE_ATU_REGION_INDEX2,
				  PCIE_ATU_TYPE_MEM,
				  info->mem_phys,
				  info->mem_bus,
				  info->mem_size);
	/* ATU 3 : OUTBOUND : IO */
	ls_pcie_iatu_outbound_set(pcie, PCIE_ATU_REGION_INDEX3,
				  PCIE_ATU_TYPE_IO,
				  info->io_phys,
				  info->io_bus,
				  info->io_size);

#ifdef DEBUG
	for (i = 0; i <= PCIE_ATU_REGION_INDEX3; i++) {
		writel(PCIE_ATU_REGION_OUTBOUND | i,
		       pcie->dbi + PCIE_ATU_VIEWPORT);
		debug("iATU%d:\n", i);
		debug("\tLOWER PHYS 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_LOWER_BASE));
		debug("\tUPPER PHYS 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_UPPER_BASE));
		debug("\tLOWER BUS 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_LOWER_TARGET));
		debug("\tUPPER BUS 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_UPPER_TARGET));
		debug("\tLIMIT 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_LIMIT));
		debug("\tCR1 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_CR1));
		debug("\tCR2 0x%08x\n",
		      readl(pcie->dbi + PCIE_ATU_CR2));
	}
#endif
}

int pci_skip_dev(struct pci_controller *hose, pci_dev_t dev)
{
	/* Do not skip controller */
	return 0;
}

static int ls_pcie_addr_valid(struct pci_controller *hose, pci_dev_t d)
{
	if (PCI_DEV(d) > 0)
		return -EINVAL;

	/* Controller does not support multi-function in RC mode */
	if ((PCI_BUS(d) == hose->first_busno) && (PCI_FUNC(d) > 0))
		return -EINVAL;

	return 0;
}

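/*
 * Config accesses to the root bus go straight to the local DBI registers.
 * Accesses to the first subordinate bus go through the CFG0 outbound window
 * (Type 0 config), and anything further downstream goes through the CFG1
 * window (Type 1 config), with the target BDF written into the iATU lower
 * target register first.
 */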
static int ls_pcie_read_config(struct pci_controller *hose, pci_dev_t d,
			       int where, u32 *val)
{
	struct ls_pcie *pcie = hose->priv_data;
	u32 busdev, *addr;

	if (ls_pcie_addr_valid(hose, d)) {
		*val = 0xffffffff;
		return 0;
	}

	if (PCI_BUS(d) == hose->first_busno) {
		addr = pcie->dbi + (where & ~0x3);
	} else {
		busdev = PCIE_ATU_BUS(PCI_BUS(d)) |
			 PCIE_ATU_DEV(PCI_DEV(d)) |
			 PCIE_ATU_FUNC(PCI_FUNC(d));

		if (PCI_BUS(d) == hose->first_busno + 1) {
			ls_pcie_cfg0_set_busdev(pcie, busdev);
			addr = pcie->va_cfg0 + (where & ~0x3);
		} else {
			ls_pcie_cfg1_set_busdev(pcie, busdev);
			addr = pcie->va_cfg1 + (where & ~0x3);
		}
	}

	*val = readl(addr);

	return 0;
}

static int ls_pcie_write_config(struct pci_controller *hose, pci_dev_t d,
				int where, u32 val)
{
	struct ls_pcie *pcie = hose->priv_data;
	u32 busdev, *addr;

	if (ls_pcie_addr_valid(hose, d))
		return -EINVAL;

	if (PCI_BUS(d) == hose->first_busno) {
		addr = pcie->dbi + (where & ~0x3);
	} else {
		busdev = PCIE_ATU_BUS(PCI_BUS(d)) |
			 PCIE_ATU_DEV(PCI_DEV(d)) |
			 PCIE_ATU_FUNC(PCI_FUNC(d));

		if (PCI_BUS(d) == hose->first_busno + 1) {
			ls_pcie_cfg0_set_busdev(pcie, busdev);
			addr = pcie->va_cfg0 + (where & ~0x3);
		} else {
			ls_pcie_cfg1_set_busdev(pcie, busdev);
			addr = pcie->va_cfg1 + (where & ~0x3);
		}
	}

	writel(val, addr);

	return 0;
}

static void ls_pcie_setup_ctrl(struct ls_pcie *pcie,
			       struct ls_pcie_info *info)
{
	struct pci_controller *hose = &pcie->hose;
	pci_dev_t dev = PCI_BDF(hose->first_busno, 0, 0);

	ls_pcie_setup_atu(pcie, info);

	pci_hose_write_config_dword(hose, dev, PCI_BASE_ADDRESS_0, 0);

	/* program correct class for RC */
	writel(1, pcie->dbi + PCIE_DBI_RO_WR_EN);
	pci_hose_write_config_word(hose, dev, PCI_CLASS_DEVICE,
				   PCI_CLASS_BRIDGE_PCI);
#ifndef CONFIG_LS102XA
	writel(0, pcie->dbi + PCIE_DBI_RO_WR_EN);
#endif
}

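/*
 * Endpoint mode: back BAR0, BAR1, BAR2 and BAR4 with regions of local memory
 * at CONFIG_SYS_PCI_EP_MEMORY_BASE via inbound windows, and open one 4 GB
 * outbound MEM window starting at the controller's physical base address.
 */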
static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie,
				 struct ls_pcie_info *info)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_iatu_inbound_set(pcie, PCIE_ATU_REGION_INDEX0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_iatu_inbound_set(pcie, PCIE_ATU_REGION_INDEX1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_iatu_inbound_set(pcie, PCIE_ATU_REGION_INDEX2, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_iatu_inbound_set(pcie, PCIE_ATU_REGION_INDEX3, 4, phys);

	/* ATU 0 : OUTBOUND : map 4G MEM */
	ls_pcie_iatu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				  PCIE_ATU_TYPE_MEM,
				  info->phys_base,
				  0,
				  4 * 1024 * 1024 * 1024ULL);
}

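/*
 * BAR sizing: each BAR is programmed by writing (size - 1) through the DBI;
 * for the 64-bit BARs (BAR2 and BAR4) the upper half of the pair is cleared.
 * BARs smaller than 4K are left untouched.
 */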
/* BAR0 and BAR1 are 32-bit; BAR2 and BAR4 are 64-bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	if (size < 4 * 1024)
		return;

	switch (bar) {
	case 0:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}

static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32bit - 4K configuration */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32bit - 8K MSIX */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64bit - 4K MEM descriptor */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64bit - 1M MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

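/*
 * With an SR-IOV capability present, the BARs and inbound windows are
 * programmed once per PF/VF combination, selecting the target function
 * through the LUT LCTRL0 register where the LUT is available; without
 * SR-IOV, the shadow BAR area at PCIE_NO_SRIOV_BAR_BASE is used instead.
 */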
static void ls_pcie_setup_ep(struct ls_pcie *pcie, struct ls_pcie_info *info)
{
	struct pci_controller *hose = &pcie->hose;
	pci_dev_t dev = PCI_BDF(hose->first_busno, 0, 0);
	int sriov;

	sriov = pci_hose_find_ext_capability(hose, dev, PCI_EXT_CAP_ID_SRIOV);
	if (sriov) {
		int pf, vf;

		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
#ifndef CONFIG_LS102XA
				writel(PCIE_LCTRL0_VAL(pf, vf),
				       pcie->dbi + PCIE_LUT_BASE +
				       PCIE_LUT_LCTRL0);
#endif
				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie, info);
			}
		}
		/* Disable CFG2 */
#ifndef CONFIG_LS102XA
		writel(0, pcie->dbi + PCIE_LUT_BASE + PCIE_LUT_LCTRL0);
#endif
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie, info);
	}
}

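/*
 * Bring up one controller: map its DBI and config windows, register the
 * memory, I/O and system-memory regions, hook up the config-space accessors,
 * then configure it as Root Complex or Endpoint based on the header type.
 * Returns the first bus number available for the next controller.
 */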
int ls_pcie_init_ctrl(int busno, enum srds_prtcl dev, struct ls_pcie_info *info)
{
	struct ls_pcie *pcie;
	struct pci_controller *hose;
	int num = dev - PCIE1;
	pci_dev_t pdev = PCI_BDF(busno, 0, 0);
	int i, linkup, ep_mode;
	u8 header_type;
	u16 temp16;

	if (!is_serdes_configured(dev)) {
		printf("PCIe%d: disabled\n", num + 1);
		return busno;
	}

	pcie = malloc(sizeof(*pcie));
	if (!pcie)
		return busno;
	memset(pcie, 0, sizeof(*pcie));

	hose = &pcie->hose;
	hose->priv_data = pcie;
	hose->first_busno = busno;
	pcie->idx = num;
	pcie->dbi = map_physmem(info->regs, PCIE_DBI_SIZE, MAP_NOCACHE);
	pcie->va_cfg0 = map_physmem(info->cfg0_phys,
				    info->cfg0_size,
				    MAP_NOCACHE);
	pcie->va_cfg1 = map_physmem(info->cfg1_phys,
				    info->cfg1_size,
				    MAP_NOCACHE);

	/* outbound memory */
	pci_set_region(&hose->regions[0],
		       (pci_size_t)info->mem_bus,
		       (phys_size_t)info->mem_phys,
		       (pci_size_t)info->mem_size,
		       PCI_REGION_MEM);

	/* outbound io */
	pci_set_region(&hose->regions[1],
		       (pci_size_t)info->io_bus,
		       (phys_size_t)info->io_phys,
		       (pci_size_t)info->io_size,
		       PCI_REGION_IO);

	/* System memory space */
	pci_set_region(&hose->regions[2],
		       CONFIG_SYS_PCI_MEMORY_BUS,
		       CONFIG_SYS_PCI_MEMORY_PHYS,
		       CONFIG_SYS_PCI_MEMORY_SIZE,
		       PCI_REGION_SYS_MEMORY);

	hose->region_count = 3;

	for (i = 0; i < hose->region_count; i++)
		debug("PCI reg:%d %016llx:%016llx %016llx %08lx\n",
		      i,
		      (u64)hose->regions[i].phys_start,
		      (u64)hose->regions[i].bus_start,
		      (u64)hose->regions[i].size,
		      hose->regions[i].flags);

	pci_set_ops(hose,
		    pci_hose_read_config_byte_via_dword,
		    pci_hose_read_config_word_via_dword,
		    ls_pcie_read_config,
		    pci_hose_write_config_byte_via_dword,
		    pci_hose_write_config_word_via_dword,
		    ls_pcie_write_config);

	pci_hose_read_config_byte(hose, pdev, PCI_HEADER_TYPE, &header_type);
	ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
	printf("PCIe%u: %s ", info->pci_num,
	       ep_mode ? "Endpoint" : "Root Complex");

	if (ep_mode)
		ls_pcie_setup_ep(pcie, info);
	else
		ls_pcie_setup_ctrl(pcie, info);

	linkup = ls_pcie_link_up(pcie);

	if (!linkup) {
		/* Let the user know there's no PCIe link */
		printf("no link, regs @ 0x%lx\n", info->regs);
		hose->last_busno = hose->first_busno;
		return busno;
	}

	/* Print the negotiated PCIe link width */
	pci_hose_read_config_word(hose, pdev, PCIE_LINK_STA, &temp16);
	printf("x%d gen%d, regs @ 0x%lx\n", (temp16 & 0x3f0) >> 4,
	       (temp16 & 0xf), info->regs);

	if (ep_mode)
		return busno;

	pci_register_hose(hose);

	hose->last_busno = pci_hose_scan(hose);

	printf("PCIe%x: Bus %02x - %02x\n",
	       info->pci_num, hose->first_busno, hose->last_busno);

	return hose->last_busno + 1;
}

int ls_pcie_init_board(int busno)
{
	struct ls_pcie_info info;

#ifdef CONFIG_PCIE1
	SET_LS_PCIE_INFO(info, 1);
	busno = ls_pcie_init_ctrl(busno, PCIE1, &info);
#endif

#ifdef CONFIG_PCIE2
	SET_LS_PCIE_INFO(info, 2);
	busno = ls_pcie_init_ctrl(busno, PCIE2, &info);
#endif

#ifdef CONFIG_PCIE3
	SET_LS_PCIE_INFO(info, 3);
	busno = ls_pcie_init_ctrl(busno, PCIE3, &info);
#endif

#ifdef CONFIG_PCIE4
	SET_LS_PCIE_INFO(info, 4);
	busno = ls_pcie_init_ctrl(busno, PCIE4, &info);
#endif

	return busno;
}

void pci_init_board(void)
{
	ls_pcie_init_board(0);
}

#ifdef CONFIG_OF_BOARD_SETUP
#include <libfdt.h>
#include <fdt_support.h>

static void ft_pcie_ls_setup(void *blob, const char *pci_compat,
			     unsigned long ctrl_addr, enum srds_prtcl dev)
{
	int off;

	off = fdt_node_offset_by_compat_reg(blob, pci_compat,
					    (phys_addr_t)ctrl_addr);
	if (off < 0)
		return;

	if (!is_serdes_configured(dev))
		fdt_set_node_status(blob, off, FDT_STATUS_DISABLED, 0);
}

void ft_pci_setup(void *blob, bd_t *bd)
{
#ifdef CONFIG_PCIE1
	ft_pcie_ls_setup(blob, FSL_PCIE_COMPAT, CONFIG_SYS_PCIE1_ADDR, PCIE1);
#endif

#ifdef CONFIG_PCIE2
	ft_pcie_ls_setup(blob, FSL_PCIE_COMPAT, CONFIG_SYS_PCIE2_ADDR, PCIE2);
#endif

#ifdef CONFIG_PCIE3
	ft_pcie_ls_setup(blob, FSL_PCIE_COMPAT, CONFIG_SYS_PCIE3_ADDR, PCIE3);
#endif

#ifdef CONFIG_PCIE4
	ft_pcie_ls_setup(blob, FSL_PCIE_COMPAT, CONFIG_SYS_PCIE4_ADDR, PCIE4);
#endif
}
#else
void ft_pci_setup(void *blob, bd_t *bd)
{
}
#endif

#if defined(CONFIG_LS2080A) || defined(CONFIG_LS2085A)
void pcie_set_available_streamids(void *blob, const char *pcie_path,
				  u32 *stream_ids, int count)
{
	int nodeoffset;
	int i;

	nodeoffset = fdt_path_offset(blob, pcie_path);
	if (nodeoffset < 0) {
		printf("\n%s: ERROR: unable to update PCIe node\n", __func__);
		return;
	}

	/* append each stream ID to the available-stream-ids property */
	for (i = 0; i < count; i++) {
		fdt_appendprop_u32(blob, nodeoffset, "available-stream-ids",
				   stream_ids[i]);
	}
}

#define MAX_STREAM_IDS 4

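/*
 * For each enabled controller, allocate its range of SMMU stream IDs, expose
 * them through available-stream-ids in the controller's device tree node,
 * and add an mmu-masters entry linking the controller (stream ID 0x300) to
 * the SMMU node.
 */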
void fdt_fixup_smmu_pcie(void *blob)
{
	int count;
	u32 stream_ids[MAX_STREAM_IDS];
	u32 ctlr_streamid = 0x300;

#ifdef CONFIG_PCIE1
	/* PEX1 stream ID fixup */
	count = FSL_PEX1_STREAM_ID_END - FSL_PEX1_STREAM_ID_START + 1;
	alloc_stream_ids(FSL_PEX1_STREAM_ID_START, count, stream_ids,
			 MAX_STREAM_IDS);
	pcie_set_available_streamids(blob, "/pcie@3400000", stream_ids, count);
	append_mmu_masters(blob, "/iommu@5000000", "/pcie@3400000",
			   &ctlr_streamid, 1);
#endif

#ifdef CONFIG_PCIE2
	/* PEX2 stream ID fixup */
	count = FSL_PEX2_STREAM_ID_END - FSL_PEX2_STREAM_ID_START + 1;
	alloc_stream_ids(FSL_PEX2_STREAM_ID_START, count, stream_ids,
			 MAX_STREAM_IDS);
	pcie_set_available_streamids(blob, "/pcie@3500000", stream_ids, count);
	append_mmu_masters(blob, "/iommu@5000000", "/pcie@3500000",
			   &ctlr_streamid, 1);
#endif

#ifdef CONFIG_PCIE3
	/* PEX3 stream ID fixup */
	count = FSL_PEX3_STREAM_ID_END - FSL_PEX3_STREAM_ID_START + 1;
	alloc_stream_ids(FSL_PEX3_STREAM_ID_START, count, stream_ids,
			 MAX_STREAM_IDS);
	pcie_set_available_streamids(blob, "/pcie@3600000", stream_ids, count);
	append_mmu_masters(blob, "/iommu@5000000", "/pcie@3600000",
			   &ctlr_streamid, 1);
#endif

#ifdef CONFIG_PCIE4
	/* PEX4 stream ID fixup */
	count = FSL_PEX4_STREAM_ID_END - FSL_PEX4_STREAM_ID_START + 1;
	alloc_stream_ids(FSL_PEX4_STREAM_ID_START, count, stream_ids,
			 MAX_STREAM_IDS);
	pcie_set_available_streamids(blob, "/pcie@3700000", stream_ids, count);
	append_mmu_masters(blob, "/iommu@5000000", "/pcie@3700000",
			   &ctlr_streamid, 1);
#endif
}
#endif