/*
 * Copyright 2017 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 * Layerscape PCIe driver
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
  8. #include <common.h>
  9. #include <asm/arch/fsl_serdes.h>
  10. #include <pci.h>
  11. #include <asm/io.h>
  12. #include <errno.h>
  13. #include <malloc.h>
  14. #include <dm.h>
  15. #if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
  16. defined(CONFIG_ARM)
  17. #include <asm/arch/clock.h>
  18. #endif
  19. #include "pcie_layerscape.h"
  20. DECLARE_GLOBAL_DATA_PTR;
  21. LIST_HEAD(ls_pcie_list);
  22. static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
  23. {
  24. return in_le32(pcie->dbi + offset);
  25. }
  26. static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
  27. unsigned int offset)
  28. {
  29. out_le32(pcie->dbi + offset, value);
  30. }
  31. static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
  32. {
  33. if (pcie->big_endian)
  34. return in_be32(pcie->ctrl + offset);
  35. else
  36. return in_le32(pcie->ctrl + offset);
  37. }
  38. static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
  39. unsigned int offset)
  40. {
  41. if (pcie->big_endian)
  42. out_be32(pcie->ctrl + offset, value);
  43. else
  44. out_le32(pcie->ctrl + offset, value);
  45. }
  46. static int ls_pcie_ltssm(struct ls_pcie *pcie)
  47. {
  48. u32 state;
  49. uint svr;
  50. svr = get_svr();
  51. if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
  52. state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
  53. state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
  54. } else {
  55. state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
  56. }
  57. return state;
  58. }
  59. static int ls_pcie_link_up(struct ls_pcie *pcie)
  60. {
  61. int ltssm;
  62. ltssm = ls_pcie_ltssm(pcie);
  63. if (ltssm < LTSSM_PCIE_L0)
  64. return 0;
  65. return 1;
  66. }
  67. static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
  68. {
  69. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
  70. PCIE_ATU_VIEWPORT);
  71. dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
  72. }
  73. static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
  74. {
  75. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
  76. PCIE_ATU_VIEWPORT);
  77. dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
  78. }
  79. static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
  80. u64 phys, u64 bus_addr, pci_size_t size)
  81. {
  82. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
  83. dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
  84. dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
  85. dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
  86. dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
  87. dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
  88. dbi_writel(pcie, type, PCIE_ATU_CR1);
  89. dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
  90. }
  91. /* Use bar match mode and MEM type as default */
  92. static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
  93. int bar, u64 phys)
  94. {
  95. dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
  96. dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
  97. dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
  98. dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
  99. dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
  100. PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
  101. }
  102. static void ls_pcie_dump_atu(struct ls_pcie *pcie)
  103. {
  104. int i;
  105. for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
  106. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
  107. PCIE_ATU_VIEWPORT);
  108. debug("iATU%d:\n", i);
  109. debug("\tLOWER PHYS 0x%08x\n",
  110. dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
  111. debug("\tUPPER PHYS 0x%08x\n",
  112. dbi_readl(pcie, PCIE_ATU_UPPER_BASE));
  113. debug("\tLOWER BUS 0x%08x\n",
  114. dbi_readl(pcie, PCIE_ATU_LOWER_TARGET));
  115. debug("\tUPPER BUS 0x%08x\n",
  116. dbi_readl(pcie, PCIE_ATU_UPPER_TARGET));
  117. debug("\tLIMIT 0x%08x\n",
  118. readl(pcie->dbi + PCIE_ATU_LIMIT));
  119. debug("\tCR1 0x%08x\n",
  120. dbi_readl(pcie, PCIE_ATU_CR1));
  121. debug("\tCR2 0x%08x\n",
  122. dbi_readl(pcie, PCIE_ATU_CR2));
  123. }
  124. }
  125. static void ls_pcie_setup_atu(struct ls_pcie *pcie)
  126. {
  127. struct pci_region *io, *mem, *pref;
  128. unsigned long long offset = 0;
  129. int idx = 0;
  130. uint svr;
  131. svr = get_svr();
  132. if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
  133. offset = LS1021_PCIE_SPACE_OFFSET +
  134. LS1021_PCIE_SPACE_SIZE * pcie->idx;
  135. }
  136. /* ATU 0 : OUTBOUND : CFG0 */
  137. ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
  138. PCIE_ATU_TYPE_CFG0,
  139. pcie->cfg_res.start + offset,
  140. 0,
  141. fdt_resource_size(&pcie->cfg_res) / 2);
  142. /* ATU 1 : OUTBOUND : CFG1 */
  143. ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
  144. PCIE_ATU_TYPE_CFG1,
  145. pcie->cfg_res.start + offset +
  146. fdt_resource_size(&pcie->cfg_res) / 2,
  147. 0,
  148. fdt_resource_size(&pcie->cfg_res) / 2);
  149. pci_get_regions(pcie->bus, &io, &mem, &pref);
  150. idx = PCIE_ATU_REGION_INDEX1 + 1;
  151. /* Fix the pcie memory map for LS2088A series SoCs */
  152. svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
  153. if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
  154. svr == SVR_LS2048A || svr == SVR_LS2044A ||
  155. svr == SVR_LS2081A || svr == SVR_LS2041A) {
  156. if (io)
  157. io->phys_start = (io->phys_start &
  158. (PCIE_PHYS_SIZE - 1)) +
  159. LS2088A_PCIE1_PHYS_ADDR +
  160. LS2088A_PCIE_PHYS_SIZE * pcie->idx;
  161. if (mem)
  162. mem->phys_start = (mem->phys_start &
  163. (PCIE_PHYS_SIZE - 1)) +
  164. LS2088A_PCIE1_PHYS_ADDR +
  165. LS2088A_PCIE_PHYS_SIZE * pcie->idx;
  166. if (pref)
  167. pref->phys_start = (pref->phys_start &
  168. (PCIE_PHYS_SIZE - 1)) +
  169. LS2088A_PCIE1_PHYS_ADDR +
  170. LS2088A_PCIE_PHYS_SIZE * pcie->idx;
  171. }
  172. if (io)
  173. /* ATU : OUTBOUND : IO */
  174. ls_pcie_atu_outbound_set(pcie, idx++,
  175. PCIE_ATU_TYPE_IO,
  176. io->phys_start + offset,
  177. io->bus_start,
  178. io->size);
  179. if (mem)
  180. /* ATU : OUTBOUND : MEM */
  181. ls_pcie_atu_outbound_set(pcie, idx++,
  182. PCIE_ATU_TYPE_MEM,
  183. mem->phys_start + offset,
  184. mem->bus_start,
  185. mem->size);
  186. if (pref)
  187. /* ATU : OUTBOUND : pref */
  188. ls_pcie_atu_outbound_set(pcie, idx++,
  189. PCIE_ATU_TYPE_MEM,
  190. pref->phys_start + offset,
  191. pref->bus_start,
  192. pref->size);
  193. ls_pcie_dump_atu(pcie);
  194. }
  195. /* Return 0 if the address is valid, -errno if not valid */
  196. static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf)
  197. {
  198. struct udevice *bus = pcie->bus;
  199. if (!pcie->enabled)
  200. return -ENXIO;
  201. if (PCI_BUS(bdf) < bus->seq)
  202. return -EINVAL;
  203. if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie)))
  204. return -EINVAL;
  205. if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
  206. return -EINVAL;
  207. return 0;
  208. }
  209. void *ls_pcie_conf_address(struct ls_pcie *pcie, pci_dev_t bdf,
  210. int offset)
  211. {
  212. struct udevice *bus = pcie->bus;
  213. u32 busdev;
  214. if (PCI_BUS(bdf) == bus->seq)
  215. return pcie->dbi + offset;
  216. busdev = PCIE_ATU_BUS(PCI_BUS(bdf)) |
  217. PCIE_ATU_DEV(PCI_DEV(bdf)) |
  218. PCIE_ATU_FUNC(PCI_FUNC(bdf));
  219. if (PCI_BUS(bdf) == bus->seq + 1) {
  220. ls_pcie_cfg0_set_busdev(pcie, busdev);
  221. return pcie->cfg0 + offset;
  222. } else {
  223. ls_pcie_cfg1_set_busdev(pcie, busdev);
  224. return pcie->cfg1 + offset;
  225. }
  226. }
  227. static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
  228. uint offset, ulong *valuep,
  229. enum pci_size_t size)
  230. {
  231. struct ls_pcie *pcie = dev_get_priv(bus);
  232. void *address;
  233. if (ls_pcie_addr_valid(pcie, bdf)) {
  234. *valuep = pci_get_ff(size);
  235. return 0;
  236. }
  237. address = ls_pcie_conf_address(pcie, bdf, offset);
  238. switch (size) {
  239. case PCI_SIZE_8:
  240. *valuep = readb(address);
  241. return 0;
  242. case PCI_SIZE_16:
  243. *valuep = readw(address);
  244. return 0;
  245. case PCI_SIZE_32:
  246. *valuep = readl(address);
  247. return 0;
  248. default:
  249. return -EINVAL;
  250. }
  251. }
  252. static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
  253. uint offset, ulong value,
  254. enum pci_size_t size)
  255. {
  256. struct ls_pcie *pcie = dev_get_priv(bus);
  257. void *address;
  258. if (ls_pcie_addr_valid(pcie, bdf))
  259. return 0;
  260. address = ls_pcie_conf_address(pcie, bdf, offset);
  261. switch (size) {
  262. case PCI_SIZE_8:
  263. writeb(value, address);
  264. return 0;
  265. case PCI_SIZE_16:
  266. writew(value, address);
  267. return 0;
  268. case PCI_SIZE_32:
  269. writel(value, address);
  270. return 0;
  271. default:
  272. return -EINVAL;
  273. }
  274. }
  275. /* Clear multi-function bit */
  276. static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
  277. {
  278. writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
  279. }
  280. /* Fix class value */
  281. static void ls_pcie_fix_class(struct ls_pcie *pcie)
  282. {
  283. writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
  284. }
  285. /* Drop MSG TLP except for Vendor MSG */
  286. static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
  287. {
  288. u32 val;
  289. val = dbi_readl(pcie, PCIE_STRFMR1);
  290. val &= 0xDFFFFFFF;
  291. dbi_writel(pcie, val, PCIE_STRFMR1);
  292. }
  293. /* Disable all bars in RC mode */
  294. static void ls_pcie_disable_bars(struct ls_pcie *pcie)
  295. {
  296. u32 sriov;
  297. sriov = in_le32(pcie->dbi + PCIE_SRIOV);
  298. /*
  299. * TODO: For PCIe controller with SRIOV, the method to disable bars
  300. * is different and more complex, so will add later.
  301. */
  302. if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
  303. return;
  304. dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
  305. dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
  306. dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
  307. }
  308. static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
  309. {
  310. ls_pcie_setup_atu(pcie);
  311. dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
  312. ls_pcie_fix_class(pcie);
  313. ls_pcie_clear_multifunction(pcie);
  314. ls_pcie_drop_msg_tlp(pcie);
  315. dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);
  316. ls_pcie_disable_bars(pcie);
  317. }
  318. static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
  319. {
  320. u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;
  321. /* ATU 0 : INBOUND : map BAR0 */
  322. ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
  323. /* ATU 1 : INBOUND : map BAR1 */
  324. phys += PCIE_BAR1_SIZE;
  325. ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
  326. /* ATU 2 : INBOUND : map BAR2 */
  327. phys += PCIE_BAR2_SIZE;
  328. ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
  329. /* ATU 3 : INBOUND : map BAR4 */
  330. phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
  331. ls_pcie_atu_inbound_set(pcie, 3, 4, phys);
  332. /* ATU 0 : OUTBOUND : map MEM */
  333. ls_pcie_atu_outbound_set(pcie, 0,
  334. PCIE_ATU_TYPE_MEM,
  335. pcie->cfg_res.start,
  336. 0,
  337. CONFIG_SYS_PCI_MEMORY_SIZE);
  338. }
  339. /* BAR0 and BAR1 are 32bit BAR2 and BAR4 are 64bit */
  340. static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
  341. {
  342. /* The least inbound window is 4KiB */
  343. if (size < 4 * 1024)
  344. return;
  345. switch (bar) {
  346. case 0:
  347. writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
  348. break;
  349. case 1:
  350. writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
  351. break;
  352. case 2:
  353. writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
  354. writel(0, bar_base + PCI_BASE_ADDRESS_3);
  355. break;
  356. case 4:
  357. writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
  358. writel(0, bar_base + PCI_BASE_ADDRESS_5);
  359. break;
  360. default:
  361. break;
  362. }
  363. }
  364. static void ls_pcie_ep_setup_bars(void *bar_base)
  365. {
  366. /* BAR0 - 32bit - 4K configuration */
  367. ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
  368. /* BAR1 - 32bit - 8K MSIX*/
  369. ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
  370. /* BAR2 - 64bit - 4K MEM desciptor */
  371. ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
  372. /* BAR4 - 64bit - 1M MEM*/
  373. ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
  374. }
  375. static void ls_pcie_ep_enable_cfg(struct ls_pcie *pcie)
  376. {
  377. ctrl_writel(pcie, PCIE_CONFIG_READY, PCIE_PF_CONFIG);
  378. }
  379. static void ls_pcie_setup_ep(struct ls_pcie *pcie)
  380. {
  381. u32 sriov;
  382. sriov = readl(pcie->dbi + PCIE_SRIOV);
  383. if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
  384. int pf, vf;
  385. for (pf = 0; pf < PCIE_PF_NUM; pf++) {
  386. for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
  387. ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
  388. PCIE_PF_VF_CTRL);
  389. ls_pcie_ep_setup_bars(pcie->dbi);
  390. ls_pcie_ep_setup_atu(pcie);
  391. }
  392. }
  393. /* Disable CFG2 */
  394. ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
  395. } else {
  396. ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
  397. ls_pcie_ep_setup_atu(pcie);
  398. }
  399. ls_pcie_ep_enable_cfg(pcie);
  400. }
  401. static int ls_pcie_probe(struct udevice *dev)
  402. {
  403. struct ls_pcie *pcie = dev_get_priv(dev);
  404. const void *fdt = gd->fdt_blob;
  405. int node = dev_of_offset(dev);
  406. u8 header_type;
  407. u16 link_sta;
  408. bool ep_mode;
  409. uint svr;
  410. int ret;
  411. fdt_size_t cfg_size;
  412. pcie->bus = dev;
  413. ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
  414. "dbi", &pcie->dbi_res);
  415. if (ret) {
  416. printf("ls-pcie: resource \"dbi\" not found\n");
  417. return ret;
  418. }
  419. pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;
  420. list_add(&pcie->list, &ls_pcie_list);
  421. pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
  422. if (!pcie->enabled) {
  423. printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
  424. return 0;
  425. }
  426. pcie->dbi = map_physmem(pcie->dbi_res.start,
  427. fdt_resource_size(&pcie->dbi_res),
  428. MAP_NOCACHE);
  429. ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
  430. "lut", &pcie->lut_res);
  431. if (!ret)
  432. pcie->lut = map_physmem(pcie->lut_res.start,
  433. fdt_resource_size(&pcie->lut_res),
  434. MAP_NOCACHE);
  435. ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
  436. "ctrl", &pcie->ctrl_res);
  437. if (!ret)
  438. pcie->ctrl = map_physmem(pcie->ctrl_res.start,
  439. fdt_resource_size(&pcie->ctrl_res),
  440. MAP_NOCACHE);
  441. if (!pcie->ctrl)
  442. pcie->ctrl = pcie->lut;
  443. if (!pcie->ctrl) {
  444. printf("%s: NOT find CTRL\n", dev->name);
  445. return -1;
  446. }
  447. ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
  448. "config", &pcie->cfg_res);
  449. if (ret) {
  450. printf("%s: resource \"config\" not found\n", dev->name);
  451. return ret;
  452. }
  453. /*
  454. * Fix the pcie memory map address and PF control registers address
  455. * for LS2088A series SoCs
  456. */
  457. svr = get_svr();
  458. svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
  459. if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
  460. svr == SVR_LS2048A || svr == SVR_LS2044A ||
  461. svr == SVR_LS2081A || svr == SVR_LS2041A) {
  462. cfg_size = fdt_resource_size(&pcie->cfg_res);
  463. pcie->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR +
  464. LS2088A_PCIE_PHYS_SIZE * pcie->idx;
  465. pcie->cfg_res.end = pcie->cfg_res.start + cfg_size;
  466. pcie->ctrl = pcie->lut + 0x40000;
  467. }
  468. pcie->cfg0 = map_physmem(pcie->cfg_res.start,
  469. fdt_resource_size(&pcie->cfg_res),
  470. MAP_NOCACHE);
  471. pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;
  472. pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");
  473. debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
  474. dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
  475. (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
  476. pcie->big_endian);
  477. header_type = readb(pcie->dbi + PCI_HEADER_TYPE);
  478. ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
  479. printf("PCIe%u: %s %s", pcie->idx, dev->name,
  480. ep_mode ? "Endpoint" : "Root Complex");
  481. if (ep_mode)
  482. ls_pcie_setup_ep(pcie);
  483. else
  484. ls_pcie_setup_ctrl(pcie);
  485. if (!ls_pcie_link_up(pcie)) {
  486. /* Let the user know there's no PCIe link */
  487. printf(": no link\n");
  488. return 0;
  489. }
  490. /* Print the negotiated PCIe link width */
  491. link_sta = readw(pcie->dbi + PCIE_LINK_STA);
  492. printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
  493. link_sta & PCIE_LINK_SPEED_MASK);
  494. return 0;
  495. }
  496. static const struct dm_pci_ops ls_pcie_ops = {
  497. .read_config = ls_pcie_read_config,
  498. .write_config = ls_pcie_write_config,
  499. };
  500. static const struct udevice_id ls_pcie_ids[] = {
  501. { .compatible = "fsl,ls-pcie" },
  502. { }
  503. };
  504. U_BOOT_DRIVER(pci_layerscape) = {
  505. .name = "pci_layerscape",
  506. .id = UCLASS_PCI,
  507. .of_match = ls_pcie_ids,
  508. .ops = &ls_pcie_ops,
  509. .probe = ls_pcie_probe,
  510. .priv_auto_alloc_size = sizeof(struct ls_pcie),
  511. };