  1. /*
  2. * Copyright 2014-2015 Freescale Semiconductor, Inc.
  3. * Layerscape PCIe driver
  4. *
  5. * SPDX-License-Identifier: GPL-2.0+
  6. */
  7. #include <common.h>
  8. #include <asm/arch/fsl_serdes.h>
  9. #include <pci.h>
  10. #include <asm/io.h>
  11. #include <errno.h>
  12. #include <malloc.h>
  13. #include <dm.h>
  14. #include "pcie_layerscape.h"
/* Gives this file access to the U-Boot global data pointer (gd) */
DECLARE_GLOBAL_DATA_PTR;

/* List of all probed Layerscape PCIe controllers (linked via pcie->list) */
LIST_HEAD(ls_pcie_list);
  17. static unsigned int dbi_readl(struct ls_pcie *pcie, unsigned int offset)
  18. {
  19. return in_le32(pcie->dbi + offset);
  20. }
  21. static void dbi_writel(struct ls_pcie *pcie, unsigned int value,
  22. unsigned int offset)
  23. {
  24. out_le32(pcie->dbi + offset, value);
  25. }
  26. static unsigned int ctrl_readl(struct ls_pcie *pcie, unsigned int offset)
  27. {
  28. if (pcie->big_endian)
  29. return in_be32(pcie->ctrl + offset);
  30. else
  31. return in_le32(pcie->ctrl + offset);
  32. }
  33. static void ctrl_writel(struct ls_pcie *pcie, unsigned int value,
  34. unsigned int offset)
  35. {
  36. if (pcie->big_endian)
  37. out_be32(pcie->ctrl + offset, value);
  38. else
  39. out_le32(pcie->ctrl + offset, value);
  40. }
  41. static int ls_pcie_ltssm(struct ls_pcie *pcie)
  42. {
  43. u32 state;
  44. uint svr;
  45. svr = get_svr();
  46. if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
  47. state = ctrl_readl(pcie, LS1021_PEXMSCPORTSR(pcie->idx));
  48. state = (state >> LS1021_LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK;
  49. } else {
  50. state = ctrl_readl(pcie, PCIE_PF_DBG) & LTSSM_STATE_MASK;
  51. }
  52. return state;
  53. }
  54. static int ls_pcie_link_up(struct ls_pcie *pcie)
  55. {
  56. int ltssm;
  57. ltssm = ls_pcie_ltssm(pcie);
  58. if (ltssm < LTSSM_PCIE_L0)
  59. return 0;
  60. return 1;
  61. }
  62. static void ls_pcie_cfg0_set_busdev(struct ls_pcie *pcie, u32 busdev)
  63. {
  64. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0,
  65. PCIE_ATU_VIEWPORT);
  66. dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
  67. }
  68. static void ls_pcie_cfg1_set_busdev(struct ls_pcie *pcie, u32 busdev)
  69. {
  70. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1,
  71. PCIE_ATU_VIEWPORT);
  72. dbi_writel(pcie, busdev, PCIE_ATU_LOWER_TARGET);
  73. }
/*
 * Program one outbound iATU window.
 *
 * @idx:      window index (ORed with PCIE_ATU_REGION_OUTBOUND)
 * @type:     TLP type for this window (CFG0/CFG1/IO/MEM)
 * @phys:     CPU physical base address of the window
 * @bus_addr: PCI bus address the window translates to
 * @size:     window size in bytes
 *
 * NOTE: the viewport write must come first — it selects which window the
 * following register writes apply to. The CR2 enable write must come last.
 */
static void ls_pcie_atu_outbound_set(struct ls_pcie *pcie, int idx, int type,
				     u64 phys, u64 bus_addr, pci_size_t size)
{
	dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_BASE);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_BASE);
	/* Limit is the inclusive low-32-bit end address of the window */
	dbi_writel(pcie, (u32)phys + size - 1, PCIE_ATU_LIMIT);
	dbi_writel(pcie, (u32)bus_addr, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, bus_addr >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, type, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
}
/* Use bar match mode and MEM type as default */
/*
 * Program one inbound iATU window in BAR-match mode: accesses hitting
 * the given BAR are translated to CPU physical address @phys.
 * The viewport write must precede the data writes; CR2 enable is last.
 */
static void ls_pcie_atu_inbound_set(struct ls_pcie *pcie, int idx,
				    int bar, u64 phys)
{
	dbi_writel(pcie, PCIE_ATU_REGION_INBOUND | idx, PCIE_ATU_VIEWPORT);
	dbi_writel(pcie, (u32)phys, PCIE_ATU_LOWER_TARGET);
	dbi_writel(pcie, phys >> 32, PCIE_ATU_UPPER_TARGET);
	dbi_writel(pcie, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
	dbi_writel(pcie, PCIE_ATU_ENABLE | PCIE_ATU_BAR_MODE_ENABLE |
		   PCIE_ATU_BAR_NUM(bar), PCIE_ATU_CR2);
}
  97. static void ls_pcie_dump_atu(struct ls_pcie *pcie)
  98. {
  99. int i;
  100. for (i = 0; i < PCIE_ATU_REGION_NUM; i++) {
  101. dbi_writel(pcie, PCIE_ATU_REGION_OUTBOUND | i,
  102. PCIE_ATU_VIEWPORT);
  103. debug("iATU%d:\n", i);
  104. debug("\tLOWER PHYS 0x%08x\n",
  105. dbi_readl(pcie, PCIE_ATU_LOWER_BASE));
  106. debug("\tUPPER PHYS 0x%08x\n",
  107. dbi_readl(pcie, PCIE_ATU_UPPER_BASE));
  108. debug("\tLOWER BUS 0x%08x\n",
  109. dbi_readl(pcie, PCIE_ATU_LOWER_TARGET));
  110. debug("\tUPPER BUS 0x%08x\n",
  111. dbi_readl(pcie, PCIE_ATU_UPPER_TARGET));
  112. debug("\tLIMIT 0x%08x\n",
  113. readl(pcie->dbi + PCIE_ATU_LIMIT));
  114. debug("\tCR1 0x%08x\n",
  115. dbi_readl(pcie, PCIE_ATU_CR1));
  116. debug("\tCR2 0x%08x\n",
  117. dbi_readl(pcie, PCIE_ATU_CR2));
  118. }
  119. }
/*
 * Program all outbound iATU windows for root-complex mode.
 *
 * Windows 0 and 1 split the "config" resource in half for type-0
 * (immediate child bus) and type-1 (deeper buses) configuration cycles;
 * subsequent windows map the IO, MEM and prefetchable regions provided
 * by the PCI uclass. On LS2088A-family SoCs the region physical
 * addresses are first rebased onto the LS2088A PCIe address map.
 */
static void ls_pcie_setup_atu(struct ls_pcie *pcie)
{
	struct pci_region *io, *mem, *pref;
	unsigned long long offset = 0;
	int idx = 0;
	uint svr;

	svr = get_svr();
	if (((svr >> SVR_VAR_PER_SHIFT) & SVR_LS102XA_MASK) == SVR_LS102XA) {
		/* LS102xA: CPU view of PCIe space is offset per controller */
		offset = LS1021_PCIE_SPACE_OFFSET +
			 LS1021_PCIE_SPACE_SIZE * pcie->idx;
	}

	/* ATU 0 : OUTBOUND : CFG0 (first half of the config resource) */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX0,
				 PCIE_ATU_TYPE_CFG0,
				 pcie->cfg_res.start + offset,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);
	/* ATU 1 : OUTBOUND : CFG1 (second half of the config resource) */
	ls_pcie_atu_outbound_set(pcie, PCIE_ATU_REGION_INDEX1,
				 PCIE_ATU_TYPE_CFG1,
				 pcie->cfg_res.start + offset +
				 fdt_resource_size(&pcie->cfg_res) / 2,
				 0,
				 fdt_resource_size(&pcie->cfg_res) / 2);

	pci_get_regions(pcie->bus, &io, &mem, &pref);
	idx = PCIE_ATU_REGION_INDEX1 + 1;

	/* Fix the pcie memory map for LS2088A series SoCs */
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A) {
		/* Keep the offset within a window, rebase onto the
		 * per-controller LS2088A physical range */
		if (io)
			io->phys_start = (io->phys_start &
					 (PCIE_PHYS_SIZE - 1)) +
					 LS2088A_PCIE1_PHYS_ADDR +
					 LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (mem)
			mem->phys_start = (mem->phys_start &
					  (PCIE_PHYS_SIZE - 1)) +
					  LS2088A_PCIE1_PHYS_ADDR +
					  LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		if (pref)
			pref->phys_start = (pref->phys_start &
					   (PCIE_PHYS_SIZE - 1)) +
					   LS2088A_PCIE1_PHYS_ADDR +
					   LS2088A_PCIE_PHYS_SIZE * pcie->idx;
	}

	if (io)
		/* ATU : OUTBOUND : IO */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_IO,
					 io->phys_start + offset,
					 io->bus_start,
					 io->size);

	if (mem)
		/* ATU : OUTBOUND : MEM */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 mem->phys_start + offset,
					 mem->bus_start,
					 mem->size);

	if (pref)
		/* ATU : OUTBOUND : pref (mapped as MEM type) */
		ls_pcie_atu_outbound_set(pcie, idx++,
					 PCIE_ATU_TYPE_MEM,
					 pref->phys_start + offset,
					 pref->bus_start,
					 pref->size);

	ls_pcie_dump_atu(pcie);
}
  189. /* Return 0 if the address is valid, -errno if not valid */
  190. static int ls_pcie_addr_valid(struct ls_pcie *pcie, pci_dev_t bdf)
  191. {
  192. struct udevice *bus = pcie->bus;
  193. if (!pcie->enabled)
  194. return -ENXIO;
  195. if (PCI_BUS(bdf) < bus->seq)
  196. return -EINVAL;
  197. if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_link_up(pcie)))
  198. return -EINVAL;
  199. if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
  200. return -EINVAL;
  201. return 0;
  202. }
  203. void *ls_pcie_conf_address(struct ls_pcie *pcie, pci_dev_t bdf,
  204. int offset)
  205. {
  206. struct udevice *bus = pcie->bus;
  207. u32 busdev;
  208. if (PCI_BUS(bdf) == bus->seq)
  209. return pcie->dbi + offset;
  210. busdev = PCIE_ATU_BUS(PCI_BUS(bdf)) |
  211. PCIE_ATU_DEV(PCI_DEV(bdf)) |
  212. PCIE_ATU_FUNC(PCI_FUNC(bdf));
  213. if (PCI_BUS(bdf) == bus->seq + 1) {
  214. ls_pcie_cfg0_set_busdev(pcie, busdev);
  215. return pcie->cfg0 + offset;
  216. } else {
  217. ls_pcie_cfg1_set_busdev(pcie, busdev);
  218. return pcie->cfg1 + offset;
  219. }
  220. }
  221. static int ls_pcie_read_config(struct udevice *bus, pci_dev_t bdf,
  222. uint offset, ulong *valuep,
  223. enum pci_size_t size)
  224. {
  225. struct ls_pcie *pcie = dev_get_priv(bus);
  226. void *address;
  227. if (ls_pcie_addr_valid(pcie, bdf)) {
  228. *valuep = pci_get_ff(size);
  229. return 0;
  230. }
  231. address = ls_pcie_conf_address(pcie, bdf, offset);
  232. switch (size) {
  233. case PCI_SIZE_8:
  234. *valuep = readb(address);
  235. return 0;
  236. case PCI_SIZE_16:
  237. *valuep = readw(address);
  238. return 0;
  239. case PCI_SIZE_32:
  240. *valuep = readl(address);
  241. return 0;
  242. default:
  243. return -EINVAL;
  244. }
  245. }
  246. static int ls_pcie_write_config(struct udevice *bus, pci_dev_t bdf,
  247. uint offset, ulong value,
  248. enum pci_size_t size)
  249. {
  250. struct ls_pcie *pcie = dev_get_priv(bus);
  251. void *address;
  252. if (ls_pcie_addr_valid(pcie, bdf))
  253. return 0;
  254. address = ls_pcie_conf_address(pcie, bdf, offset);
  255. switch (size) {
  256. case PCI_SIZE_8:
  257. writeb(value, address);
  258. return 0;
  259. case PCI_SIZE_16:
  260. writew(value, address);
  261. return 0;
  262. case PCI_SIZE_32:
  263. writel(value, address);
  264. return 0;
  265. default:
  266. return -EINVAL;
  267. }
  268. }
  269. /* Clear multi-function bit */
  270. static void ls_pcie_clear_multifunction(struct ls_pcie *pcie)
  271. {
  272. writeb(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE);
  273. }
  274. /* Fix class value */
  275. static void ls_pcie_fix_class(struct ls_pcie *pcie)
  276. {
  277. writew(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE);
  278. }
  279. /* Drop MSG TLP except for Vendor MSG */
  280. static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie)
  281. {
  282. u32 val;
  283. val = dbi_readl(pcie, PCIE_STRFMR1);
  284. val &= 0xDFFFFFFF;
  285. dbi_writel(pcie, val, PCIE_STRFMR1);
  286. }
  287. /* Disable all bars in RC mode */
  288. static void ls_pcie_disable_bars(struct ls_pcie *pcie)
  289. {
  290. u32 sriov;
  291. sriov = in_le32(pcie->dbi + PCIE_SRIOV);
  292. /*
  293. * TODO: For PCIe controller with SRIOV, the method to disable bars
  294. * is different and more complex, so will add later.
  295. */
  296. if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
  297. return;
  298. dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0);
  299. dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1);
  300. dbi_writel(pcie, 0, PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1);
  301. }
/*
 * Configure the controller for root-complex operation: program the iATU
 * windows, then patch the read-only config header fields (class code,
 * multi-function bit, MSG TLP filtering) inside a DBI_RO_WR_EN bracket,
 * and finally disable the RC BARs.
 */
static void ls_pcie_setup_ctrl(struct ls_pcie *pcie)
{
	ls_pcie_setup_atu(pcie);

	/* Temporarily allow writes to read-only DBI registers */
	dbi_writel(pcie, 1, PCIE_DBI_RO_WR_EN);
	ls_pcie_fix_class(pcie);
	ls_pcie_clear_multifunction(pcie);
	ls_pcie_drop_msg_tlp(pcie);
	dbi_writel(pcie, 0, PCIE_DBI_RO_WR_EN);

	ls_pcie_disable_bars(pcie);
}
/*
 * Program the iATU windows for endpoint mode: inbound windows map
 * BAR0/1/2/4 onto CONFIG_SYS_PCI_EP_MEMORY_BASE, and one outbound
 * window maps host memory through the "config" resource.
 */
static void ls_pcie_ep_setup_atu(struct ls_pcie *pcie)
{
	u64 phys = CONFIG_SYS_PCI_EP_MEMORY_BASE;

	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, 0, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys += PCIE_BAR1_SIZE;
	ls_pcie_atu_inbound_set(pcie, 1, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys += PCIE_BAR2_SIZE;
	ls_pcie_atu_inbound_set(pcie, 2, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	/*
	 * NOTE(review): BAR4 restarts at base + PCIE_BAR4_SIZE rather than
	 * continuing the running offset, so it can overlap the BAR1/BAR2
	 * targets above — looks deliberate but worth confirming against
	 * the board's EP memory layout.
	 */
	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR4_SIZE;
	ls_pcie_atu_inbound_set(pcie, 3, 4, phys);

	/* ATU 0 : OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, 0,
				 PCIE_ATU_TYPE_MEM,
				 pcie->cfg_res.start,
				 0,
				 CONFIG_SYS_PCI_MEMORY_SIZE);
}
  333. /* BAR0 and BAR1 are 32bit BAR2 and BAR4 are 64bit */
  334. static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
  335. {
  336. /* The least inbound window is 4KiB */
  337. if (size < 4 * 1024)
  338. return;
  339. switch (bar) {
  340. case 0:
  341. writel(size - 1, bar_base + PCI_BASE_ADDRESS_0);
  342. break;
  343. case 1:
  344. writel(size - 1, bar_base + PCI_BASE_ADDRESS_1);
  345. break;
  346. case 2:
  347. writel(size - 1, bar_base + PCI_BASE_ADDRESS_2);
  348. writel(0, bar_base + PCI_BASE_ADDRESS_3);
  349. break;
  350. case 4:
  351. writel(size - 1, bar_base + PCI_BASE_ADDRESS_4);
  352. writel(0, bar_base + PCI_BASE_ADDRESS_5);
  353. break;
  354. default:
  355. break;
  356. }
  357. }
/* Size all four endpoint BARs at the given config-header base. */
static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32bit - 4K configuration */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32bit - 8K MSIX */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64bit - 4K MEM descriptor */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64bit - 1M MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}
/* Mark the endpoint configuration ready so the host can enumerate it. */
static void ls_pcie_ep_enable_cfg(struct ls_pcie *pcie)
{
	ctrl_writel(pcie, PCIE_CONFIG_READY, PCIE_PF_CONFIG);
}
/*
 * Configure the controller for endpoint operation. On SR-IOV capable
 * controllers the BAR/ATU setup is repeated for every PF/VF selected
 * through the PF_VF_CTRL window; otherwise a single header is set up.
 */
static void ls_pcie_setup_ep(struct ls_pcie *pcie)
{
	u32 sriov;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		int pf, vf;

		/* vf == 0 selects the PF itself, 1..PCIE_VF_NUM its VFs */
		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			for (vf = 0; vf <= PCIE_VF_NUM; vf++) {
				/* Select which function's registers the
				 * following DBI writes land on */
				ctrl_writel(pcie, PCIE_LCTRL0_VAL(pf, vf),
					    PCIE_PF_VF_CTRL);

				ls_pcie_ep_setup_bars(pcie->dbi);
				ls_pcie_ep_setup_atu(pcie);
			}
		}
		/* Disable CFG2 */
		ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		/* Non-SRIOV parts expose the BARs at a fixed offset */
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie);
	}

	ls_pcie_ep_enable_cfg(pcie);
}
/*
 * Driver-model probe: parse the "reg"/"reg-names" resources from the
 * device tree, map the register spaces, decide RC vs EP mode from the
 * config header type, run the matching setup path, and report link
 * status. Returns 0 on success (including the disabled / no-link
 * cases, which are not errors) or a negative error code.
 */
static int ls_pcie_probe(struct udevice *dev)
{
	struct ls_pcie *pcie = dev_get_priv(dev);
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	u8 header_type;
	u16 link_sta;
	bool ep_mode;
	uint svr;
	int ret;

	pcie->bus = dev;

	/* "dbi" is mandatory: the controller's own register space */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "dbi", &pcie->dbi_res);
	if (ret) {
		printf("ls-pcie: resource \"dbi\" not found\n");
		return ret;
	}

	/* Derive the controller index from its position in CCSR space */
	pcie->idx = (pcie->dbi_res.start - PCIE_SYS_BASE_ADDR) / PCIE_CCSR_SIZE;

	list_add(&pcie->list, &ls_pcie_list);

	/* Controller unusable if its SerDes lanes are not configured */
	pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
	if (!pcie->enabled) {
		printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
		return 0;
	}

	pcie->dbi = map_physmem(pcie->dbi_res.start,
				fdt_resource_size(&pcie->dbi_res),
				MAP_NOCACHE);

	/* "lut" and "ctrl" are optional; ctrl falls back to lut below */
	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "lut", &pcie->lut_res);
	if (!ret)
		pcie->lut = map_physmem(pcie->lut_res.start,
					fdt_resource_size(&pcie->lut_res),
					MAP_NOCACHE);

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "ctrl", &pcie->ctrl_res);
	if (!ret)
		pcie->ctrl = map_physmem(pcie->ctrl_res.start,
					 fdt_resource_size(&pcie->ctrl_res),
					 MAP_NOCACHE);
	if (!pcie->ctrl)
		pcie->ctrl = pcie->lut;

	if (!pcie->ctrl) {
		printf("%s: NOT find CTRL\n", dev->name);
		return -1;
	}

	ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
				     "config", &pcie->cfg_res);
	if (ret) {
		printf("%s: resource \"config\" not found\n", dev->name);
		return ret;
	}

	/*
	 * Fix the pcie memory map address and PF control registers address
	 * for LS2088A series SoCs
	 */
	svr = get_svr();
	svr = (svr >> SVR_VAR_PER_SHIFT) & 0xFFFFFE;
	if (svr == SVR_LS2088A || svr == SVR_LS2084A ||
	    svr == SVR_LS2048A || svr == SVR_LS2044A) {
		pcie->cfg_res.start = LS2088A_PCIE1_PHYS_ADDR +
				      LS2088A_PCIE_PHYS_SIZE * pcie->idx;
		pcie->ctrl = pcie->lut + 0x40000;
	}

	pcie->cfg0 = map_physmem(pcie->cfg_res.start,
				 fdt_resource_size(&pcie->cfg_res),
				 MAP_NOCACHE);
	/* Second half of the config window carries type-1 cycles */
	pcie->cfg1 = pcie->cfg0 + fdt_resource_size(&pcie->cfg_res) / 2;

	pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");

	debug("%s dbi:%lx lut:%lx ctrl:0x%lx cfg0:0x%lx, big-endian:%d\n",
	      dev->name, (unsigned long)pcie->dbi, (unsigned long)pcie->lut,
	      (unsigned long)pcie->ctrl, (unsigned long)pcie->cfg0,
	      pcie->big_endian);

	/* EP if the header reports a normal (type 0) function */
	header_type = readb(pcie->dbi + PCI_HEADER_TYPE);
	ep_mode = (header_type & 0x7f) == PCI_HEADER_TYPE_NORMAL;
	printf("PCIe%u: %s %s", pcie->idx, dev->name,
	       ep_mode ? "Endpoint" : "Root Complex");

	if (ep_mode)
		ls_pcie_setup_ep(pcie);
	else
		ls_pcie_setup_ctrl(pcie);

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}
/* Config-space accessors exported to the PCI uclass */
static const struct dm_pci_ops ls_pcie_ops = {
	.read_config	= ls_pcie_read_config,
	.write_config	= ls_pcie_write_config,
};

/* Device-tree compatible strings handled by this driver */
static const struct udevice_id ls_pcie_ids[] = {
	{ .compatible = "fsl,ls-pcie" },
	{ }
};

/* Driver-model registration: one ls_pcie instance per controller node */
U_BOOT_DRIVER(pci_layerscape) = {
	.name = "pci_layerscape",
	.id = UCLASS_PCI,
	.of_match = ls_pcie_ids,
	.ops = &ls_pcie_ops,
	.probe = ls_pcie_probe,
	.priv_auto_alloc_size = sizeof(struct ls_pcie),
};