zynqmp_gqspi.c

// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2018 Xilinx
 *
 * Xilinx ZynqMP Generic Quad-SPI(QSPI) controller driver (master mode only)
 */

#include <common.h>
#include <asm/arch/clk.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>
#include <asm/io.h>
#include <clk.h>
#include <dm.h>
#include <malloc.h>
#include <memalign.h>
#include <spi.h>
#include <ubi_uboot.h>
#include <wait_bit.h>

#define GQSPI_GFIFO_STRT_MODE_MASK BIT(29)
#define GQSPI_CONFIG_MODE_EN_MASK (3 << 30)
#define GQSPI_CONFIG_DMA_MODE (2 << 30)
#define GQSPI_CONFIG_CPHA_MASK BIT(2)
#define GQSPI_CONFIG_CPOL_MASK BIT(1)

/*
 * QSPI Interrupt Registers bit Masks
 *
 * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
 * bit definitions.
 */
#define GQSPI_IXR_TXNFULL_MASK 0x00000004 /* QSPI TX FIFO not full */
#define GQSPI_IXR_TXFULL_MASK 0x00000008 /* QSPI TX FIFO is full */
#define GQSPI_IXR_RXNEMTY_MASK 0x00000010 /* QSPI RX FIFO Not Empty */
#define GQSPI_IXR_GFEMTY_MASK 0x00000080 /* QSPI Generic FIFO Empty */
#define GQSPI_IXR_ALL_MASK (GQSPI_IXR_TXNFULL_MASK | \
			    GQSPI_IXR_RXNEMTY_MASK)

/*
 * QSPI Enable Register bit Masks
 *
 * This register is used to enable or disable the QSPI controller
 */
#define GQSPI_ENABLE_ENABLE_MASK 0x00000001 /* QSPI Enable Bit Mask */

#define GQSPI_GFIFO_LOW_BUS BIT(14)
#define GQSPI_GFIFO_CS_LOWER BIT(12)
#define GQSPI_GFIFO_UP_BUS BIT(15)
#define GQSPI_GFIFO_CS_UPPER BIT(13)
#define GQSPI_SPI_MODE_QSPI (3 << 10)
#define GQSPI_SPI_MODE_SPI BIT(10)
#define GQSPI_SPI_MODE_DUAL_SPI (2 << 10)
#define GQSPI_IMD_DATA_CS_ASSERT 5
#define GQSPI_IMD_DATA_CS_DEASSERT 5
#define GQSPI_GFIFO_TX BIT(16)
#define GQSPI_GFIFO_RX BIT(17)
#define GQSPI_GFIFO_STRIPE_MASK BIT(18)
#define GQSPI_GFIFO_IMD_MASK 0xFF
#define GQSPI_GFIFO_EXP_MASK BIT(9)
#define GQSPI_GFIFO_DATA_XFR_MASK BIT(8)
#define GQSPI_STRT_GEN_FIFO BIT(28)
#define GQSPI_GEN_FIFO_STRT_MOD BIT(29)
#define GQSPI_GFIFO_WP_HOLD BIT(19)
#define GQSPI_BAUD_DIV_MASK (7 << 3)
#define GQSPI_DFLT_BAUD_RATE_DIV BIT(3)
#define GQSPI_GFIFO_ALL_INT_MASK 0xFBE
#define GQSPI_DMA_DST_I_STS_DONE BIT(1)
#define GQSPI_DMA_DST_I_STS_MASK 0xFE
#define MODEBITS 0x6

#define GQSPI_GFIFO_SELECT BIT(0)
#define GQSPI_FIFO_THRESHOLD 1

#define SPI_XFER_ON_BOTH 0
#define SPI_XFER_ON_LOWER 1
#define SPI_XFER_ON_UPPER 2

#define GQSPI_DMA_ALIGN 0x4
#define GQSPI_MAX_BAUD_RATE_VAL 7
#define GQSPI_DFLT_BAUD_RATE_VAL 2

#define GQSPI_TIMEOUT 100000000

#define GQSPI_BAUD_DIV_SHIFT 2
#define GQSPI_LPBK_DLY_ADJ_LPBK_SHIFT 5
#define GQSPI_LPBK_DLY_ADJ_DLY_1 0x2
#define GQSPI_LPBK_DLY_ADJ_DLY_1_SHIFT 3
#define GQSPI_LPBK_DLY_ADJ_DLY_0 0x3
#define GQSPI_USE_DATA_DLY 0x1
#define GQSPI_USE_DATA_DLY_SHIFT 31
#define GQSPI_DATA_DLY_ADJ_VALUE 0x2
#define GQSPI_DATA_DLY_ADJ_SHIFT 28
#define TAP_DLY_BYPASS_LQSPI_RX_VALUE 0x1
#define TAP_DLY_BYPASS_LQSPI_RX_SHIFT 2
#define GQSPI_DATA_DLY_ADJ_OFST 0x000001F8
#define IOU_TAPDLY_BYPASS_OFST 0xFF180390
#define GQSPI_LPBK_DLY_ADJ_LPBK_MASK 0x00000020
#define GQSPI_FREQ_40MHZ 40000000
#define GQSPI_FREQ_100MHZ 100000000
#define GQSPI_FREQ_150MHZ 150000000
#define IOU_TAPDLY_BYPASS_MASK 0x7

#define GQSPI_REG_OFFSET 0x100
#define GQSPI_DMA_REG_OFFSET 0x800
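
/*
 * For reference, as suggested by the GQSPI_GFIFO_* masks above, each 32-bit
 * generic FIFO entry built by this driver packs: the immediate data/length in
 * bits [7:0], the data-transfer flag in bit 8, the length-exponent flag in
 * bit 9, the SPI mode in bits [11:10], the lower/upper chip-selects in bits
 * [13:12], the lower/upper bus-selects in bits [15:14], TX/RX enables in bits
 * [17:16] and stripe in bit 18. (GQSPI_GFIFO_WP_HOLD, despite its name, is
 * only OR'd into the configuration register in zynqmp_qspi_init_hw().)
 */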

/* QSPI register offsets */
struct zynqmp_qspi_regs {
	u32 confr;	/* 0x00 */
	u32 isr;	/* 0x04 */
	u32 ier;	/* 0x08 */
	u32 idisr;	/* 0x0C */
	u32 imaskr;	/* 0x10 */
	u32 enbr;	/* 0x14 */
	u32 dr;		/* 0x18 */
	u32 txd0r;	/* 0x1C */
	u32 drxr;	/* 0x20 */
	u32 sicr;	/* 0x24 */
	u32 txftr;	/* 0x28 */
	u32 rxftr;	/* 0x2C */
	u32 gpior;	/* 0x30 */
	u32 reserved0;	/* 0x34 */
	u32 lpbkdly;	/* 0x38 */
	u32 reserved1;	/* 0x3C */
	u32 genfifo;	/* 0x40 */
	u32 gqspisel;	/* 0x44 */
	u32 reserved2;	/* 0x48 */
	u32 gqfifoctrl;	/* 0x4C */
	u32 gqfthr;	/* 0x50 */
	u32 gqpollcfg;	/* 0x54 */
	u32 gqpollto;	/* 0x58 */
	u32 gqxfersts;	/* 0x5C */
	u32 gqfifosnap;	/* 0x60 */
	u32 gqrxcpy;	/* 0x64 */
	u32 reserved3[36];	/* 0x68 */
	u32 gqspidlyadj;	/* 0xF8 */
};

struct zynqmp_qspi_dma_regs {
	u32 dmadst;	/* 0x00 */
	u32 dmasize;	/* 0x04 */
	u32 dmasts;	/* 0x08 */
	u32 dmactrl;	/* 0x0C */
	u32 reserved0;	/* 0x10 */
	u32 dmaisr;	/* 0x14 */
	u32 dmaier;	/* 0x18 */
	u32 dmaidr;	/* 0x1C */
	u32 dmaimr;	/* 0x20 */
	u32 dmactrl2;	/* 0x24 */
	u32 dmadstmsb;	/* 0x28 */
};

DECLARE_GLOBAL_DATA_PTR;

struct zynqmp_qspi_platdata {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	u32 frequency;
	u32 speed_hz;
};

struct zynqmp_qspi_priv {
	struct zynqmp_qspi_regs *regs;
	struct zynqmp_qspi_dma_regs *dma_regs;
	const void *tx_buf;
	void *rx_buf;
	unsigned int len;
	int bytes_to_transfer;
	int bytes_to_receive;
	unsigned int is_inst;
	unsigned int cs_change:1;
};

static int zynqmp_qspi_ofdata_to_platdata(struct udevice *bus)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;

	debug("%s\n", __func__);

	plat->regs = (struct zynqmp_qspi_regs *)(devfdt_get_addr(bus) +
						 GQSPI_REG_OFFSET);
	plat->dma_regs = (struct zynqmp_qspi_dma_regs *)
			  (devfdt_get_addr(bus) + GQSPI_DMA_REG_OFFSET);

	return 0;
}
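
/*
 * One-time controller setup: select the generic FIFO interface, mask and
 * clear all interrupts, program the FIFO thresholds, switch the configuration
 * register to DMA mode with the default baud-rate divider and WP hold, then
 * enable the controller.
 */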
static void zynqmp_qspi_init_hw(struct zynqmp_qspi_priv *priv)
{
	u32 config_reg;
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_GFIFO_SELECT, &regs->gqspisel);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->idisr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->txftr);
	writel(GQSPI_FIFO_THRESHOLD, &regs->rxftr);
	writel(GQSPI_GFIFO_ALL_INT_MASK, &regs->isr);

	config_reg = readl(&regs->confr);
	config_reg &= ~(GQSPI_GFIFO_STRT_MODE_MASK |
			GQSPI_CONFIG_MODE_EN_MASK);
	config_reg |= GQSPI_CONFIG_DMA_MODE |
		      GQSPI_GFIFO_WP_HOLD |
		      GQSPI_DFLT_BAUD_RATE_DIV;
	writel(config_reg, &regs->confr);

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);
}

static u32 zynqmp_qspi_bus_select(struct zynqmp_qspi_priv *priv)
{
	u32 gqspi_fifo_reg = 0;

	gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS |
			 GQSPI_GFIFO_CS_LOWER;

	return gqspi_fifo_reg;
}

static void zynqmp_qspi_fill_gen_fifo(struct zynqmp_qspi_priv *priv,
				      u32 gqspi_fifo_reg)
{
	struct zynqmp_qspi_regs *regs = priv->regs;
	int ret = 0;

	ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_GFEMTY_MASK, 1,
				GQSPI_TIMEOUT, 1);
	if (ret)
		printf("%s Timeout\n", __func__);

	writel(gqspi_fifo_reg, &regs->genfifo);
}

static void zynqmp_qspi_chipselect(struct zynqmp_qspi_priv *priv, int is_on)
{
	u32 gqspi_fifo_reg = 0;

	if (is_on) {
		gqspi_fifo_reg = zynqmp_qspi_bus_select(priv);
		gqspi_fifo_reg |= GQSPI_SPI_MODE_SPI |
				  GQSPI_IMD_DATA_CS_ASSERT;
	} else {
		gqspi_fifo_reg = GQSPI_GFIFO_LOW_BUS;
		gqspi_fifo_reg |= GQSPI_IMD_DATA_CS_DEASSERT;
	}

	debug("GFIFO_CMD_CS: 0x%x\n", gqspi_fifo_reg);

	zynqmp_qspi_fill_gen_fifo(priv, gqspi_fifo_reg);
}
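
/*
 * Tune the RX capture path for the effective SCLK rate, which is the
 * reference clock divided by (2 << baudrateval): below 40 MHz only the IOU RX
 * tap-delay bypass is set; between 40 MHz and 100 MHz the bypass is kept and
 * the loopback-clock and data delays are enabled as well; between 100 MHz and
 * 150 MHz only the loopback delay with DLY_0 is used.
 */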
void zynqmp_qspi_set_tapdelay(struct udevice *bus, u32 baudrateval)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 tapdlybypass = 0, lpbkdlyadj = 0, datadlyadj = 0, clk_rate;
	u32 reqhz = 0;

	clk_rate = plat->frequency;
	reqhz = (clk_rate / (GQSPI_BAUD_DIV_SHIFT << baudrateval));

	debug("%s, req_hz:%d, clk_rate:%d, baudrateval:%d\n",
	      __func__, reqhz, clk_rate, baudrateval);

	if (reqhz < GQSPI_FREQ_40MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
	} else if (reqhz < GQSPI_FREQ_100MHZ) {
		zynqmp_mmio_read(IOU_TAPDLY_BYPASS_OFST, &tapdlybypass);
		tapdlybypass |= (TAP_DLY_BYPASS_LQSPI_RX_VALUE <<
				 TAP_DLY_BYPASS_LQSPI_RX_SHIFT);
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= (GQSPI_LPBK_DLY_ADJ_LPBK_MASK);
		datadlyadj = readl(&regs->gqspidlyadj);
		datadlyadj |= ((GQSPI_USE_DATA_DLY << GQSPI_USE_DATA_DLY_SHIFT)
				| (GQSPI_DATA_DLY_ADJ_VALUE <<
				   GQSPI_DATA_DLY_ADJ_SHIFT));
	} else if (reqhz < GQSPI_FREQ_150MHZ) {
		lpbkdlyadj = readl(&regs->lpbkdly);
		lpbkdlyadj |= ((GQSPI_LPBK_DLY_ADJ_LPBK_MASK) |
				GQSPI_LPBK_DLY_ADJ_DLY_0);
	}

	zynqmp_mmio_write(IOU_TAPDLY_BYPASS_OFST, IOU_TAPDLY_BYPASS_MASK,
			  tapdlybypass);
	writel(lpbkdlyadj, &regs->lpbkdly);
	writel(datadlyadj, &regs->gqspidlyadj);
}
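
/*
 * The baud-rate field encodes SCLK = ref_clk / (2 << baud_rate_val). As a
 * worked example (assuming a 300 MHz reference clock, which this driver does
 * not mandate): a 50 MHz request steps baud_rate_val to 2, giving an actual
 * rate of 300 MHz / (2 << 2) = 37.5 MHz, the fastest setting that does not
 * exceed the request.
 */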
static int zynqmp_qspi_set_speed(struct udevice *bus, uint speed)
{
	struct zynqmp_qspi_platdata *plat = bus->platdata;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;
	u8 baud_rate_val = 0;

	debug("%s\n", __func__);

	if (speed > plat->frequency)
		speed = plat->frequency;

	/* Set the clock frequency */
	confr = readl(&regs->confr);
	if (speed == 0) {
		/* Set baudrate x8, if the freq is 0 */
		baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;
	} else if (plat->speed_hz != speed) {
		while ((baud_rate_val < 8) &&
		       ((plat->frequency /
			 (2 << baud_rate_val)) > speed))
			baud_rate_val++;

		if (baud_rate_val > GQSPI_MAX_BAUD_RATE_VAL)
			baud_rate_val = GQSPI_DFLT_BAUD_RATE_VAL;

		plat->speed_hz = plat->frequency / (2 << baud_rate_val);
	}

	confr &= ~GQSPI_BAUD_DIV_MASK;
	confr |= (baud_rate_val << 3);
	writel(confr, &regs->confr);

	zynqmp_qspi_set_tapdelay(bus, baud_rate_val);
	debug("regs=%p, speed=%d\n", priv->regs, plat->speed_hz);

	return 0;
}

static int zynqmp_qspi_probe(struct udevice *bus)
{
	struct zynqmp_qspi_platdata *plat = dev_get_platdata(bus);
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct clk clk;
	unsigned long clock;
	int ret;

	debug("%s: bus:%p, priv:%p\n", __func__, bus, priv);

	priv->regs = plat->regs;
	priv->dma_regs = plat->dma_regs;

	ret = clk_get_by_index(bus, 0, &clk);
	if (ret < 0) {
		dev_err(bus, "failed to get clock\n");
		return ret;
	}

	clock = clk_get_rate(&clk);
	if (IS_ERR_VALUE(clock)) {
		dev_err(bus, "failed to get rate\n");
		return clock;
	}
	debug("%s: CLK %ld\n", __func__, clock);

	ret = clk_enable(&clk);
	if (ret && ret != -ENOSYS) {
		dev_err(bus, "failed to enable clock\n");
		return ret;
	}
	plat->frequency = clock;
	plat->speed_hz = plat->frequency / 2;

	/* init the zynqmp qspi hw */
	zynqmp_qspi_init_hw(priv);

	return 0;
}

static int zynqmp_qspi_set_mode(struct udevice *bus, uint mode)
{
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 confr;

	debug("%s\n", __func__);

	/* Set the SPI Clock phase and polarities */
	confr = readl(&regs->confr);
	confr &= ~(GQSPI_CONFIG_CPHA_MASK |
		   GQSPI_CONFIG_CPOL_MASK);

	if (mode & SPI_CPHA)
		confr |= GQSPI_CONFIG_CPHA_MASK;
	if (mode & SPI_CPOL)
		confr |= GQSPI_CONFIG_CPOL_MASK;

	writel(confr, &regs->confr);

	return 0;
}
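
/*
 * Push 'size' bytes from priv->tx_buf into the TX FIFO, waiting for the
 * not-full flag before each write. Whole 32-bit words are written directly;
 * a 1-3 byte tail is written as a single word with the unused upper bytes
 * padded with ones (via GENMASK), since the generic FIFO entry already
 * carries the real byte count.
 */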
static int zynqmp_qspi_fill_tx_fifo(struct zynqmp_qspi_priv *priv, u32 size)
{
	u32 data;
	int ret = 0;
	struct zynqmp_qspi_regs *regs = priv->regs;
	u32 *buf = (u32 *)priv->tx_buf;
	u32 len = size;

	debug("TxFIFO: 0x%x, size: 0x%x\n", readl(&regs->isr), size);

	while (size) {
		ret = wait_for_bit_le32(&regs->isr, GQSPI_IXR_TXNFULL_MASK, 1,
					GQSPI_TIMEOUT, 1);
		if (ret) {
			printf("%s: Timeout\n", __func__);
			return ret;
		}

		if (size >= 4) {
			writel(*buf, &regs->txd0r);
			buf++;
			size -= 4;
		} else {
			switch (size) {
			case 1:
				data = *((u8 *)buf);
				buf += 1;
				data |= GENMASK(31, 8);
				break;
			case 2:
				data = *((u16 *)buf);
				buf += 2;
				data |= GENMASK(31, 16);
				break;
			case 3:
				data = *((u16 *)buf);
				/* third byte is at byte offset 2; buf is a u32 pointer */
				data |= (*((u8 *)buf + 2) << 16);
				data |= GENMASK(31, 24);
				break;
			}
			writel(data, &regs->txd0r);
			size = 0;
		}
	}

	priv->tx_buf += len;
	return 0;
}

static void zynqmp_qspi_genfifo_cmd(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 bytecount = 0;

	while (priv->len) {
		gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
		gen_fifo_cmd |= GQSPI_GFIFO_TX | GQSPI_SPI_MODE_SPI;
		gen_fifo_cmd |= *(u8 *)priv->tx_buf;
		bytecount++;
		priv->len--;
		priv->tx_buf = (u8 *)priv->tx_buf + 1;

		debug("GFIFO_CMD_Cmd = 0x%x\n", gen_fifo_cmd);

		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);
	}
}
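
/*
 * Encode the remaining length into the generic FIFO command word. Lengths of
 * 255 bytes or less fit directly in the immediate field; larger transfers set
 * the EXP bit and emit one power-of-two chunk per call, peeling off the set
 * bit of priv->len selected on this pass (the search starts at 2^8 = 256).
 */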
static u32 zynqmp_qspi_calc_exp(struct zynqmp_qspi_priv *priv,
				u32 *gen_fifo_cmd)
{
	u32 expval = 8;
	u32 len;

	while (1) {
		if (priv->len > 255) {
			if (priv->len & (1 << expval)) {
				*gen_fifo_cmd &= ~GQSPI_GFIFO_IMD_MASK;
				*gen_fifo_cmd |= GQSPI_GFIFO_EXP_MASK;
				*gen_fifo_cmd |= expval;
				priv->len -= (1 << expval);
				return expval;
			}
			expval++;
		} else {
			*gen_fifo_cmd &= ~(GQSPI_GFIFO_IMD_MASK |
					   GQSPI_GFIFO_EXP_MASK);
			*gen_fifo_cmd |= (u8)priv->len;
			len = (u8)priv->len;
			priv->len = 0;
			return len;
		}
	}
}

static int zynqmp_qspi_genfifo_fill_tx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 len;
	int ret = 0;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= GQSPI_GFIFO_TX |
			GQSPI_GFIFO_DATA_XFR_MASK;
	gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_TX:0x%x\n", gen_fifo_cmd);

		if (gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK)
			ret = zynqmp_qspi_fill_tx_fifo(priv, 1 << len);
		else
			ret = zynqmp_qspi_fill_tx_fifo(priv, len);

		if (ret)
			return ret;
	}
	return ret;
}
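
/*
 * Receive via the destination DMA engine: program the (cache-line rounded)
 * destination address and size, queue generic FIFO RX entries until the whole
 * length is covered, wait for the DMA "done" status, and finally copy the
 * data into priv->rx_buf when a bounce buffer was used.
 */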
static int zynqmp_qspi_start_dma(struct zynqmp_qspi_priv *priv,
				 u32 gen_fifo_cmd, u32 *buf)
{
	unsigned long addr;
	u32 size, len;
	u32 actuallen = priv->len;
	int ret = 0;
	struct zynqmp_qspi_dma_regs *dma_regs = priv->dma_regs;

	writel((unsigned long)buf, &dma_regs->dmadst);
	writel(roundup(priv->len, ARCH_DMA_MINALIGN), &dma_regs->dmasize);
	writel(GQSPI_DMA_DST_I_STS_MASK, &dma_regs->dmaier);
	addr = (unsigned long)buf;
	size = roundup(priv->len, ARCH_DMA_MINALIGN);
	flush_dcache_range(addr, addr + size);

	while (priv->len) {
		len = zynqmp_qspi_calc_exp(priv, &gen_fifo_cmd);
		if (!(gen_fifo_cmd & GQSPI_GFIFO_EXP_MASK) &&
		    (len % ARCH_DMA_MINALIGN)) {
			gen_fifo_cmd &= ~GENMASK(7, 0);
			gen_fifo_cmd |= roundup(len, ARCH_DMA_MINALIGN);
		}
		zynqmp_qspi_fill_gen_fifo(priv, gen_fifo_cmd);

		debug("GFIFO_CMD_RX:0x%x\n", gen_fifo_cmd);
	}

	ret = wait_for_bit_le32(&dma_regs->dmaisr, GQSPI_DMA_DST_I_STS_DONE,
				1, GQSPI_TIMEOUT, 1);
	if (ret) {
		printf("DMA Timeout:0x%x\n", readl(&dma_regs->dmaisr));
		return -ETIMEDOUT;
	}

	writel(GQSPI_DMA_DST_I_STS_DONE, &dma_regs->dmaisr);

	debug("buf:0x%lx, rxbuf:0x%lx, *buf:0x%x len: 0x%x\n",
	      (unsigned long)buf, (unsigned long)priv->rx_buf, *buf,
	      actuallen);

	if (buf != priv->rx_buf)
		memcpy(priv->rx_buf, buf, actuallen);

	return 0;
}

static int zynqmp_qspi_genfifo_fill_rx(struct zynqmp_qspi_priv *priv)
{
	u32 gen_fifo_cmd;
	u32 *buf;
	u32 actuallen = priv->len;

	gen_fifo_cmd = zynqmp_qspi_bus_select(priv);
	gen_fifo_cmd |= GQSPI_GFIFO_RX |
			GQSPI_GFIFO_DATA_XFR_MASK;
	gen_fifo_cmd |= GQSPI_SPI_MODE_SPI;

	/*
	 * Check if the receive buffer is aligned to 4 bytes and the length
	 * is a multiple of 4 bytes, as we are using DMA to receive.
	 */
	if (!((unsigned long)priv->rx_buf & (GQSPI_DMA_ALIGN - 1)) &&
	    !(actuallen % GQSPI_DMA_ALIGN)) {
		buf = (u32 *)priv->rx_buf;
		return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
	}

	ALLOC_CACHE_ALIGN_BUFFER(u8, tmp, roundup(priv->len,
						  GQSPI_DMA_ALIGN));
	buf = (u32 *)tmp;
	return zynqmp_qspi_start_dma(priv, gen_fifo_cmd, buf);
}

static int zynqmp_qspi_start_transfer(struct zynqmp_qspi_priv *priv)
{
	int ret = 0;

	if (priv->is_inst) {
		if (priv->tx_buf)
			zynqmp_qspi_genfifo_cmd(priv);
		else
			return -EINVAL;
	} else {
		if (priv->tx_buf)
			ret = zynqmp_qspi_genfifo_fill_tx(priv);
		else if (priv->rx_buf)
			ret = zynqmp_qspi_genfifo_fill_rx(priv);
		else
			return -EINVAL;
	}

	return ret;
}
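
/*
 * Run one queued request: chip-select is asserted when the previous call
 * finished a message (tracked in a static flag), the buffers are handed to
 * zynqmp_qspi_start_transfer(), and chip-select is released again once the
 * caller has flagged the end of the message via priv->cs_change.
 */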
static int zynqmp_qspi_transfer(struct zynqmp_qspi_priv *priv)
{
	static unsigned int cs_change = 1;
	int status = 0;

	debug("%s\n", __func__);

	while (1) {
		/* Select the chip if required */
		if (cs_change)
			zynqmp_qspi_chipselect(priv, 1);

		cs_change = priv->cs_change;

		if (!priv->tx_buf && !priv->rx_buf && priv->len) {
			status = -EINVAL;
			break;
		}

		/* Request the transfer */
		if (priv->len) {
			status = zynqmp_qspi_start_transfer(priv);
			priv->is_inst = 0;
			if (status < 0)
				break;
		}

		if (cs_change)
			/* Deselect the chip */
			zynqmp_qspi_chipselect(priv, 0);
		break;
	}

	return status;
}

static int zynqmp_qspi_claim_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}

static int zynqmp_qspi_release_bus(struct udevice *dev)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);
	struct zynqmp_qspi_regs *regs = priv->regs;

	writel(~GQSPI_ENABLE_ENABLE_MASK, &regs->enbr);

	return 0;
}
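
/*
 * dm_spi_ops.xfer entry point. The SPI uclass calls this with bitlen in bits
 * (converted to bytes below) and SPI_XFER_BEGIN/SPI_XFER_END flags bracketing
 * a message; a typical flash access arrives as a BEGIN transfer carrying the
 * command/address bytes followed by an END transfer with the data buffer.
 */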
int zynqmp_qspi_xfer(struct udevice *dev, unsigned int bitlen, const void *dout,
		     void *din, unsigned long flags)
{
	struct udevice *bus = dev->parent;
	struct zynqmp_qspi_priv *priv = dev_get_priv(bus);

	debug("%s: priv: 0x%08lx bitlen: %d dout: 0x%08lx ", __func__,
	      (unsigned long)priv, bitlen, (unsigned long)dout);
	debug("din: 0x%08lx flags: 0x%lx\n", (unsigned long)din, flags);

	priv->tx_buf = dout;
	priv->rx_buf = din;
	priv->len = bitlen / 8;

	/*
	 * Assume that the beginning of a transfer with bits to
	 * transmit must contain a device command.
	 */
	if (dout && flags & SPI_XFER_BEGIN)
		priv->is_inst = 1;
	else
		priv->is_inst = 0;

	if (flags & SPI_XFER_END)
		priv->cs_change = 1;
	else
		priv->cs_change = 0;

	zynqmp_qspi_transfer(priv);

	return 0;
}

static const struct dm_spi_ops zynqmp_qspi_ops = {
	.claim_bus	= zynqmp_qspi_claim_bus,
	.release_bus	= zynqmp_qspi_release_bus,
	.xfer		= zynqmp_qspi_xfer,
	.set_speed	= zynqmp_qspi_set_speed,
	.set_mode	= zynqmp_qspi_set_mode,
};

static const struct udevice_id zynqmp_qspi_ids[] = {
	{ .compatible = "xlnx,zynqmp-qspi-1.0" },
	{ }
};

U_BOOT_DRIVER(zynqmp_qspi) = {
	.name	= "zynqmp_qspi",
	.id	= UCLASS_SPI,
	.of_match = zynqmp_qspi_ids,
	.ops	= &zynqmp_qspi_ops,
	.ofdata_to_platdata = zynqmp_qspi_ofdata_to_platdata,
	.platdata_auto_alloc_size = sizeof(struct zynqmp_qspi_platdata),
	.priv_auto_alloc_size = sizeof(struct zynqmp_qspi_priv),
	.probe	= zynqmp_qspi_probe,
};
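
/*
 * For reference, a minimal sketch of the kind of device-tree node this driver
 * binds to, assuming the usual ZynqMP controller base of 0xff0f0000; the node
 * label, clock phandle and flash child below are illustrative only, and a
 * clock at index 0 is required because probe() calls clk_get_by_index():
 *
 *	qspi: spi@ff0f0000 {
 *		compatible = "xlnx,zynqmp-qspi-1.0";
 *		reg = <0x0 0xff0f0000 0x0 0x1000>;
 *		clocks = <&qspi_ref_clk>;
 *		#address-cells = <1>;
 *		#size-cells = <0>;
 *
 *		flash@0 {
 *			compatible = "jedec,spi-nor";
 *			reg = <0>;
 *			spi-max-frequency = <40000000>;
 *		};
 *	};
 */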