mvgbe.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * (C) Copyright 2009
  4. * Marvell Semiconductor <www.marvell.com>
  5. * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
  6. *
  7. * (C) Copyright 2003
  8. * Ingo Assmus <ingo.assmus@keymile.com>
  9. *
  10. * based on - Driver for MV64360X ethernet ports
  11. * Copyright (C) 2002 rabeeh@galileo.co.il
  12. */
  13. #include <common.h>
  14. #include <net.h>
  15. #include <malloc.h>
  16. #include <miiphy.h>
  17. #include <asm/io.h>
  18. #include <linux/errno.h>
  19. #include <asm/types.h>
  20. #include <asm/system.h>
  21. #include <asm/byteorder.h>
  22. #include <asm/arch/cpu.h>
  23. #if defined(CONFIG_KIRKWOOD)
  24. #include <asm/arch/soc.h>
  25. #elif defined(CONFIG_ORION5X)
  26. #include <asm/arch/orion5x.h>
  27. #endif
  28. #include "mvgbe.h"
  29. DECLARE_GLOBAL_DATA_PTR;
  30. #ifndef CONFIG_MVGBE_PORTS
  31. # define CONFIG_MVGBE_PORTS {0, 0}
  32. #endif
  33. #define MV_PHY_ADR_REQUEST 0xee
  34. #define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
  35. #if defined(CONFIG_PHYLIB) || defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
  36. /*
  37. * smi_reg_read - miiphy_read callback function.
  38. *
  39. * Returns 16bit phy register value, or 0xffff on error
  40. */
  41. static int smi_reg_read(struct mii_dev *bus, int phy_adr, int devad,
  42. int reg_ofs)
  43. {
  44. u16 data = 0;
  45. struct eth_device *dev = eth_get_dev_by_name(bus->name);
  46. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  47. struct mvgbe_registers *regs = dmvgbe->regs;
  48. u32 smi_reg;
  49. u32 timeout;
  50. /* Phyadr read request */
  51. if (phy_adr == MV_PHY_ADR_REQUEST &&
  52. reg_ofs == MV_PHY_ADR_REQUEST) {
  53. /* */
  54. data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
  55. return data;
  56. }
  57. /* check parameters */
  58. if (phy_adr > PHYADR_MASK) {
  59. printf("Err..(%s) Invalid PHY address %d\n",
  60. __func__, phy_adr);
  61. return -EFAULT;
  62. }
  63. if (reg_ofs > PHYREG_MASK) {
  64. printf("Err..(%s) Invalid register offset %d\n",
  65. __func__, reg_ofs);
  66. return -EFAULT;
  67. }
  68. timeout = MVGBE_PHY_SMI_TIMEOUT;
  69. /* wait till the SMI is not busy */
  70. do {
  71. /* read smi register */
  72. smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
  73. if (timeout-- == 0) {
  74. printf("Err..(%s) SMI busy timeout\n", __func__);
  75. return -EFAULT;
  76. }
  77. } while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
  78. /* fill the phy address and regiser offset and read opcode */
  79. smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
  80. | (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
  81. | MVGBE_PHY_SMI_OPCODE_READ;
  82. /* write the smi register */
  83. MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
  84. /*wait till read value is ready */
  85. timeout = MVGBE_PHY_SMI_TIMEOUT;
  86. do {
  87. /* read smi register */
  88. smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
  89. if (timeout-- == 0) {
  90. printf("Err..(%s) SMI read ready timeout\n",
  91. __func__);
  92. return -EFAULT;
  93. }
  94. } while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));
  95. /* Wait for the data to update in the SMI register */
  96. for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
  97. ;
  98. data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);
  99. debug("%s:(adr %d, off %d) value= %04x\n", __func__, phy_adr, reg_ofs,
  100. data);
  101. return data;
  102. }
/*
 * smi_reg_write - miiphy_write callback function.
 *
 * Writes a 16bit value to a PHY register via the SMI interface, or —
 * when both phy_adr and reg_ofs equal MV_PHY_ADR_REQUEST — programs a
 * new PHY address into the port instead.
 *
 * Returns 0 if write succeed, -EINVAL on bad parameters
 * -ETIME on timeout
 */
static int smi_reg_write(struct mii_dev *bus, int phy_adr, int devad,
			 int reg_ofs, u16 data)
{
	struct eth_device *dev = eth_get_dev_by_name(bus->name);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/*
	 * Phyadr write request: the magic address/offset pair selects the
	 * port's PHY-address register rather than a real PHY register.
	 */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
	    reg_ofs == MV_PHY_ADR_REQUEST) {
		MVGBE_REG_WR(regs->phyadr, data);
		return 0;
	}

	/* check parameters */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid phy address\n", __func__);
		return -EINVAL;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset\n", __func__);
		return -EINVAL;
	}

	/* wait till the SMI is not busy */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __func__);
			return -ETIME;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);

	/* fill the phy addr and reg offset and write opcode and data */
	smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
	smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
	/* a cleared opcode bit selects the SMI write operation */
	smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;

	/*
	 * write the smi register
	 * NOTE(review): completion of this write is not polled here; it
	 * appears to be covered by the busy-wait at the start of the next
	 * SMI transaction — confirm against the SMI controller spec.
	 */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);

	return 0;
}
  151. #endif
  152. /* Stop and checks all queues */
  153. static void stop_queue(u32 * qreg)
  154. {
  155. u32 reg_data;
  156. reg_data = readl(qreg);
  157. if (reg_data & 0xFF) {
  158. /* Issue stop command for active channels only */
  159. writel((reg_data << 8), qreg);
  160. /* Wait for all queue activity to terminate. */
  161. do {
  162. /*
  163. * Check port cause register that all queues
  164. * are stopped
  165. */
  166. reg_data = readl(qreg);
  167. }
  168. while (reg_data & 0xFF);
  169. }
  170. }
/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according the given parameters struct.
 *
 * @regs	Register struct pointer.
 * @param	Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register: two permission bits per window */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear window permission */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR): encoded as (size/64KiB - 1) << 16 */
	MVGBE_REG_WR(regs->barsz[param->win].size,
			(((param->size / 0x10000) - 1) << 16));

	/* Set window Base address reg (BA): target | attributes | base */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
			(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR): only windows 0-3 have one */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);

	/*
	 * Base address enable reg (BARER): a CLEARED bit enables the
	 * window, so the bit is reset to enable and set to disable.
	 */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}
  205. static void set_dram_access(struct mvgbe_registers *regs)
  206. {
  207. struct mvgbe_winparam win_param;
  208. int i;
  209. for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
  210. /* Set access parameters for DRAM bank i */
  211. win_param.win = i; /* Use Ethernet window i */
  212. /* Window target - DDR */
  213. win_param.target = MVGBE_TARGET_DRAM;
  214. /* Enable full access */
  215. win_param.access_ctrl = EWIN_ACCESS_FULL;
  216. win_param.high_addr = 0;
  217. /* Get bank base and size */
  218. win_param.base_addr = gd->bd->bi_dram[i].start;
  219. win_param.size = gd->bd->bi_dram[i].size;
  220. if (win_param.size == 0)
  221. win_param.enable = 0;
  222. else
  223. win_param.enable = 1; /* Enable the access */
  224. /* Enable DRAM bank */
  225. switch (i) {
  226. case 0:
  227. win_param.attrib = EBAR_DRAM_CS0;
  228. break;
  229. case 1:
  230. win_param.attrib = EBAR_DRAM_CS1;
  231. break;
  232. case 2:
  233. win_param.attrib = EBAR_DRAM_CS2;
  234. break;
  235. case 3:
  236. win_param.attrib = EBAR_DRAM_CS3;
  237. break;
  238. default:
  239. /* invalid bank, disable access */
  240. win_param.enable = 0;
  241. win_param.attrib = 0;
  242. break;
  243. }
  244. /* Set the access control for address window(EPAPR) RD/WR */
  245. set_access_control(regs, &win_param);
  246. }
  247. }
  248. /*
  249. * port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
  250. *
  251. * Go through all the DA filter tables (Unicast, Special Multicast & Other
  252. * Multicast) and set each entry to 0.
  253. */
  254. static void port_init_mac_tables(struct mvgbe_registers *regs)
  255. {
  256. int table_index;
  257. /* Clear DA filter unicast table (Ex_dFUT) */
  258. for (table_index = 0; table_index < 4; ++table_index)
  259. MVGBE_REG_WR(regs->dfut[table_index], 0);
  260. for (table_index = 0; table_index < 64; ++table_index) {
  261. /* Clear DA filter special multicast table (Ex_dFSMT) */
  262. MVGBE_REG_WR(regs->dfsmt[table_index], 0);
  263. /* Clear DA filter other multicast table (Ex_dFOMT) */
  264. MVGBE_REG_WR(regs->dfomt[table_index], 0);
  265. }
  266. }
  267. /*
  268. * port_uc_addr - This function Set the port unicast address table
  269. *
  270. * This function locates the proper entry in the Unicast table for the
  271. * specified MAC nibble and sets its properties according to function
  272. * parameters.
  273. * This function add/removes MAC addresses from the port unicast address
  274. * table.
  275. *
  276. * @uc_nibble Unicast MAC Address last nibble.
  277. * @option 0 = Add, 1 = remove address.
  278. *
  279. * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
  280. */
  281. static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
  282. int option)
  283. {
  284. u32 unicast_reg;
  285. u32 tbl_offset;
  286. u32 reg_offset;
  287. /* Locate the Unicast table entry */
  288. uc_nibble = (0xf & uc_nibble);
  289. /* Register offset from unicast table base */
  290. tbl_offset = (uc_nibble / 4);
  291. /* Entry offset within the above register */
  292. reg_offset = uc_nibble % 4;
  293. switch (option) {
  294. case REJECT_MAC_ADDR:
  295. /*
  296. * Clear accepts frame bit at specified unicast
  297. * DA table entry
  298. */
  299. unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
  300. unicast_reg &= (0xFF << (8 * reg_offset));
  301. MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
  302. break;
  303. case ACCEPT_MAC_ADDR:
  304. /* Set accepts frame bit at unicast DA filter table entry */
  305. unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
  306. unicast_reg &= (0xFF << (8 * reg_offset));
  307. unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
  308. MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
  309. break;
  310. default:
  311. return 0;
  312. }
  313. return 1;
  314. }
  315. /*
  316. * port_uc_addr_set - This function Set the port Unicast address.
  317. */
  318. static void port_uc_addr_set(struct mvgbe_registers *regs, u8 * p_addr)
  319. {
  320. u32 mac_h;
  321. u32 mac_l;
  322. mac_l = (p_addr[4] << 8) | (p_addr[5]);
  323. mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
  324. (p_addr[3] << 0);
  325. MVGBE_REG_WR(regs->macal, mac_l);
  326. MVGBE_REG_WR(regs->macah, mac_h);
  327. /* Accept frames of this address */
  328. port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
  329. }
  330. /*
  331. * mvgbe_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
  332. */
  333. static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
  334. {
  335. struct mvgbe_rxdesc *p_rx_desc;
  336. int i;
  337. /* initialize the Rx descriptors ring */
  338. p_rx_desc = dmvgbe->p_rxdesc;
  339. for (i = 0; i < RINGSZ; i++) {
  340. p_rx_desc->cmd_sts =
  341. MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
  342. p_rx_desc->buf_size = PKTSIZE_ALIGN;
  343. p_rx_desc->byte_cnt = 0;
  344. p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
  345. if (i == (RINGSZ - 1))
  346. p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
  347. else {
  348. p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
  349. ((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
  350. p_rx_desc = p_rx_desc->nxtdesc_p;
  351. }
  352. }
  353. dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
  354. }
/*
 * mvgbe_init - bring the port up.
 *
 * Sets up the Rx descriptor ring, interrupt masks, DRAM decode windows,
 * MAC filter tables and serial/SDMA configuration, then enables Rx DMA.
 *
 * Returns 0 on success. When legacy MII link polling is compiled in and
 * no link comes up within ~5 seconds, returns -1.
 */
static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
			| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism .
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined(CONFIG_MII) || defined(CONFIG_CMD_MII)) && \
	!defined(CONFIG_PHYLIB) && \
	defined(CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status (one 1s delay per attempt) */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		/* Query the port for its programmed PHY address */
		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}

	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}
/*
 * mvgbe_halt - take the port down.
 *
 * Disables the address decode windows, stops the Tx/Rx queues, disables
 * the serial port and masks/clears all port interrupts. Always returns 0.
 */
static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder */
	MVGBE_REG_WR(regs->bare, 0x3f);

	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MMI interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);

	return 0;
}
  444. static int mvgbe_write_hwaddr(struct eth_device *dev)
  445. {
  446. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  447. struct mvgbe_registers *regs = dmvgbe->regs;
  448. /* Programs net device MAC address after initialization */
  449. port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
  450. return 0;
  451. }
  452. static int mvgbe_send(struct eth_device *dev, void *dataptr, int datasize)
  453. {
  454. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  455. struct mvgbe_registers *regs = dmvgbe->regs;
  456. struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
  457. void *p = (void *)dataptr;
  458. u32 cmd_sts;
  459. u32 txuq0_reg_addr;
  460. /* Copy buffer if it's misaligned */
  461. if ((u32) dataptr & 0x07) {
  462. if (datasize > PKTSIZE_ALIGN) {
  463. printf("Non-aligned data too large (%d)\n",
  464. datasize);
  465. return -1;
  466. }
  467. memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
  468. p = dmvgbe->p_aligned_txbuf;
  469. }
  470. p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
  471. p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
  472. p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
  473. p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
  474. p_txdesc->buf_ptr = (u8 *) p;
  475. p_txdesc->byte_cnt = datasize;
  476. /* Set this tc desc as zeroth TXUQ */
  477. txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
  478. writel((u32) p_txdesc, txuq0_reg_addr);
  479. /* ensure tx desc writes above are performed before we start Tx DMA */
  480. isb();
  481. /* Apply send command using zeroth TXUQ */
  482. MVGBE_REG_WR(regs->tqc, (1 << TXUQ));
  483. /*
  484. * wait for packet xmit completion
  485. */
  486. cmd_sts = readl(&p_txdesc->cmd_sts);
  487. while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
  488. /* return fail if error is detected */
  489. if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
  490. (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
  491. cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
  492. printf("Err..(%s) in xmit packet\n", __func__);
  493. return -1;
  494. }
  495. cmd_sts = readl(&p_txdesc->cmd_sts);
  496. };
  497. return 0;
  498. }
/*
 * mvgbe_recv - poll the current Rx descriptor for one received frame.
 *
 * Waits (bounded) for the DMA engine to release the current descriptor,
 * drops frames that span multiple descriptors or carry the error-summary
 * bit, passes good frames up the network stack, then recycles the
 * descriptor and advances the ring cursor.
 *
 * Returns 0 when a descriptor was processed (even if the frame was
 * dropped), -1 on timeout.
 */
static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/* wait until rx packet available or timeout */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __func__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__func__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}

	/*
	 * In case received a packet without first/last bits on
	 * OR the error summary bit is on,
	 * the packet needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);

	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {

		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __func__);

	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {

		printf("Err..(%s) Dropping packet with errors\n",
			__func__);

	} else {
		/* good frame: call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (net_process_received_packet)\n",
			__func__);

		/* let the upper layer handle the packet */
		net_process_received_packet((p_rxdesc_curr->buf_ptr +
					     RX_BUF_OFFSET),
					    (int)(p_rxdesc_curr->byte_cnt -
						  RX_BUF_OFFSET));
	}
	/*
	 * free this descriptor (hand it back to the DMA engine) and
	 * advance the ring's current-descriptor cursor
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}
  557. #if defined(CONFIG_PHYLIB)
  558. int mvgbe_phylib_init(struct eth_device *dev, int phyid)
  559. {
  560. struct mii_dev *bus;
  561. struct phy_device *phydev;
  562. int ret;
  563. bus = mdio_alloc();
  564. if (!bus) {
  565. printf("mdio_alloc failed\n");
  566. return -ENOMEM;
  567. }
  568. bus->read = smi_reg_read;
  569. bus->write = smi_reg_write;
  570. strcpy(bus->name, dev->name);
  571. ret = mdio_register(bus);
  572. if (ret) {
  573. printf("mdio_register failed\n");
  574. free(bus);
  575. return -ENOMEM;
  576. }
  577. /* Set phy address of the port */
  578. smi_reg_write(bus, MV_PHY_ADR_REQUEST, 0, MV_PHY_ADR_REQUEST, phyid);
  579. phydev = phy_connect(bus, phyid, dev, PHY_INTERFACE_MODE_RGMII);
  580. if (!phydev) {
  581. printf("phy_connect failed\n");
  582. return -ENODEV;
  583. }
  584. phy_config(phydev);
  585. phy_startup(phydev);
  586. return 0;
  587. }
  588. #endif
/*
 * mvgbe_initialize - probe and register all configured GbE ports.
 *
 * For each port enabled in CONFIG_MVGBE_PORTS: allocates the device
 * struct plus Rx/Tx descriptor and buffer memory, registers the
 * eth_device with the network core, and sets up its PHY (via phylib
 * or the legacy MII interface, depending on configuration).
 *
 * Returns 0 on success, -1 (or a negative errno from MDIO setup) on
 * failure.
 */
int mvgbe_initialize(bd_t *bis)
{
	struct mvgbe_device *dmvgbe;
	struct eth_device *dev;
	int devnum;
	u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;

	for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
		/*skip if port is configured not to use */
		if (used_ports[devnum] == 0)
			continue;

		dmvgbe = malloc(sizeof(struct mvgbe_device));
		if (!dmvgbe)
			goto error1;

		memset(dmvgbe, 0, sizeof(struct mvgbe_device));

		dmvgbe->p_rxdesc =
			(struct mvgbe_rxdesc *)memalign(PKTALIGN,
			MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);
		if (!dmvgbe->p_rxdesc)
			goto error2;

		dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
			RINGSZ*PKTSIZE_ALIGN + 1);
		if (!dmvgbe->p_rxbuf)
			goto error3;

		dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
		if (!dmvgbe->p_aligned_txbuf)
			goto error4;

		dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
			PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);

		if (!dmvgbe->p_txdesc) {
			free(dmvgbe->p_aligned_txbuf);
			/*
			 * Unwind ladder: each label frees everything
			 * allocated before the failed step — the gotos
			 * above jump INTO this if-block deliberately.
			 */
error4:
			free(dmvgbe->p_rxbuf);
error3:
			free(dmvgbe->p_rxdesc);
error2:
			free(dmvgbe);
error1:
			printf("Err.. %s Failed to allocate memory\n",
				__func__);
			return -1;
		}

		dev = &dmvgbe->dev;

		/* must be less than sizeof(dev->name) */
		sprintf(dev->name, "egiga%d", devnum);

		/* Map the device number to its register base */
		switch (devnum) {
		case 0:
			dmvgbe->regs = (void *)MVGBE0_BASE;
			break;
#if defined(MVGBE1_BASE)
		case 1:
			dmvgbe->regs = (void *)MVGBE1_BASE;
			break;
#endif
		default:	/* this should never happen */
			printf("Err..(%s) Invalid device number %d\n",
				__func__, devnum);
			return -1;
		}

		dev->init = (void *)mvgbe_init;
		dev->halt = (void *)mvgbe_halt;
		dev->send = (void *)mvgbe_send;
		dev->recv = (void *)mvgbe_recv;
		dev->write_hwaddr = (void *)mvgbe_write_hwaddr;

		eth_register(dev);

#if defined(CONFIG_PHYLIB)
		mvgbe_phylib_init(dev, PHY_BASE_ADR + devnum);
#elif defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
		/* Legacy MII: register a raw MDIO bus for this port */
		int retval;
		struct mii_dev *mdiodev = mdio_alloc();
		if (!mdiodev)
			return -ENOMEM;
		strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
		mdiodev->read = smi_reg_read;
		mdiodev->write = smi_reg_write;

		retval = mdio_register(mdiodev);
		if (retval < 0)
			return retval;
		/* Set phy address of the port */
		miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
#endif
	}
	return 0;
}