mvgbe.c 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739
  1. /*
  2. * (C) Copyright 2009
  3. * Marvell Semiconductor <www.marvell.com>
  4. * Written-by: Prafulla Wadaskar <prafulla@marvell.com>
  5. *
  6. * (C) Copyright 2003
  7. * Ingo Assmus <ingo.assmus@keymile.com>
  8. *
  9. * based on - Driver for MV64360X ethernet ports
  10. * Copyright (C) 2002 rabeeh@galileo.co.il
  11. *
  12. * See file CREDITS for list of people who contributed to this
  13. * project.
  14. *
  15. * This program is free software; you can redistribute it and/or
  16. * modify it under the terms of the GNU General Public License as
  17. * published by the Free Software Foundation; either version 2 of
  18. * the License, or (at your option) any later version.
  19. *
  20. * This program is distributed in the hope that it will be useful,
  21. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  22. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  23. * GNU General Public License for more details.
  24. *
  25. * You should have received a copy of the GNU General Public License
  26. * along with this program; if not, write to the Free Software
  27. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
  28. * MA 02110-1301 USA
  29. */
  30. #include <common.h>
  31. #include <net.h>
  32. #include <malloc.h>
  33. #include <miiphy.h>
  34. #include <asm/io.h>
  35. #include <asm/errno.h>
  36. #include <asm/types.h>
  37. #include <asm/system.h>
  38. #include <asm/byteorder.h>
  39. #include <asm/arch/cpu.h>
  40. #if defined(CONFIG_KIRKWOOD)
  41. #include <asm/arch/kirkwood.h>
  42. #elif defined(CONFIG_ORION5X)
  43. #include <asm/arch/orion5x.h>
  44. #endif
  45. #include "mvgbe.h"
  46. DECLARE_GLOBAL_DATA_PTR;
  47. #define MV_PHY_ADR_REQUEST 0xee
  48. #define MVGBE_SMI_REG (((struct mvgbe_registers *)MVGBE0_BASE)->smi)
/*
 * smi_reg_read - miiphy_read callback function.
 *
 * Reads one 16-bit PHY register through the controller's SMI (MDIO)
 * interface. The special address pair (MV_PHY_ADR_REQUEST,
 * MV_PHY_ADR_REQUEST) instead returns the PHY address currently
 * programmed into the port's phyadr register.
 *
 * Returns 16bit phy register value, or 0xffff on error
 */
static int smi_reg_read(const char *devname, u8 phy_adr, u8 reg_ofs, u16 * data)
{
	struct eth_device *dev = eth_get_dev_by_name(devname);
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	u32 smi_reg;
	u32 timeout;

	/* Phyadr read request: report the configured PHY address */
	if (phy_adr == MV_PHY_ADR_REQUEST &&
			reg_ofs == MV_PHY_ADR_REQUEST) {
		*data = (u16) (MVGBE_REG_RD(regs->phyadr) & PHYADR_MASK);
		return 0;
	}
	/* check parameters against the 5-bit address/register fields */
	if (phy_adr > PHYADR_MASK) {
		printf("Err..(%s) Invalid PHY address %d\n",
			__FUNCTION__, phy_adr);
		return -EFAULT;
	}
	if (reg_ofs > PHYREG_MASK) {
		printf("Err..(%s) Invalid register offset %d\n",
			__FUNCTION__, reg_ofs);
		return -EFAULT;
	}
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	/* wait till the SMI is not busy */
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
			return -EFAULT;
		}
	} while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
	/* fill the phy address and register offset and read opcode */
	smi_reg = (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
		| (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS)
		| MVGBE_PHY_SMI_OPCODE_READ;
	/* write the smi register to start the read transaction */
	MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
	/* wait till read value is ready */
	timeout = MVGBE_PHY_SMI_TIMEOUT;
	do {
		/* read smi register */
		smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
		if (timeout-- == 0) {
			printf("Err..(%s) SMI read ready timeout\n",
				__FUNCTION__);
			return -EFAULT;
		}
	} while (!(smi_reg & MVGBE_PHY_SMI_READ_VALID_MASK));
	/*
	 * Wait for the data to update in the SMI register.
	 * NOTE(review): this empty counting loop has no side effects and no
	 * volatile qualifier, so an optimizing compiler may remove it
	 * entirely — confirm whether a real delay (udelay) is required here.
	 */
	for (timeout = 0; timeout < MVGBE_PHY_SMI_TIMEOUT; timeout++)
		;
	*data = (u16) (MVGBE_REG_RD(MVGBE_SMI_REG) & MVGBE_PHY_SMI_DATA_MASK);
	debug("%s:(adr %d, off %d) value= %04x\n", __FUNCTION__, phy_adr,
		reg_ofs, *data);
	return 0;
}
  114. /*
  115. * smi_reg_write - imiiphy_write callback function.
  116. *
  117. * Returns 0 if write succeed, -EINVAL on bad parameters
  118. * -ETIME on timeout
  119. */
  120. static int smi_reg_write(const char *devname, u8 phy_adr, u8 reg_ofs, u16 data)
  121. {
  122. struct eth_device *dev = eth_get_dev_by_name(devname);
  123. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  124. struct mvgbe_registers *regs = dmvgbe->regs;
  125. u32 smi_reg;
  126. u32 timeout;
  127. /* Phyadr write request*/
  128. if (phy_adr == MV_PHY_ADR_REQUEST &&
  129. reg_ofs == MV_PHY_ADR_REQUEST) {
  130. MVGBE_REG_WR(regs->phyadr, data);
  131. return 0;
  132. }
  133. /* check parameters */
  134. if (phy_adr > PHYADR_MASK) {
  135. printf("Err..(%s) Invalid phy address\n", __FUNCTION__);
  136. return -EINVAL;
  137. }
  138. if (reg_ofs > PHYREG_MASK) {
  139. printf("Err..(%s) Invalid register offset\n", __FUNCTION__);
  140. return -EINVAL;
  141. }
  142. /* wait till the SMI is not busy */
  143. timeout = MVGBE_PHY_SMI_TIMEOUT;
  144. do {
  145. /* read smi register */
  146. smi_reg = MVGBE_REG_RD(MVGBE_SMI_REG);
  147. if (timeout-- == 0) {
  148. printf("Err..(%s) SMI busy timeout\n", __FUNCTION__);
  149. return -ETIME;
  150. }
  151. } while (smi_reg & MVGBE_PHY_SMI_BUSY_MASK);
  152. /* fill the phy addr and reg offset and write opcode and data */
  153. smi_reg = (data << MVGBE_PHY_SMI_DATA_OFFS);
  154. smi_reg |= (phy_adr << MVGBE_PHY_SMI_DEV_ADDR_OFFS)
  155. | (reg_ofs << MVGBE_SMI_REG_ADDR_OFFS);
  156. smi_reg &= ~MVGBE_PHY_SMI_OPCODE_READ;
  157. /* write the smi register */
  158. MVGBE_REG_WR(MVGBE_SMI_REG, smi_reg);
  159. return 0;
  160. }
  161. /* Stop and checks all queues */
  162. static void stop_queue(u32 * qreg)
  163. {
  164. u32 reg_data;
  165. reg_data = readl(qreg);
  166. if (reg_data & 0xFF) {
  167. /* Issue stop command for active channels only */
  168. writel((reg_data << 8), qreg);
  169. /* Wait for all queue activity to terminate. */
  170. do {
  171. /*
  172. * Check port cause register that all queues
  173. * are stopped
  174. */
  175. reg_data = readl(qreg);
  176. }
  177. while (reg_data & 0xFF);
  178. }
  179. }
/*
 * set_access_control - Config address decode parameters for Ethernet unit
 *
 * This function configures the address decode parameters for the Gigabit
 * Ethernet Controller according the given parameters struct: access
 * protection, window size/base/attributes, optional high-address remap,
 * and finally the window enable bit.
 *
 * @regs Register struct pointer.
 * @param Address decode parameter struct.
 */
static void set_access_control(struct mvgbe_registers *regs,
				struct mvgbe_winparam *param)
{
	u32 access_prot_reg;

	/* Set access control register: 2 bits per window in EPAP */
	access_prot_reg = MVGBE_REG_RD(regs->epap);
	/* clear this window's permission bits before setting new ones */
	access_prot_reg &= (~(3 << (param->win * 2)));
	access_prot_reg |= (param->access_ctrl << (param->win * 2));
	MVGBE_REG_WR(regs->epap, access_prot_reg);

	/* Set window Size reg (SR): size field is in 64KB units minus one */
	MVGBE_REG_WR(regs->barsz[param->win].size,
		(((param->size / 0x10000) - 1) << 16));
	/* Set window Base address reg (BA): target | attributes | base */
	MVGBE_REG_WR(regs->barsz[param->win].bar,
		(param->target | param->attrib | param->base_addr));
	/* High address remap reg (HARR): only windows 0-3 support remap */
	if (param->win < 4)
		MVGBE_REG_WR(regs->ha_remap[param->win], param->high_addr);
	/*
	 * Base address enable reg (BARER): a cleared bit ENABLES the
	 * window, so enable==1 resets the bit and enable!=1 sets it.
	 */
	if (param->enable == 1)
		MVGBE_REG_BITS_RESET(regs->bare, (1 << param->win));
	else
		MVGBE_REG_BITS_SET(regs->bare, (1 << param->win));
}
  214. static void set_dram_access(struct mvgbe_registers *regs)
  215. {
  216. struct mvgbe_winparam win_param;
  217. int i;
  218. for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
  219. /* Set access parameters for DRAM bank i */
  220. win_param.win = i; /* Use Ethernet window i */
  221. /* Window target - DDR */
  222. win_param.target = MVGBE_TARGET_DRAM;
  223. /* Enable full access */
  224. win_param.access_ctrl = EWIN_ACCESS_FULL;
  225. win_param.high_addr = 0;
  226. /* Get bank base and size */
  227. win_param.base_addr = gd->bd->bi_dram[i].start;
  228. win_param.size = gd->bd->bi_dram[i].size;
  229. if (win_param.size == 0)
  230. win_param.enable = 0;
  231. else
  232. win_param.enable = 1; /* Enable the access */
  233. /* Enable DRAM bank */
  234. switch (i) {
  235. case 0:
  236. win_param.attrib = EBAR_DRAM_CS0;
  237. break;
  238. case 1:
  239. win_param.attrib = EBAR_DRAM_CS1;
  240. break;
  241. case 2:
  242. win_param.attrib = EBAR_DRAM_CS2;
  243. break;
  244. case 3:
  245. win_param.attrib = EBAR_DRAM_CS3;
  246. break;
  247. default:
  248. /* invalid bank, disable access */
  249. win_param.enable = 0;
  250. win_param.attrib = 0;
  251. break;
  252. }
  253. /* Set the access control for address window(EPAPR) RD/WR */
  254. set_access_control(regs, &win_param);
  255. }
  256. }
  257. /*
  258. * port_init_mac_tables - Clear all entrance in the UC, SMC and OMC tables
  259. *
  260. * Go through all the DA filter tables (Unicast, Special Multicast & Other
  261. * Multicast) and set each entry to 0.
  262. */
  263. static void port_init_mac_tables(struct mvgbe_registers *regs)
  264. {
  265. int table_index;
  266. /* Clear DA filter unicast table (Ex_dFUT) */
  267. for (table_index = 0; table_index < 4; ++table_index)
  268. MVGBE_REG_WR(regs->dfut[table_index], 0);
  269. for (table_index = 0; table_index < 64; ++table_index) {
  270. /* Clear DA filter special multicast table (Ex_dFSMT) */
  271. MVGBE_REG_WR(regs->dfsmt[table_index], 0);
  272. /* Clear DA filter other multicast table (Ex_dFOMT) */
  273. MVGBE_REG_WR(regs->dfomt[table_index], 0);
  274. }
  275. }
  276. /*
  277. * port_uc_addr - This function Set the port unicast address table
  278. *
  279. * This function locates the proper entry in the Unicast table for the
  280. * specified MAC nibble and sets its properties according to function
  281. * parameters.
  282. * This function add/removes MAC addresses from the port unicast address
  283. * table.
  284. *
  285. * @uc_nibble Unicast MAC Address last nibble.
  286. * @option 0 = Add, 1 = remove address.
  287. *
  288. * RETURN: 1 if output succeeded. 0 if option parameter is invalid.
  289. */
  290. static int port_uc_addr(struct mvgbe_registers *regs, u8 uc_nibble,
  291. int option)
  292. {
  293. u32 unicast_reg;
  294. u32 tbl_offset;
  295. u32 reg_offset;
  296. /* Locate the Unicast table entry */
  297. uc_nibble = (0xf & uc_nibble);
  298. /* Register offset from unicast table base */
  299. tbl_offset = (uc_nibble / 4);
  300. /* Entry offset within the above register */
  301. reg_offset = uc_nibble % 4;
  302. switch (option) {
  303. case REJECT_MAC_ADDR:
  304. /*
  305. * Clear accepts frame bit at specified unicast
  306. * DA table entry
  307. */
  308. unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
  309. unicast_reg &= (0xFF << (8 * reg_offset));
  310. MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
  311. break;
  312. case ACCEPT_MAC_ADDR:
  313. /* Set accepts frame bit at unicast DA filter table entry */
  314. unicast_reg = MVGBE_REG_RD(regs->dfut[tbl_offset]);
  315. unicast_reg &= (0xFF << (8 * reg_offset));
  316. unicast_reg |= ((0x01 | (RXUQ << 1)) << (8 * reg_offset));
  317. MVGBE_REG_WR(regs->dfut[tbl_offset], unicast_reg);
  318. break;
  319. default:
  320. return 0;
  321. }
  322. return 1;
  323. }
  324. /*
  325. * port_uc_addr_set - This function Set the port Unicast address.
  326. */
  327. static void port_uc_addr_set(struct mvgbe_registers *regs, u8 * p_addr)
  328. {
  329. u32 mac_h;
  330. u32 mac_l;
  331. mac_l = (p_addr[4] << 8) | (p_addr[5]);
  332. mac_h = (p_addr[0] << 24) | (p_addr[1] << 16) | (p_addr[2] << 8) |
  333. (p_addr[3] << 0);
  334. MVGBE_REG_WR(regs->macal, mac_l);
  335. MVGBE_REG_WR(regs->macah, mac_h);
  336. /* Accept frames of this address */
  337. port_uc_addr(regs, p_addr[5], ACCEPT_MAC_ADDR);
  338. }
  339. /*
  340. * mvgbe_init_rx_desc_ring - Curve a Rx chain desc list and buffer in memory.
  341. */
  342. static void mvgbe_init_rx_desc_ring(struct mvgbe_device *dmvgbe)
  343. {
  344. struct mvgbe_rxdesc *p_rx_desc;
  345. int i;
  346. /* initialize the Rx descriptors ring */
  347. p_rx_desc = dmvgbe->p_rxdesc;
  348. for (i = 0; i < RINGSZ; i++) {
  349. p_rx_desc->cmd_sts =
  350. MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
  351. p_rx_desc->buf_size = PKTSIZE_ALIGN;
  352. p_rx_desc->byte_cnt = 0;
  353. p_rx_desc->buf_ptr = dmvgbe->p_rxbuf + i * PKTSIZE_ALIGN;
  354. if (i == (RINGSZ - 1))
  355. p_rx_desc->nxtdesc_p = dmvgbe->p_rxdesc;
  356. else {
  357. p_rx_desc->nxtdesc_p = (struct mvgbe_rxdesc *)
  358. ((u32) p_rx_desc + MV_RXQ_DESC_ALIGNED_SIZE);
  359. p_rx_desc = p_rx_desc->nxtdesc_p;
  360. }
  361. }
  362. dmvgbe->p_rxdesc_curr = dmvgbe->p_rxdesc;
  363. }
/*
 * mvgbe_init - eth_device init callback: bring the port up.
 *
 * Sets up the RX descriptor ring, clears/unmasks interrupts, programs
 * address windows, MAC filter tables and the unicast address, configures
 * port/SDMA registers, then enables the port and its receive queue.
 * When MII link-fault echo is configured it additionally waits up to 5s
 * for PHY link-up.
 *
 * Returns 0 on success, -1 when no link was established in time.
 */
static int mvgbe_init(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
	&& defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	int i;
#endif
	/* setup RX rings */
	mvgbe_init_rx_desc_ring(dmvgbe);

	/* Clear the ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	/* Unmask RX buffer and TX end interrupt */
	MVGBE_REG_WR(regs->pim, INT_CAUSE_UNMASK_ALL);
	/* Unmask phy and link status changes interrupts */
	MVGBE_REG_WR(regs->peim, INT_CAUSE_UNMASK_ALL_EXT);

	/* Address windows, filter tables, unicast MAC */
	set_dram_access(regs);
	port_init_mac_tables(regs);
	port_uc_addr_set(regs, dmvgbe->dev.enetaddr);

	/* Assign port configuration and command. */
	MVGBE_REG_WR(regs->pxc, PRT_CFG_VAL);
	MVGBE_REG_WR(regs->pxcx, PORT_CFG_EXTEND_VALUE);
	MVGBE_REG_WR(regs->psc0, PORT_SERIAL_CONTROL_VALUE);

	/* Assign port SDMA configuration */
	MVGBE_REG_WR(regs->sdc, PORT_SDMA_CFG_VALUE);
	MVGBE_REG_WR(regs->tqx[0].qxttbc, QTKNBKT_DEF_VAL);
	MVGBE_REG_WR(regs->tqx[0].tqxtbc,
		(QMTBS_DEF_VAL << 16) | QTKNRT_DEF_VAL);
	/* Turn off the port/RXUQ bandwidth limitation */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Set maximum receive buffer to 9700 bytes */
	MVGBE_REG_WR(regs->psc0, MVGBE_MAX_RX_PACKET_9700BYTE
		| (MVGBE_REG_RD(regs->psc0) & MRU_MASK));

	/* Enable port initially */
	MVGBE_REG_BITS_SET(regs->psc0, MVGBE_SERIAL_PORT_EN);

	/*
	 * Set ethernet MTU for leaky bucket mechanism to 0 - this will
	 * disable the leaky bucket mechanism .
	 * NOTE(review): pmtu was already written to 0 above; this second
	 * write looks redundant — confirm before removing.
	 */
	MVGBE_REG_WR(regs->pmtu, 0);

	/* Assignment of Rx CRDB of given RXUQ */
	MVGBE_REG_WR(regs->rxcdp[RXUQ], (u32) dmvgbe->p_rxdesc_curr);
	/* ensure previous write is done before enabling Rx DMA */
	isb();
	/* Enable port Rx. */
	MVGBE_REG_WR(regs->rqc, (1 << RXUQ));

#if (defined (CONFIG_MII) || defined (CONFIG_CMD_MII)) \
	&& defined (CONFIG_SYS_FAULT_ECHO_LINK_DOWN)
	/* Wait up to 5s for the link status (1s per attempt) */
	for (i = 0; i < 5; i++) {
		u16 phyadr;

		/* Query the port's configured PHY address, then its link */
		miiphy_read(dev->name, MV_PHY_ADR_REQUEST,
				MV_PHY_ADR_REQUEST, &phyadr);
		/* Return if we get link up */
		if (miiphy_link(dev->name, phyadr))
			return 0;
		udelay(1000000);
	}
	printf("No link on %s\n", dev->name);
	return -1;
#endif
	return 0;
}
/*
 * mvgbe_halt - eth_device halt callback: shut the port down.
 *
 * Disables address decoding, stops the TX and RX queues, disables the
 * serial port, then clears and masks all port interrupts.
 */
static int mvgbe_halt(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;

	/* Disable all gigE address decoder (set all 6 window-disable bits) */
	MVGBE_REG_WR(regs->bare, 0x3f);

	/* Stop TX and RX queues and wait for them to drain */
	stop_queue(&regs->tqc);
	stop_queue(&regs->rqc);

	/* Disable port */
	MVGBE_REG_BITS_RESET(regs->psc0, MVGBE_SERIAL_PORT_EN);
	/* Set port is not reset */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 4);
#ifdef CONFIG_SYS_MII_MODE
	/* Set MMI interface up */
	MVGBE_REG_BITS_RESET(regs->psc1, 1 << 3);
#endif
	/* Disable & mask ethernet port interrupts */
	MVGBE_REG_WR(regs->ic, 0);
	MVGBE_REG_WR(regs->ice, 0);
	MVGBE_REG_WR(regs->pim, 0);
	MVGBE_REG_WR(regs->peim, 0);
	return 0;
}
  451. static int mvgbe_write_hwaddr(struct eth_device *dev)
  452. {
  453. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  454. struct mvgbe_registers *regs = dmvgbe->regs;
  455. /* Programs net device MAC address after initialization */
  456. port_uc_addr_set(regs, dmvgbe->dev.enetaddr);
  457. return 0;
  458. }
  459. static int mvgbe_send(struct eth_device *dev, void *dataptr,
  460. int datasize)
  461. {
  462. struct mvgbe_device *dmvgbe = to_mvgbe(dev);
  463. struct mvgbe_registers *regs = dmvgbe->regs;
  464. struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
  465. void *p = (void *)dataptr;
  466. u32 cmd_sts;
  467. u32 txuq0_reg_addr;
  468. /* Copy buffer if it's misaligned */
  469. if ((u32) dataptr & 0x07) {
  470. if (datasize > PKTSIZE_ALIGN) {
  471. printf("Non-aligned data too large (%d)\n",
  472. datasize);
  473. return -1;
  474. }
  475. memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
  476. p = dmvgbe->p_aligned_txbuf;
  477. }
  478. p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
  479. p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
  480. p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
  481. p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
  482. p_txdesc->buf_ptr = (u8 *) p;
  483. p_txdesc->byte_cnt = datasize;
  484. /* Set this tc desc as zeroth TXUQ */
  485. txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
  486. writel((u32) p_txdesc, txuq0_reg_addr);
  487. /* ensure tx desc writes above are performed before we start Tx DMA */
  488. isb();
  489. /* Apply send command using zeroth TXUQ */
  490. MVGBE_REG_WR(regs->tqc, (1 << TXUQ));
  491. /*
  492. * wait for packet xmit completion
  493. */
  494. cmd_sts = readl(&p_txdesc->cmd_sts);
  495. while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
  496. /* return fail if error is detected */
  497. if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
  498. (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
  499. cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
  500. printf("Err..(%s) in xmit packet\n", __FUNCTION__);
  501. return -1;
  502. }
  503. cmd_sts = readl(&p_txdesc->cmd_sts);
  504. };
  505. return 0;
  506. }
/*
 * mvgbe_recv - eth_device recv callback: receive one frame.
 *
 * Polls the current RX descriptor until the DMA engine hands it back,
 * passes a good frame to the network stack via NetReceive(), drops
 * fragmented or errored frames, then recycles the descriptor and
 * advances the ring cursor.
 *
 * Returns 0 (also when a frame was dropped), -1 on poll timeout.
 */
static int mvgbe_recv(struct eth_device *dev)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_rxdesc *p_rxdesc_curr = dmvgbe->p_rxdesc_curr;
	u32 cmd_sts;
	u32 timeout = 0;
	u32 rxdesc_curr_addr;

	/*
	 * Wait until an rx packet is available or we time out.
	 * NOTE(review): the bound reuses MVGBE_PHY_SMI_TIMEOUT, which is an
	 * SMI constant — presumably just a convenient iteration count here;
	 * confirm it is long enough for RX.
	 */
	do {
		if (timeout < MVGBE_PHY_SMI_TIMEOUT)
			timeout++;
		else {
			debug("%s time out...\n", __FUNCTION__);
			return -1;
		}
	} while (readl(&p_rxdesc_curr->cmd_sts) & MVGBE_BUFFER_OWNED_BY_DMA);

	if (p_rxdesc_curr->byte_cnt != 0) {
		debug("%s: Received %d byte Packet @ 0x%x (cmd_sts= %08x)\n",
			__FUNCTION__, (u32) p_rxdesc_curr->byte_cnt,
			(u32) p_rxdesc_curr->buf_ptr,
			(u32) p_rxdesc_curr->cmd_sts);
	}
	/*
	 * In case received a packet without first/last bits on
	 * OR the error summary bit is on,
	 * the packets needs to be dropped.
	 */
	cmd_sts = readl(&p_rxdesc_curr->cmd_sts);
	if ((cmd_sts &
		(MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC))
		!= (MVGBE_RX_FIRST_DESC | MVGBE_RX_LAST_DESC)) {
		/* frame spans several descriptors: unsupported, drop it */
		printf("Err..(%s) Dropping packet spread on"
			" multiple descriptors\n", __FUNCTION__);
	} else if (cmd_sts & MVGBE_ERROR_SUMMARY) {
		printf("Err..(%s) Dropping packet with errors\n",
			__FUNCTION__);
	} else {
		/* !!! call higher layer processing */
		debug("%s: Sending Received packet to"
			" upper layer (NetReceive)\n", __FUNCTION__);
		/* let the upper layer handle the packet */
		NetReceive((p_rxdesc_curr->buf_ptr + RX_BUF_OFFSET),
			(int)(p_rxdesc_curr->byte_cnt - RX_BUF_OFFSET));
	}
	/*
	 * Free this descriptor back to the DMA engine and advance the ring
	 * cursor to the next descriptor.
	 * NOTE(review): p_rxdesc_curr is a plain CPU struct member, yet it
	 * is updated through writel() — presumably to force the store;
	 * confirm the byte-order behaviour of writel() is intended here.
	 */
	p_rxdesc_curr->cmd_sts =
		MVGBE_BUFFER_OWNED_BY_DMA | MVGBE_RX_EN_INTERRUPT;
	p_rxdesc_curr->buf_size = PKTSIZE_ALIGN;
	p_rxdesc_curr->byte_cnt = 0;

	rxdesc_curr_addr = (u32)&dmvgbe->p_rxdesc_curr;
	writel((unsigned)p_rxdesc_curr->nxtdesc_p, rxdesc_curr_addr);

	return 0;
}
  562. int mvgbe_initialize(bd_t *bis)
  563. {
  564. struct mvgbe_device *dmvgbe;
  565. struct eth_device *dev;
  566. int devnum;
  567. u8 used_ports[MAX_MVGBE_DEVS] = CONFIG_MVGBE_PORTS;
  568. for (devnum = 0; devnum < MAX_MVGBE_DEVS; devnum++) {
  569. /*skip if port is configured not to use */
  570. if (used_ports[devnum] == 0)
  571. continue;
  572. dmvgbe = malloc(sizeof(struct mvgbe_device));
  573. if (!dmvgbe)
  574. goto error1;
  575. memset(dmvgbe, 0, sizeof(struct mvgbe_device));
  576. dmvgbe->p_rxdesc =
  577. (struct mvgbe_rxdesc *)memalign(PKTALIGN,
  578. MV_RXQ_DESC_ALIGNED_SIZE*RINGSZ + 1);
  579. if (!dmvgbe->p_rxdesc)
  580. goto error2;
  581. dmvgbe->p_rxbuf = (u8 *) memalign(PKTALIGN,
  582. RINGSZ*PKTSIZE_ALIGN + 1);
  583. if (!dmvgbe->p_rxbuf)
  584. goto error3;
  585. dmvgbe->p_aligned_txbuf = memalign(8, PKTSIZE_ALIGN);
  586. if (!dmvgbe->p_aligned_txbuf)
  587. goto error4;
  588. dmvgbe->p_txdesc = (struct mvgbe_txdesc *) memalign(
  589. PKTALIGN, sizeof(struct mvgbe_txdesc) + 1);
  590. if (!dmvgbe->p_txdesc) {
  591. free(dmvgbe->p_aligned_txbuf);
  592. error4:
  593. free(dmvgbe->p_rxbuf);
  594. error3:
  595. free(dmvgbe->p_rxdesc);
  596. error2:
  597. free(dmvgbe);
  598. error1:
  599. printf("Err.. %s Failed to allocate memory\n",
  600. __FUNCTION__);
  601. return -1;
  602. }
  603. dev = &dmvgbe->dev;
  604. /* must be less than NAMESIZE (16) */
  605. sprintf(dev->name, "egiga%d", devnum);
  606. switch (devnum) {
  607. case 0:
  608. dmvgbe->regs = (void *)MVGBE0_BASE;
  609. break;
  610. #if defined(MVGBE1_BASE)
  611. case 1:
  612. dmvgbe->regs = (void *)MVGBE1_BASE;
  613. break;
  614. #endif
  615. default: /* this should never happen */
  616. printf("Err..(%s) Invalid device number %d\n",
  617. __FUNCTION__, devnum);
  618. return -1;
  619. }
  620. dev->init = (void *)mvgbe_init;
  621. dev->halt = (void *)mvgbe_halt;
  622. dev->send = (void *)mvgbe_send;
  623. dev->recv = (void *)mvgbe_recv;
  624. dev->write_hwaddr = (void *)mvgbe_write_hwaddr;
  625. eth_register(dev);
  626. #if defined(CONFIG_MII) || defined(CONFIG_CMD_MII)
  627. miiphy_register(dev->name, smi_reg_read, smi_reg_write);
  628. /* Set phy address of the port */
  629. miiphy_write(dev->name, MV_PHY_ADR_REQUEST,
  630. MV_PHY_ADR_REQUEST, PHY_BASE_ADR + devnum);
  631. #endif
  632. }
  633. return 0;
  634. }