fec_mxc.c

  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * (C) Copyright 2009 Ilya Yanok, Emcraft Systems Ltd <yanok@emcraft.com>
  4. * (C) Copyright 2008,2009 Eric Jarrige <eric.jarrige@armadeus.org>
  5. * (C) Copyright 2008 Armadeus Systems nc
  6. * (C) Copyright 2007 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
  7. * (C) Copyright 2007 Pengutronix, Juergen Beisert <j.beisert@pengutronix.de>
  8. */
  9. #include <common.h>
  10. #include <dm.h>
  11. #include <environment.h>
  12. #include <malloc.h>
  13. #include <memalign.h>
  14. #include <miiphy.h>
  15. #include <net.h>
  16. #include <netdev.h>
  17. #include <asm/io.h>
  18. #include <linux/errno.h>
  19. #include <linux/compiler.h>
  20. #include <asm/arch/clock.h>
  21. #include <asm/arch/imx-regs.h>
  22. #include <asm/mach-imx/sys_proto.h>
  23. #include <asm-generic/gpio.h>
  24. #include "fec_mxc.h"
  25. DECLARE_GLOBAL_DATA_PTR;
  26. /*
  27. * Time the transfer out after 5 ms. In practice it is usually a bit longer,
  28. * since the tight loops this timeout is used in add some overhead.
  29. */
  30. #define FEC_XFER_TIMEOUT 5000
  31. /*
  32. * The standard 32-byte DMA alignment does not work on mx6solox, which requires
  33. * 64-byte alignment in the DMA RX FEC buffer.
  34. * Introduce the FEC_DMA_RX_MINALIGN which can cover mx6solox needs and also
  35. * satisfies the alignment requirement on other SoCs (32 bytes).
  36. */
  37. #define FEC_DMA_RX_MINALIGN 64
  38. #ifndef CONFIG_MII
  39. #error "CONFIG_MII has to be defined!"
  40. #endif
  41. #ifndef CONFIG_FEC_XCV_TYPE
  42. #define CONFIG_FEC_XCV_TYPE MII100
  43. #endif
  44. /*
  45. * The i.MX28 operates with packets in big endian. We need to swap them before
  46. * sending and after receiving.
  47. */
  48. #ifdef CONFIG_MX28
  49. #define CONFIG_FEC_MXC_SWAP_PACKET
  50. #endif
  51. #define RXDESC_PER_CACHELINE (ARCH_DMA_MINALIGN/sizeof(struct fec_bd))
  52. /* Check various alignment issues at compile time */
  53. #if ((ARCH_DMA_MINALIGN < 16) || (ARCH_DMA_MINALIGN % 16 != 0))
  54. #error "ARCH_DMA_MINALIGN must be multiple of 16!"
  55. #endif
  56. #if ((PKTALIGN < ARCH_DMA_MINALIGN) || \
  57. (PKTALIGN % ARCH_DMA_MINALIGN != 0))
  58. #error "PKTALIGN must be multiple of ARCH_DMA_MINALIGN!"
  59. #endif
  60. #undef DEBUG
  61. #ifdef CONFIG_FEC_MXC_SWAP_PACKET
  62. static void swap_packet(uint32_t *packet, int length)
  63. {
  64. int i;
  65. for (i = 0; i < DIV_ROUND_UP(length, 4); i++)
  66. packet[i] = __swab32(packet[i]);
  67. }
  68. #endif
  69. /* MII-interface related functions */
  70. static int fec_mdio_read(struct ethernet_regs *eth, uint8_t phyaddr,
  71. uint8_t regaddr)
  72. {
  73. uint32_t reg; /* convenient holder for the PHY register */
  74. uint32_t phy; /* convenient holder for the PHY */
  75. uint32_t start;
  76. int val;
  77. /*
  78. * reading from any PHY's register is done by properly
  79. * programming the FEC's MII data register.
  80. */
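/*
 * Added note: the value written to mii_data below encodes an IEEE 802.3
 * clause 22 management frame: start bits (ST), read opcode (OP), the
 * 5-bit PHY and register addresses and the turnaround bits (TA). The
 * exact bit positions come from the FEC_MII_DATA_* macros in fec_mxc.h.
 */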
  81. writel(FEC_IEVENT_MII, &eth->ievent);
  82. reg = regaddr << FEC_MII_DATA_RA_SHIFT;
  83. phy = phyaddr << FEC_MII_DATA_PA_SHIFT;
  84. writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA |
  85. phy | reg, &eth->mii_data);
  86. /* wait for the related interrupt */
  87. start = get_timer(0);
  88. while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
  89. if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
  90. printf("Read MDIO failed...\n");
  91. return -1;
  92. }
  93. }
  94. /* clear mii interrupt bit */
  95. writel(FEC_IEVENT_MII, &eth->ievent);
  96. /* it's now safe to read the PHY's register */
  97. val = (unsigned short)readl(&eth->mii_data);
  98. debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyaddr,
  99. regaddr, val);
  100. return val;
  101. }
  102. static void fec_mii_setspeed(struct ethernet_regs *eth)
  103. {
  104. /*
  105. * Set MII_SPEED = (1/(mii_speed * 2)) * System Clock
  106. * and do not drop the Preamble.
  107. *
  108. * The i.MX28 and i.MX6 types have another field in the MSCR (aka
  109. * MII_SPEED) register that defines the MDIO output hold time. Earlier
  110. * versions are RAZ there, so just ignore the difference and write the
  111. * register always.
  112. * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
  113. * HOLDTIME + 1 is the number of clk cycles the fec is holding the
  114. * output.
  115. * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
  116. * Given that ceil(clkrate / 5000000) <= 64, the calculation for
  117. * holdtime cannot result in a value greater than 3.
  118. */
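/*
 * Worked example (added here, assuming a 66 MHz FEC clock):
 * speed = DIV_ROUND_UP(66000000, 5000000) = 14, giving an MDC target of
 * 66 MHz / (2 * 14), roughly 2.36 MHz, i.e. below the 2.5 MHz limit;
 * hold = DIV_ROUND_UP(66000000, 100000000) - 1 = 0, i.e. one clock
 * (about 15 ns) of output hold, which meets the 10 ns minimum above.
 */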
  119. u32 pclk = imx_get_fecclk();
  120. u32 speed = DIV_ROUND_UP(pclk, 5000000);
  121. u32 hold = DIV_ROUND_UP(pclk, 100000000) - 1;
  122. #ifdef FEC_QUIRK_ENET_MAC
  123. speed--;
  124. #endif
  125. writel(speed << 1 | hold << 8, &eth->mii_speed);
  126. debug("%s: mii_speed %08x\n", __func__, readl(&eth->mii_speed));
  127. }
  128. static int fec_mdio_write(struct ethernet_regs *eth, uint8_t phyaddr,
  129. uint8_t regaddr, uint16_t data)
  130. {
  131. uint32_t reg; /* convenient holder for the PHY register */
  132. uint32_t phy; /* convenient holder for the PHY */
  133. uint32_t start;
  134. reg = regaddr << FEC_MII_DATA_RA_SHIFT;
  135. phy = phyaddr << FEC_MII_DATA_PA_SHIFT;
  136. writel(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR |
  137. FEC_MII_DATA_TA | phy | reg | data, &eth->mii_data);
  138. /* wait for the MII interrupt */
  139. start = get_timer(0);
  140. while (!(readl(&eth->ievent) & FEC_IEVENT_MII)) {
  141. if (get_timer(start) > (CONFIG_SYS_HZ / 1000)) {
  142. printf("Write MDIO failed...\n");
  143. return -1;
  144. }
  145. }
  146. /* clear MII interrupt bit */
  147. writel(FEC_IEVENT_MII, &eth->ievent);
  148. debug("%s: phy: %02x reg:%02x val:%#x\n", __func__, phyaddr,
  149. regaddr, data);
  150. return 0;
  151. }
  152. static int fec_phy_read(struct mii_dev *bus, int phyaddr, int dev_addr,
  153. int regaddr)
  154. {
  155. return fec_mdio_read(bus->priv, phyaddr, regaddr);
  156. }
  157. static int fec_phy_write(struct mii_dev *bus, int phyaddr, int dev_addr,
  158. int regaddr, u16 data)
  159. {
  160. return fec_mdio_write(bus->priv, phyaddr, regaddr, data);
  161. }
  162. #ifndef CONFIG_PHYLIB
  163. static int miiphy_restart_aneg(struct eth_device *dev)
  164. {
  165. int ret = 0;
  166. #if !defined(CONFIG_FEC_MXC_NO_ANEG)
  167. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  168. struct ethernet_regs *eth = fec->bus->priv;
  169. /*
  170. * Wake up from sleep if necessary
  171. * Reset PHY, then delay 300ns
  172. */
  173. #ifdef CONFIG_MX27
  174. fec_mdio_write(eth, fec->phy_id, MII_DCOUNTER, 0x00FF);
  175. #endif
  176. fec_mdio_write(eth, fec->phy_id, MII_BMCR, BMCR_RESET);
  177. udelay(1000);
  178. /* Set the auto-negotiation advertisement register bits */
  179. fec_mdio_write(eth, fec->phy_id, MII_ADVERTISE,
  180. LPA_100FULL | LPA_100HALF | LPA_10FULL |
  181. LPA_10HALF | PHY_ANLPAR_PSB_802_3);
  182. fec_mdio_write(eth, fec->phy_id, MII_BMCR,
  183. BMCR_ANENABLE | BMCR_ANRESTART);
  184. if (fec->mii_postcall)
  185. ret = fec->mii_postcall(fec->phy_id);
  186. #endif
  187. return ret;
  188. }
  189. #ifndef CONFIG_FEC_FIXED_SPEED
  190. static int miiphy_wait_aneg(struct eth_device *dev)
  191. {
  192. uint32_t start;
  193. int status;
  194. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  195. struct ethernet_regs *eth = fec->bus->priv;
  196. /* Wait for AN completion */
  197. start = get_timer(0);
  198. do {
  199. if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
  200. printf("%s: Autonegotiation timeout\n", dev->name);
  201. return -1;
  202. }
  203. status = fec_mdio_read(eth, fec->phy_id, MII_BMSR);
  204. if (status < 0) {
  205. printf("%s: Autonegotiation failed. status: %d\n",
  206. dev->name, status);
  207. return -1;
  208. }
  209. } while (!(status & BMSR_LSTATUS));
  210. return 0;
  211. }
  212. #endif /* CONFIG_FEC_FIXED_SPEED */
  213. #endif
  214. static int fec_rx_task_enable(struct fec_priv *fec)
  215. {
  216. writel(FEC_R_DES_ACTIVE_RDAR, &fec->eth->r_des_active);
  217. return 0;
  218. }
  219. static int fec_rx_task_disable(struct fec_priv *fec)
  220. {
  221. return 0;
  222. }
  223. static int fec_tx_task_enable(struct fec_priv *fec)
  224. {
  225. writel(FEC_X_DES_ACTIVE_TDAR, &fec->eth->x_des_active);
  226. return 0;
  227. }
  228. static int fec_tx_task_disable(struct fec_priv *fec)
  229. {
  230. return 0;
  231. }
  232. /**
  233. * Initialize receive task's buffer descriptors
  234. * @param[in] fec all we know about the device yet
  235. * @param[in] count receive buffer count to be allocated
  236. * @param[in] dsize desired size of each receive buffer
  238. *
  239. * Init all RX descriptors to default values.
  240. */
  241. static void fec_rbd_init(struct fec_priv *fec, int count, int dsize)
  242. {
  243. uint32_t size;
  244. ulong data;
  245. int i;
  246. /*
  247. * Reload the RX descriptors with default values and wipe
  248. * the RX buffers.
  249. */
  250. size = roundup(dsize, ARCH_DMA_MINALIGN);
  251. for (i = 0; i < count; i++) {
  252. data = fec->rbd_base[i].data_pointer;
  253. memset((void *)data, 0, dsize);
  254. flush_dcache_range(data, data + size);
  255. fec->rbd_base[i].status = FEC_RBD_EMPTY;
  256. fec->rbd_base[i].data_length = 0;
  257. }
  258. /* Mark the last RBD to close the ring. */
  259. fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
  260. fec->rbd_index = 0;
  261. flush_dcache_range((ulong)fec->rbd_base,
  262. (ulong)fec->rbd_base + size);
  263. }
  264. /**
  265. * Initialize transmit task's buffer descriptors
  266. * @param[in] fec all we know about the device yet
  267. *
  268. * Transmit buffers are created externally. We only have to init the BDs here.\n
  269. * Note: There is a race condition in the hardware. When only one BD is in
  270. * use, it must be marked with the WRAP bit to use it for every transmit.
  271. * This bit in combination with the READY bit results in a double transmit
  272. * of each data buffer. It seems the state machine checks READY again before
  273. * resetting it after the first transfer.
  274. * Using two BDs solves this issue.
  275. */
  276. static void fec_tbd_init(struct fec_priv *fec)
  277. {
  278. ulong addr = (ulong)fec->tbd_base;
  279. unsigned size = roundup(2 * sizeof(struct fec_bd),
  280. ARCH_DMA_MINALIGN);
  281. memset(fec->tbd_base, 0, size);
  282. fec->tbd_base[0].status = 0;
  283. fec->tbd_base[1].status = FEC_TBD_WRAP;
  284. fec->tbd_index = 0;
  285. flush_dcache_range(addr, addr + size);
  286. }
  287. /**
  288. * Mark the given read buffer descriptor as free
  289. * @param[in] last 1 if this is the last buffer descriptor in the chain, else 0
  290. * @param[in] prbd buffer descriptor to mark free again
  291. */
  292. static void fec_rbd_clean(int last, struct fec_bd *prbd)
  293. {
  294. unsigned short flags = FEC_RBD_EMPTY;
  295. if (last)
  296. flags |= FEC_RBD_WRAP;
  297. writew(flags, &prbd->status);
  298. writew(0, &prbd->data_length);
  299. }
  300. static int fec_get_hwaddr(int dev_id, unsigned char *mac)
  301. {
  302. imx_get_mac_from_fuse(dev_id, mac);
  303. return !is_valid_ethaddr(mac);
  304. }
  305. #ifdef CONFIG_DM_ETH
  306. static int fecmxc_set_hwaddr(struct udevice *dev)
  307. #else
  308. static int fec_set_hwaddr(struct eth_device *dev)
  309. #endif
  310. {
  311. #ifdef CONFIG_DM_ETH
  312. struct fec_priv *fec = dev_get_priv(dev);
  313. struct eth_pdata *pdata = dev_get_platdata(dev);
  314. uchar *mac = pdata->enetaddr;
  315. #else
  316. uchar *mac = dev->enetaddr;
  317. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  318. #endif
  319. writel(0, &fec->eth->iaddr1);
  320. writel(0, &fec->eth->iaddr2);
  321. writel(0, &fec->eth->gaddr1);
  322. writel(0, &fec->eth->gaddr2);
  323. /* Set physical address */
  324. writel((mac[0] << 24) + (mac[1] << 16) + (mac[2] << 8) + mac[3],
  325. &fec->eth->paddr1);
  326. writel((mac[4] << 24) + (mac[5] << 16) + 0x8808, &fec->eth->paddr2);
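/*
 * Added example with a hypothetical MAC of 00:11:22:33:44:55: the two
 * writes above set paddr1 = 0x00112233 and paddr2 = 0x44558808, the low
 * half-word 0x8808 being the MAC control (PAUSE) frame type field.
 */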
  327. return 0;
  328. }
  329. /* Do initial configuration of the FEC registers */
  330. static void fec_reg_setup(struct fec_priv *fec)
  331. {
  332. uint32_t rcntrl;
  333. /* Set interrupt mask register */
  334. writel(0x00000000, &fec->eth->imask);
  335. /* Clear FEC-Lite interrupt event register(IEVENT) */
  336. writel(0xffffffff, &fec->eth->ievent);
  337. /* Set FEC-Lite receive control register(R_CNTRL): */
  338. /* Start with frame length = 1518, common for all modes. */
  339. rcntrl = PKTSIZE << FEC_RCNTRL_MAX_FL_SHIFT;
  340. if (fec->xcv_type != SEVENWIRE) /* xMII modes */
  341. rcntrl |= FEC_RCNTRL_FCE | FEC_RCNTRL_MII_MODE;
  342. if (fec->xcv_type == RGMII)
  343. rcntrl |= FEC_RCNTRL_RGMII;
  344. else if (fec->xcv_type == RMII)
  345. rcntrl |= FEC_RCNTRL_RMII;
  346. writel(rcntrl, &fec->eth->r_cntrl);
  347. }
  348. /**
  349. * Start the FEC engine
  350. * @param[in] dev Our device to handle
  351. */
  352. #ifdef CONFIG_DM_ETH
  353. static int fec_open(struct udevice *dev)
  354. #else
  355. static int fec_open(struct eth_device *edev)
  356. #endif
  357. {
  358. #ifdef CONFIG_DM_ETH
  359. struct fec_priv *fec = dev_get_priv(dev);
  360. #else
  361. struct fec_priv *fec = (struct fec_priv *)edev->priv;
  362. #endif
  363. int speed;
  364. ulong addr, size;
  365. int i;
  366. debug("fec_open: fec_open(dev)\n");
  367. /* full-duplex, heartbeat disabled */
  368. writel(1 << 2, &fec->eth->x_cntrl);
  369. fec->rbd_index = 0;
  370. /* Invalidate all descriptors */
  371. for (i = 0; i < FEC_RBD_NUM - 1; i++)
  372. fec_rbd_clean(0, &fec->rbd_base[i]);
  373. fec_rbd_clean(1, &fec->rbd_base[i]);
  374. /* Flush the descriptors into RAM */
  375. size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd),
  376. ARCH_DMA_MINALIGN);
  377. addr = (ulong)fec->rbd_base;
  378. flush_dcache_range(addr, addr + size);
  379. #ifdef FEC_QUIRK_ENET_MAC
  380. /* Enable ENET HW endian SWAP */
  381. writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_DBSWAP,
  382. &fec->eth->ecntrl);
  383. /* Enable ENET store and forward mode */
  384. writel(readl(&fec->eth->x_wmrk) | FEC_X_WMRK_STRFWD,
  385. &fec->eth->x_wmrk);
  386. #endif
  387. /* Enable FEC-Lite controller */
  388. writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_ETHER_EN,
  389. &fec->eth->ecntrl);
  390. #if defined(CONFIG_MX25) || defined(CONFIG_MX53) || defined(CONFIG_MX6SL)
  391. udelay(100);
  392. /* setup the MII gasket for RMII mode */
  393. /* disable the gasket */
  394. writew(0, &fec->eth->miigsk_enr);
  395. /* wait for the gasket to be disabled */
  396. while (readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY)
  397. udelay(2);
  398. /* configure gasket for RMII, 50 MHz, no loopback, and no echo */
  399. writew(MIIGSK_CFGR_IF_MODE_RMII, &fec->eth->miigsk_cfgr);
  400. /* re-enable the gasket */
  401. writew(MIIGSK_ENR_EN, &fec->eth->miigsk_enr);
  402. /* wait until MII gasket is ready */
  403. int max_loops = 10;
  404. while ((readw(&fec->eth->miigsk_enr) & MIIGSK_ENR_READY) == 0) {
  405. if (--max_loops <= 0) {
  406. printf("WAIT for MII Gasket ready timed out\n");
  407. break;
  408. }
  409. }
  410. #endif
  411. #ifdef CONFIG_PHYLIB
  412. {
  413. /* Start up the PHY */
  414. int ret = phy_startup(fec->phydev);
  415. if (ret) {
  416. printf("Could not initialize PHY %s\n",
  417. fec->phydev->dev->name);
  418. return ret;
  419. }
  420. speed = fec->phydev->speed;
  421. }
  422. #elif CONFIG_FEC_FIXED_SPEED
  423. speed = CONFIG_FEC_FIXED_SPEED;
  424. #else
  425. miiphy_wait_aneg(edev);
  426. speed = miiphy_speed(edev->name, fec->phy_id);
  427. miiphy_duplex(edev->name, fec->phy_id);
  428. #endif
  429. #ifdef FEC_QUIRK_ENET_MAC
  430. {
  431. u32 ecr = readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_SPEED;
  432. u32 rcr = readl(&fec->eth->r_cntrl) & ~FEC_RCNTRL_RMII_10T;
  433. if (speed == _1000BASET)
  434. ecr |= FEC_ECNTRL_SPEED;
  435. else if (speed != _100BASET)
  436. rcr |= FEC_RCNTRL_RMII_10T;
  437. writel(ecr, &fec->eth->ecntrl);
  438. writel(rcr, &fec->eth->r_cntrl);
  439. }
  440. #endif
  441. debug("%s:Speed=%i\n", __func__, speed);
  442. /* Enable SmartDMA receive task */
  443. fec_rx_task_enable(fec);
  444. udelay(100000);
  445. return 0;
  446. }
  447. #ifdef CONFIG_DM_ETH
  448. static int fecmxc_init(struct udevice *dev)
  449. #else
  450. static int fec_init(struct eth_device *dev, bd_t *bd)
  451. #endif
  452. {
  453. #ifdef CONFIG_DM_ETH
  454. struct fec_priv *fec = dev_get_priv(dev);
  455. #else
  456. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  457. #endif
  458. u8 *mib_ptr = (uint8_t *)&fec->eth->rmon_t_drop;
  459. u8 *i;
  460. ulong addr;
  461. /* Initialize MAC address */
  462. #ifdef CONFIG_DM_ETH
  463. fecmxc_set_hwaddr(dev);
  464. #else
  465. fec_set_hwaddr(dev);
  466. #endif
  467. /* Setup transmit descriptors, there are two in total. */
  468. fec_tbd_init(fec);
  469. /* Setup receive descriptors. */
  470. fec_rbd_init(fec, FEC_RBD_NUM, FEC_MAX_PKT_SIZE);
  471. fec_reg_setup(fec);
  472. if (fec->xcv_type != SEVENWIRE)
  473. fec_mii_setspeed(fec->bus->priv);
  474. /* Set Opcode/Pause Duration Register */
  475. writel(0x00010020, &fec->eth->op_pause); /* FIXME 0xffff0020; */
  476. writel(0x2, &fec->eth->x_wmrk);
  477. /* Set multicast address filter */
  478. writel(0x00000000, &fec->eth->gaddr1);
  479. writel(0x00000000, &fec->eth->gaddr2);
  480. /* Do not access reserved register */
  481. if (!is_mx6ul() && !is_mx6ull() && !is_mx8m()) {
  482. /* clear MIB RAM */
  483. for (i = mib_ptr; i <= mib_ptr + 0xfc; i += 4)
  484. writel(0, i);
  485. /* FIFO receive start register */
  486. writel(0x520, &fec->eth->r_fstart);
  487. }
  488. /* size and address of each buffer */
  489. writel(FEC_MAX_PKT_SIZE, &fec->eth->emrbr);
  490. addr = (ulong)fec->tbd_base;
  491. writel((uint32_t)addr, &fec->eth->etdsr);
  492. addr = (ulong)fec->rbd_base;
  493. writel((uint32_t)addr, &fec->eth->erdsr);
  494. #ifndef CONFIG_PHYLIB
  495. if (fec->xcv_type != SEVENWIRE)
  496. miiphy_restart_aneg(dev);
  497. #endif
  498. fec_open(dev);
  499. return 0;
  500. }
  501. /**
  502. * Halt the FEC engine
  503. * @param[in] dev Our device to handle
  504. */
  505. #ifdef CONFIG_DM_ETH
  506. static void fecmxc_halt(struct udevice *dev)
  507. #else
  508. static void fec_halt(struct eth_device *dev)
  509. #endif
  510. {
  511. #ifdef CONFIG_DM_ETH
  512. struct fec_priv *fec = dev_get_priv(dev);
  513. #else
  514. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  515. #endif
  516. int counter = 0xffff;
  517. /* issue graceful stop command to the FEC transmitter if necessary */
  518. writel(FEC_TCNTRL_GTS | readl(&fec->eth->x_cntrl),
  519. &fec->eth->x_cntrl);
  520. debug("eth_halt: wait for stop regs\n");
  521. /* wait for graceful stop to register */
  522. while ((counter--) && (!(readl(&fec->eth->ievent) & FEC_IEVENT_GRA)))
  523. udelay(1);
  524. /* Disable SmartDMA tasks */
  525. fec_tx_task_disable(fec);
  526. fec_rx_task_disable(fec);
  527. /*
  528. * Disable the Ethernet Controller
  529. * Note: this will also reset the BD index counter!
  530. */
  531. writel(readl(&fec->eth->ecntrl) & ~FEC_ECNTRL_ETHER_EN,
  532. &fec->eth->ecntrl);
  533. fec->rbd_index = 0;
  534. fec->tbd_index = 0;
  535. debug("eth_halt: done\n");
  536. }
  537. /**
  538. * Transmit one frame
  539. * @param[in] dev Our ethernet device to handle
  540. * @param[in] packet Pointer to the data to be transmitted
  541. * @param[in] length Data count in bytes
  542. * @return 0 on success
  543. */
  544. #ifdef CONFIG_DM_ETH
  545. static int fecmxc_send(struct udevice *dev, void *packet, int length)
  546. #else
  547. static int fec_send(struct eth_device *dev, void *packet, int length)
  548. #endif
  549. {
  550. unsigned int status;
  551. u32 size;
  552. ulong addr, end;
  553. int timeout = FEC_XFER_TIMEOUT;
  554. int ret = 0;
  555. /*
  556. * This routine transmits one frame. This routine only accepts
  557. * 6-byte Ethernet addresses.
  558. */
  559. #ifdef CONFIG_DM_ETH
  560. struct fec_priv *fec = dev_get_priv(dev);
  561. #else
  562. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  563. #endif
  564. /*
  565. * Check for valid length of data.
  566. */
  567. if ((length > 1500) || (length <= 0)) {
  568. printf("Payload (%d) too large\n", length);
  569. return -1;
  570. }
  571. /*
  572. * Setup the transmit buffer. We are always using the first buffer for
  573. * transmission, the second will be empty and only used to stop the DMA
  574. * engine. We also flush the packet to RAM here to avoid cache trouble.
  575. */
  576. #ifdef CONFIG_FEC_MXC_SWAP_PACKET
  577. swap_packet((uint32_t *)packet, length);
  578. #endif
  579. addr = (ulong)packet;
  580. end = roundup(addr + length, ARCH_DMA_MINALIGN);
  581. addr &= ~(ARCH_DMA_MINALIGN - 1);
  582. flush_dcache_range(addr, end);
  583. writew(length, &fec->tbd_base[fec->tbd_index].data_length);
  584. writel((uint32_t)addr, &fec->tbd_base[fec->tbd_index].data_pointer);
  585. /*
  586. * update BD's status now
  587. * This block:
  588. * - is always the last in a chain (means no chain)
  589. * - should transmit the CRC
  590. * - might be the last BD in the list, so the address counter should
  591. * wrap (-> keep the WRAP flag)
  592. */
  593. status = readw(&fec->tbd_base[fec->tbd_index].status) & FEC_TBD_WRAP;
  594. status |= FEC_TBD_LAST | FEC_TBD_TC | FEC_TBD_READY;
  595. writew(status, &fec->tbd_base[fec->tbd_index].status);
  596. /*
  597. * Flush data cache. This code flushes both TX descriptors to RAM.
  598. * After this code, the descriptors will be safely in RAM and we
  599. * can start DMA.
  600. */
  601. size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
  602. addr = (ulong)fec->tbd_base;
  603. flush_dcache_range(addr, addr + size);
  604. /*
  605. * Below we read the DMA descriptor's last four bytes back from the
  606. * DRAM. This is important in order to make sure that all WRITE
  607. * operations on the bus that were triggered by previous cache FLUSH
  608. * have completed.
  609. *
  610. * Otherwise, on MX28, it is possible to observe a corruption of the
  611. * DMA descriptors. Please refer to schematic "Figure 1-2" in MX28RM
  612. * for the bus structure of MX28. The scenario is as follows:
  613. *
  614. * 1) ARM core triggers a series of WRITEs on the AHB_ARB2 bus going
  615. * to DRAM due to flush_dcache_range()
  616. * 2) ARM core writes the FEC registers via AHB_ARB2
  617. * 3) FEC DMA starts reading/writing from/to DRAM via AHB_ARB3
  618. *
  619. * Note that 2) does sometimes finish before 1) due to reordering of
  620. * WRITE accesses on the AHB bus, therefore triggering 3) before the
  621. * DMA descriptor is fully written into DRAM. This results in occasional
  622. * corruption of the DMA descriptor.
  623. */
  624. readl(addr + size - 4);
  625. /* Enable SmartDMA transmit task */
  626. fec_tx_task_enable(fec);
  627. /*
  628. * Wait until frame is sent. On each turn of the wait cycle, we must
  629. * invalidate data cache to see what's really in RAM. Also, we need
  630. * barrier here.
  631. */
  632. while (--timeout) {
  633. if (!(readl(&fec->eth->x_des_active) & FEC_X_DES_ACTIVE_TDAR))
  634. break;
  635. }
  636. if (!timeout) {
  637. ret = -EINVAL;
  638. goto out;
  639. }
  640. /*
  641. * The TDAR bit is cleared when the descriptors are all out from TX
  642. * but on mx6solox we noticed that the READY bit is still not cleared
  643. * right after TDAR.
  644. * These are two distinct signals, and in IC simulation, we found that
  645. * TDAR always gets cleared before the READY bit of the last BD becomes
  646. * cleared.
  647. * The mx6solox uses a later version of the FEC IP, and it looks like
  648. * this intrinsic behaviour of the TDAR bit has changed in this newer FEC
  649. * version.
  650. *
  651. * Fix this by polling the READY bit of BD after the TDAR polling,
  652. * which covers the mx6solox case and does not harm the other SoCs.
  653. */
  654. timeout = FEC_XFER_TIMEOUT;
  655. while (--timeout) {
  656. invalidate_dcache_range(addr, addr + size);
  657. if (!(readw(&fec->tbd_base[fec->tbd_index].status) &
  658. FEC_TBD_READY))
  659. break;
  660. }
  661. if (!timeout)
  662. ret = -EINVAL;
  663. out:
  664. debug("fec_send: status 0x%x index %d ret %i\n",
  665. readw(&fec->tbd_base[fec->tbd_index].status),
  666. fec->tbd_index, ret);
  667. /* for next transmission use the other buffer */
  668. if (fec->tbd_index)
  669. fec->tbd_index = 0;
  670. else
  671. fec->tbd_index = 1;
  672. return ret;
  673. }
  674. /**
  675. * Pull one frame from the card
  676. * @param[in] dev Our ethernet device to handle
  677. * @return Length of packet read
  678. */
  679. #ifdef CONFIG_DM_ETH
  680. static int fecmxc_recv(struct udevice *dev, int flags, uchar **packetp)
  681. #else
  682. static int fec_recv(struct eth_device *dev)
  683. #endif
  684. {
  685. #ifdef CONFIG_DM_ETH
  686. struct fec_priv *fec = dev_get_priv(dev);
  687. #else
  688. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  689. #endif
  690. struct fec_bd *rbd = &fec->rbd_base[fec->rbd_index];
  691. unsigned long ievent;
  692. int frame_length, len = 0;
  693. uint16_t bd_status;
  694. ulong addr, size, end;
  695. int i;
  696. #ifdef CONFIG_DM_ETH
  697. *packetp = memalign(ARCH_DMA_MINALIGN, FEC_MAX_PKT_SIZE);
  698. if (*packetp == 0) {
  699. printf("%s: error allocating packetp\n", __func__);
  700. return -ENOMEM;
  701. }
  702. #else
  703. ALLOC_CACHE_ALIGN_BUFFER(uchar, buff, FEC_MAX_PKT_SIZE);
  704. #endif
  705. /* Check if any critical events have happened */
  706. ievent = readl(&fec->eth->ievent);
  707. writel(ievent, &fec->eth->ievent);
  708. debug("fec_recv: ievent 0x%lx\n", ievent);
  709. if (ievent & FEC_IEVENT_BABR) {
  710. #ifdef CONFIG_DM_ETH
  711. fecmxc_halt(dev);
  712. fecmxc_init(dev);
  713. #else
  714. fec_halt(dev);
  715. fec_init(dev, fec->bd);
  716. #endif
  717. printf("some error: 0x%08lx\n", ievent);
  718. return 0;
  719. }
  720. if (ievent & FEC_IEVENT_HBERR) {
  721. /* Heartbeat error */
  722. writel(0x00000001 | readl(&fec->eth->x_cntrl),
  723. &fec->eth->x_cntrl);
  724. }
  725. if (ievent & FEC_IEVENT_GRA) {
  726. /* Graceful stop complete */
  727. if (readl(&fec->eth->x_cntrl) & 0x00000001) {
  728. #ifdef CONFIG_DM_ETH
  729. fecmxc_halt(dev);
  730. #else
  731. fec_halt(dev);
  732. #endif
  733. writel(~0x00000001 & readl(&fec->eth->x_cntrl),
  734. &fec->eth->x_cntrl);
  735. #ifdef CONFIG_DM_ETH
  736. fecmxc_init(dev);
  737. #else
  738. fec_init(dev, fec->bd);
  739. #endif
  740. }
  741. }
  742. /*
  743. * Read the buffer status. Before the status can be read, the data cache
  744. * must be invalidated, because the data in RAM might have been changed
  745. * by DMA. The descriptors are properly aligned to cachelines so there's
  746. * no need to worry they'd overlap.
  747. *
  748. * WARNING: By invalidating the descriptor here, we also invalidate
  749. * the descriptors surrounding this one. Therefore we can NOT change the
  750. * contents of this descriptor nor the surrounding ones. The problem is
  751. * that in order to mark the descriptor as processed, we need to change
  752. * the descriptor. The solution is to mark the whole cache line when all
  753. * descriptors in the cache line are processed.
  754. */
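/*
 * Added illustration, assuming ARCH_DMA_MINALIGN is 32 and struct fec_bd
 * is 8 bytes: RXDESC_PER_CACHELINE is then 4, so the clean-up further
 * below only marks descriptors free in groups of four, once a whole
 * cache line of them has been processed.
 */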
  755. addr = (ulong)rbd;
  756. addr &= ~(ARCH_DMA_MINALIGN - 1);
  757. size = roundup(sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
  758. invalidate_dcache_range(addr, addr + size);
  759. bd_status = readw(&rbd->status);
  760. debug("fec_recv: status 0x%x\n", bd_status);
  761. if (!(bd_status & FEC_RBD_EMPTY)) {
  762. if ((bd_status & FEC_RBD_LAST) && !(bd_status & FEC_RBD_ERR) &&
  763. ((readw(&rbd->data_length) - 4) > 14)) {
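/*
 * Added note: data_length includes the 4-byte FCS, so the check above
 * only accepts frames that carry more than a bare 14-byte Ethernet header.
 */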
  764. /* Get buffer address and size */
  765. addr = readl(&rbd->data_pointer);
  766. frame_length = readw(&rbd->data_length) - 4;
  767. /* Invalidate data cache over the buffer */
  768. end = roundup(addr + frame_length, ARCH_DMA_MINALIGN);
  769. addr &= ~(ARCH_DMA_MINALIGN - 1);
  770. invalidate_dcache_range(addr, end);
  771. /* Fill the buffer and pass it to upper layers */
  772. #ifdef CONFIG_FEC_MXC_SWAP_PACKET
  773. swap_packet((uint32_t *)addr, frame_length);
  774. #endif
  775. #ifdef CONFIG_DM_ETH
  776. memcpy(*packetp, (char *)addr, frame_length);
  777. #else
  778. memcpy(buff, (char *)addr, frame_length);
  779. net_process_received_packet(buff, frame_length);
  780. #endif
  781. len = frame_length;
  782. } else {
  783. if (bd_status & FEC_RBD_ERR)
  784. debug("error frame: 0x%08lx 0x%08x\n",
  785. addr, bd_status);
  786. }
  787. /*
  788. * Free the current buffer, restart the engine and move forward
  789. * to the next buffer. Here we check if the whole cacheline of
  790. * descriptors was already processed and if so, we mark it free
  791. * as whole.
  792. */
  793. size = RXDESC_PER_CACHELINE - 1;
  794. if ((fec->rbd_index & size) == size) {
  795. i = fec->rbd_index - size;
  796. addr = (ulong)&fec->rbd_base[i];
  797. for (; i <= fec->rbd_index ; i++) {
  798. fec_rbd_clean(i == (FEC_RBD_NUM - 1),
  799. &fec->rbd_base[i]);
  800. }
  801. flush_dcache_range(addr,
  802. addr + ARCH_DMA_MINALIGN);
  803. }
  804. fec_rx_task_enable(fec);
  805. fec->rbd_index = (fec->rbd_index + 1) % FEC_RBD_NUM;
  806. }
  807. debug("fec_recv: stop\n");
  808. return len;
  809. }
  810. static void fec_set_dev_name(char *dest, int dev_id)
  811. {
  812. sprintf(dest, (dev_id == -1) ? "FEC" : "FEC%i", dev_id);
  813. }
  814. static int fec_alloc_descs(struct fec_priv *fec)
  815. {
  816. unsigned int size;
  817. int i;
  818. uint8_t *data;
  819. ulong addr;
  820. /* Allocate TX descriptors. */
  821. size = roundup(2 * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
  822. fec->tbd_base = memalign(ARCH_DMA_MINALIGN, size);
  823. if (!fec->tbd_base)
  824. goto err_tx;
  825. /* Allocate RX descriptors. */
  826. size = roundup(FEC_RBD_NUM * sizeof(struct fec_bd), ARCH_DMA_MINALIGN);
  827. fec->rbd_base = memalign(ARCH_DMA_MINALIGN, size);
  828. if (!fec->rbd_base)
  829. goto err_rx;
  830. memset(fec->rbd_base, 0, size);
  831. /* Allocate RX buffers. */
  832. /* Maximum RX buffer size. */
  833. size = roundup(FEC_MAX_PKT_SIZE, FEC_DMA_RX_MINALIGN);
  834. for (i = 0; i < FEC_RBD_NUM; i++) {
  835. data = memalign(FEC_DMA_RX_MINALIGN, size);
  836. if (!data) {
  837. printf("%s: error allocating rxbuf %d\n", __func__, i);
  838. goto err_ring;
  839. }
  840. memset(data, 0, size);
  841. addr = (ulong)data;
  842. fec->rbd_base[i].data_pointer = (uint32_t)addr;
  843. fec->rbd_base[i].status = FEC_RBD_EMPTY;
  844. fec->rbd_base[i].data_length = 0;
  845. /* Flush the buffer to memory. */
  846. flush_dcache_range(addr, addr + size);
  847. }
  848. /* Mark the last RBD to close the ring. */
  849. fec->rbd_base[i - 1].status = FEC_RBD_WRAP | FEC_RBD_EMPTY;
  850. fec->rbd_index = 0;
  851. fec->tbd_index = 0;
  852. return 0;
  853. err_ring:
  854. for (; i >= 0; i--) {
  855. addr = fec->rbd_base[i].data_pointer;
  856. free((void *)addr);
  857. }
  858. free(fec->rbd_base);
  859. err_rx:
  860. free(fec->tbd_base);
  861. err_tx:
  862. return -ENOMEM;
  863. }
  864. static void fec_free_descs(struct fec_priv *fec)
  865. {
  866. int i;
  867. ulong addr;
  868. for (i = 0; i < FEC_RBD_NUM; i++) {
  869. addr = fec->rbd_base[i].data_pointer;
  870. free((void *)addr);
  871. }
  872. free(fec->rbd_base);
  873. free(fec->tbd_base);
  874. }
  875. struct mii_dev *fec_get_miibus(ulong base_addr, int dev_id)
  876. {
  877. struct ethernet_regs *eth = (struct ethernet_regs *)base_addr;
  878. struct mii_dev *bus;
  879. int ret;
  880. bus = mdio_alloc();
  881. if (!bus) {
  882. printf("mdio_alloc failed\n");
  883. return NULL;
  884. }
  885. bus->read = fec_phy_read;
  886. bus->write = fec_phy_write;
  887. bus->priv = eth;
  888. fec_set_dev_name(bus->name, dev_id);
  889. ret = mdio_register(bus);
  890. if (ret) {
  891. printf("mdio_register failed\n");
  892. free(bus);
  893. return NULL;
  894. }
  895. fec_mii_setspeed(eth);
  896. return bus;
  897. }
  898. #ifndef CONFIG_DM_ETH
  899. #ifdef CONFIG_PHYLIB
  900. int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
  901. struct mii_dev *bus, struct phy_device *phydev)
  902. #else
  903. static int fec_probe(bd_t *bd, int dev_id, uint32_t base_addr,
  904. struct mii_dev *bus, int phy_id)
  905. #endif
  906. {
  907. struct eth_device *edev;
  908. struct fec_priv *fec;
  909. unsigned char ethaddr[6];
  910. char mac[16];
  911. uint32_t start;
  912. int ret = 0;
  913. /* create and fill edev struct */
  914. edev = (struct eth_device *)malloc(sizeof(struct eth_device));
  915. if (!edev) {
  916. puts("fec_mxc: not enough malloc memory for eth_device\n");
  917. ret = -ENOMEM;
  918. goto err1;
  919. }
  920. fec = (struct fec_priv *)malloc(sizeof(struct fec_priv));
  921. if (!fec) {
  922. puts("fec_mxc: not enough malloc memory for fec_priv\n");
  923. ret = -ENOMEM;
  924. goto err2;
  925. }
  926. memset(edev, 0, sizeof(*edev));
  927. memset(fec, 0, sizeof(*fec));
  928. ret = fec_alloc_descs(fec);
  929. if (ret)
  930. goto err3;
  931. edev->priv = fec;
  932. edev->init = fec_init;
  933. edev->send = fec_send;
  934. edev->recv = fec_recv;
  935. edev->halt = fec_halt;
  936. edev->write_hwaddr = fec_set_hwaddr;
  937. fec->eth = (struct ethernet_regs *)(ulong)base_addr;
  938. fec->bd = bd;
  939. fec->xcv_type = CONFIG_FEC_XCV_TYPE;
  940. /* Reset chip. */
  941. writel(readl(&fec->eth->ecntrl) | FEC_ECNTRL_RESET, &fec->eth->ecntrl);
  942. start = get_timer(0);
  943. while (readl(&fec->eth->ecntrl) & FEC_ECNTRL_RESET) {
  944. if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
  945. printf("FEC MXC: Timeout resetting chip\n");
  946. goto err4;
  947. }
  948. udelay(10);
  949. }
  950. fec_reg_setup(fec);
  951. fec_set_dev_name(edev->name, dev_id);
  952. fec->dev_id = (dev_id == -1) ? 0 : dev_id;
  953. fec->bus = bus;
  954. fec_mii_setspeed(bus->priv);
  955. #ifdef CONFIG_PHYLIB
  956. fec->phydev = phydev;
  957. phy_connect_dev(phydev, edev);
  958. /* Configure phy */
  959. phy_config(phydev);
  960. #else
  961. fec->phy_id = phy_id;
  962. #endif
  963. eth_register(edev);
  964. /* Only one eth device is supported; its index is given by dev_id */
  965. edev->index = fec->dev_id;
  966. if (fec_get_hwaddr(fec->dev_id, ethaddr) == 0) {
  967. debug("got MAC%d address from fuse: %pM\n", fec->dev_id, ethaddr);
  968. memcpy(edev->enetaddr, ethaddr, 6);
  969. if (fec->dev_id)
  970. sprintf(mac, "eth%daddr", fec->dev_id);
  971. else
  972. strcpy(mac, "ethaddr");
  973. if (!env_get(mac))
  974. eth_env_set_enetaddr(mac, ethaddr);
  975. }
  976. return ret;
  977. err4:
  978. fec_free_descs(fec);
  979. err3:
  980. free(fec);
  981. err2:
  982. free(edev);
  983. err1:
  984. return ret;
  985. }
  986. int fecmxc_initialize_multi(bd_t *bd, int dev_id, int phy_id, uint32_t addr)
  987. {
  988. uint32_t base_mii;
  989. struct mii_dev *bus = NULL;
  990. #ifdef CONFIG_PHYLIB
  991. struct phy_device *phydev = NULL;
  992. #endif
  993. int ret;
  994. #ifdef CONFIG_FEC_MXC_MDIO_BASE
  995. /*
  996. * The i.MX28 has two ethernet interfaces, but they are not equal.
  997. * Only the first one can access the MDIO bus.
  998. */
  999. base_mii = CONFIG_FEC_MXC_MDIO_BASE;
  1000. #else
  1001. base_mii = addr;
  1002. #endif
  1003. debug("eth_init: fec_probe(bd, %i, %i) @ %08x\n", dev_id, phy_id, addr);
  1004. bus = fec_get_miibus(base_mii, dev_id);
  1005. if (!bus)
  1006. return -ENOMEM;
  1007. #ifdef CONFIG_PHYLIB
  1008. phydev = phy_find_by_mask(bus, 1 << phy_id, PHY_INTERFACE_MODE_RGMII);
  1009. if (!phydev) {
  1010. mdio_unregister(bus);
  1011. free(bus);
  1012. return -ENOMEM;
  1013. }
  1014. ret = fec_probe(bd, dev_id, addr, bus, phydev);
  1015. #else
  1016. ret = fec_probe(bd, dev_id, addr, bus, phy_id);
  1017. #endif
  1018. if (ret) {
  1019. #ifdef CONFIG_PHYLIB
  1020. free(phydev);
  1021. #endif
  1022. mdio_unregister(bus);
  1023. free(bus);
  1024. }
  1025. return ret;
  1026. }
  1027. #ifdef CONFIG_FEC_MXC_PHYADDR
  1028. int fecmxc_initialize(bd_t *bd)
  1029. {
  1030. return fecmxc_initialize_multi(bd, -1, CONFIG_FEC_MXC_PHYADDR,
  1031. IMX_FEC_BASE);
  1032. }
  1033. #endif
  1034. #ifndef CONFIG_PHYLIB
  1035. int fecmxc_register_mii_postcall(struct eth_device *dev, int (*cb)(int))
  1036. {
  1037. struct fec_priv *fec = (struct fec_priv *)dev->priv;
  1038. fec->mii_postcall = cb;
  1039. return 0;
  1040. }
  1041. #endif
  1042. #else
  1043. static int fecmxc_read_rom_hwaddr(struct udevice *dev)
  1044. {
  1045. struct fec_priv *priv = dev_get_priv(dev);
  1046. struct eth_pdata *pdata = dev_get_platdata(dev);
  1047. return fec_get_hwaddr(priv->dev_id, pdata->enetaddr);
  1048. }
  1049. static int fecmxc_free_pkt(struct udevice *dev, uchar *packet, int length)
  1050. {
  1051. if (packet)
  1052. free(packet);
  1053. return 0;
  1054. }
  1055. static const struct eth_ops fecmxc_ops = {
  1056. .start = fecmxc_init,
  1057. .send = fecmxc_send,
  1058. .recv = fecmxc_recv,
  1059. .free_pkt = fecmxc_free_pkt,
  1060. .stop = fecmxc_halt,
  1061. .write_hwaddr = fecmxc_set_hwaddr,
  1062. .read_rom_hwaddr = fecmxc_read_rom_hwaddr,
  1063. };
  1064. static int fec_phy_init(struct fec_priv *priv, struct udevice *dev)
  1065. {
  1066. struct phy_device *phydev;
  1067. int mask = 0xffffffff;
  1068. #ifdef CONFIG_FEC_MXC_PHYADDR
  1069. mask = 1 << CONFIG_FEC_MXC_PHYADDR;
  1070. #endif
  1071. phydev = phy_find_by_mask(priv->bus, mask, priv->interface);
  1072. if (!phydev)
  1073. return -ENODEV;
  1074. phy_connect_dev(phydev, dev);
  1075. priv->phydev = phydev;
  1076. phy_config(phydev);
  1077. return 0;
  1078. }
  1079. #ifdef CONFIG_DM_GPIO
  1080. /* FEC GPIO reset */
  1081. static void fec_gpio_reset(struct fec_priv *priv)
  1082. {
  1083. debug("fec_gpio_reset: fec_gpio_reset(dev)\n");
  1084. if (dm_gpio_is_valid(&priv->phy_reset_gpio)) {
  1085. dm_gpio_set_value(&priv->phy_reset_gpio, 1);
  1086. udelay(priv->reset_delay);
  1087. dm_gpio_set_value(&priv->phy_reset_gpio, 0);
  1088. }
  1089. }
  1090. #endif
  1091. static int fecmxc_probe(struct udevice *dev)
  1092. {
  1093. struct eth_pdata *pdata = dev_get_platdata(dev);
  1094. struct fec_priv *priv = dev_get_priv(dev);
  1095. struct mii_dev *bus = NULL;
  1096. uint32_t start;
  1097. int ret;
  1098. ret = fec_alloc_descs(priv);
  1099. if (ret)
  1100. return ret;
  1101. #ifdef CONFIG_DM_GPIO
  1102. fec_gpio_reset(priv);
  1103. #endif
  1104. /* Reset chip. */
  1105. writel(readl(&priv->eth->ecntrl) | FEC_ECNTRL_RESET,
  1106. &priv->eth->ecntrl);
  1107. start = get_timer(0);
  1108. while (readl(&priv->eth->ecntrl) & FEC_ECNTRL_RESET) {
  1109. if (get_timer(start) > (CONFIG_SYS_HZ * 5)) {
  1110. printf("FEC MXC: Timeout reseting chip\n");
  1111. goto err_timeout;
  1112. }
  1113. udelay(10);
  1114. }
  1115. fec_reg_setup(priv);
  1116. priv->dev_id = dev->seq;
  1117. #ifdef CONFIG_FEC_MXC_MDIO_BASE
  1118. bus = fec_get_miibus((ulong)CONFIG_FEC_MXC_MDIO_BASE, dev->seq);
  1119. #else
  1120. bus = fec_get_miibus((ulong)priv->eth, dev->seq);
  1121. #endif
  1122. if (!bus) {
  1123. ret = -ENOMEM;
  1124. goto err_mii;
  1125. }
  1126. priv->bus = bus;
  1127. priv->xcv_type = CONFIG_FEC_XCV_TYPE;
  1128. priv->interface = pdata->phy_interface;
  1129. ret = fec_phy_init(priv, dev);
  1130. if (ret)
  1131. goto err_phy;
  1132. return 0;
  1133. err_phy:
  1134. mdio_unregister(bus);
  1135. free(bus);
  1136. err_mii:
  1137. err_timeout:
  1138. fec_free_descs(priv);
  1139. return ret;
  1140. }
  1141. static int fecmxc_remove(struct udevice *dev)
  1142. {
  1143. struct fec_priv *priv = dev_get_priv(dev);
  1144. free(priv->phydev);
  1145. fec_free_descs(priv);
  1146. mdio_unregister(priv->bus);
  1147. mdio_free(priv->bus);
  1148. return 0;
  1149. }
  1150. static int fecmxc_ofdata_to_platdata(struct udevice *dev)
  1151. {
  1152. int ret = 0;
  1153. struct eth_pdata *pdata = dev_get_platdata(dev);
  1154. struct fec_priv *priv = dev_get_priv(dev);
  1155. const char *phy_mode;
  1156. pdata->iobase = (phys_addr_t)devfdt_get_addr(dev);
  1157. priv->eth = (struct ethernet_regs *)pdata->iobase;
  1158. pdata->phy_interface = -1;
  1159. phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
  1160. NULL);
  1161. if (phy_mode)
  1162. pdata->phy_interface = phy_get_interface_by_name(phy_mode);
  1163. if (pdata->phy_interface == -1) {
  1164. debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
  1165. return -EINVAL;
  1166. }
  1167. #ifdef CONFIG_DM_GPIO
  1168. ret = gpio_request_by_name(dev, "phy-reset-gpios", 0,
  1169. &priv->phy_reset_gpio, GPIOD_IS_OUT);
  1170. if (ret == 0) {
  1171. ret = dev_read_u32_array(dev, "phy-reset-duration",
  1172. &priv->reset_delay, 1);
  1173. } else if (ret == -ENOENT) {
  1174. priv->reset_delay = 1000;
  1175. ret = 0;
  1176. }
  1177. if (priv->reset_delay > 1000) {
  1178. printf("FEX MXC: gpio reset timeout should be less the 1000\n");
  1179. priv->reset_delay = 1000;
  1180. }
  1181. #endif
  1182. return ret;
  1183. }
  1184. static const struct udevice_id fecmxc_ids[] = {
  1185. { .compatible = "fsl,imx6q-fec" },
  1186. { .compatible = "fsl,imx6sl-fec" },
  1187. { .compatible = "fsl,imx6sx-fec" },
  1188. { .compatible = "fsl,imx6ul-fec" },
  1189. { .compatible = "fsl,imx53-fec" },
  1190. { }
  1191. };
  1192. U_BOOT_DRIVER(fecmxc_gem) = {
  1193. .name = "fecmxc",
  1194. .id = UCLASS_ETH,
  1195. .of_match = fecmxc_ids,
  1196. .ofdata_to_platdata = fecmxc_ofdata_to_platdata,
  1197. .probe = fecmxc_probe,
  1198. .remove = fecmxc_remove,
  1199. .ops = &fecmxc_ops,
  1200. .priv_auto_alloc_size = sizeof(struct fec_priv),
  1201. .platdata_auto_alloc_size = sizeof(struct eth_pdata),
  1202. };
  1203. #endif