zynq_gem.c
/*
 * (C) Copyright 2011 Michal Simek
 *
 * Michal SIMEK <monstr@monstr.eu>
 *
 * Based on Xilinx gmac driver:
 * (C) Copyright 2011 Xilinx
 *
 * SPDX-License-Identifier: GPL-2.0+
 */
#include <common.h>
#include <net.h>
#include <netdev.h>
#include <config.h>
#include <fdtdec.h>
#include <libfdt.h>
#include <malloc.h>
#include <asm/io.h>
#include <phy.h>
#include <miiphy.h>
#include <watchdog.h>
#include <asm/arch/hardware.h>
#include <asm/arch/sys_proto.h>

#if !defined(CONFIG_PHYLIB)
# error XILINX_GEM_ETHERNET requires PHYLIB
#endif
/* Bit/mask specification */
#define ZYNQ_GEM_PHYMNTNC_OP_MASK 0x40020000 /* operation mask bits */
#define ZYNQ_GEM_PHYMNTNC_OP_R_MASK 0x20000000 /* read operation */
#define ZYNQ_GEM_PHYMNTNC_OP_W_MASK 0x10000000 /* write operation */
#define ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK 23 /* Shift bits for PHYAD */
#define ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK 18 /* Shift bits for PHREG */

#define ZYNQ_GEM_RXBUF_EOF_MASK 0x00008000 /* End of frame */
#define ZYNQ_GEM_RXBUF_SOF_MASK 0x00004000 /* Start of frame */
#define ZYNQ_GEM_RXBUF_LEN_MASK 0x00003FFF /* Mask for length field */

#define ZYNQ_GEM_RXBUF_WRAP_MASK 0x00000002 /* Wrap bit, last BD */
#define ZYNQ_GEM_RXBUF_NEW_MASK 0x00000001 /* Used bit */
#define ZYNQ_GEM_RXBUF_ADD_MASK 0xFFFFFFFC /* Mask for address */

/* Wrap bit, last descriptor */
#define ZYNQ_GEM_TXBUF_WRAP_MASK 0x40000000
#define ZYNQ_GEM_TXBUF_LAST_MASK 0x00008000 /* Last buffer */

#define ZYNQ_GEM_NWCTRL_TXEN_MASK 0x00000008 /* Enable transmit */
#define ZYNQ_GEM_NWCTRL_RXEN_MASK 0x00000004 /* Enable receive */
#define ZYNQ_GEM_NWCTRL_MDEN_MASK 0x00000010 /* Enable MDIO port */
#define ZYNQ_GEM_NWCTRL_STARTTX_MASK 0x00000200 /* Start tx (tx_go) */

#define ZYNQ_GEM_NWCFG_SPEED100 0x00000001 /* 100 Mbps operation */
#define ZYNQ_GEM_NWCFG_SPEED1000 0x00000400 /* 1 Gbps operation */
#define ZYNQ_GEM_NWCFG_FDEN 0x00000002 /* Full duplex mode */
#define ZYNQ_GEM_NWCFG_FSREM 0x00020000 /* FCS removal */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV 0x00080000 /* Div pclk by 32, 80MHz */
#define ZYNQ_GEM_NWCFG_MDCCLKDIV2 0x000c0000 /* Div pclk by 48, 120MHz */

#define ZYNQ_GEM_NWCFG_INIT (ZYNQ_GEM_NWCFG_FDEN | \
                        ZYNQ_GEM_NWCFG_FSREM | \
                        ZYNQ_GEM_NWCFG_MDCCLKDIV)

#define ZYNQ_GEM_NWSR_MDIOIDLE_MASK 0x00000004 /* PHY management idle */

#define ZYNQ_GEM_DMACR_BLENGTH 0x00000004 /* INCR4 AHB bursts */
/* Use full configured addressable space (8 KB) */
#define ZYNQ_GEM_DMACR_RXSIZE 0x00000300
/* Use full configured addressable space (4 KB) */
#define ZYNQ_GEM_DMACR_TXSIZE 0x00000400
/* Set with binary 00011000 to use 1536 byte (1 * max length frame/buffer) */
#define ZYNQ_GEM_DMACR_RXBUF 0x00180000

#define ZYNQ_GEM_DMACR_INIT (ZYNQ_GEM_DMACR_BLENGTH | \
                        ZYNQ_GEM_DMACR_RXSIZE | \
                        ZYNQ_GEM_DMACR_TXSIZE | \
                        ZYNQ_GEM_DMACR_RXBUF)

/* Use MII register 1 (MII status register) to detect PHY */
#define PHY_DETECT_REG 1

/* Mask used to verify certain PHY features (or register contents)
 * in the register above:
 *  0x1000: 10Mbps full duplex support
 *  0x0800: 10Mbps half duplex support
 *  0x0008: Auto-negotiation support
 */
#define PHY_DETECT_MASK 0x1808

/* TX BD status masks */
#define ZYNQ_GEM_TXBUF_FRMLEN_MASK 0x000007ff
#define ZYNQ_GEM_TXBUF_EXHAUSTED 0x08000000
#define ZYNQ_GEM_TXBUF_UNDERRUN 0x10000000

/* Clock frequencies for different speeds */
#define ZYNQ_GEM_FREQUENCY_10 2500000UL
#define ZYNQ_GEM_FREQUENCY_100 25000000UL
#define ZYNQ_GEM_FREQUENCY_1000 125000000UL
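/*
 * Note: the ZYNQ_GEM_FREQUENCY_* values above are the GEM reference clock
 * rates requested for 10/100/1000 Mbps links; zynq_gem_init() below passes
 * one of them to zynq_slcr_gem_clk_setup() based on the speed reported by
 * the PHY (unless the controller is routed through EMIO).
 */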
/* Device registers */
struct zynq_gem_regs {
        u32 nwctrl; /* Network Control reg */
        u32 nwcfg; /* Network Config reg */
        u32 nwsr; /* Network Status reg */
        u32 reserved1;
        u32 dmacr; /* DMA Control reg */
        u32 txsr; /* TX Status reg */
        u32 rxqbase; /* RX Q Base address reg */
        u32 txqbase; /* TX Q Base address reg */
        u32 rxsr; /* RX Status reg */
        u32 reserved2[2];
        u32 idr; /* Interrupt Disable reg */
        u32 reserved3;
        u32 phymntnc; /* PHY Maintenance reg */
        u32 reserved4[18];
        u32 hashl; /* Hash Low address reg */
        u32 hashh; /* Hash High address reg */
#define LADDR_LOW 0
#define LADDR_HIGH 1
        u32 laddr[4][LADDR_HIGH + 1]; /* Specific1 addr low/high reg */
        u32 match[4]; /* Type ID1 Match reg */
        u32 reserved6[18];
        u32 stat[44]; /* Octets transmitted Low reg - stat start */
};
/* BD descriptors */
struct emac_bd {
        u32 addr; /* Buffer address (plus used/wrap bits on RX) */
        u32 status;
};
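/*
 * GEM buffer descriptor layout as used by this driver:
 *  - addr:   RX - buffer address in bits [31:2], bit 1 = wrap (marks the
 *            last BD in the ring), bit 0 = "used" (set by hardware once a
 *            frame has been written to the buffer).
 *            TX - plain buffer address.
 *  - status: RX - frame length plus SOF/EOF flags; TX - frame length plus
 *            "last buffer", wrap and error flags (see the masks above).
 */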
#define RX_BUF 3
/*
 * Page table entries are set to 1MB, or multiples of 1MB (not < 1MB).
 * The driver uses only a few BDs, so a single 1MB region is enough BD space.
 */
#define BD_SPACE 0x100000
/* BD separation space */
#define BD_SEPRN_SPACE 64

/* The init, rxbd_current and rx_first_buf fields must be 0 after init */
struct zynq_gem_priv {
        struct emac_bd *tx_bd;
        struct emac_bd *rx_bd;
        char *rxbuffers;
        u32 rxbd_current;
        u32 rx_first_buf;
        int phyaddr;
        u32 emio;
        int init;
        struct phy_device *phydev;
        struct mii_dev *bus;
};
static inline int mdio_wait(struct eth_device *dev)
{
        struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
        u32 timeout = 200;

        /* Wait till MDIO interface is ready to accept a new transaction. */
        while (--timeout) {
                if (readl(&regs->nwsr) & ZYNQ_GEM_NWSR_MDIOIDLE_MASK)
                        break;
                WATCHDOG_RESET();
        }

        if (!timeout) {
                printf("%s: Timeout\n", __func__);
                return 1;
        }

        return 0;
}
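/*
 * Compose and issue a Clause 22 MDIO frame through the PHY maintenance
 * register: the fixed operation bits, the read/write opcode, the PHY
 * address (shifted to bit 23), the register number (shifted to bit 18)
 * and, for writes, the 16-bit data are OR-ed into a single word. For a
 * read, the result is picked up from the low 16 bits of the same register
 * once the MDIO interface reports idle again.
 */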
static u32 phy_setup_op(struct eth_device *dev, u32 phy_addr, u32 regnum,
                        u32 op, u16 *data)
{
        u32 mgtcr;
        struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

        if (mdio_wait(dev))
                return 1;

        /* Construct mgtcr mask for the operation */
        mgtcr = ZYNQ_GEM_PHYMNTNC_OP_MASK | op |
                (phy_addr << ZYNQ_GEM_PHYMNTNC_PHYAD_SHIFT_MASK) |
                (regnum << ZYNQ_GEM_PHYMNTNC_PHREG_SHIFT_MASK) | *data;

        /* Write mgtcr and wait for completion */
        writel(mgtcr, &regs->phymntnc);

        if (mdio_wait(dev))
                return 1;

        if (op == ZYNQ_GEM_PHYMNTNC_OP_R_MASK)
                *data = readl(&regs->phymntnc);

        return 0;
}

static u32 phyread(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 *val)
{
        return phy_setup_op(dev, phy_addr, regnum,
                            ZYNQ_GEM_PHYMNTNC_OP_R_MASK, val);
}

static u32 phywrite(struct eth_device *dev, u32 phy_addr, u32 regnum, u16 data)
{
        return phy_setup_op(dev, phy_addr, regnum,
                            ZYNQ_GEM_PHYMNTNC_OP_W_MASK, &data);
}
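/*
 * PHY detection: first try the address handed in by the board code; if the
 * MII status register there does not advertise the expected capabilities
 * (10 Mbps and autonegotiation bits, PHY_DETECT_MASK), scan the MDIO
 * addresses from 31 down to 0 and take the first one that does.
 */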
static void phy_detection(struct eth_device *dev)
{
        int i;
        u16 phyreg;
        struct zynq_gem_priv *priv = dev->priv;

        if (priv->phyaddr != -1) {
                phyread(dev, priv->phyaddr, PHY_DETECT_REG, &phyreg);
                if ((phyreg != 0xFFFF) &&
                    ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
                        /* Found a valid PHY address */
                        debug("Default phy address %d is valid\n",
                              priv->phyaddr);
                        return;
                } else {
                        debug("PHY address is not setup correctly %d\n",
                              priv->phyaddr);
                        priv->phyaddr = -1;
                }
        }

        debug("detecting phy address\n");
        if (priv->phyaddr == -1) {
                /* detect the PHY address */
                for (i = 31; i >= 0; i--) {
                        phyread(dev, i, PHY_DETECT_REG, &phyreg);
                        if ((phyreg != 0xFFFF) &&
                            ((phyreg & PHY_DETECT_MASK) == PHY_DETECT_MASK)) {
                                /* Found a valid PHY address */
                                priv->phyaddr = i;
                                debug("Found valid phy address, %d\n", i);
                                return;
                        }
                }
        }
        printf("PHY is not detected\n");
}
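/*
 * Program the station MAC address into specific-address register pair 0.
 * The six address bytes are packed little-endian style: bytes 0-3 go into
 * the low register, bytes 4-5 into the low half of the high register. The
 * other three specific-address entries and the type-match registers are
 * cleared so only this one filter is active.
 */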
static int zynq_gem_setup_mac(struct eth_device *dev)
{
        u32 i, macaddrlow, macaddrhigh;
        struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

        /* Set the MAC bits [31:0] in BOT */
        macaddrlow = dev->enetaddr[0];
        macaddrlow |= dev->enetaddr[1] << 8;
        macaddrlow |= dev->enetaddr[2] << 16;
        macaddrlow |= dev->enetaddr[3] << 24;

        /* Set MAC bits [47:32] in TOP */
        macaddrhigh = dev->enetaddr[4];
        macaddrhigh |= dev->enetaddr[5] << 8;

        for (i = 0; i < 4; i++) {
                writel(0, &regs->laddr[i][LADDR_LOW]);
                writel(0, &regs->laddr[i][LADDR_HIGH]);
                /* Do not use MATCHx register */
                writel(0, &regs->match[i]);
        }

        writel(macaddrlow, &regs->laddr[0][LADDR_LOW]);
        writel(macaddrhigh, &regs->laddr[0][LADDR_HIGH]);

        return 0;
}
static int zynq_gem_init(struct eth_device *dev, bd_t *bis)
{
        u32 i;
        unsigned long clk_rate = 0;
        struct phy_device *phydev;
        const u32 stat_size = (sizeof(struct zynq_gem_regs) -
                               offsetof(struct zynq_gem_regs, stat)) / 4;
        struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;
        struct zynq_gem_priv *priv = dev->priv;
        const u32 supported = SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Half |
                        SUPPORTED_1000baseT_Full;

        if (!priv->init) {
                /* Disable all interrupts */
                writel(0xFFFFFFFF, &regs->idr);

                /* Disable the receiver & transmitter */
                writel(0, &regs->nwctrl);
                writel(0, &regs->txsr);
                writel(0, &regs->rxsr);
                writel(0, &regs->phymntnc);

                /* Clear the Hash registers for the mac address
                 * pointed by AddressPtr
                 */
                writel(0x0, &regs->hashl);
                /* Write bits [63:32] in TOP */
                writel(0x0, &regs->hashh);

                /* Clear all counters */
                for (i = 0; i < stat_size; i++)
                        readl(&regs->stat[i]);

                /* Setup RxBD space */
                memset(priv->rx_bd, 0, RX_BUF * sizeof(struct emac_bd));

                for (i = 0; i < RX_BUF; i++) {
                        priv->rx_bd[i].status = 0xF0000000;
                        priv->rx_bd[i].addr =
                                ((u32)(priv->rxbuffers) +
                                 (i * PKTSIZE_ALIGN));
                }
                /* WRAP bit to last BD */
                priv->rx_bd[--i].addr |= ZYNQ_GEM_RXBUF_WRAP_MASK;
                /* Write RxBDs to IP */
                writel((u32)priv->rx_bd, &regs->rxqbase);

                /* Setup for DMA Configuration register */
                writel(ZYNQ_GEM_DMACR_INIT, &regs->dmacr);

                /* Setup for Network Control register, MDIO, Rx and Tx enable */
                setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_MDEN_MASK);

                priv->init++;
        }

        phy_detection(dev);

        /* Interface type is fixed to MII here - look at the tsec driver */
        phydev = phy_connect(priv->bus, priv->phyaddr, dev,
                             PHY_INTERFACE_MODE_MII);

        phydev->supported = supported | ADVERTISED_Pause |
                            ADVERTISED_Asym_Pause;
        phydev->advertising = phydev->supported;
        priv->phydev = phydev;
        phy_config(phydev);
        phy_startup(phydev);

        if (!phydev->link) {
                printf("%s: No link.\n", phydev->dev->name);
                return -1;
        }

        switch (phydev->speed) {
        case SPEED_1000:
                writel(ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED1000,
                       &regs->nwcfg);
                clk_rate = ZYNQ_GEM_FREQUENCY_1000;
                break;
        case SPEED_100:
                clrsetbits_le32(&regs->nwcfg, ZYNQ_GEM_NWCFG_SPEED1000,
                                ZYNQ_GEM_NWCFG_INIT | ZYNQ_GEM_NWCFG_SPEED100);
                clk_rate = ZYNQ_GEM_FREQUENCY_100;
                break;
        case SPEED_10:
                clk_rate = ZYNQ_GEM_FREQUENCY_10;
                break;
        }

        /* Change the rclk and clk only when not using the EMIO interface */
        if (!priv->emio)
                zynq_slcr_gem_clk_setup(dev->iobase !=
                                        ZYNQ_GEM_BASEADDR0, clk_rate);

        setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
                                    ZYNQ_GEM_NWCTRL_TXEN_MASK);

        return 0;
}
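/*
 * Transmit path: a single TX BD is rebuilt for every frame. The packet
 * buffer is flushed from the data cache so the DMA sees current data, the
 * BD is handed to the controller via txqbase and tx_go is set. The routine
 * does not poll for completion; it only reports underrun or
 * buffer-exhausted status if the hardware has already flagged it.
 */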
static int zynq_gem_send(struct eth_device *dev, void *ptr, int len)
{
        u32 addr, size;
        struct zynq_gem_priv *priv = dev->priv;
        struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

        /* setup BD */
        writel((u32)priv->tx_bd, &regs->txqbase);

        /* Setup Tx BD */
        memset(priv->tx_bd, 0, sizeof(struct emac_bd));

        priv->tx_bd->addr = (u32)ptr;
        priv->tx_bd->status = (len & ZYNQ_GEM_TXBUF_FRMLEN_MASK) |
                              ZYNQ_GEM_TXBUF_LAST_MASK;

        addr = (u32)ptr;
        addr &= ~(ARCH_DMA_MINALIGN - 1);
        size = roundup(len, ARCH_DMA_MINALIGN);
        flush_dcache_range(addr, addr + size);
        barrier();

        /* Start transmit */
        setbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_STARTTX_MASK);

        /* Read TX BD status */
        if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_UNDERRUN)
                printf("TX underrun\n");
        if (priv->tx_bd->status & ZYNQ_GEM_TXBUF_EXHAUSTED)
                printf("TX buffers exhausted in mid frame\n");

        return 0;
}
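/*
 * Receive path: the RX_BUF descriptors form a ring (wrap bit on the last
 * one). A BD whose "used" bit is set holds received data; the buffer is
 * invalidated in the data cache before the payload is passed up, and the
 * BD (or the whole SOF..EOF run for a multi-BD frame) is returned to the
 * hardware by clearing the used bit and resetting the status word.
 */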
/* Do not check frame_recd flag in rx_status register 0x20 - just poll BD */
static int zynq_gem_recv(struct eth_device *dev)
{
        int frame_len;
        struct zynq_gem_priv *priv = dev->priv;
        struct emac_bd *current_bd = &priv->rx_bd[priv->rxbd_current];
        struct emac_bd *first_bd;

        if (!(current_bd->addr & ZYNQ_GEM_RXBUF_NEW_MASK))
                return 0;

        if (!(current_bd->status &
              (ZYNQ_GEM_RXBUF_SOF_MASK | ZYNQ_GEM_RXBUF_EOF_MASK))) {
                printf("GEM: SOF or EOF not set for last buffer received!\n");
                return 0;
        }

        frame_len = current_bd->status & ZYNQ_GEM_RXBUF_LEN_MASK;
        if (frame_len) {
                u32 addr = current_bd->addr & ZYNQ_GEM_RXBUF_ADD_MASK;
                addr &= ~(ARCH_DMA_MINALIGN - 1);
                u32 size = roundup(frame_len, ARCH_DMA_MINALIGN);
                invalidate_dcache_range(addr, addr + size);

                net_process_received_packet((u8 *)addr, frame_len);

                if (current_bd->status & ZYNQ_GEM_RXBUF_SOF_MASK)
                        priv->rx_first_buf = priv->rxbd_current;
                else {
                        current_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
                        current_bd->status = 0xF0000000; /* FIXME */
                }

                if (current_bd->status & ZYNQ_GEM_RXBUF_EOF_MASK) {
                        first_bd = &priv->rx_bd[priv->rx_first_buf];
                        first_bd->addr &= ~ZYNQ_GEM_RXBUF_NEW_MASK;
                        first_bd->status = 0xF0000000;
                }

                if ((++priv->rxbd_current) >= RX_BUF)
                        priv->rxbd_current = 0;
        }

        return frame_len;
}
static void zynq_gem_halt(struct eth_device *dev)
{
        struct zynq_gem_regs *regs = (struct zynq_gem_regs *)dev->iobase;

        clrsetbits_le32(&regs->nwctrl, ZYNQ_GEM_NWCTRL_RXEN_MASK |
                                       ZYNQ_GEM_NWCTRL_TXEN_MASK, 0);
}

static int zynq_gem_miiphyread(const char *devname, uchar addr,
                               uchar reg, ushort *val)
{
        struct eth_device *dev = eth_get_dev();
        int ret;

        ret = phyread(dev, addr, reg, val);
        debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, *val);
        return ret;
}

static int zynq_gem_miiphy_write(const char *devname, uchar addr,
                                 uchar reg, ushort val)
{
        struct eth_device *dev = eth_get_dev();

        debug("%s 0x%x, 0x%x, 0x%x\n", __func__, addr, reg, val);
        return phywrite(dev, addr, reg, val);
}
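/*
 * Board-level probe: allocates the receive buffers (cache-line aligned) and
 * the BD area. The BD area is aligned to a 1MB MMU section and mapped with
 * the data cache off, so descriptor updates are visible to both the CPU and
 * the GEM DMA without explicit cache maintenance; the packet buffers
 * themselves stay cached and are flushed/invalidated per frame.
 */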
int zynq_gem_initialize(bd_t *bis, phys_addr_t base_addr,
                        int phy_addr, u32 emio)
{
        struct eth_device *dev;
        struct zynq_gem_priv *priv;
        void *bd_space;

        dev = calloc(1, sizeof(*dev));
        if (dev == NULL)
                return -1;

        dev->priv = calloc(1, sizeof(struct zynq_gem_priv));
        if (dev->priv == NULL) {
                free(dev);
                return -1;
        }
        priv = dev->priv;

        /* Align rxbuffers to ARCH_DMA_MINALIGN */
        priv->rxbuffers = memalign(ARCH_DMA_MINALIGN, RX_BUF * PKTSIZE_ALIGN);
        memset(priv->rxbuffers, 0, RX_BUF * PKTSIZE_ALIGN);

        /* Align bd_space to 1MB */
        bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
        mmu_set_region_dcache_behaviour((phys_addr_t)bd_space,
                                        BD_SPACE, DCACHE_OFF);

        /* Initialize the bd spaces for tx and rx bd's */
        priv->tx_bd = (struct emac_bd *)bd_space;
        priv->rx_bd = (struct emac_bd *)((u32)bd_space + BD_SEPRN_SPACE);

        priv->phyaddr = phy_addr;
        priv->emio = emio;

        sprintf(dev->name, "Gem.%lx", base_addr);

        dev->iobase = base_addr;

        dev->init = zynq_gem_init;
        dev->halt = zynq_gem_halt;
        dev->send = zynq_gem_send;
        dev->recv = zynq_gem_recv;
        dev->write_hwaddr = zynq_gem_setup_mac;

        eth_register(dev);

        miiphy_register(dev->name, zynq_gem_miiphyread, zynq_gem_miiphy_write);
        priv->bus = miiphy_get_dev_by_name(dev->name);

        return 1;
}
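/*
 * With CONFIG_OF_CONTROL the GEM instances are discovered from the device
 * tree: every "xlnx,ps7-ethernet-1.00.a" node is probed, its "reg" property
 * provides the controller base address and the "phy-handle" phandle (if
 * present) provides the PHY address; each match is registered through
 * zynq_gem_initialize().
 */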
#ifdef CONFIG_OF_CONTROL
int zynq_gem_of_init(const void *blob)
{
        int offset = 0;
        int phy_offset;
        u32 ret = 0;
        u32 reg, phy_reg;

        debug("ZYNQ GEM: Initialization\n");

        do {
                offset = fdt_node_offset_by_compatible(blob, offset,
                                        "xlnx,ps7-ethernet-1.00.a");
                if (offset != -1) {
                        reg = fdtdec_get_addr(blob, offset, "reg");
                        if (reg != FDT_ADDR_T_NONE) {
                                /* Look up the PHY node without disturbing the
                                 * GEM node offset used to continue the scan.
                                 */
                                phy_offset = fdtdec_lookup_phandle(blob,
                                                        offset, "phy-handle");
                                if (phy_offset != -1)
                                        phy_reg = fdtdec_get_addr(blob,
                                                        phy_offset, "reg");
                                else
                                        phy_reg = 0;

                                debug("ZYNQ GEM: addr %x, phyaddr %x\n",
                                      reg, phy_reg);

                                ret |= zynq_gem_initialize(NULL, reg,
                                                           phy_reg, 0);
                        } else {
                                debug("ZYNQ GEM: Can't get base address\n");
                                return -1;
                        }
                }
        } while (offset != -1);

        return ret;
}
#endif