eth.c

/*
 * Copyright 2009-2011 Freescale Semiconductor, Inc.
 * Dave Liu <daveliu@freescale.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <common.h>
#include <asm/io.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <fm_eth.h>
#include <fsl_mdio.h>
#include <miiphy.h>
#include <phy.h>
#include <asm/fsl_dtsec.h>
#include <asm/fsl_tgec.h>

#include "fm.h"

static struct eth_device *devlist[NUM_FM_PORTS];
static int num_controllers;

#if defined(CONFIG_MII) || defined(CONFIG_CMD_MII) && !defined(BITBANGMII)

#define TBIANA_SETTINGS (TBIANA_ASYMMETRIC_PAUSE | TBIANA_SYMMETRIC_PAUSE | \
			 TBIANA_FULL_DUPLEX)
#define TBIANA_SGMII_ACK 0x4001
#define TBICR_SETTINGS (TBICR_ANEG_ENABLE | TBICR_RESTART_ANEG | \
			TBICR_FULL_DUPLEX | TBICR_SPEED1_SET)

/* Configure the TBI for SGMII operation */
void dtsec_configure_serdes(struct fm_eth *priv)
{
	struct dtsec *regs = priv->mac->base;
	struct tsec_mii_mng *phyregs = priv->mac->phyregs;

	/*
	 * Access TBI PHY registers at given TSEC register offset as
	 * opposed to the register offset used for external PHY accesses
	 */
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_TBICON,
			TBICON_CLK_SELECT);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0, TBI_ANA,
			TBIANA_SGMII_ACK);
	tsec_local_mdio_write(phyregs, in_be32(&regs->tbipa), 0,
			TBI_CR, TBICR_SETTINGS);
}

static void dtsec_init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct dtsec *regs = (struct dtsec *)fm_eth->mac->base;

	/* Assign a Physical address to the TBI */
	out_be32(&regs->tbipa, CONFIG_SYS_TBIPA_VALUE);

	if (fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII)
		dtsec_configure_serdes(fm_eth);
}

static int tgec_is_fibre(struct eth_device *dev)
{
	struct fm_eth *fm = dev->priv;
	char phyopt[20];

	sprintf(phyopt, "fsl_fm%d_xaui_phy", fm->fm_index + 1);

	return hwconfig_arg_cmp(phyopt, "xfi");
}
#endif

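/*
 * The MURAM parameter-page fields used below are only touched through
 * aligned 32-bit accesses: these helpers read, or read-modify-write, the
 * containing 32-bit word in order to access a 16-bit field.
 */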
static u16 muram_readw(u16 *addr)
{
	u32 base = (u32)addr & ~0x3;
	u32 val32 = *(u32 *)base;
	int byte_pos;
	u16 ret;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		ret = (u16)(val32 & 0x0000ffff);
	else
		ret = (u16)((val32 & 0xffff0000) >> 16);

	return ret;
}

static void muram_writew(u16 *addr, u16 val)
{
	u32 base = (u32)addr & ~0x3;
	u32 org32 = *(u32 *)base;
	u32 val32;
	int byte_pos;

	byte_pos = (u32)addr & 0x3;
	if (byte_pos)
		val32 = (org32 & 0xffff0000) | val;
	else
		val32 = (org32 & 0x0000ffff) | ((u32)val << 16);

	*(u32 *)base = val32;
}

static void bmi_rx_port_disable(struct fm_bmi_rx_port *rx_port)
{
	int timeout = 1000000;

	clrbits_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_EN);

	/* wait until the rx port is not busy */
	while ((in_be32(&rx_port->fmbm_rst) & FMBM_RST_BSY) && timeout--)
		;
}

static void bmi_rx_port_init(struct fm_bmi_rx_port *rx_port)
{
	/* set BMI to independent mode, Rx port disable */
	out_be32(&rx_port->fmbm_rcfg, FMBM_RCFG_IM);
	/* clear FOF in IM case */
	out_be32(&rx_port->fmbm_rim, 0);
	/* Rx frame next engine -RISC */
	out_be32(&rx_port->fmbm_rfne, NIA_ENG_RISC | NIA_RISC_AC_IM_RX);
	/* Rx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_ORDER | FMBM_RFCA_MR_MASK);
	setbits_be32(&rx_port->fmbm_rfca, FMBM_RFCA_MR(4));
	/* enable Rx statistic counters */
	out_be32(&rx_port->fmbm_rstc, FMBM_RSTC_EN);
	/* disable Rx performance counters */
	out_be32(&rx_port->fmbm_rpc, 0);
}

static void bmi_tx_port_disable(struct fm_bmi_tx_port *tx_port)
{
	int timeout = 1000000;

	clrbits_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_EN);

	/* wait until the tx port is not busy */
	while ((in_be32(&tx_port->fmbm_tst) & FMBM_TST_BSY) && timeout--)
		;
}

static void bmi_tx_port_init(struct fm_bmi_tx_port *tx_port)
{
	/* set BMI to independent mode, Tx port disable */
	out_be32(&tx_port->fmbm_tcfg, FMBM_TCFG_IM);
	/* Tx frame next engine -RISC */
	out_be32(&tx_port->fmbm_tfne, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	out_be32(&tx_port->fmbm_tfene, NIA_ENG_RISC | NIA_RISC_AC_IM_TX);
	/* Tx command attribute - no order, MR[3] = 1 */
	clrbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_ORDER | FMBM_TFCA_MR_MASK);
	setbits_be32(&tx_port->fmbm_tfca, FMBM_TFCA_MR(4));
	/* enable Tx statistic counters */
	out_be32(&tx_port->fmbm_tstc, FMBM_TSTC_EN);
	/* disable Tx performance counters */
	out_be32(&tx_port->fmbm_tpc, 0);
}

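/*
 * Independent-mode (IM) port setup, as used below: each port gets a
 * parameter page in MURAM holding a queue descriptor (QD), which points
 * at a ring of buffer descriptors (BDs) in main memory.  The driver and
 * the FMan RISC track their positions in the ring through the QD
 * offset_in/offset_out fields.
 */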
static int fm_eth_rx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *rx_bd_ring_base;
	void *rx_buf_pool;
	struct fm_port_bd *rxbd;
	struct fm_port_qd *rxqd;
	struct fm_bmi_rx_port *bmi_rx_port = fm_eth->rx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->rx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Rx queue descriptor pointer */
	pram->rxqd_ptr = pram_page_offset + 0x20;

	/* set the max receive buffer length, power of 2 */
	muram_writew(&pram->mrblr, MAX_RXBUF_LOG2);

	/* alloc Rx buffer descriptors from main memory */
	rx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	if (!rx_bd_ring_base)
		return 0;
	memset(rx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);

	/* alloc Rx buffer from main memory */
	rx_buf_pool = malloc(MAX_RXBUF_LEN * RX_BD_RING_SIZE);
	if (!rx_buf_pool)
		return 0;
	memset(rx_buf_pool, 0, MAX_RXBUF_LEN * RX_BD_RING_SIZE);

	/* save them to fm_eth */
	fm_eth->rx_bd_ring = rx_bd_ring_base;
	fm_eth->cur_rxbd = rx_bd_ring_base;
	fm_eth->rx_buf = rx_buf_pool;

	/* init Rx BDs ring */
	rxbd = (struct fm_port_bd *)rx_bd_ring_base;
	for (i = 0; i < RX_BD_RING_SIZE; i++) {
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		rxbd->buf_ptr_hi = 0;
		rxbd->buf_ptr_lo = (u32)rx_buf_pool + i * MAX_RXBUF_LEN;
		rxbd++;
	}

	/* set the Rx queue descriptor */
	rxqd = &pram->rxqd;
	muram_writew(&rxqd->gen, 0);
	muram_writew(&rxqd->bd_ring_base_hi, 0);
	rxqd->bd_ring_base_lo = (u32)rx_bd_ring_base;
	muram_writew(&rxqd->bd_ring_size, sizeof(struct fm_port_bd)
			* RX_BD_RING_SIZE);
	muram_writew(&rxqd->offset_in, 0);
	muram_writew(&rxqd->offset_out, 0);

	/* set IM parameter ram pointer to Rx Frame Queue ID */
	out_be32(&bmi_rx_port->fmbm_rfqid, pram_page_offset);

	return 1;
}

static int fm_eth_tx_port_parameter_init(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;
	u32 pram_page_offset;
	void *tx_bd_ring_base;
	struct fm_port_bd *txbd;
	struct fm_port_qd *txqd;
	struct fm_bmi_tx_port *bmi_tx_port = fm_eth->tx_port;
	int i;

	/* alloc global parameter ram at MURAM */
	pram = (struct fm_port_global_pram *)fm_muram_alloc(fm_eth->fm_index,
		FM_PRAM_SIZE, FM_PRAM_ALIGN);
	fm_eth->tx_pram = pram;

	/* parameter page offset to MURAM */
	pram_page_offset = (u32)pram - fm_muram_base(fm_eth->fm_index);

	/* enable global mode - snooping data buffers and BDs */
	pram->mode = PRAM_MODE_GLOBAL;

	/* init the Tx queue descriptor pointer */
	pram->txqd_ptr = pram_page_offset + 0x40;

	/* alloc Tx buffer descriptors from main memory */
	tx_bd_ring_base = malloc(sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	if (!tx_bd_ring_base)
		return 0;
	memset(tx_bd_ring_base, 0, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);

	/* save it to fm_eth */
	fm_eth->tx_bd_ring = tx_bd_ring_base;
	fm_eth->cur_txbd = tx_bd_ring_base;

	/* init Tx BDs ring */
	txbd = (struct fm_port_bd *)tx_bd_ring_base;
	for (i = 0; i < TX_BD_RING_SIZE; i++) {
		txbd->status = TxBD_LAST;
		txbd->len = 0;
		txbd->buf_ptr_hi = 0;
		txbd->buf_ptr_lo = 0;
		txbd++;
	}

	/* set the Tx queue descriptor */
	txqd = &pram->txqd;
	muram_writew(&txqd->bd_ring_base_hi, 0);
	txqd->bd_ring_base_lo = (u32)tx_bd_ring_base;
	muram_writew(&txqd->bd_ring_size, sizeof(struct fm_port_bd)
			* TX_BD_RING_SIZE);
	muram_writew(&txqd->offset_in, 0);
	muram_writew(&txqd->offset_out, 0);

	/* set IM parameter ram pointer to Tx Confirmation Frame Queue ID */
	out_be32(&bmi_tx_port->fmbm_tcfqid, pram_page_offset);

	return 1;
}

static int fm_eth_init(struct fm_eth *fm_eth)
{
	if (!fm_eth_rx_port_parameter_init(fm_eth))
		return 0;

	if (!fm_eth_tx_port_parameter_init(fm_eth))
		return 0;

	return 1;
}

static int fm_eth_startup(struct fm_eth *fm_eth)
{
	struct fsl_enet_mac *mac;

	mac = fm_eth->mac;

	/* Rx/TxBDs, Rx/TxQDs, Rx buff and parameter ram init */
	if (!fm_eth_init(fm_eth))
		return 0;
	/* setup the MAC controller */
	mac->init_mac(mac);

	/* For some reason we need to set SPEED_100 */
	if ((fm_eth->enet_if == PHY_INTERFACE_MODE_SGMII) && mac->set_if_mode)
		mac->set_if_mode(mac, fm_eth->enet_if, SPEED_100);

	/* init bmi rx port, IM mode and disable */
	bmi_rx_port_init(fm_eth->rx_port);
	/* init bmi tx port, IM mode and disable */
	bmi_tx_port_init(fm_eth->tx_port);

	return 1;
}

static void fmc_tx_port_graceful_stop_enable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* graceful stop transmission of frames */
	pram->mode |= PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static void fmc_tx_port_graceful_stop_disable(struct fm_eth *fm_eth)
{
	struct fm_port_global_pram *pram;

	pram = fm_eth->tx_pram;
	/* re-enable transmission of frames */
	pram->mode &= ~PRAM_MODE_GRACEFUL_STOP;
	sync();
}

static int fm_eth_open(struct eth_device *dev, bd_t *bd)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;
#ifdef CONFIG_PHYLIB
	int ret;
#endif

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* setup the MAC address */
	if (dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}
	mac->set_mac_addr(mac, dev->enetaddr);

	/* enable bmi Rx port */
	setbits_be32(&fm_eth->rx_port->fmbm_rcfg, FMBM_RCFG_EN);
	/* enable MAC rx/tx port */
	mac->enable_mac(mac);
	/* enable bmi Tx port */
	setbits_be32(&fm_eth->tx_port->fmbm_tcfg, FMBM_TCFG_EN);
	/* re-enable transmission of frames */
	fmc_tx_port_graceful_stop_disable(fm_eth);

#ifdef CONFIG_PHYLIB
	ret = phy_startup(fm_eth->phydev);
	if (ret) {
		printf("%s: Could not initialize\n", fm_eth->phydev->dev->name);
		return ret;
	}
#else
	fm_eth->phydev->speed = SPEED_1000;
	fm_eth->phydev->link = 1;
	fm_eth->phydev->duplex = DUPLEX_FULL;
#endif

	/* set the MAC-PHY mode */
	mac->set_if_mode(mac, fm_eth->enet_if, fm_eth->phydev->speed);

	if (!fm_eth->phydev->link)
		printf("%s: No link.\n", fm_eth->phydev->dev->name);

	return fm_eth->phydev->link ? 0 : -1;
}

static void fm_eth_halt(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fsl_enet_mac *mac;

	fm_eth = (struct fm_eth *)dev->priv;
	mac = fm_eth->mac;

	/* graceful stop the transmission of frames */
	fmc_tx_port_graceful_stop_enable(fm_eth);
	/* disable bmi Tx port */
	bmi_tx_port_disable(fm_eth->tx_port);
	/* disable MAC rx/tx port */
	mac->disable_mac(mac);
	/* disable bmi Rx port */
	bmi_rx_port_disable(fm_eth->rx_port);

	phy_shutdown(fm_eth->phydev);
}

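/*
 * Transmit path: claim the current TxBD, point it at the frame, mark it
 * READY|LAST, then advance offset_in in the TxQD so the FMan RISC picks
 * the BD up; finally poll until READY clears, i.e. the frame went out,
 * before the ring slot is reused.
 */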
static int fm_eth_send(struct eth_device *dev, void *buf, int len)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *txbd, *txbd_base;
	u16 offset_in;
	int i;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->tx_pram;
	txbd = fm_eth->cur_txbd;

	/* find one empty TxBD */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x1000) {
			printf("%s: Tx buffer not ready\n", dev->name);
			return 0;
		}
	}

	/* setup TxBD */
	txbd->buf_ptr_hi = 0;
	txbd->buf_ptr_lo = (u32)buf;
	txbd->len = len;
	sync();
	txbd->status = TxBD_READY | TxBD_LAST;
	sync();

	/* update TxQD, let the RISC send the packet */
	offset_in = muram_readw(&pram->txqd.offset_in);
	offset_in += sizeof(struct fm_port_bd);
	if (offset_in >= muram_readw(&pram->txqd.bd_ring_size))
		offset_in = 0;
	muram_writew(&pram->txqd.offset_in, offset_in);
	sync();

	/* wait for buffer to be transmitted */
	for (i = 0; txbd->status & TxBD_READY; i++) {
		udelay(100);
		if (i > 0x10000) {
			printf("%s: Tx error\n", dev->name);
			return 0;
		}
	}

	/* advance the TxBD */
	txbd++;
	txbd_base = (struct fm_port_bd *)fm_eth->tx_bd_ring;
	if (txbd >= (txbd_base + TX_BD_RING_SIZE))
		txbd = txbd_base;
	/* update current txbd */
	fm_eth->cur_txbd = (void *)txbd;

	return 1;
}

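/*
 * Receive path: drain the RxBD ring, handing completed frames to
 * NetReceive(), returning each BD to EMPTY and advancing offset_out in
 * the RxQD so the FMan RISC can refill it.
 */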
static int fm_eth_recv(struct eth_device *dev)
{
	struct fm_eth *fm_eth;
	struct fm_port_global_pram *pram;
	struct fm_port_bd *rxbd, *rxbd_base;
	u16 status, len;
	u8 *data;
	u16 offset_out;

	fm_eth = (struct fm_eth *)dev->priv;
	pram = fm_eth->rx_pram;
	rxbd = fm_eth->cur_rxbd;
	status = rxbd->status;

	while (!(status & RxBD_EMPTY)) {
		if (!(status & RxBD_ERROR)) {
			data = (u8 *)rxbd->buf_ptr_lo;
			len = rxbd->len;
			NetReceive(data, len);
		} else {
			printf("%s: Rx error\n", dev->name);
			return 0;
		}

		/* clear the RxBDs */
		rxbd->status = RxBD_EMPTY;
		rxbd->len = 0;
		sync();

		/* advance RxBD */
		rxbd++;
		rxbd_base = (struct fm_port_bd *)fm_eth->rx_bd_ring;
		if (rxbd >= (rxbd_base + RX_BD_RING_SIZE))
			rxbd = rxbd_base;
		/* read next status */
		status = rxbd->status;

		/* update RxQD */
		offset_out = muram_readw(&pram->rxqd.offset_out);
		offset_out += sizeof(struct fm_port_bd);
		if (offset_out >= muram_readw(&pram->rxqd.bd_ring_size))
			offset_out = 0;
		muram_writew(&pram->rxqd.offset_out, offset_out);
		sync();
	}
	fm_eth->cur_rxbd = (void *)rxbd;

	return 1;
}

static int fm_eth_init_mac(struct fm_eth *fm_eth, struct ccsr_fman *reg)
{
	struct fsl_enet_mac *mac;
	int num;
	void *base, *phyregs = NULL;

	num = fm_eth->num;

	/* Get the mac registers base address */
	if (fm_eth->type == FM_ETH_1G_E) {
		base = &reg->mac_1g[num].fm_dtesc;
		phyregs = &reg->mac_1g[num].fm_mdio.miimcfg;
	} else {
		base = &reg->mac_10g[num].fm_10gec;
		phyregs = &reg->mac_10g[num].fm_10gec_mdio;
	}

	/* alloc mac controller */
	mac = malloc(sizeof(struct fsl_enet_mac));
	if (!mac)
		return 0;
	memset(mac, 0, sizeof(struct fsl_enet_mac));

	/* save the mac to fm_eth struct */
	fm_eth->mac = mac;

	if (fm_eth->type == FM_ETH_1G_E)
		init_dtsec(mac, base, phyregs, MAX_RXBUF_LEN);
	else
		init_tgec(mac, base, phyregs, MAX_RXBUF_LEN);

	return 1;
}

static int init_phy(struct eth_device *dev)
{
	struct fm_eth *fm_eth = dev->priv;
	struct phy_device *phydev = NULL;
	u32 supported;

#ifdef CONFIG_PHYLIB
	if (fm_eth->type == FM_ETH_1G_E)
		dtsec_init_phy(dev);

	if (fm_eth->bus) {
		phydev = phy_connect(fm_eth->bus, fm_eth->phyaddr, dev,
					fm_eth->enet_if);
	}

	if (!phydev) {
		printf("Failed to connect\n");
		return -1;
	}

	if (fm_eth->type == FM_ETH_1G_E) {
		supported = (SUPPORTED_10baseT_Half |
				SUPPORTED_10baseT_Full |
				SUPPORTED_100baseT_Half |
				SUPPORTED_100baseT_Full |
				SUPPORTED_1000baseT_Full);
	} else {
		supported = SUPPORTED_10000baseT_Full;

		if (tgec_is_fibre(dev))
			phydev->port = PORT_FIBRE;
	}

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	fm_eth->phydev = phydev;

	phy_config(phydev);
#endif

	return 0;
}

int fm_eth_initialize(struct ccsr_fman *reg, struct fm_eth_info *info)
{
	struct eth_device *dev;
	struct fm_eth *fm_eth;
	int i, num = info->num;

	/* alloc eth device */
	dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!dev)
		return 0;
	memset(dev, 0, sizeof(struct eth_device));

	/* alloc the FMan ethernet private struct */
	fm_eth = (struct fm_eth *)malloc(sizeof(struct fm_eth));
	if (!fm_eth)
		return 0;
	memset(fm_eth, 0, sizeof(struct fm_eth));

	/* save off some things we need from the info struct */
	fm_eth->fm_index = info->index - 1; /* keep as 0 based for muram */
	fm_eth->num = num;
	fm_eth->type = info->type;

	fm_eth->rx_port = (void *)&reg->port[info->rx_port_id - 1].fm_bmi;
	fm_eth->tx_port = (void *)&reg->port[info->tx_port_id - 1].fm_bmi;

	/* set the ethernet max receive length */
	fm_eth->max_rx_len = MAX_RXBUF_LEN;

	/* init global mac structure */
	if (!fm_eth_init_mac(fm_eth, reg))
		return 0;

	/* keep same as the manual, we call FMAN1, FMAN2, DTSEC1, DTSEC2, etc */
	if (fm_eth->type == FM_ETH_1G_E)
		sprintf(dev->name, "FM%d@DTSEC%d", info->index, num + 1);
	else
		sprintf(dev->name, "FM%d@TGEC%d", info->index, num + 1);

	devlist[num_controllers++] = dev;
	dev->iobase = 0;
	dev->priv = (void *)fm_eth;
	dev->init = fm_eth_open;
	dev->halt = fm_eth_halt;
	dev->send = fm_eth_send;
	dev->recv = fm_eth_recv;
	fm_eth->dev = dev;
	fm_eth->bus = info->bus;
	fm_eth->phyaddr = info->phy_addr;
	fm_eth->enet_if = info->enet_if;

	/* start up the FMan IM (independent mode) interface */
	if (!fm_eth_startup(fm_eth))
		return 0;

	if (init_phy(dev))
		return 0;

	/* clear the ethernet address */
	for (i = 0; i < 6; i++)
		dev->enetaddr[i] = 0;

	eth_register(dev);

	return 1;
}