sh_eth.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021
  1. /*
  2. * sh_eth.c - Driver for Renesas ethernet controller.
  3. *
  4. * Copyright (C) 2008, 2011 Renesas Solutions Corp.
* Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
  6. * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
  7. * Copyright (C) 2013, 2014 Renesas Electronics Corporation
  8. *
  9. * SPDX-License-Identifier: GPL-2.0+
  10. */
  11. #include <config.h>
  12. #include <common.h>
  13. #include <malloc.h>
  14. #include <net.h>
  15. #include <netdev.h>
  16. #include <miiphy.h>
  17. #include <linux/errno.h>
  18. #include <asm/io.h>
  19. #ifdef CONFIG_DM_ETH
  20. #include <clk.h>
  21. #include <dm.h>
  22. #include <linux/mii.h>
  23. #include <asm/gpio.h>
  24. #endif
  25. #include "sh_eth.h"
  26. #ifndef CONFIG_SH_ETHER_USE_PORT
  27. # error "Please define CONFIG_SH_ETHER_USE_PORT"
  28. #endif
  29. #ifndef CONFIG_SH_ETHER_PHY_ADDR
  30. # error "Please define CONFIG_SH_ETHER_PHY_ADDR"
  31. #endif
  32. #if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
  33. #define flush_cache_wback(addr, len) \
  34. flush_dcache_range((u32)addr, \
  35. (u32)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
  36. #else
  37. #define flush_cache_wback(...)
  38. #endif
  39. #if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
  40. #define invalidate_cache(addr, len) \
  41. { \
  42. u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE; \
  43. u32 start, end; \
  44. \
  45. start = (u32)addr; \
  46. end = start + len; \
  47. start &= ~(line_size - 1); \
  48. end = ((end + line_size - 1) & ~(line_size - 1)); \
  49. \
  50. invalidate_dcache_range(start, end); \
  51. }
  52. #else
  53. #define invalidate_cache(...)
  54. #endif
  55. #define TIMEOUT_CNT 1000
/*
 * Transmit one frame synchronously on the current port.
 *
 * Fills the next tx descriptor with the packet's physical address and
 * length, hands it to the E-DMAC, and busy-waits (up to TIMEOUT_CNT
 * polls of 100us each) until the controller clears the active bit.
 *
 * @eth:    driver state
 * @packet: frame data; must be 4-byte aligned
 * @len:    frame length in bytes (must fit in 16 bits)
 * Return: 0 on success, -EINVAL/-EFAULT on bad arguments,
 *         -ETIMEDOUT if the controller never finished the send.
 */
static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
{
	int ret = 0, timeout;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	if (!packet || len > 0xffff) {
		printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
		ret = -EINVAL;
		goto err;
	}

	/* packet must be a 4 byte boundary */
	if ((int)packet & 3) {
		printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
				, __func__);
		ret = -EFAULT;
		goto err;
	}

	/* Update tx descriptor */
	flush_cache_wback(packet, len);
	port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
	port_info->tx_desc_cur->td1 = len << 16;	/* length in td1[31:16] */
	/* Must preserve the end of descriptor list indication */
	if (port_info->tx_desc_cur->td0 & TD_TDLE)
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
	else
		port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;

	/* Push the descriptor to memory before the DMA engine fetches it */
	flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));

	/* Restart the transmitter if disabled */
	if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
		sh_eth_write(port_info, EDTRR_TRNS, EDTRR);

	/* Wait until packet is transmitted */
	timeout = TIMEOUT_CNT;
	do {
		/* re-fetch the status the controller may have updated */
		invalidate_cache(port_info->tx_desc_cur,
				 sizeof(struct tx_desc_s));
		udelay(100);
	} while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);

	if (timeout < 0) {
		printf(SHETHER_NAME ": transmit timeout\n");
		ret = -ETIMEDOUT;
		goto err;
	}

	/* Advance the ring cursor, wrapping at the end of the ring */
	port_info->tx_desc_cur++;
	if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
		port_info->tx_desc_cur = port_info->tx_desc_base;

err:
	return ret;
}
/*
 * Check whether the current rx descriptor holds a completed frame.
 *
 * Return: the received frame length in bytes, or -EINVAL when the
 * descriptor is still owned by the controller (RD_RACT set) or the
 * frame was received with an error (RD_RFE set).
 */
static int sh_eth_recv_start(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Check if the rx descriptor is ready */
	invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
	if (port_info->rx_desc_cur->rd0 & RD_RACT)
		return -EINVAL;

	/* Check for errors */
	if (port_info->rx_desc_cur->rd0 & RD_RFE)
		return -EINVAL;

	/* frame length lives in rd1[15:0] */
	len = port_info->rx_desc_cur->rd1 & 0xffff;

	return len;
}
/*
 * Return the just-processed rx descriptor to the controller and
 * advance the ring cursor, wrapping after NUM_RX_DESC entries.
 */
static void sh_eth_recv_finish(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Make current descriptor available again */
	if (port_info->rx_desc_cur->rd0 & RD_RDLE)
		port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
	else
		port_info->rx_desc_cur->rd0 = RD_RACT;

	/* push the updated ownership bit out so the controller sees it */
	flush_cache_wback(port_info->rx_desc_cur,
			  sizeof(struct rx_desc_s));

	/* Point to the next descriptor */
	port_info->rx_desc_cur++;
	if (port_info->rx_desc_cur >=
	    port_info->rx_desc_base + NUM_RX_DESC)
		port_info->rx_desc_cur = port_info->rx_desc_base;
}
/*
 * Software-reset the E-DMAC.
 *
 * GETHER/RZ parts: enable the DMAC via EDSR, assert EDMR_SRST and poll
 * until the controller clears it (up to TIMEOUT_CNT * 1ms).
 * Other parts: the reset bit does not self-clear; set it, wait 3 ms,
 * then clear it by software.
 *
 * Return: 0 on success, -EIO if a self-clearing reset timed out.
 */
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	udelay(3000);	/* hold SRST asserted for 3 ms before deasserting */
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}
/*
 * Allocate and initialise the tx descriptor ring and point the
 * controller at it.
 *
 * Return: 0 on success, -ENOMEM if the ring could not be allocated.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);/* Last descriptor bit */
#endif

err:
	return ret;
}
/*
 * Allocate and initialise the rx descriptor ring plus one MAX_BUF_SIZE
 * data buffer per descriptor, then point the controller at the ring.
 *
 * Return: 0 on success, -ENOMEM on allocation failure (the descriptor
 * ring is freed again if the data-buffer allocation fails).
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);
	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;	/* owned by the controller */
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;	/* size in rd1[31:16] */
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
  265. static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
  266. {
  267. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  268. if (port_info->tx_desc_alloc) {
  269. free(port_info->tx_desc_alloc);
  270. port_info->tx_desc_alloc = NULL;
  271. }
  272. }
  273. static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
  274. {
  275. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  276. if (port_info->rx_desc_alloc) {
  277. free(port_info->rx_desc_alloc);
  278. port_info->rx_desc_alloc = NULL;
  279. }
  280. if (port_info->rx_buf_alloc) {
  281. free(port_info->rx_buf_alloc);
  282. port_info->rx_buf_alloc = NULL;
  283. }
  284. }
  285. static int sh_eth_desc_init(struct sh_eth_dev *eth)
  286. {
  287. int ret = 0;
  288. ret = sh_eth_tx_desc_init(eth);
  289. if (ret)
  290. goto err_tx_init;
  291. ret = sh_eth_rx_desc_init(eth);
  292. if (ret)
  293. goto err_rx_init;
  294. return ret;
  295. err_rx_init:
  296. sh_eth_tx_desc_free(eth);
  297. err_tx_init:
  298. return ret;
  299. }
  300. static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
  301. unsigned char *mac)
  302. {
  303. u32 val;
  304. val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
  305. sh_eth_write(port_info, val, MAHR);
  306. val = (mac[4] << 8) | mac[5];
  307. sh_eth_write(port_info, val, MALR);
  308. }
/*
 * Program the E-DMAC and E-MAC registers for normal operation:
 * descriptor format, FIFO sizes/thresholds, station address, maximum
 * frame length, and SoC-specific MII/RMII mode setup.  EESIPR and
 * ECSIPR are cleared (presumably masking interrupts — this driver
 * polls; confirm against the hardware manual).
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Configure e-dmac registers */
	sh_eth_write(port_info, (sh_eth_read(port_info, EDMR) & ~EMDR_DESC_R) |
		     (EMDR_DESC | EDMR_EL), EDMR);
	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif

#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2)
	/* set bit 0 of RMIIMR (board-configured MII/RMII selection) */
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
/*
 * Program speed- and duplex-dependent MAC registers from the state the
 * PHY negotiated, then enable transmit and receive (ECMR_TE/ECMR_RE).
 * Prints the resulting link mode, e.g. "sh_eth: 100Base/Full".
 *
 * Return: always 0 (kept for symmetry with the other config steps).
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_RCAR_GEN2)
		val = ECMR_RTM;	/* speed is selected via ECMR on these SoCs */
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
/*
 * Enable the e-dmac receiver only. The transmitter will be enabled when
 * we have something to transmit.
 */
static void sh_eth_start(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, EDRRR_R, EDRRR);
}
/*
 * Stop the e-dmac receiver by clearing the EDRRR receive-request bit.
 *
 * NOTE(review): this writes ~EDRRR_R (every bit set except R) rather
 * than doing a read-modify-write; presumably the remaining EDRRR bits
 * are reserved/ignored by hardware — confirm against the datasheet.
 */
static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}
  401. static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
  402. {
  403. int ret = 0;
  404. ret = sh_eth_reset(eth);
  405. if (ret)
  406. return ret;
  407. ret = sh_eth_desc_init(eth);
  408. if (ret)
  409. return ret;
  410. sh_eth_mac_regs_config(eth, mac);
  411. return 0;
  412. }
  413. static int sh_eth_start_common(struct sh_eth_dev *eth)
  414. {
  415. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  416. int ret;
  417. ret = phy_startup(port_info->phydev);
  418. if (ret) {
  419. printf(SHETHER_NAME ": phy startup failure\n");
  420. return ret;
  421. }
  422. ret = sh_eth_phy_regs_config(eth);
  423. if (ret)
  424. return ret;
  425. sh_eth_start(eth);
  426. return 0;
  427. }
  428. #ifndef CONFIG_DM_ETH
  429. static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
  430. {
  431. int ret = 0;
  432. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  433. struct eth_device *dev = port_info->dev;
  434. struct phy_device *phydev;
  435. phydev = phy_connect(
  436. miiphy_get_dev_by_name(dev->name),
  437. port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
  438. port_info->phydev = phydev;
  439. phy_config(phydev);
  440. return ret;
  441. }
  442. static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
  443. {
  444. struct sh_eth_dev *eth = dev->priv;
  445. return sh_eth_send_common(eth, packet, len);
  446. }
/*
 * Poll for one received frame and hand it to the network stack.
 *
 * Return: number of bytes passed to net_process_received_packet(),
 * or 0 when no valid frame was pending.
 */
static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	/*
	 * Capture the current descriptor's buffer (as a P2/uncached
	 * address) before sh_eth_recv_finish() advances the ring cursor.
	 */
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		net_process_received_packet(packet, len);
		/* return the descriptor to the controller */
		sh_eth_recv_finish(eth);
	} else
		len = 0;

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}
  464. static int sh_eth_recv_legacy(struct eth_device *dev)
  465. {
  466. struct sh_eth_dev *eth = dev->priv;
  467. return sh_eth_recv_common(eth);
  468. }
  469. static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
  470. {
  471. struct sh_eth_dev *eth = dev->priv;
  472. int ret;
  473. ret = sh_eth_init_common(eth, dev->enetaddr);
  474. if (ret)
  475. return ret;
  476. ret = sh_eth_phy_config_legacy(eth);
  477. if (ret) {
  478. printf(SHETHER_NAME ": phy config timeout\n");
  479. goto err_start;
  480. }
  481. ret = sh_eth_start_common(eth);
  482. if (ret)
  483. goto err_start;
  484. return 0;
  485. err_start:
  486. sh_eth_tx_desc_free(eth);
  487. sh_eth_rx_desc_free(eth);
  488. return ret;
  489. }
  490. void sh_eth_halt_legacy(struct eth_device *dev)
  491. {
  492. struct sh_eth_dev *eth = dev->priv;
  493. sh_eth_stop(eth);
  494. }
  495. int sh_eth_initialize(bd_t *bd)
  496. {
  497. int ret = 0;
  498. struct sh_eth_dev *eth = NULL;
  499. struct eth_device *dev = NULL;
  500. struct mii_dev *mdiodev;
  501. eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
  502. if (!eth) {
  503. printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
  504. ret = -ENOMEM;
  505. goto err;
  506. }
  507. dev = (struct eth_device *)malloc(sizeof(struct eth_device));
  508. if (!dev) {
  509. printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
  510. ret = -ENOMEM;
  511. goto err;
  512. }
  513. memset(dev, 0, sizeof(struct eth_device));
  514. memset(eth, 0, sizeof(struct sh_eth_dev));
  515. eth->port = CONFIG_SH_ETHER_USE_PORT;
  516. eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
  517. eth->port_info[eth->port].iobase =
  518. (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
  519. dev->priv = (void *)eth;
  520. dev->iobase = 0;
  521. dev->init = sh_eth_init_legacy;
  522. dev->halt = sh_eth_halt_legacy;
  523. dev->send = sh_eth_send_legacy;
  524. dev->recv = sh_eth_recv_legacy;
  525. eth->port_info[eth->port].dev = dev;
  526. strcpy(dev->name, SHETHER_NAME);
  527. /* Register Device to EtherNet subsystem */
  528. eth_register(dev);
  529. bb_miiphy_buses[0].priv = eth;
  530. mdiodev = mdio_alloc();
  531. if (!mdiodev)
  532. return -ENOMEM;
  533. strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
  534. mdiodev->read = bb_miiphy_read;
  535. mdiodev->write = bb_miiphy_write;
  536. ret = mdio_register(mdiodev);
  537. if (ret < 0)
  538. return ret;
  539. if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
  540. puts("Please set MAC address\n");
  541. return ret;
  542. err:
  543. if (dev)
  544. free(dev);
  545. if (eth)
  546. free(eth);
  547. printf(SHETHER_NAME ": Failed\n");
  548. return ret;
  549. }
  550. #else /* CONFIG_DM_ETH */
/* Per-device private state for the DM_ETH version of the driver. */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* common state shared with legacy code */
	struct mii_dev *bus;		/* MDIO bus registered in probe() */
	void __iomem *iobase;		/* mapped controller register window */
	struct clk clk;			/* module clock, gated in start/stop */
	struct gpio_desc reset_gpio;	/* optional PHY reset line */
};
  558. static int sh_ether_send(struct udevice *dev, void *packet, int len)
  559. {
  560. struct sh_ether_priv *priv = dev_get_priv(dev);
  561. struct sh_eth_dev *eth = &priv->shdev;
  562. return sh_eth_send_common(eth, packet, len);
  563. }
/*
 * DM_ETH .recv op: zero-copy receive.  On success *packetp points into
 * the descriptor's DMA buffer; the caller must hand it back through
 * .free_pkt (sh_ether_free_pkt) before the descriptor can be reused.
 *
 * Return: frame length in bytes, or -EAGAIN when nothing is pending
 * (after making sure the receiver is still enabled).
 */
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	/* current descriptor's buffer, as a P2 (uncached) address */
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	} else {
		len = 0;

		/* Restart the receiver if disabled */
		if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
			sh_eth_write(port_info, EDRRR_R, EDRRR);

		return -EAGAIN;
	}
}
  584. static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
  585. {
  586. struct sh_ether_priv *priv = dev_get_priv(dev);
  587. struct sh_eth_dev *eth = &priv->shdev;
  588. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  589. sh_eth_recv_finish(eth);
  590. /* Restart the receiver if disabled */
  591. if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
  592. sh_eth_write(port_info, EDRRR_R, EDRRR);
  593. return 0;
  594. }
  595. static int sh_ether_write_hwaddr(struct udevice *dev)
  596. {
  597. struct sh_ether_priv *priv = dev_get_priv(dev);
  598. struct sh_eth_dev *eth = &priv->shdev;
  599. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  600. struct eth_pdata *pdata = dev_get_platdata(dev);
  601. sh_eth_write_hwaddr(port_info, pdata->enetaddr);
  602. return 0;
  603. }
  604. static int sh_eth_phy_config(struct udevice *dev)
  605. {
  606. struct sh_ether_priv *priv = dev_get_priv(dev);
  607. struct eth_pdata *pdata = dev_get_platdata(dev);
  608. struct sh_eth_dev *eth = &priv->shdev;
  609. int ret = 0;
  610. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  611. struct phy_device *phydev;
  612. int mask = 0xffffffff;
  613. phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
  614. if (!phydev)
  615. return -ENODEV;
  616. phy_connect_dev(phydev, dev);
  617. port_info->phydev = phydev;
  618. phy_config(phydev);
  619. return ret;
  620. }
  621. static int sh_ether_start(struct udevice *dev)
  622. {
  623. struct sh_ether_priv *priv = dev_get_priv(dev);
  624. struct eth_pdata *pdata = dev_get_platdata(dev);
  625. struct sh_eth_dev *eth = &priv->shdev;
  626. int ret;
  627. ret = clk_enable(&priv->clk);
  628. if (ret)
  629. return ret;
  630. ret = sh_eth_init_common(eth, pdata->enetaddr);
  631. if (ret)
  632. goto err_clk;
  633. ret = sh_eth_phy_config(dev);
  634. if (ret) {
  635. printf(SHETHER_NAME ": phy config timeout\n");
  636. goto err_start;
  637. }
  638. ret = sh_eth_start_common(eth);
  639. if (ret)
  640. goto err_start;
  641. return 0;
  642. err_start:
  643. sh_eth_tx_desc_free(eth);
  644. sh_eth_rx_desc_free(eth);
  645. err_clk:
  646. clk_disable(&priv->clk);
  647. return ret;
  648. }
  649. static void sh_ether_stop(struct udevice *dev)
  650. {
  651. struct sh_ether_priv *priv = dev_get_priv(dev);
  652. sh_eth_stop(&priv->shdev);
  653. clk_disable(&priv->clk);
  654. }
  655. static int sh_ether_probe(struct udevice *udev)
  656. {
  657. struct eth_pdata *pdata = dev_get_platdata(udev);
  658. struct sh_ether_priv *priv = dev_get_priv(udev);
  659. struct sh_eth_dev *eth = &priv->shdev;
  660. struct mii_dev *mdiodev;
  661. void __iomem *iobase;
  662. int ret;
  663. iobase = map_physmem(pdata->iobase, 0x1000, MAP_NOCACHE);
  664. priv->iobase = iobase;
  665. ret = clk_get_by_index(udev, 0, &priv->clk);
  666. if (ret < 0)
  667. goto err_mdio_alloc;
  668. gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
  669. GPIOD_IS_OUT);
  670. mdiodev = mdio_alloc();
  671. if (!mdiodev) {
  672. ret = -ENOMEM;
  673. goto err_mdio_alloc;
  674. }
  675. mdiodev->read = bb_miiphy_read;
  676. mdiodev->write = bb_miiphy_write;
  677. bb_miiphy_buses[0].priv = eth;
  678. snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
  679. ret = mdio_register(mdiodev);
  680. if (ret < 0)
  681. goto err_mdio_register;
  682. priv->bus = miiphy_get_dev_by_name(udev->name);
  683. eth->port = CONFIG_SH_ETHER_USE_PORT;
  684. eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
  685. eth->port_info[eth->port].iobase =
  686. (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
  687. return 0;
  688. err_mdio_register:
  689. mdio_free(mdiodev);
  690. err_mdio_alloc:
  691. unmap_physmem(priv->iobase, MAP_NOCACHE);
  692. return ret;
  693. }
/*
 * DM remove: undo probe — release the PHY, tear down the MDIO bus,
 * free the optional reset GPIO and unmap the register window.
 */
static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* NOTE(review): frees the phy_device allocated by the PHY core */
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	unmap_physmem(priv->iobase, MAP_NOCACHE);

	return 0;
}
/* DM_ETH operations table for the sh_ether driver. */
static const struct eth_ops sh_ether_ops = {
	.start = sh_ether_start,
	.send = sh_ether_send,
	.recv = sh_ether_recv,
	.free_pkt = sh_ether_free_pkt,
	.stop = sh_ether_stop,
	.write_hwaddr = sh_ether_write_hwaddr,
};
  715. int sh_ether_ofdata_to_platdata(struct udevice *dev)
  716. {
  717. struct eth_pdata *pdata = dev_get_platdata(dev);
  718. const char *phy_mode;
  719. const fdt32_t *cell;
  720. int ret = 0;
  721. pdata->iobase = devfdt_get_addr(dev);
  722. pdata->phy_interface = -1;
  723. phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
  724. NULL);
  725. if (phy_mode)
  726. pdata->phy_interface = phy_get_interface_by_name(phy_mode);
  727. if (pdata->phy_interface == -1) {
  728. debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
  729. return -EINVAL;
  730. }
  731. pdata->max_speed = 1000;
  732. cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
  733. if (cell)
  734. pdata->max_speed = fdt32_to_cpu(*cell);
  735. sprintf(bb_miiphy_buses[0].name, dev->name);
  736. return ret;
  737. }
/* Device-tree compatible strings handled by this driver. */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r8a7791" },
	{ }
};
/* Driver-model binding for the Renesas sh_ether controller. */
U_BOOT_DRIVER(eth_sh_ether) = {
	.name = "sh_ether",
	.id = UCLASS_ETH,
	.of_match = sh_ether_ids,
	.ofdata_to_platdata = sh_ether_ofdata_to_platdata,
	.probe = sh_ether_probe,
	.remove = sh_ether_remove,
	.ops = &sh_ether_ops,
	.priv_auto_alloc_size = sizeof(struct sh_ether_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	/* priv embeds DMA descriptors/state: allocate it DMA-safe */
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
  754. #endif
  755. /******* for bb_miiphy *******/
/* bb_miiphy init hook: no setup is needed for the PIR-based bus. */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
/* Drive the MDIO line as an output: set the PIR_MMD direction bit. */
static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);

	return 0;
}
/* Release the MDIO line (input): clear the PIR_MMD direction bit. */
static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);

	return 0;
}
  774. static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
  775. {
  776. struct sh_eth_dev *eth = bus->priv;
  777. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  778. if (v)
  779. sh_eth_write(port_info,
  780. sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
  781. else
  782. sh_eth_write(port_info,
  783. sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
  784. return 0;
  785. }
/* Sample the MDIO input level: PIR_MDI sits at bit 3 of PIR. */
static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
{
	struct sh_eth_dev *eth = bus->priv;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	*v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;

	return 0;
}
  793. static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
  794. {
  795. struct sh_eth_dev *eth = bus->priv;
  796. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  797. if (v)
  798. sh_eth_write(port_info,
  799. sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
  800. else
  801. sh_eth_write(port_info,
  802. sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
  803. return 0;
  804. }
/* Half-period delay (10 us) for the bit-banged MDIO clock. */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);

	return 0;
}
/*
 * Single bit-banged MDIO bus backed by the controller's PIR register.
 * .priv is filled with the driver state at init/probe time; .name may
 * be overwritten with the DM device name in ofdata_to_platdata().
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "sh_eth",
		.init = sh_eth_bb_init,
		.mdio_active = sh_eth_bb_mdio_active,
		.mdio_tristate = sh_eth_bb_mdio_tristate,
		.set_mdio = sh_eth_bb_set_mdio,
		.get_mdio = sh_eth_bb_get_mdio,
		.set_mdc = sh_eth_bb_set_mdc,
		.delay = sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);