  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * sh_eth.c - Driver for Renesas ethernet controller.
  4. *
  5. * Copyright (C) 2008, 2011 Renesas Solutions Corp.
 * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
  7. * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
  8. * Copyright (C) 2013, 2014 Renesas Electronics Corporation
  9. */
  10. #include <config.h>
  11. #include <common.h>
  12. #include <environment.h>
  13. #include <malloc.h>
  14. #include <net.h>
  15. #include <netdev.h>
  16. #include <miiphy.h>
  17. #include <linux/errno.h>
  18. #include <asm/io.h>
  19. #ifdef CONFIG_DM_ETH
  20. #include <clk.h>
  21. #include <dm.h>
  22. #include <linux/mii.h>
  23. #include <asm/gpio.h>
  24. #endif
  25. #include "sh_eth.h"
  26. #ifndef CONFIG_SH_ETHER_USE_PORT
  27. # error "Please define CONFIG_SH_ETHER_USE_PORT"
  28. #endif
  29. #ifndef CONFIG_SH_ETHER_PHY_ADDR
  30. # error "Please define CONFIG_SH_ETHER_PHY_ADDR"
  31. #endif
  32. #if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
  33. #define flush_cache_wback(addr, len) \
  34. flush_dcache_range((u32)addr, \
  35. (u32)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
  36. #else
  37. #define flush_cache_wback(...)
  38. #endif
  39. #if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
  40. #define invalidate_cache(addr, len) \
  41. { \
  42. u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE; \
  43. u32 start, end; \
  44. \
  45. start = (u32)addr; \
  46. end = start + len; \
  47. start &= ~(line_size - 1); \
  48. end = ((end + line_size - 1) & ~(line_size - 1)); \
  49. \
  50. invalidate_dcache_range(start, end); \
  51. }
  52. #else
  53. #define invalidate_cache(...)
  54. #endif
  55. #define TIMEOUT_CNT 1000
  56. static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
  57. {
  58. int ret = 0, timeout;
  59. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  60. if (!packet || len > 0xffff) {
  61. printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
  62. ret = -EINVAL;
  63. goto err;
  64. }
  65. /* packet must be a 4 byte boundary */
  66. if ((int)packet & 3) {
  67. printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
  68. , __func__);
  69. ret = -EFAULT;
  70. goto err;
  71. }
  72. /* Update tx descriptor */
  73. flush_cache_wback(packet, len);
  74. port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
  75. port_info->tx_desc_cur->td1 = len << 16;
  76. /* Must preserve the end of descriptor list indication */
  77. if (port_info->tx_desc_cur->td0 & TD_TDLE)
  78. port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
  79. else
  80. port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;
  81. flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
  82. /* Restart the transmitter if disabled */
  83. if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
  84. sh_eth_write(port_info, EDTRR_TRNS, EDTRR);
  85. /* Wait until packet is transmitted */
  86. timeout = TIMEOUT_CNT;
  87. do {
  88. invalidate_cache(port_info->tx_desc_cur,
  89. sizeof(struct tx_desc_s));
  90. udelay(100);
  91. } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);
  92. if (timeout < 0) {
  93. printf(SHETHER_NAME ": transmit timeout\n");
  94. ret = -ETIMEDOUT;
  95. goto err;
  96. }
  97. port_info->tx_desc_cur++;
  98. if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
  99. port_info->tx_desc_cur = port_info->tx_desc_base;
  100. err:
  101. return ret;
  102. }
  103. static int sh_eth_recv_start(struct sh_eth_dev *eth)
  104. {
  105. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  106. /* Check if the rx descriptor is ready */
  107. invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
  108. if (port_info->rx_desc_cur->rd0 & RD_RACT)
  109. return -EINVAL;
  110. /* Check for errors */
  111. if (port_info->rx_desc_cur->rd0 & RD_RFE)
  112. return -EINVAL;
  113. return port_info->rx_desc_cur->rd1 & 0xffff;
  114. }
  115. static void sh_eth_recv_finish(struct sh_eth_dev *eth)
  116. {
  117. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  118. /* Make current descriptor available again */
  119. if (port_info->rx_desc_cur->rd0 & RD_RDLE)
  120. port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
  121. else
  122. port_info->rx_desc_cur->rd0 = RD_RACT;
  123. flush_cache_wback(port_info->rx_desc_cur,
  124. sizeof(struct rx_desc_s));
  125. /* Point to the next descriptor */
  126. port_info->rx_desc_cur++;
  127. if (port_info->rx_desc_cur >=
  128. port_info->rx_desc_base + NUM_RX_DESC)
  129. port_info->rx_desc_cur = port_info->rx_desc_base;
  130. }
/*
 * Software-reset the E-DMAC block of the active port.
 *
 * GETHER/RZ parts: set the EDMR software-reset bit and poll until the
 * hardware clears it (up to TIMEOUT_CNT ms).  Returns 0 on success or
 * -EIO on timeout.
 *
 * Other parts: pulse EDMR_SRST manually with a 3 ms settle delay; this
 * code path has no software-visible completion flag.  Always returns 0.
 */
static int sh_eth_reset(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	int ret = 0, i;

	/* Start e-dmac transmitter and receiver */
	sh_eth_write(port_info, EDSR_ENALL, EDSR);

	/* Perform a software reset and wait for it to complete */
	sh_eth_write(port_info, EDMR_SRST, EDMR);
	for (i = 0; i < TIMEOUT_CNT; i++) {
		if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
			break;
		udelay(1000);
	}

	if (i == TIMEOUT_CNT) {
		printf(SHETHER_NAME ": Software reset timeout\n");
		ret = -EIO;
	}

	return ret;
#else
	/* Set SRST, wait 3 ms, then clear it again */
	sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
	mdelay(3);
	sh_eth_write(port_info,
		     sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);

	return 0;
#endif
}
/*
 * Allocate and zero-initialize the transmit descriptor ring, mark its
 * last entry, and program the controller's tx list registers.
 *
 * Returns 0 on success or -ENOMEM if the ring allocation fails.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);	/* Last descriptor bit */
#endif

err:
	return ret;
}
/*
 * Allocate the receive descriptor ring plus its data buffers, hand every
 * descriptor to the controller (RD_RACT), and program the rx list
 * registers.
 *
 * Returns 0 on success or -ENOMEM; the descriptor ring is freed again if
 * the buffer allocation fails.
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);
	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors: one MAX_BUF_SIZE buffer per entry */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
  263. static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
  264. {
  265. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  266. if (port_info->tx_desc_alloc) {
  267. free(port_info->tx_desc_alloc);
  268. port_info->tx_desc_alloc = NULL;
  269. }
  270. }
  271. static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
  272. {
  273. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  274. if (port_info->rx_desc_alloc) {
  275. free(port_info->rx_desc_alloc);
  276. port_info->rx_desc_alloc = NULL;
  277. }
  278. if (port_info->rx_buf_alloc) {
  279. free(port_info->rx_buf_alloc);
  280. port_info->rx_buf_alloc = NULL;
  281. }
  282. }
  283. static int sh_eth_desc_init(struct sh_eth_dev *eth)
  284. {
  285. int ret = 0;
  286. ret = sh_eth_tx_desc_init(eth);
  287. if (ret)
  288. goto err_tx_init;
  289. ret = sh_eth_rx_desc_init(eth);
  290. if (ret)
  291. goto err_rx_init;
  292. return ret;
  293. err_rx_init:
  294. sh_eth_tx_desc_free(eth);
  295. err_tx_init:
  296. return ret;
  297. }
  298. static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
  299. unsigned char *mac)
  300. {
  301. u32 val;
  302. val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
  303. sh_eth_write(port_info, val, MAHR);
  304. val = (mac[4] << 8) | mac[5];
  305. sh_eth_write(port_info, val, MALR);
  306. }
/*
 * Program the baseline E-DMAC and E-MAC configuration and the station
 * MAC address.  Interrupt sources are all masked (EESIPR/ECSIPR = 0);
 * this driver polls.
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Configure e-dmac registers */
	sh_eth_write(port_info, (sh_eth_read(port_info, EDMR) & ~EMDR_DESC_R) |
			(EMDR_DESC | EDMR_EL), EDMR);
	sh_eth_write(port_info, 0, EESIPR);	/* mask all e-dmac interrupts */
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers */
	sh_eth_write(port_info, 0, ECSIPR);	/* mask all e-mac interrupts */

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif
#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2)
	/*
	 * NOTE(review): bit 0 of RMIIMR presumably selects RMII mode on
	 * R-Car Gen2 — confirm against the hardware manual.
	 */
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
/*
 * Propagate the negotiated PHY parameters (speed, duplex) into the MAC.
 * The speed-selection register differs per SoC family; the final ECMR
 * write also enables the transmitter and receiver.  Always returns 0.
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;	/* extra ECMR bits accumulated per speed below */

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_RCAR_GEN2)
		val = ECMR_RTM;	/* 100 Mbit selected via ECMR on these parts */
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
  385. static void sh_eth_start(struct sh_eth_dev *eth)
  386. {
  387. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  388. /*
  389. * Enable the e-dmac receiver only. The transmitter will be enabled when
  390. * we have something to transmit
  391. */
  392. sh_eth_write(port_info, EDRRR_R, EDRRR);
  393. }
/*
 * Stop the e-dmac receiver.
 *
 * NOTE(review): this writes ~EDRRR_R (all bits except R set) rather than
 * 0.  That clears the receive-request bit but also writes 1s to the
 * remaining EDRRR bits — confirm against the hardware manual whether a
 * plain 0 write is intended.
 */
static void sh_eth_stop(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	sh_eth_write(port_info, ~EDRRR_R, EDRRR);
}
  399. static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
  400. {
  401. int ret = 0;
  402. ret = sh_eth_reset(eth);
  403. if (ret)
  404. return ret;
  405. ret = sh_eth_desc_init(eth);
  406. if (ret)
  407. return ret;
  408. sh_eth_mac_regs_config(eth, mac);
  409. return 0;
  410. }
  411. static int sh_eth_start_common(struct sh_eth_dev *eth)
  412. {
  413. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  414. int ret;
  415. ret = phy_startup(port_info->phydev);
  416. if (ret) {
  417. printf(SHETHER_NAME ": phy startup failure\n");
  418. return ret;
  419. }
  420. ret = sh_eth_phy_regs_config(eth);
  421. if (ret)
  422. return ret;
  423. sh_eth_start(eth);
  424. return 0;
  425. }
  426. #ifndef CONFIG_DM_ETH
  427. static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
  428. {
  429. int ret = 0;
  430. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  431. struct eth_device *dev = port_info->dev;
  432. struct phy_device *phydev;
  433. phydev = phy_connect(
  434. miiphy_get_dev_by_name(dev->name),
  435. port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
  436. port_info->phydev = phydev;
  437. phy_config(phydev);
  438. return ret;
  439. }
  440. static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
  441. {
  442. struct sh_eth_dev *eth = dev->priv;
  443. return sh_eth_send_common(eth, packet, len);
  444. }
/*
 * Poll for one received frame, hand it to the network stack, and recycle
 * the descriptor.  Returns the frame length, or 0 when nothing was
 * received.
 *
 * NOTE(review): the packet pointer is taken from rd2 *before*
 * sh_eth_recv_start() invalidates the descriptor; this relies on rd2
 * being set once at ring setup and never changed afterwards — confirm.
 */
static int sh_eth_recv_common(struct sh_eth_dev *eth)
{
	int len = 0;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		/* Drop stale cache lines before the CPU reads the frame */
		invalidate_cache(packet, len);
		net_process_received_packet(packet, len);
		sh_eth_recv_finish(eth);
	} else
		len = 0;

	/* Restart the receiver if disabled */
	if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
		sh_eth_write(port_info, EDRRR_R, EDRRR);

	return len;
}
  462. static int sh_eth_recv_legacy(struct eth_device *dev)
  463. {
  464. struct sh_eth_dev *eth = dev->priv;
  465. return sh_eth_recv_common(eth);
  466. }
  467. static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
  468. {
  469. struct sh_eth_dev *eth = dev->priv;
  470. int ret;
  471. ret = sh_eth_init_common(eth, dev->enetaddr);
  472. if (ret)
  473. return ret;
  474. ret = sh_eth_phy_config_legacy(eth);
  475. if (ret) {
  476. printf(SHETHER_NAME ": phy config timeout\n");
  477. goto err_start;
  478. }
  479. ret = sh_eth_start_common(eth);
  480. if (ret)
  481. goto err_start;
  482. return 0;
  483. err_start:
  484. sh_eth_tx_desc_free(eth);
  485. sh_eth_rx_desc_free(eth);
  486. return ret;
  487. }
  488. void sh_eth_halt_legacy(struct eth_device *dev)
  489. {
  490. struct sh_eth_dev *eth = dev->priv;
  491. sh_eth_stop(eth);
  492. }
  493. int sh_eth_initialize(bd_t *bd)
  494. {
  495. int ret = 0;
  496. struct sh_eth_dev *eth = NULL;
  497. struct eth_device *dev = NULL;
  498. struct mii_dev *mdiodev;
  499. eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
  500. if (!eth) {
  501. printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
  502. ret = -ENOMEM;
  503. goto err;
  504. }
  505. dev = (struct eth_device *)malloc(sizeof(struct eth_device));
  506. if (!dev) {
  507. printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
  508. ret = -ENOMEM;
  509. goto err;
  510. }
  511. memset(dev, 0, sizeof(struct eth_device));
  512. memset(eth, 0, sizeof(struct sh_eth_dev));
  513. eth->port = CONFIG_SH_ETHER_USE_PORT;
  514. eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
  515. eth->port_info[eth->port].iobase =
  516. (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
  517. dev->priv = (void *)eth;
  518. dev->iobase = 0;
  519. dev->init = sh_eth_init_legacy;
  520. dev->halt = sh_eth_halt_legacy;
  521. dev->send = sh_eth_send_legacy;
  522. dev->recv = sh_eth_recv_legacy;
  523. eth->port_info[eth->port].dev = dev;
  524. strcpy(dev->name, SHETHER_NAME);
  525. /* Register Device to EtherNet subsystem */
  526. eth_register(dev);
  527. bb_miiphy_buses[0].priv = eth;
  528. mdiodev = mdio_alloc();
  529. if (!mdiodev)
  530. return -ENOMEM;
  531. strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
  532. mdiodev->read = bb_miiphy_read;
  533. mdiodev->write = bb_miiphy_write;
  534. ret = mdio_register(mdiodev);
  535. if (ret < 0)
  536. return ret;
  537. if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
  538. puts("Please set MAC address\n");
  539. return ret;
  540. err:
  541. if (dev)
  542. free(dev);
  543. if (eth)
  544. free(eth);
  545. printf(SHETHER_NAME ": Failed\n");
  546. return ret;
  547. }
  548. #else /* CONFIG_DM_ETH */
/* Per-device private state for the DM_ETH version of the driver. */
struct sh_ether_priv {
	struct sh_eth_dev	shdev;		/* shared driver core state */
	struct mii_dev		*bus;		/* registered MDIO bus */
	phys_addr_t		iobase;		/* register base from platdata */
	struct clk		clk;		/* module clock */
	struct gpio_desc	reset_gpio;	/* optional PHY reset line */
};
  556. static int sh_ether_send(struct udevice *dev, void *packet, int len)
  557. {
  558. struct sh_ether_priv *priv = dev_get_priv(dev);
  559. struct sh_eth_dev *eth = &priv->shdev;
  560. return sh_eth_send_common(eth, packet, len);
  561. }
/*
 * DM receive hook: zero-copy — on success *packetp points into the rx
 * buffer ring and the frame length is returned; the descriptor is
 * recycled later in the free_pkt hook.  Returns -EAGAIN when no frame
 * is pending.
 */
static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
	int len;

	len = sh_eth_recv_start(eth);
	if (len > 0) {
		/* Drop stale cache lines before the CPU reads the frame */
		invalidate_cache(packet, len);
		*packetp = packet;

		return len;
	} else {
		len = 0;

		/* Restart the receiver if disabled */
		if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
			sh_eth_write(port_info, EDRRR_R, EDRRR);

		return -EAGAIN;
	}
}
  582. static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
  583. {
  584. struct sh_ether_priv *priv = dev_get_priv(dev);
  585. struct sh_eth_dev *eth = &priv->shdev;
  586. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  587. sh_eth_recv_finish(eth);
  588. /* Restart the receiver if disabled */
  589. if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
  590. sh_eth_write(port_info, EDRRR_R, EDRRR);
  591. return 0;
  592. }
  593. static int sh_ether_write_hwaddr(struct udevice *dev)
  594. {
  595. struct sh_ether_priv *priv = dev_get_priv(dev);
  596. struct sh_eth_dev *eth = &priv->shdev;
  597. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  598. struct eth_pdata *pdata = dev_get_platdata(dev);
  599. sh_eth_write_hwaddr(port_info, pdata->enetaddr);
  600. return 0;
  601. }
  602. static int sh_eth_phy_config(struct udevice *dev)
  603. {
  604. struct sh_ether_priv *priv = dev_get_priv(dev);
  605. struct eth_pdata *pdata = dev_get_platdata(dev);
  606. struct sh_eth_dev *eth = &priv->shdev;
  607. int ret = 0;
  608. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  609. struct phy_device *phydev;
  610. int mask = 0xffffffff;
  611. phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
  612. if (!phydev)
  613. return -ENODEV;
  614. phy_connect_dev(phydev, dev);
  615. port_info->phydev = phydev;
  616. phy_config(phydev);
  617. return ret;
  618. }
/*
 * DM start hook: enable the module clock, reset and configure the MAC,
 * attach the PHY and bring the link up.  On failure, resources are
 * released in reverse order of acquisition (rings, then clock).
 */
static int sh_ether_start(struct udevice *dev)
{
	struct sh_ether_priv *priv = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct sh_eth_dev *eth = &priv->shdev;
	int ret;

	ret = clk_enable(&priv->clk);
	if (ret)
		return ret;

	ret = sh_eth_init_common(eth, pdata->enetaddr);
	if (ret)
		goto err_clk;

	ret = sh_eth_phy_config(dev);
	if (ret) {
		printf(SHETHER_NAME ": phy config timeout\n");
		goto err_start;
	}

	ret = sh_eth_start_common(eth);
	if (ret)
		goto err_start;

	return 0;

err_start:
	sh_eth_tx_desc_free(eth);
	sh_eth_rx_desc_free(eth);
err_clk:
	clk_disable(&priv->clk);
	return ret;
}
  647. static void sh_ether_stop(struct udevice *dev)
  648. {
  649. struct sh_ether_priv *priv = dev_get_priv(dev);
  650. sh_eth_stop(&priv->shdev);
  651. clk_disable(&priv->clk);
  652. }
  653. static int sh_ether_probe(struct udevice *udev)
  654. {
  655. struct eth_pdata *pdata = dev_get_platdata(udev);
  656. struct sh_ether_priv *priv = dev_get_priv(udev);
  657. struct sh_eth_dev *eth = &priv->shdev;
  658. struct ofnode_phandle_args phandle_args;
  659. struct mii_dev *mdiodev;
  660. int ret;
  661. priv->iobase = pdata->iobase;
  662. ret = clk_get_by_index(udev, 0, &priv->clk);
  663. if (ret < 0)
  664. return ret;
  665. ret = dev_read_phandle_with_args(udev, "phy-handle", NULL, 0, 0, &phandle_args);
  666. if (!ret) {
  667. gpio_request_by_name_nodev(phandle_args.node, "reset-gpios", 0,
  668. &priv->reset_gpio, GPIOD_IS_OUT);
  669. }
  670. if (!dm_gpio_is_valid(&priv->reset_gpio)) {
  671. gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
  672. GPIOD_IS_OUT);
  673. }
  674. mdiodev = mdio_alloc();
  675. if (!mdiodev) {
  676. ret = -ENOMEM;
  677. return ret;
  678. }
  679. mdiodev->read = bb_miiphy_read;
  680. mdiodev->write = bb_miiphy_write;
  681. bb_miiphy_buses[0].priv = eth;
  682. snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
  683. ret = mdio_register(mdiodev);
  684. if (ret < 0)
  685. goto err_mdio_register;
  686. priv->bus = miiphy_get_dev_by_name(udev->name);
  687. eth->port = CONFIG_SH_ETHER_USE_PORT;
  688. eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
  689. eth->port_info[eth->port].iobase =
  690. (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
  691. return 0;
  692. err_mdio_register:
  693. mdio_free(mdiodev);
  694. return ret;
  695. }
/*
 * DM remove hook: release the PHY, MDIO bus, and reset GPIO.
 *
 * NOTE(review): freeing port_info->phydev directly (rather than via a
 * phy_* teardown helper) assumes the phy_device is heap-allocated and
 * not referenced elsewhere — confirm.
 */
static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	return 0;
}
/* DM_ETH operations table for this driver. */
static const struct eth_ops sh_ether_ops = {
	.start		= sh_ether_start,
	.send		= sh_ether_send,
	.recv		= sh_ether_recv,
	.free_pkt	= sh_ether_free_pkt,
	.stop		= sh_ether_stop,
	.write_hwaddr	= sh_ether_write_hwaddr,
};
  716. int sh_ether_ofdata_to_platdata(struct udevice *dev)
  717. {
  718. struct eth_pdata *pdata = dev_get_platdata(dev);
  719. const char *phy_mode;
  720. const fdt32_t *cell;
  721. int ret = 0;
  722. pdata->iobase = devfdt_get_addr(dev);
  723. pdata->phy_interface = -1;
  724. phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
  725. NULL);
  726. if (phy_mode)
  727. pdata->phy_interface = phy_get_interface_by_name(phy_mode);
  728. if (pdata->phy_interface == -1) {
  729. debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
  730. return -EINVAL;
  731. }
  732. pdata->max_speed = 1000;
  733. cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
  734. if (cell)
  735. pdata->max_speed = fdt32_to_cpu(*cell);
  736. sprintf(bb_miiphy_buses[0].name, dev->name);
  737. return ret;
  738. }
/* SoCs matched by this DM driver (R-Car Gen2 Ether instances). */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r8a7790" },
	{ .compatible = "renesas,ether-r8a7791" },
	{ .compatible = "renesas,ether-r8a7793" },
	{ .compatible = "renesas,ether-r8a7794" },
	{ }
};
/*
 * Driver binding.  DM_FLAG_ALLOC_PRIV_DMA requests DMA-capable private
 * data, since the descriptor rings live inside struct sh_ether_priv's
 * embedded state.
 */
U_BOOT_DRIVER(eth_sh_ether) = {
	.name		= "sh_ether",
	.id		= UCLASS_ETH,
	.of_match	= sh_ether_ids,
	.ofdata_to_platdata = sh_ether_ofdata_to_platdata,
	.probe		= sh_ether_probe,
	.remove		= sh_ether_remove,
	.ops		= &sh_ether_ops,
	.priv_auto_alloc_size = sizeof(struct sh_ether_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags		= DM_FLAG_ALLOC_PRIV_DMA,
};
  758. #endif
  759. /******* for bb_miiphy *******/
/* Bit-bang MII callback: no bus-level initialization is required. */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
  764. static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
  765. {
  766. struct sh_eth_dev *eth = bus->priv;
  767. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  768. sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
  769. return 0;
  770. }
  771. static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
  772. {
  773. struct sh_eth_dev *eth = bus->priv;
  774. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  775. sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
  776. return 0;
  777. }
  778. static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
  779. {
  780. struct sh_eth_dev *eth = bus->priv;
  781. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  782. if (v)
  783. sh_eth_write(port_info,
  784. sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
  785. else
  786. sh_eth_write(port_info,
  787. sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
  788. return 0;
  789. }
  790. static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
  791. {
  792. struct sh_eth_dev *eth = bus->priv;
  793. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  794. *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
  795. return 0;
  796. }
  797. static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
  798. {
  799. struct sh_eth_dev *eth = bus->priv;
  800. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  801. if (v)
  802. sh_eth_write(port_info,
  803. sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
  804. else
  805. sh_eth_write(port_info,
  806. sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
  807. return 0;
  808. }
/* Bit-bang MII callback: delay between MDC transitions (10 us). */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);
	return 0;
}
/*
 * Single bit-banged MII bus driven through the PIR register.  The .priv
 * field is filled in with the sh_eth_dev at init/probe time.
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name		= "sh_eth",
		.init		= sh_eth_bb_init,
		.mdio_active	= sh_eth_bb_mdio_active,
		.mdio_tristate	= sh_eth_bb_mdio_tristate,
		.set_mdio	= sh_eth_bb_set_mdio,
		.get_mdio	= sh_eth_bb_get_mdio,
		.set_mdc	= sh_eth_bb_set_mdc,
		.delay		= sh_eth_bb_delay,
	}
};

int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);