sh_eth.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013
  1. /*
  2. * sh_eth.c - Driver for Renesas ethernet controller.
  3. *
  4. * Copyright (C) 2008, 2011 Renesas Solutions Corp.
  5. * Copyright (c) 2008, 2011, 2014 Nobuhiro Iwamatsu
  6. * Copyright (c) 2007 Carlos Munoz <carlos@kenati.com>
  7. * Copyright (C) 2013, 2014 Renesas Electronics Corporation
  8. *
  9. * SPDX-License-Identifier: GPL-2.0+
  10. */
  11. #include <config.h>
  12. #include <common.h>
  13. #include <environment.h>
  14. #include <malloc.h>
  15. #include <net.h>
  16. #include <netdev.h>
  17. #include <miiphy.h>
  18. #include <linux/errno.h>
  19. #include <asm/io.h>
  20. #ifdef CONFIG_DM_ETH
  21. #include <clk.h>
  22. #include <dm.h>
  23. #include <linux/mii.h>
  24. #include <asm/gpio.h>
  25. #endif
  26. #include "sh_eth.h"
/* Board configuration must select the port and PHY address used */
#ifndef CONFIG_SH_ETHER_USE_PORT
# error "Please define CONFIG_SH_ETHER_USE_PORT"
#endif
#ifndef CONFIG_SH_ETHER_PHY_ADDR
# error "Please define CONFIG_SH_ETHER_PHY_ADDR"
#endif

/*
 * Write back (flush) a cached range so the DMA engine sees the CPU's
 * writes.  The length is rounded up to the cache-line size.  Compiles
 * to nothing when the data cache is off or write-back is not enabled.
 */
#if defined(CONFIG_SH_ETHER_CACHE_WRITEBACK) && !defined(CONFIG_SYS_DCACHE_OFF)
#define flush_cache_wback(addr, len) \
	flush_dcache_range((u32)addr, \
			   (u32)(addr + ALIGN(len, CONFIG_SH_ETHER_ALIGNE_SIZE)))
#else
#define flush_cache_wback(...)
#endif

/*
 * Invalidate a cached range so the CPU re-reads data the DMA engine
 * wrote.  Start/end are aligned out to full cache lines before the
 * invalidate, as required by the cache API.  No-op unless enabled.
 */
#if defined(CONFIG_SH_ETHER_CACHE_INVALIDATE) && defined(CONFIG_ARM)
#define invalidate_cache(addr, len) \
	{ \
		u32 line_size = CONFIG_SH_ETHER_ALIGNE_SIZE; \
		u32 start, end; \
		\
		start = (u32)addr; \
		end = start + len; \
		start &= ~(line_size - 1); \
		end = ((end + line_size - 1) & ~(line_size - 1)); \
		\
		invalidate_dcache_range(start, end); \
	}
#else
#define invalidate_cache(...)
#endif

/* Generic poll-loop iteration budget used by reset and transmit waits */
#define TIMEOUT_CNT 1000
  57. static int sh_eth_send_common(struct sh_eth_dev *eth, void *packet, int len)
  58. {
  59. int ret = 0, timeout;
  60. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  61. if (!packet || len > 0xffff) {
  62. printf(SHETHER_NAME ": %s: Invalid argument\n", __func__);
  63. ret = -EINVAL;
  64. goto err;
  65. }
  66. /* packet must be a 4 byte boundary */
  67. if ((int)packet & 3) {
  68. printf(SHETHER_NAME ": %s: packet not 4 byte aligned\n"
  69. , __func__);
  70. ret = -EFAULT;
  71. goto err;
  72. }
  73. /* Update tx descriptor */
  74. flush_cache_wback(packet, len);
  75. port_info->tx_desc_cur->td2 = ADDR_TO_PHY(packet);
  76. port_info->tx_desc_cur->td1 = len << 16;
  77. /* Must preserve the end of descriptor list indication */
  78. if (port_info->tx_desc_cur->td0 & TD_TDLE)
  79. port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP | TD_TDLE;
  80. else
  81. port_info->tx_desc_cur->td0 = TD_TACT | TD_TFP;
  82. flush_cache_wback(port_info->tx_desc_cur, sizeof(struct tx_desc_s));
  83. /* Restart the transmitter if disabled */
  84. if (!(sh_eth_read(port_info, EDTRR) & EDTRR_TRNS))
  85. sh_eth_write(port_info, EDTRR_TRNS, EDTRR);
  86. /* Wait until packet is transmitted */
  87. timeout = TIMEOUT_CNT;
  88. do {
  89. invalidate_cache(port_info->tx_desc_cur,
  90. sizeof(struct tx_desc_s));
  91. udelay(100);
  92. } while (port_info->tx_desc_cur->td0 & TD_TACT && timeout--);
  93. if (timeout < 0) {
  94. printf(SHETHER_NAME ": transmit timeout\n");
  95. ret = -ETIMEDOUT;
  96. goto err;
  97. }
  98. port_info->tx_desc_cur++;
  99. if (port_info->tx_desc_cur >= port_info->tx_desc_base + NUM_TX_DESC)
  100. port_info->tx_desc_cur = port_info->tx_desc_base;
  101. err:
  102. return ret;
  103. }
  104. static int sh_eth_recv_start(struct sh_eth_dev *eth)
  105. {
  106. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  107. /* Check if the rx descriptor is ready */
  108. invalidate_cache(port_info->rx_desc_cur, sizeof(struct rx_desc_s));
  109. if (port_info->rx_desc_cur->rd0 & RD_RACT)
  110. return -EINVAL;
  111. /* Check for errors */
  112. if (port_info->rx_desc_cur->rd0 & RD_RFE)
  113. return -EINVAL;
  114. return port_info->rx_desc_cur->rd1 & 0xffff;
  115. }
  116. static void sh_eth_recv_finish(struct sh_eth_dev *eth)
  117. {
  118. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  119. /* Make current descriptor available again */
  120. if (port_info->rx_desc_cur->rd0 & RD_RDLE)
  121. port_info->rx_desc_cur->rd0 = RD_RACT | RD_RDLE;
  122. else
  123. port_info->rx_desc_cur->rd0 = RD_RACT;
  124. flush_cache_wback(port_info->rx_desc_cur,
  125. sizeof(struct rx_desc_s));
  126. /* Point to the next descriptor */
  127. port_info->rx_desc_cur++;
  128. if (port_info->rx_desc_cur >=
  129. port_info->rx_desc_base + NUM_RX_DESC)
  130. port_info->rx_desc_cur = port_info->rx_desc_base;
  131. }
  132. static int sh_eth_reset(struct sh_eth_dev *eth)
  133. {
  134. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  135. #if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
  136. int ret = 0, i;
  137. /* Start e-dmac transmitter and receiver */
  138. sh_eth_write(port_info, EDSR_ENALL, EDSR);
  139. /* Perform a software reset and wait for it to complete */
  140. sh_eth_write(port_info, EDMR_SRST, EDMR);
  141. for (i = 0; i < TIMEOUT_CNT; i++) {
  142. if (!(sh_eth_read(port_info, EDMR) & EDMR_SRST))
  143. break;
  144. udelay(1000);
  145. }
  146. if (i == TIMEOUT_CNT) {
  147. printf(SHETHER_NAME ": Software reset timeout\n");
  148. ret = -EIO;
  149. }
  150. return ret;
  151. #else
  152. sh_eth_write(port_info, sh_eth_read(port_info, EDMR) | EDMR_SRST, EDMR);
  153. mdelay(3);
  154. sh_eth_write(port_info,
  155. sh_eth_read(port_info, EDMR) & ~EDMR_SRST, EDMR);
  156. return 0;
  157. #endif
  158. }
/*
 * Allocate and initialize the tx descriptor ring, then point the
 * controller's descriptor-list registers at it.
 *
 * Returns 0 on success, -ENOMEM if the ring cannot be allocated.
 */
static int sh_eth_tx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_TX_DESC * sizeof(struct tx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct tx_desc_s *cur_tx_desc;

	/*
	 * Allocate tx descriptors. They must be aligned to size of struct
	 * tx_desc_s.
	 */
	port_info->tx_desc_alloc =
		memalign(sizeof(struct tx_desc_s), alloc_desc_size);
	if (!port_info->tx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Push the freshly-written descriptor memory out to RAM for DMA */
	flush_cache_wback(port_info->tx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->tx_desc_base =
		(struct tx_desc_s *)ADDR_TO_P2((u32)port_info->tx_desc_alloc);
	port_info->tx_desc_cur = port_info->tx_desc_base;

	/* Initialize all descriptors */
	for (cur_tx_desc = port_info->tx_desc_base, i = 0; i < NUM_TX_DESC;
	     cur_tx_desc++, i++) {
		cur_tx_desc->td0 = 0x00;
		cur_tx_desc->td1 = 0x00;
		cur_tx_desc->td2 = 0x00;
	}

	/* Mark the end of the descriptors */
	cur_tx_desc--;
	cur_tx_desc->td0 |= TD_TDLE;

	/*
	 * Point the controller to the tx descriptor list. Must use physical
	 * addresses
	 */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->tx_desc_base), TDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_tx_desc), TDFXR);
	sh_eth_write(port_info, 0x01, TDFFR);/* Last descriptor bit */
#endif

err:
	return ret;
}
/*
 * Allocate and initialize the rx descriptor ring and its data buffers,
 * then point the controller's descriptor-list registers at the ring.
 * Each descriptor gets one MAX_BUF_SIZE buffer and is handed to the
 * controller (RD_RACT set) immediately.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (the
 * descriptor ring is freed again if the buffer allocation fails).
 */
static int sh_eth_rx_desc_init(struct sh_eth_dev *eth)
{
	int i, ret = 0;
	u32 alloc_desc_size = NUM_RX_DESC * sizeof(struct rx_desc_s);
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct rx_desc_s *cur_rx_desc;
	u8 *rx_buf;

	/*
	 * Allocate rx descriptors. They must be aligned to size of struct
	 * rx_desc_s.
	 */
	port_info->rx_desc_alloc =
		memalign(sizeof(struct rx_desc_s), alloc_desc_size);
	if (!port_info->rx_desc_alloc) {
		printf(SHETHER_NAME ": memalign failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Push the descriptor memory out to RAM for DMA */
	flush_cache_wback(port_info->rx_desc_alloc, alloc_desc_size);

	/* Make sure we use a P2 address (non-cacheable) */
	port_info->rx_desc_base =
		(struct rx_desc_s *)ADDR_TO_P2((u32)port_info->rx_desc_alloc);
	port_info->rx_desc_cur = port_info->rx_desc_base;

	/*
	 * Allocate rx data buffers. They must be RX_BUF_ALIGNE_SIZE bytes
	 * aligned and in P2 area.
	 */
	port_info->rx_buf_alloc =
		memalign(RX_BUF_ALIGNE_SIZE, NUM_RX_DESC * MAX_BUF_SIZE);
	if (!port_info->rx_buf_alloc) {
		printf(SHETHER_NAME ": alloc failed\n");
		ret = -ENOMEM;
		goto err_buf_alloc;
	}

	port_info->rx_buf_base = (u8 *)ADDR_TO_P2((u32)port_info->rx_buf_alloc);

	/* Initialize all descriptors: owned by hw, buffer size in rd1 high half */
	for (cur_rx_desc = port_info->rx_desc_base,
	     rx_buf = port_info->rx_buf_base, i = 0;
	     i < NUM_RX_DESC; cur_rx_desc++, rx_buf += MAX_BUF_SIZE, i++) {
		cur_rx_desc->rd0 = RD_RACT;
		cur_rx_desc->rd1 = MAX_BUF_SIZE << 16;
		cur_rx_desc->rd2 = (u32)ADDR_TO_PHY(rx_buf);
	}

	/* Mark the end of the descriptors */
	cur_rx_desc--;
	cur_rx_desc->rd0 |= RD_RDLE;

	/* Point the controller to the rx descriptor list */
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDLAR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, ADDR_TO_PHY(port_info->rx_desc_base), RDFAR);
	sh_eth_write(port_info, ADDR_TO_PHY(cur_rx_desc), RDFXR);
	sh_eth_write(port_info, RDFFR_RDLF, RDFFR);
#endif

	return ret;

err_buf_alloc:
	/* Undo the descriptor allocation so the caller sees a clean state */
	free(port_info->rx_desc_alloc);
	port_info->rx_desc_alloc = NULL;

err:
	return ret;
}
  264. static void sh_eth_tx_desc_free(struct sh_eth_dev *eth)
  265. {
  266. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  267. if (port_info->tx_desc_alloc) {
  268. free(port_info->tx_desc_alloc);
  269. port_info->tx_desc_alloc = NULL;
  270. }
  271. }
  272. static void sh_eth_rx_desc_free(struct sh_eth_dev *eth)
  273. {
  274. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  275. if (port_info->rx_desc_alloc) {
  276. free(port_info->rx_desc_alloc);
  277. port_info->rx_desc_alloc = NULL;
  278. }
  279. if (port_info->rx_buf_alloc) {
  280. free(port_info->rx_buf_alloc);
  281. port_info->rx_buf_alloc = NULL;
  282. }
  283. }
  284. static int sh_eth_desc_init(struct sh_eth_dev *eth)
  285. {
  286. int ret = 0;
  287. ret = sh_eth_tx_desc_init(eth);
  288. if (ret)
  289. goto err_tx_init;
  290. ret = sh_eth_rx_desc_init(eth);
  291. if (ret)
  292. goto err_rx_init;
  293. return ret;
  294. err_rx_init:
  295. sh_eth_tx_desc_free(eth);
  296. err_tx_init:
  297. return ret;
  298. }
  299. static void sh_eth_write_hwaddr(struct sh_eth_info *port_info,
  300. unsigned char *mac)
  301. {
  302. u32 val;
  303. val = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
  304. sh_eth_write(port_info, val, MAHR);
  305. val = (mac[4] << 8) | mac[5];
  306. sh_eth_write(port_info, val, MALR);
  307. }
/*
 * Program the e-dmac and e-mac register blocks with this driver's
 * static configuration (descriptor format, FIFO sizes, MAC address,
 * frame length limit) and mask all interrupt sources — the driver
 * polls the descriptors instead of using interrupts.
 */
static void sh_eth_mac_regs_config(struct sh_eth_dev *eth, unsigned char *mac)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* Configure e-dmac registers: descriptor format/endianness in EDMR */
	sh_eth_write(port_info, (sh_eth_read(port_info, EDMR) & ~EMDR_DESC_R) |
		     (EMDR_DESC | EDMR_EL), EDMR);

	/* Disable all e-dmac interrupt sources */
	sh_eth_write(port_info, 0, EESIPR);
	sh_eth_write(port_info, 0, TRSCER);
	sh_eth_write(port_info, 0, TFTR);
	sh_eth_write(port_info, (FIFO_SIZE_T | FIFO_SIZE_R), FDR);
	sh_eth_write(port_info, RMCR_RST, RMCR);
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, 0, RPADIR);
#endif
	sh_eth_write(port_info, (FIFO_F_D_RFF | FIFO_F_D_RFD), FCFTR);

	/* Configure e-mac registers: disable e-mac interrupt sources */
	sh_eth_write(port_info, 0, ECSIPR);

	/* Set Mac address */
	sh_eth_write_hwaddr(port_info, mac);

	sh_eth_write(port_info, RFLR_RFL_MIN, RFLR);
#if defined(SH_ETH_TYPE_GETHER)
	sh_eth_write(port_info, 0, PIPR);
#endif
#if defined(SH_ETH_TYPE_GETHER) || defined(SH_ETH_TYPE_RZ)
	sh_eth_write(port_info, APR_AP, APR);
	sh_eth_write(port_info, MPR_MP, MPR);
	sh_eth_write(port_info, TPAUSER_TPAUSE, TPAUSER);
#endif
#if defined(CONFIG_CPU_SH7734) || defined(CONFIG_R8A7740)
	sh_eth_write(port_info, CONFIG_SH_ETHER_SH7734_MII, RMII_MII);
#elif defined(CONFIG_RCAR_GEN2)
	/* Bit 0 of RMIIMR selects the RMII interface on R-Car Gen2 */
	sh_eth_write(port_info, sh_eth_read(port_info, RMIIMR) | 0x1, RMIIMR);
#endif
}
/*
 * Propagate the negotiated PHY link parameters (speed and duplex) into
 * the MAC, then enable transmit and receive via ECMR.  Which register
 * carries the speed setting depends on the controller variant.
 *
 * Always returns 0.
 */
static int sh_eth_phy_regs_config(struct sh_eth_dev *eth)
{
	struct sh_eth_info *port_info = &eth->port_info[eth->port];
	struct phy_device *phy = port_info->phydev;
	int ret = 0;
	u32 val = 0;

	/* Set the transfer speed */
	if (phy->speed == 100) {
		printf(SHETHER_NAME ": 100Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_100B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 1, RTRATE);
#elif defined(CONFIG_CPU_SH7724) || defined(CONFIG_RCAR_GEN2)
		/* These parts select 100M via the RTM bit in ECMR below */
		val = ECMR_RTM;
#endif
	} else if (phy->speed == 10) {
		printf(SHETHER_NAME ": 10Base/");
#if defined(SH_ETH_TYPE_GETHER)
		sh_eth_write(port_info, GECMR_10B, GECMR);
#elif defined(CONFIG_CPU_SH7757) || defined(CONFIG_CPU_SH7752)
		sh_eth_write(port_info, 0, RTRATE);
#endif
	}
#if defined(SH_ETH_TYPE_GETHER)
	/* Gigabit is only available on GETHER-type controllers */
	else if (phy->speed == 1000) {
		printf(SHETHER_NAME ": 1000Base/");
		sh_eth_write(port_info, GECMR_1000B, GECMR);
	}
#endif

	/* Check if full duplex mode is supported by the phy */
	if (phy->duplex) {
		printf("Full\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE | ECMR_DM),
			     ECMR);
	} else {
		printf("Half\n");
		sh_eth_write(port_info,
			     val | (ECMR_CHG_DM | ECMR_RE | ECMR_TE),
			     ECMR);
	}

	return ret;
}
  386. static void sh_eth_start(struct sh_eth_dev *eth)
  387. {
  388. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  389. /*
  390. * Enable the e-dmac receiver only. The transmitter will be enabled when
  391. * we have something to transmit
  392. */
  393. sh_eth_write(port_info, EDRRR_R, EDRRR);
  394. }
  395. static void sh_eth_stop(struct sh_eth_dev *eth)
  396. {
  397. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  398. sh_eth_write(port_info, ~EDRRR_R, EDRRR);
  399. }
  400. static int sh_eth_init_common(struct sh_eth_dev *eth, unsigned char *mac)
  401. {
  402. int ret = 0;
  403. ret = sh_eth_reset(eth);
  404. if (ret)
  405. return ret;
  406. ret = sh_eth_desc_init(eth);
  407. if (ret)
  408. return ret;
  409. sh_eth_mac_regs_config(eth, mac);
  410. return 0;
  411. }
  412. static int sh_eth_start_common(struct sh_eth_dev *eth)
  413. {
  414. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  415. int ret;
  416. ret = phy_startup(port_info->phydev);
  417. if (ret) {
  418. printf(SHETHER_NAME ": phy startup failure\n");
  419. return ret;
  420. }
  421. ret = sh_eth_phy_regs_config(eth);
  422. if (ret)
  423. return ret;
  424. sh_eth_start(eth);
  425. return 0;
  426. }
  427. #ifndef CONFIG_DM_ETH
  428. static int sh_eth_phy_config_legacy(struct sh_eth_dev *eth)
  429. {
  430. int ret = 0;
  431. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  432. struct eth_device *dev = port_info->dev;
  433. struct phy_device *phydev;
  434. phydev = phy_connect(
  435. miiphy_get_dev_by_name(dev->name),
  436. port_info->phy_addr, dev, CONFIG_SH_ETHER_PHY_MODE);
  437. port_info->phydev = phydev;
  438. phy_config(phydev);
  439. return ret;
  440. }
  441. static int sh_eth_send_legacy(struct eth_device *dev, void *packet, int len)
  442. {
  443. struct sh_eth_dev *eth = dev->priv;
  444. return sh_eth_send_common(eth, packet, len);
  445. }
  446. static int sh_eth_recv_common(struct sh_eth_dev *eth)
  447. {
  448. int len = 0;
  449. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  450. uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
  451. len = sh_eth_recv_start(eth);
  452. if (len > 0) {
  453. invalidate_cache(packet, len);
  454. net_process_received_packet(packet, len);
  455. sh_eth_recv_finish(eth);
  456. } else
  457. len = 0;
  458. /* Restart the receiver if disabled */
  459. if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
  460. sh_eth_write(port_info, EDRRR_R, EDRRR);
  461. return len;
  462. }
  463. static int sh_eth_recv_legacy(struct eth_device *dev)
  464. {
  465. struct sh_eth_dev *eth = dev->priv;
  466. return sh_eth_recv_common(eth);
  467. }
  468. static int sh_eth_init_legacy(struct eth_device *dev, bd_t *bd)
  469. {
  470. struct sh_eth_dev *eth = dev->priv;
  471. int ret;
  472. ret = sh_eth_init_common(eth, dev->enetaddr);
  473. if (ret)
  474. return ret;
  475. ret = sh_eth_phy_config_legacy(eth);
  476. if (ret) {
  477. printf(SHETHER_NAME ": phy config timeout\n");
  478. goto err_start;
  479. }
  480. ret = sh_eth_start_common(eth);
  481. if (ret)
  482. goto err_start;
  483. return 0;
  484. err_start:
  485. sh_eth_tx_desc_free(eth);
  486. sh_eth_rx_desc_free(eth);
  487. return ret;
  488. }
  489. void sh_eth_halt_legacy(struct eth_device *dev)
  490. {
  491. struct sh_eth_dev *eth = dev->priv;
  492. sh_eth_stop(eth);
  493. }
  494. int sh_eth_initialize(bd_t *bd)
  495. {
  496. int ret = 0;
  497. struct sh_eth_dev *eth = NULL;
  498. struct eth_device *dev = NULL;
  499. struct mii_dev *mdiodev;
  500. eth = (struct sh_eth_dev *)malloc(sizeof(struct sh_eth_dev));
  501. if (!eth) {
  502. printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
  503. ret = -ENOMEM;
  504. goto err;
  505. }
  506. dev = (struct eth_device *)malloc(sizeof(struct eth_device));
  507. if (!dev) {
  508. printf(SHETHER_NAME ": %s: malloc failed\n", __func__);
  509. ret = -ENOMEM;
  510. goto err;
  511. }
  512. memset(dev, 0, sizeof(struct eth_device));
  513. memset(eth, 0, sizeof(struct sh_eth_dev));
  514. eth->port = CONFIG_SH_ETHER_USE_PORT;
  515. eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
  516. eth->port_info[eth->port].iobase =
  517. (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
  518. dev->priv = (void *)eth;
  519. dev->iobase = 0;
  520. dev->init = sh_eth_init_legacy;
  521. dev->halt = sh_eth_halt_legacy;
  522. dev->send = sh_eth_send_legacy;
  523. dev->recv = sh_eth_recv_legacy;
  524. eth->port_info[eth->port].dev = dev;
  525. strcpy(dev->name, SHETHER_NAME);
  526. /* Register Device to EtherNet subsystem */
  527. eth_register(dev);
  528. bb_miiphy_buses[0].priv = eth;
  529. mdiodev = mdio_alloc();
  530. if (!mdiodev)
  531. return -ENOMEM;
  532. strncpy(mdiodev->name, dev->name, MDIO_NAME_LEN);
  533. mdiodev->read = bb_miiphy_read;
  534. mdiodev->write = bb_miiphy_write;
  535. ret = mdio_register(mdiodev);
  536. if (ret < 0)
  537. return ret;
  538. if (!eth_env_get_enetaddr("ethaddr", dev->enetaddr))
  539. puts("Please set MAC address\n");
  540. return ret;
  541. err:
  542. if (dev)
  543. free(dev);
  544. if (eth)
  545. free(eth);
  546. printf(SHETHER_NAME ": Failed\n");
  547. return ret;
  548. }
  549. #else /* CONFIG_DM_ETH */
/* Per-device private state for the driver-model (DM_ETH) path */
struct sh_ether_priv {
	struct sh_eth_dev shdev;	/* shared driver state (port, descriptors) */
	struct mii_dev *bus;		/* MDIO bus looked up after registration */
	phys_addr_t iobase;		/* register base copied from platform data */
	struct clk clk;			/* module clock, enabled in sh_ether_start() */
	struct gpio_desc reset_gpio;	/* optional reset line from "reset-gpios" */
};
  557. static int sh_ether_send(struct udevice *dev, void *packet, int len)
  558. {
  559. struct sh_ether_priv *priv = dev_get_priv(dev);
  560. struct sh_eth_dev *eth = &priv->shdev;
  561. return sh_eth_send_common(eth, packet, len);
  562. }
  563. static int sh_ether_recv(struct udevice *dev, int flags, uchar **packetp)
  564. {
  565. struct sh_ether_priv *priv = dev_get_priv(dev);
  566. struct sh_eth_dev *eth = &priv->shdev;
  567. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  568. uchar *packet = (uchar *)ADDR_TO_P2(port_info->rx_desc_cur->rd2);
  569. int len;
  570. len = sh_eth_recv_start(eth);
  571. if (len > 0) {
  572. invalidate_cache(packet, len);
  573. *packetp = packet;
  574. return len;
  575. } else {
  576. len = 0;
  577. /* Restart the receiver if disabled */
  578. if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
  579. sh_eth_write(port_info, EDRRR_R, EDRRR);
  580. return -EAGAIN;
  581. }
  582. }
  583. static int sh_ether_free_pkt(struct udevice *dev, uchar *packet, int length)
  584. {
  585. struct sh_ether_priv *priv = dev_get_priv(dev);
  586. struct sh_eth_dev *eth = &priv->shdev;
  587. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  588. sh_eth_recv_finish(eth);
  589. /* Restart the receiver if disabled */
  590. if (!(sh_eth_read(port_info, EDRRR) & EDRRR_R))
  591. sh_eth_write(port_info, EDRRR_R, EDRRR);
  592. return 0;
  593. }
  594. static int sh_ether_write_hwaddr(struct udevice *dev)
  595. {
  596. struct sh_ether_priv *priv = dev_get_priv(dev);
  597. struct sh_eth_dev *eth = &priv->shdev;
  598. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  599. struct eth_pdata *pdata = dev_get_platdata(dev);
  600. sh_eth_write_hwaddr(port_info, pdata->enetaddr);
  601. return 0;
  602. }
  603. static int sh_eth_phy_config(struct udevice *dev)
  604. {
  605. struct sh_ether_priv *priv = dev_get_priv(dev);
  606. struct eth_pdata *pdata = dev_get_platdata(dev);
  607. struct sh_eth_dev *eth = &priv->shdev;
  608. int ret = 0;
  609. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  610. struct phy_device *phydev;
  611. int mask = 0xffffffff;
  612. phydev = phy_find_by_mask(priv->bus, mask, pdata->phy_interface);
  613. if (!phydev)
  614. return -ENODEV;
  615. phy_connect_dev(phydev, dev);
  616. port_info->phydev = phydev;
  617. phy_config(phydev);
  618. return ret;
  619. }
  620. static int sh_ether_start(struct udevice *dev)
  621. {
  622. struct sh_ether_priv *priv = dev_get_priv(dev);
  623. struct eth_pdata *pdata = dev_get_platdata(dev);
  624. struct sh_eth_dev *eth = &priv->shdev;
  625. int ret;
  626. ret = clk_enable(&priv->clk);
  627. if (ret)
  628. return ret;
  629. ret = sh_eth_init_common(eth, pdata->enetaddr);
  630. if (ret)
  631. goto err_clk;
  632. ret = sh_eth_phy_config(dev);
  633. if (ret) {
  634. printf(SHETHER_NAME ": phy config timeout\n");
  635. goto err_start;
  636. }
  637. ret = sh_eth_start_common(eth);
  638. if (ret)
  639. goto err_start;
  640. return 0;
  641. err_start:
  642. sh_eth_tx_desc_free(eth);
  643. sh_eth_rx_desc_free(eth);
  644. err_clk:
  645. clk_disable(&priv->clk);
  646. return ret;
  647. }
  648. static void sh_ether_stop(struct udevice *dev)
  649. {
  650. struct sh_ether_priv *priv = dev_get_priv(dev);
  651. sh_eth_stop(&priv->shdev);
  652. clk_disable(&priv->clk);
  653. }
  654. static int sh_ether_probe(struct udevice *udev)
  655. {
  656. struct eth_pdata *pdata = dev_get_platdata(udev);
  657. struct sh_ether_priv *priv = dev_get_priv(udev);
  658. struct sh_eth_dev *eth = &priv->shdev;
  659. struct mii_dev *mdiodev;
  660. int ret;
  661. priv->iobase = pdata->iobase;
  662. ret = clk_get_by_index(udev, 0, &priv->clk);
  663. if (ret < 0)
  664. return ret;
  665. gpio_request_by_name(udev, "reset-gpios", 0, &priv->reset_gpio,
  666. GPIOD_IS_OUT);
  667. mdiodev = mdio_alloc();
  668. if (!mdiodev) {
  669. ret = -ENOMEM;
  670. return ret;
  671. }
  672. mdiodev->read = bb_miiphy_read;
  673. mdiodev->write = bb_miiphy_write;
  674. bb_miiphy_buses[0].priv = eth;
  675. snprintf(mdiodev->name, sizeof(mdiodev->name), udev->name);
  676. ret = mdio_register(mdiodev);
  677. if (ret < 0)
  678. goto err_mdio_register;
  679. priv->bus = miiphy_get_dev_by_name(udev->name);
  680. eth->port = CONFIG_SH_ETHER_USE_PORT;
  681. eth->port_info[eth->port].phy_addr = CONFIG_SH_ETHER_PHY_ADDR;
  682. eth->port_info[eth->port].iobase =
  683. (void __iomem *)(BASE_IO_ADDR + 0x800 * eth->port);
  684. return 0;
  685. err_mdio_register:
  686. mdio_free(mdiodev);
  687. return ret;
  688. }
/*
 * DM remove: release everything probe acquired — the PHY device,
 * the MDIO bus, and the optional reset GPIO.
 */
static int sh_ether_remove(struct udevice *udev)
{
	struct sh_ether_priv *priv = dev_get_priv(udev);
	struct sh_eth_dev *eth = &priv->shdev;
	struct sh_eth_info *port_info = &eth->port_info[eth->port];

	/* phydev was allocated by the phy layer on connect */
	free(port_info->phydev);
	mdio_unregister(priv->bus);
	mdio_free(priv->bus);

	/* Only free the GPIO if the optional request in probe succeeded */
	if (dm_gpio_is_valid(&priv->reset_gpio))
		dm_gpio_free(udev, &priv->reset_gpio);

	return 0;
}
/* Driver-model ethernet operations for this device */
static const struct eth_ops sh_ether_ops = {
	.start = sh_ether_start,
	.send = sh_ether_send,
	.recv = sh_ether_recv,
	.free_pkt = sh_ether_free_pkt,
	.stop = sh_ether_stop,
	.write_hwaddr = sh_ether_write_hwaddr,
};
  709. int sh_ether_ofdata_to_platdata(struct udevice *dev)
  710. {
  711. struct eth_pdata *pdata = dev_get_platdata(dev);
  712. const char *phy_mode;
  713. const fdt32_t *cell;
  714. int ret = 0;
  715. pdata->iobase = devfdt_get_addr(dev);
  716. pdata->phy_interface = -1;
  717. phy_mode = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "phy-mode",
  718. NULL);
  719. if (phy_mode)
  720. pdata->phy_interface = phy_get_interface_by_name(phy_mode);
  721. if (pdata->phy_interface == -1) {
  722. debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
  723. return -EINVAL;
  724. }
  725. pdata->max_speed = 1000;
  726. cell = fdt_getprop(gd->fdt_blob, dev_of_offset(dev), "max-speed", NULL);
  727. if (cell)
  728. pdata->max_speed = fdt32_to_cpu(*cell);
  729. sprintf(bb_miiphy_buses[0].name, dev->name);
  730. return ret;
  731. }
/* Devicetree compatible strings handled by this driver */
static const struct udevice_id sh_ether_ids[] = {
	{ .compatible = "renesas,ether-r8a7791" },
	{ }
};

U_BOOT_DRIVER(eth_sh_ether) = {
	.name = "sh_ether",
	.id = UCLASS_ETH,
	.of_match = sh_ether_ids,
	.ofdata_to_platdata = sh_ether_ofdata_to_platdata,
	.probe = sh_ether_probe,
	.remove = sh_ether_remove,
	.ops = &sh_ether_ops,
	.priv_auto_alloc_size = sizeof(struct sh_ether_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	/* private data holds DMA descriptors, so it must be DMA-safe */
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
  748. #endif
  749. /******* for bb_miiphy *******/
/* Bit-bang MII bus init hook: nothing to do for this controller */
static int sh_eth_bb_init(struct bb_miiphy_bus *bus)
{
	return 0;
}
  754. static int sh_eth_bb_mdio_active(struct bb_miiphy_bus *bus)
  755. {
  756. struct sh_eth_dev *eth = bus->priv;
  757. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  758. sh_eth_write(port_info, sh_eth_read(port_info, PIR) | PIR_MMD, PIR);
  759. return 0;
  760. }
  761. static int sh_eth_bb_mdio_tristate(struct bb_miiphy_bus *bus)
  762. {
  763. struct sh_eth_dev *eth = bus->priv;
  764. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  765. sh_eth_write(port_info, sh_eth_read(port_info, PIR) & ~PIR_MMD, PIR);
  766. return 0;
  767. }
  768. static int sh_eth_bb_set_mdio(struct bb_miiphy_bus *bus, int v)
  769. {
  770. struct sh_eth_dev *eth = bus->priv;
  771. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  772. if (v)
  773. sh_eth_write(port_info,
  774. sh_eth_read(port_info, PIR) | PIR_MDO, PIR);
  775. else
  776. sh_eth_write(port_info,
  777. sh_eth_read(port_info, PIR) & ~PIR_MDO, PIR);
  778. return 0;
  779. }
  780. static int sh_eth_bb_get_mdio(struct bb_miiphy_bus *bus, int *v)
  781. {
  782. struct sh_eth_dev *eth = bus->priv;
  783. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  784. *v = (sh_eth_read(port_info, PIR) & PIR_MDI) >> 3;
  785. return 0;
  786. }
  787. static int sh_eth_bb_set_mdc(struct bb_miiphy_bus *bus, int v)
  788. {
  789. struct sh_eth_dev *eth = bus->priv;
  790. struct sh_eth_info *port_info = &eth->port_info[eth->port];
  791. if (v)
  792. sh_eth_write(port_info,
  793. sh_eth_read(port_info, PIR) | PIR_MDC, PIR);
  794. else
  795. sh_eth_write(port_info,
  796. sh_eth_read(port_info, PIR) & ~PIR_MDC, PIR);
  797. return 0;
  798. }
/* Half-period delay between bit-bang transitions (10 us) */
static int sh_eth_bb_delay(struct bb_miiphy_bus *bus)
{
	udelay(10);
	return 0;
}
/*
 * Single bit-banged MII bus backed by the PIR register.  .priv is
 * filled in with the driver state at probe/initialize time.
 */
struct bb_miiphy_bus bb_miiphy_buses[] = {
	{
		.name = "sh_eth",
		.init = sh_eth_bb_init,
		.mdio_active = sh_eth_bb_mdio_active,
		.mdio_tristate = sh_eth_bb_mdio_tristate,
		.set_mdio = sh_eth_bb_set_mdio,
		.get_mdio = sh_eth_bb_get_mdio,
		.set_mdc = sh_eth_bb_set_mdc,
		.delay = sh_eth_bb_delay,
	}
};
int bb_miiphy_buses_num = ARRAY_SIZE(bb_miiphy_buses);