cpsw.c

/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <netdev.h>
#include <cpsw.h>
#include <asm/errno.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>
#include <dm.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

#define BITMASK(bits) (BIT(bits) - 1)
#define PHY_REG_MASK 0x1f
#define PHY_ID_MASK 0x1f
#define NUM_DESCS (PKTBUFSRX * 2)
#define PKT_MIN 60
#define PKT_MAX (1500 + 14 + 4 + 4)
#define CLEAR_BIT 1
#define GIGABITEN BIT(7)
#define FULLDUPLEXEN BIT(0)
#define MIIEN BIT(15)

/* reg offset */
#define CPSW_HOST_PORT_OFFSET 0x108
#define CPSW_SLAVE0_OFFSET 0x208
#define CPSW_SLAVE1_OFFSET 0x308
#define CPSW_SLAVE_SIZE 0x100
#define CPSW_CPDMA_OFFSET 0x800
#define CPSW_HW_STATS 0x900
#define CPSW_STATERAM_OFFSET 0xa00
#define CPSW_CPTS_OFFSET 0xc00
#define CPSW_ALE_OFFSET 0xd00
#define CPSW_SLIVER0_OFFSET 0xd80
#define CPSW_SLIVER1_OFFSET 0xdc0
#define CPSW_BD_OFFSET 0x2000
#define CPSW_MDIO_DIV 0xff

#define AM335X_GMII_SEL_OFFSET 0x630

/* DMA Registers */
#define CPDMA_TXCONTROL 0x004
#define CPDMA_RXCONTROL 0x014
#define CPDMA_SOFTRESET 0x01c
#define CPDMA_RXFREE 0x0e0
#define CPDMA_TXHDP_VER1 0x100
#define CPDMA_TXHDP_VER2 0x200
#define CPDMA_RXHDP_VER1 0x120
#define CPDMA_RXHDP_VER2 0x220
#define CPDMA_TXCP_VER1 0x140
#define CPDMA_TXCP_VER2 0x240
#define CPDMA_RXCP_VER1 0x160
#define CPDMA_RXCP_VER2 0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31)
#define CPDMA_DESC_EOP BIT(30)
#define CPDMA_DESC_OWNER BIT(29)
#define CPDMA_DESC_EOQ BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT 100 /* msecs */
#define CPDMA_TIMEOUT 100 /* msecs */

struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE BIT(31)
#define CONTROL_ENABLE BIT(30)
	u32 alive;
	u32 link;
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];
	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO BIT(31)
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29)
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff)
	} user[0];
};

struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
};

struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
#ifdef CONFIG_AM33XX
	u32 gap_thresh;
#elif defined(CONFIG_TI814X)
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
#endif
	u32 sa_lo;
	u32 sa_hi;
};

struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};

#define ALE_ENTRY_BITS 68
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL 0x08
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40

#define ALE_TABLE_WRITE BIT(31)

#define ALE_TYPE_FREE 0
#define ALE_TYPE_ADDR 1
#define ALE_TYPE_VLAN 2
#define ALE_TYPE_VLAN_ADDR 3

#define ALE_UCAST_PERSISTANT 0
#define ALE_UCAST_UNTOUCHED 1
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3

#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
#define ALE_MCAST_FWD_LEARN 2
#define ALE_MCAST_FWD_2 3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE = 0x00,
	ALE_PORT_STATE_BLOCK = 0x01,
	ALE_PORT_STATE_LEARN = 0x02,
	ALE_PORT_STATE_FORWARD = 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE 1
#define ALE_BLOCKED 2

struct cpsw_slave {
	struct cpsw_slave_regs *regs;
	struct cpsw_sliver_regs *sliver;
	int slave_num;
	u32 mac_control;
	struct cpsw_slave_data *data;
};

struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	u32 sw_buffer;
	u32 sw_len;
};
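
/*
 * A CPDMA channel is driven through three per-channel registers: hdp (head
 * descriptor pointer) starts the queue, cp (completion pointer) acknowledges
 * processed descriptors, and rxfree (free buffer count) is used only on the
 * receive side.
 */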
struct cpdma_chan {
	struct cpdma_desc *head, *tail;
	void *hdp, *cp, *rxfree;
};

#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld) __raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))

#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
			(priv)->data.slaves; slave++)

struct cpsw_priv {
#ifdef CONFIG_DM_ETH
	struct udevice *dev;
#else
	struct eth_device *dev;
#endif
	struct cpsw_platform_data data;
	int host_port;

	struct cpsw_regs *regs;
	void *dma_regs;
	struct cpsw_host_regs *host_port_regs;
	void *ale_regs;

	struct cpdma_desc *descs;
	struct cpdma_desc *desc_free;
	struct cpdma_chan rx_chan, tx_chan;

	struct cpsw_slave *slaves;
	struct phy_device *phydev;
	struct mii_dev *bus;

	u32 phy_mask;
};
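
/*
 * Each ALE table entry is 68 bits wide and is accessed as three 32-bit words.
 * The helpers below address a field by its absolute bit offset within the
 * entry; word 0 holds the most significant bits, hence the index "flip".
 */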
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx = start / 32;
	start -= idx * 32;
	idx = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |= (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}
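
/*
 * Adding an address entry reuses a matching entry when one exists, otherwise
 * takes a free slot, and as a last resort recycles an ageable (non-persistent,
 * non-OUI) unicast entry.
 */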
static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}
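
/*
 * MDIO transactions go through the user access register: the GO bit starts a
 * transfer, ACK reports completion of a read, and the low 16 bits carry the
 * data.
 */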
static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency. These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon. Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}

static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;

	phy = priv->phydev;
	if (!phy)
		return;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);

	return link;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0; /* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}
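
/*
 * Queue a buffer on a CPDMA channel. The first descriptor is written to the
 * channel head pointer directly; later descriptors are chained onto the tail,
 * and the head pointer is rewritten if the hardware already signalled end of
 * queue (EOQ) before the chain update landed.
 */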
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}
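
/*
 * Reap the descriptor at the head of a channel. A descriptor still owned by
 * the hardware means the transfer is in flight (-EBUSY), in which case a
 * stalled queue is restarted by rewriting the head pointer; otherwise the
 * buffer and length are returned, completion is acknowledged, and the
 * descriptor goes back to the free list.
 */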
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}
		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
{
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 +
				     4 * i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 +
				     4 * i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 +
				     4 * i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void _cpsw_halt(struct cpsw_priv *priv)
{
	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
}
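
/*
 * Transmit path: the packet buffer is flushed from the data cache before DMA,
 * previously completed TX descriptors are reaped, and the new buffer is then
 * queued on the TX channel.
 */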
static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
{
	void *buffer;
	int len;
	int ret = -EAGAIN;

	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
	if (ret < 0)
		return ret;

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)buffer + PKTSIZE_ALIGN);
	*pkt = buffer;

	return len;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void *regs = priv->regs;
	struct cpsw_slave_data *data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data = data;
	slave->regs = regs + data->slave_reg_ofs;
	slave->sliver = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     priv->dev,
			     slave->data->phy_if);
	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

int _cpsw_register(struct cpsw_priv *priv)
{
	struct cpsw_slave *slave;
	struct cpsw_platform_data *data = &priv->data;
	void *regs = (void *)data->cpsw_base;
	int idx = 0;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves)
		return -ENOMEM;

	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;
	priv->descs = (void *)regs + data->bd_ram_ofs;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(priv->dev->name);

	for_active_slave(slave, priv)
		cpsw_phy_init(priv, slave);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_halt(priv);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_send(priv, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	uchar *pkt = NULL;
	int len;

	len = _cpsw_recv(priv, &pkt);
	if (len > 0) {
		net_process_received_packet(pkt, len);
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
	}

	return len;
}

int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct eth_device *dev;
	int ret;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->dev = dev;
	priv->data = *data;

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	ret = _cpsw_register(priv);
	if (ret < 0) {
		eth_unregister(dev);
		free(dev);
		free(priv);
		return ret;
	}

	return 1;
}
#else
static int cpsw_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);
}

static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);
}

static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);
}

static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
			     int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}

static void cpsw_eth_stop(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_halt(priv);
}

static int cpsw_eth_probe(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	priv->dev = dev;

	return _cpsw_register(priv);
}

static const struct eth_ops cpsw_eth_ops = {
	.start = cpsw_eth_start,
	.send = cpsw_eth_send,
	.recv = cpsw_eth_recv,
	.free_pkt = cpsw_eth_free_pkt,
	.stop = cpsw_eth_stop,
};

static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
{
	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL);
}
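
/*
 * Parse the controller properties and the mdio/slave/cpsw-phy-sel subnodes
 * from the device tree, then program the gmii_sel register according to the
 * PHY interface mode of the active slave.
 */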
static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);
	const char *phy_mode = NULL;
	const void *fdt = gd->fdt_blob;
	int node = dev->of_offset;
	int subnode;
	int slave_index = 0;
	int active_slave;
	int ret;

	pdata->iobase = dev_get_addr(dev);
	priv->data.version = CPSW_CTRL_VERSION_2;
	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
	priv->data.mdio_div = CPSW_MDIO_DIV;
	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;

	pdata->phy_interface = -1;

	priv->data.cpsw_base = pdata->iobase;
	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
	if (priv->data.channels <= 0) {
		printf("error: cpdma_channels not found in dt\n");
		return -ENOENT;
	}

	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
	if (priv->data.slaves <= 0) {
		printf("error: slaves not found in dt\n");
		return -ENOENT;
	}
	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
				       priv->data.slaves);

	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
	if (priv->data.ale_entries <= 0) {
		printf("error: ale_entries not found in dt\n");
		return -ENOENT;
	}

	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
	if (priv->data.bd_ram_ofs <= 0) {
		printf("error: bd_ram_size not found in dt\n");
		return -ENOENT;
	}

	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
	if (priv->data.mac_control <= 0) {
		printf("error: mac_control not found in dt\n");
		return -ENOENT;
	}

	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
	priv->data.active_slave = active_slave;

	fdt_for_each_subnode(fdt, subnode, node) {
		int len;
		const char *name;

		name = fdt_get_name(fdt, subnode, &len);

		if (!strncmp(name, "mdio", 4)) {
			u32 mdio_base;

			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
			if (mdio_base == FDT_ADDR_T_NONE) {
				error("Not able to get MDIO address space\n");
				return -ENOENT;
			}
			priv->data.mdio_base = mdio_base;
		}

		if (!strncmp(name, "slave", 5)) {
			u32 phy_id[2];

			if (slave_index >= priv->data.slaves)
				continue;

			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
			if (phy_mode)
				priv->data.slave_data[slave_index].phy_if =
					phy_get_interface_by_name(phy_mode);
			fdtdec_get_int_array(fdt, subnode, "phy_id", phy_id, 2);
			priv->data.slave_data[slave_index].phy_addr = phy_id[1];
			slave_index++;
		}

		if (!strncmp(name, "cpsw-phy-sel", 12)) {
			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
								    subnode);
			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
				error("Not able to get gmii_sel reg address\n");
				return -ENOENT;
			}
		}
	}

	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;

	if (priv->data.slaves == 2) {
		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
	}

	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
	if (ret < 0) {
		error("cpsw read efuse mac failed\n");
		return ret;
	}

	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	switch (pdata->phy_interface) {
	case PHY_INTERFACE_MODE_MII:
		writel(MII_MODE_ENABLE, priv->data.gmii_sel);
		break;
	case PHY_INTERFACE_MODE_RMII:
		writel(RMII_MODE_ENABLE, priv->data.gmii_sel);
		break;
	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		writel(RGMII_MODE_ENABLE, priv->data.gmii_sel);
		break;
	}

	return 0;
}

static const struct udevice_id cpsw_eth_ids[] = {
	{ .compatible = "ti,cpsw" },
	{ .compatible = "ti,am335x-cpsw" },
	{ }
};

U_BOOT_DRIVER(eth_cpsw) = {
	.name = "eth_cpsw",
	.id = UCLASS_ETH,
	.of_match = cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe = cpsw_eth_probe,
	.ops = &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
#endif /* CONFIG_DM_ETH */