cpsw.c

/*
 * CPSW Ethernet Switch Driver
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <common.h>
#include <command.h>
#include <net.h>
#include <miiphy.h>
#include <malloc.h>
#include <net.h>
#include <netdev.h>
#include <cpsw.h>
#include <linux/errno.h>
#include <asm/gpio.h>
#include <asm/io.h>
#include <phy.h>
#include <asm/arch/cpu.h>
#include <dm.h>
#include <fdt_support.h>

DECLARE_GLOBAL_DATA_PTR;

#define BITMASK(bits)		(BIT(bits) - 1)
#define PHY_REG_MASK		0x1f
#define PHY_ID_MASK		0x1f
#define NUM_DESCS		(PKTBUFSRX * 2)
#define PKT_MIN			60
#define PKT_MAX			(1500 + 14 + 4 + 4)
#define CLEAR_BIT		1
#define GIGABITEN		BIT(7)
#define FULLDUPLEXEN		BIT(0)
#define MIIEN			BIT(15)

/* reg offset */
#define CPSW_HOST_PORT_OFFSET	0x108
#define CPSW_SLAVE0_OFFSET	0x208
#define CPSW_SLAVE1_OFFSET	0x308
#define CPSW_SLAVE_SIZE		0x100
#define CPSW_CPDMA_OFFSET	0x800
#define CPSW_HW_STATS		0x900
#define CPSW_STATERAM_OFFSET	0xa00
#define CPSW_CPTS_OFFSET	0xc00
#define CPSW_ALE_OFFSET		0xd00
#define CPSW_SLIVER0_OFFSET	0xd80
#define CPSW_SLIVER1_OFFSET	0xdc0
#define CPSW_BD_OFFSET		0x2000
#define CPSW_MDIO_DIV		0xff

#define AM335X_GMII_SEL_OFFSET	0x630

/* DMA Registers */
#define CPDMA_TXCONTROL		0x004
#define CPDMA_RXCONTROL		0x014
#define CPDMA_SOFTRESET		0x01c
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP_VER1	0x100
#define CPDMA_TXHDP_VER2	0x200
#define CPDMA_RXHDP_VER1	0x120
#define CPDMA_RXHDP_VER2	0x220
#define CPDMA_TXCP_VER1		0x140
#define CPDMA_TXCP_VER2		0x240
#define CPDMA_RXCP_VER1		0x160
#define CPDMA_RXCP_VER2		0x260

/* Descriptor mode bits */
#define CPDMA_DESC_SOP		BIT(31)
#define CPDMA_DESC_EOP		BIT(30)
#define CPDMA_DESC_OWNER	BIT(29)
#define CPDMA_DESC_EOQ		BIT(28)

/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT		100 /* msecs */
#define CPDMA_TIMEOUT		100 /* msecs */

struct cpsw_mdio_regs {
	u32	version;
	u32	control;
#define CONTROL_IDLE		BIT(31)
#define CONTROL_ENABLE		BIT(30)

	u32	alive;
	u32	link;
	u32	linkintraw;
	u32	linkintmasked;
	u32	__reserved_0[2];
	u32	userintraw;
	u32	userintmasked;
	u32	userintmaskset;
	u32	userintmaskclr;
	u32	__reserved_1[20];

	struct {
		u32	access;
		u32	physel;
#define USERACCESS_GO		BIT(31)
#define USERACCESS_WRITE	BIT(30)
#define USERACCESS_ACK		BIT(29)
#define USERACCESS_READ		(0)
#define USERACCESS_DATA		(0xffff)
	} user[0];
};

struct cpsw_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
};

struct cpsw_slave_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
#ifdef CONFIG_AM33XX
	u32	gap_thresh;
#elif defined(CONFIG_TI814X)
	u32	ts_ctl;
	u32	ts_seq_ltype;
	u32	ts_vlan;
#endif
	u32	sa_lo;
	u32	sa_hi;
};

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	flow_thresh;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

#define ALE_ENTRY_BITS		68
#define ALE_ENTRY_WORDS		DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL		0x08
#define ALE_UNKNOWNVLAN		0x18
#define ALE_TABLE_CONTROL	0x20
#define ALE_TABLE		0x34
#define ALE_PORTCTL		0x40

#define ALE_TABLE_WRITE		BIT(31)

#define ALE_TYPE_FREE			0
#define ALE_TYPE_ADDR			1
#define ALE_TYPE_VLAN			2
#define ALE_TYPE_VLAN_ADDR		3

#define ALE_UCAST_PERSISTANT		0
#define ALE_UCAST_UNTOUCHED		1
#define ALE_UCAST_OUI			2
#define ALE_UCAST_TOUCHED		3

#define ALE_MCAST_FWD			0
#define ALE_MCAST_BLOCK_LEARN_FWD	1
#define ALE_MCAST_FWD_LEARN		2
#define ALE_MCAST_FWD_2			3

enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE	= 0x00,
	ALE_PORT_STATE_BLOCK	= 0x01,
	ALE_PORT_STATE_LEARN	= 0x02,
	ALE_PORT_STATE_FORWARD	= 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE	1
#define ALE_BLOCKED	2

struct cpsw_slave {
	struct cpsw_slave_regs		*regs;
	struct cpsw_sliver_regs		*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
};

struct cpdma_desc {
	/* hardware fields */
	u32			hw_next;
	u32			hw_buffer;
	u32			hw_len;
	u32			hw_mode;
	/* software fields */
	u32			sw_buffer;
	u32			sw_len;
};

struct cpdma_chan {
	struct cpdma_desc	*head, *tail;
	void			*hdp, *cp, *rxfree;
};

/* AM33xx SoC specific definitions for the CONTROL port */
#define AM33XX_GMII_SEL_MODE_MII	0
#define AM33XX_GMII_SEL_MODE_RMII	1
#define AM33XX_GMII_SEL_MODE_RGMII	2

#define AM33XX_GMII_SEL_RGMII1_IDMODE	BIT(4)
#define AM33XX_GMII_SEL_RGMII2_IDMODE	BIT(5)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN	BIT(6)
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN	BIT(7)

#define GMII_SEL_MODE_MASK		0x3

#define desc_write(desc, fld, val)	__raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld)		__raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld)	((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val)	__raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld)		__raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld)	((void *)__raw_readl((chan)->fld))

#define for_active_slave(slave, priv) \
	slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
				(priv)->data.slaves; slave++)

struct cpsw_priv {
#ifdef CONFIG_DM_ETH
	struct udevice			*dev;
#else
	struct eth_device		*dev;
#endif
	struct cpsw_platform_data	data;
	int				host_port;

	struct cpsw_regs		*regs;
	void				*dma_regs;
	struct cpsw_host_regs		*host_port_regs;
	void				*ale_regs;

	struct cpdma_desc		*descs;
	struct cpdma_desc		*desc_free;
	struct cpdma_chan		rx_chan, tx_chan;

	struct cpsw_slave		*slaves;
	struct phy_device		*phydev;
	struct mii_dev			*bus;

	u32				phy_mask;
};

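/*
 * ALE table entries are 68 bits wide and are held here as three u32 words,
 * with word 0 carrying the most significant bits; hence the "flip" of the
 * word index below when a bit field is extracted or updated.
 */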
static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
{
	int idx;

	idx    = start / 32;
	start -= idx * 32;
	idx    = 2 - idx; /* flip */
	return (ale_entry[idx] >> start) & BITMASK(bits);
}

static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
				      u32 value)
{
	int idx;

	value &= BITMASK(bits);
	idx    = start / 32;
	start -= idx * 32;
	idx    = 2 - idx; /* flip */
	ale_entry[idx] &= ~(BITMASK(bits) << start);
	ale_entry[idx] |=  (value << start);
}

#define DEFINE_ALE_FIELD(name, start, bits)				\
static inline int cpsw_ale_get_##name(u32 *ale_entry)			\
{									\
	return cpsw_ale_get_field(ale_entry, start, bits);		\
}									\
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value)	\
{									\
	cpsw_ale_set_field(ale_entry, start, bits, value);		\
}

DEFINE_ALE_FIELD(entry_type,	60,	2)
DEFINE_ALE_FIELD(mcast_state,	62,	2)
DEFINE_ALE_FIELD(port_mask,	66,	3)
DEFINE_ALE_FIELD(ucast_type,	62,	2)
DEFINE_ALE_FIELD(port_num,	66,	2)
DEFINE_ALE_FIELD(blocked,	65,	1)
DEFINE_ALE_FIELD(secure,	64,	1)
DEFINE_ALE_FIELD(mcast,		40,	1)

/* The MAC address field in the ALE entry cannot be macroized as above */
static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
}

static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
}

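/*
 * The ALE table is accessed indirectly: the entry index is written to
 * ALE_TABLE_CONTROL and the entry's three 32-bit words are then read from
 * or written to the ALE_TABLE registers; ALE_TABLE_WRITE in the control
 * register commits a write.
 */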
static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	__raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);

	return idx;
}

static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
{
	int i;

	for (i = 0; i < ALE_ENTRY_WORDS; i++)
		__raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);

	__raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);

	return idx;
}

static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		u8 entry_addr[6];

		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		cpsw_ale_get_addr(ale_entry, entry_addr);
		if (memcmp(entry_addr, addr, 6) == 0)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_match_free(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type == ALE_TYPE_FREE)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
{
	u32 ale_entry[ALE_ENTRY_WORDS];
	int type, idx;

	for (idx = 0; idx < priv->data.ale_entries; idx++) {
		cpsw_ale_read(priv, idx, ale_entry);
		type = cpsw_ale_get_entry_type(ale_entry);
		if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
			continue;
		if (cpsw_ale_get_mcast(ale_entry))
			continue;
		type = cpsw_ale_get_ucast_type(ale_entry);
		if (type != ALE_UCAST_PERSISTANT &&
		    type != ALE_UCAST_OUI)
			return idx;
	}
	return -ENOENT;
}

static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
			      int port, int flags)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx;

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
	cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
	cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
	cpsw_ale_set_port_num(ale_entry, port);

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
			      int port_mask)
{
	u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
	int idx, mask;

	idx = cpsw_ale_match_addr(priv, addr);
	if (idx >= 0)
		cpsw_ale_read(priv, idx, ale_entry);

	cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
	cpsw_ale_set_addr(ale_entry, addr);
	cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);

	mask = cpsw_ale_get_port_mask(ale_entry);
	port_mask |= mask;
	cpsw_ale_set_port_mask(ale_entry, port_mask);

	if (idx < 0)
		idx = cpsw_ale_match_free(priv);
	if (idx < 0)
		idx = cpsw_ale_find_ageable(priv);
	if (idx < 0)
		return -ENOMEM;

	cpsw_ale_write(priv, idx, ale_entry);
	return 0;
}

static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp  = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

#define cpsw_ale_enable(priv, val)	cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val)	cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val)	cpsw_ale_control(priv, 2, val)

static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
				       int val)
{
	int offset = ALE_PORTCTL + 4 * port;
	u32 tmp, mask = 0x3;

	tmp  = __raw_readl(priv->ale_regs + offset);
	tmp &= ~mask;
	tmp |= val & mask;
	__raw_writel(tmp, priv->ale_regs + offset);
}

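/*
 * MDIO accesses go through the USERACCESS register: wait for any in-flight
 * access to finish, write USERACCESS_GO together with the register and PHY
 * address (plus the data for a write), then poll until the hardware clears
 * the GO bit.  A read result is only valid when USERACCESS_ACK is set.
 */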
static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		return -ETIMEDOUT;
	}
	return reg;
}

/* wait until hardware state machine is idle */
static inline void wait_for_idle(void)
{
	int timeout = MDIO_TIMEOUT;

	while (timeout-- &&
	       ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
		udelay(10);

	if (timeout == -1)
		printf("wait_for_idle Timeout\n");
}

static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
			  int dev_addr, int phy_reg)
{
	int data;
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
	       (phy_id << 16));
	__raw_writel(reg, &mdio_regs->user[0].access);
	reg = wait_for_user_access();

	data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
	return data;
}

static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
			   int phy_reg, u16 data)
{
	u32 reg;

	if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
		return -EINVAL;

	wait_for_user_access();
	reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
	       (phy_id << 16) | (data & USERACCESS_DATA));
	__raw_writel(reg, &mdio_regs->user[0].access);
	wait_for_user_access();

	return 0;
}

static void cpsw_mdio_init(const char *name, u32 mdio_base, u32 div)
{
	struct mii_dev *bus = mdio_alloc();

	mdio_regs = (struct cpsw_mdio_regs *)mdio_base;

	/* set enable and clock divider */
	__raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);

	/*
	 * wait for scan logic to settle:
	 * the scan time consists of (a) a large fixed component, and (b) a
	 * small component that varies with the mii bus frequency.  These
	 * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
	 * silicon.  Since the effect of (b) was found to be largely
	 * negligible, we keep things simple here.
	 */
	udelay(1000);

	bus->read = cpsw_mdio_read;
	bus->write = cpsw_mdio_write;
	strcpy(bus->name, name);

	mdio_register(bus);
}

/* Set a self-clearing bit in a register, and wait for it to clear */
static inline void setbit_and_wait_for_clear32(void *addr)
{
	__raw_writel(CLEAR_BIT, addr);
	while (__raw_readl(addr) & CLEAR_BIT)
		;
}

#define mac_hi(mac)	(((mac)[0] << 0) | ((mac)[1] << 8) |	\
			 ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac)	(((mac)[4] << 0) | ((mac)[5] << 8))

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
#ifdef CONFIG_DM_ETH
	struct eth_pdata *pdata = dev_get_platdata(priv->dev);

	writel(mac_hi(pdata->enetaddr), &slave->regs->sa_hi);
	writel(mac_lo(pdata->enetaddr), &slave->regs->sa_lo);
#else
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
#endif
}

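/*
 * Propagate the PHY's negotiated link state into the sliver MAC_CONTROL
 * register.  The register is only rewritten (and the link change printed)
 * when the computed value actually differs from the cached one.
 */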
static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy;
	u32 mac_control = 0;

	phy = priv->phydev;

	if (!phy)
		return;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
	}

	if (mac_control == slave->mac_control)
		return;

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}

static int cpsw_update_link(struct cpsw_priv *priv)
{
	int link = 0;
	struct cpsw_slave *slave;

	for_active_slave(slave, priv)
		cpsw_slave_update_link(slave, priv, &link);

	return link;
}

static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
{
	if (priv->host_port == 0)
		return slave_num + 1;
	else
		return slave_num;
}

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32	slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0;	/* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);

	priv->phy_mask |= 1 << slave->data->phy_addr;
}

static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
{
	struct cpdma_desc *desc = priv->desc_free;

	if (desc)
		priv->desc_free = desc_read_ptr(desc, hw_next);
	return desc;
}

static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
{
	if (desc) {
		desc_write(desc, hw_next, priv->desc_free);
		priv->desc_free = desc;
	}
}

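/*
 * Queue a single buffer on a CPDMA channel.  Each packet uses one
 * descriptor marked SOP|EOP and handed to the hardware via the OWNER bit;
 * the first descriptor is written straight to the channel head pointer
 * (HDP), later ones are chained off the previous tail, with HDP rewritten
 * if the hardware had already reached end-of-queue (EOQ).
 */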
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN;

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next,   0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len,    len);
	desc_write(desc, hw_mode,   mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len,    len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1);
	return 0;
}

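/*
 * Reap the descriptor at the channel head: report its buffer and length,
 * return -EBUSY (and restart a stalled channel) while the hardware still
 * owns it, otherwise acknowledge completion and recycle the descriptor
 * back onto the free list.
 */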
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff;

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}

		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc);

	cpdma_desc_free(priv, desc);
	return 0;
}

static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
{
	struct cpsw_slave	*slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
	__raw_writel(0x7, &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
	cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);

	for_active_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp	= priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp	= priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree	= priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp	= priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp	= priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp	= priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp	= priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree	= priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp	= priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp	= priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
					* i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
					* i);
		}
	}

	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}

static void _cpsw_halt(struct cpsw_priv *priv)
{
	writel(0, priv->dma_regs + CPDMA_TXCONTROL);
	writel(0, priv->dma_regs + CPDMA_RXCONTROL);

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
}

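/*
 * Transmit path: flush the packet from the data cache (the CPDMA engine
 * reads it directly from memory), reap any already-completed TX
 * descriptors, then queue the new buffer on the TX channel.
 */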
static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
{
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + ALIGN(length, PKTALIGN));

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}

static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
{
	void *buffer;
	int len;
	int ret = -EAGAIN;

	ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
	if (ret < 0)
		return ret;

	invalidate_dcache_range((unsigned long)buffer,
				(unsigned long)buffer + PKTSIZE_ALIGN);
	*pkt = buffer;

	return len;
}

static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
			     struct cpsw_priv *priv)
{
	void			*regs = priv->regs;
	struct cpsw_slave_data	*data = priv->data.slave_data + slave_num;

	slave->slave_num = slave_num;
	slave->data	 = data;
	slave->regs	 = regs + data->slave_reg_ofs;
	slave->sliver	 = regs + data->sliver_reg_ofs;
}

static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
{
	struct phy_device *phydev;
	u32 supported = PHY_GBIT_FEATURES;

	phydev = phy_connect(priv->bus,
			     slave->data->phy_addr,
			     priv->dev,
			     slave->data->phy_if);

	if (!phydev)
		return -1;

	phydev->supported &= supported;
	phydev->advertising = phydev->supported;

#ifdef CONFIG_DM_ETH
	if (slave->data->phy_of_handle)
		dev_set_of_offset(phydev->dev, slave->data->phy_of_handle);
#endif

	priv->phydev = phydev;
	phy_config(phydev);

	return 1;
}

int _cpsw_register(struct cpsw_priv *priv)
{
	struct cpsw_slave		*slave;
	struct cpsw_platform_data	*data = &priv->data;
	void				*regs = (void *)data->cpsw_base;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves)
		return -ENOMEM;

	priv->host_port		= data->host_port_num;
	priv->regs		= regs;
	priv->host_port_regs	= regs + data->host_port_reg_ofs;
	priv->dma_regs		= regs + data->cpdma_reg_ofs;
	priv->ale_regs		= regs + data->ale_reg_ofs;
	priv->descs		= (void *)regs + data->bd_ram_ofs;

	int idx = 0;

	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(priv->dev->name);
	for_active_slave(slave, priv)
		cpsw_phy_init(priv, slave);

	return 0;
}

#ifndef CONFIG_DM_ETH
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_init(priv, dev->enetaddr);
}

static void cpsw_halt(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_halt(priv);
}

static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;

	return _cpsw_send(priv, packet, length);
}

static int cpsw_recv(struct eth_device *dev)
{
	struct cpsw_priv *priv = dev->priv;
	uchar *pkt = NULL;
	int len;

	len = _cpsw_recv(priv, &pkt);

	if (len > 0) {
		net_process_received_packet(pkt, len);
		cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
	}

	return len;
}

int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv	*priv;
	struct eth_device	*dev;
	int ret;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->dev = dev;
	priv->data = *data;

	strcpy(dev->name, "cpsw");
	dev->iobase	= 0;
	dev->init	= cpsw_init;
	dev->halt	= cpsw_halt;
	dev->send	= cpsw_send;
	dev->recv	= cpsw_recv;
	dev->priv	= priv;

	eth_register(dev);

	ret = _cpsw_register(priv);
	if (ret < 0) {
		eth_unregister(dev);
		free(dev);
		free(priv);
		return ret;
	}

	return 1;
}
#else
static int cpsw_eth_start(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_init(priv, pdata->enetaddr);
}

static int cpsw_eth_send(struct udevice *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_send(priv, packet, length);
}

static int cpsw_eth_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_recv(priv, packetp);
}

static int cpsw_eth_free_pkt(struct udevice *dev, uchar *packet,
			     int length)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return cpdma_submit(priv, &priv->rx_chan, packet, PKTSIZE);
}

static void cpsw_eth_stop(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	return _cpsw_halt(priv);
}

static int cpsw_eth_probe(struct udevice *dev)
{
	struct cpsw_priv *priv = dev_get_priv(dev);

	priv->dev = dev;

	return _cpsw_register(priv);
}

static const struct eth_ops cpsw_eth_ops = {
	.start		= cpsw_eth_start,
	.send		= cpsw_eth_send,
	.recv		= cpsw_eth_recv,
	.free_pkt	= cpsw_eth_free_pkt,
	.stop		= cpsw_eth_stop,
};

static inline fdt_addr_t cpsw_get_addr_by_node(const void *fdt, int node)
{
	return fdtdec_get_addr_size_auto_noparent(fdt, node, "reg", 0, NULL,
						  false);
}

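/*
 * The Control Module GMII_SEL register selects the MAC-to-PHY interface
 * mode per slave.  On AM335x-class parts each slave has a 2-bit mode field
 * (MII/RMII/RGMII) plus option bits for RGMII internal delay and an
 * externally supplied RMII reference clock.
 */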
static void cpsw_gmii_sel_am3352(struct cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	bool rgmii_id = false;
	int slave = priv->data.active_slave;

	reg = readl(priv->data.gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		rgmii_id = true;
		break;

	case PHY_INTERFACE_MODE_MII:
	default:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	};

	mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
	mode <<= slave * 2;

	if (priv->data.rmii_clock_external) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
		else
			mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
	}

	if (rgmii_id) {
		if (slave == 0)
			mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
		else
			mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
	}

	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->data.gmii_sel);
}

static void cpsw_gmii_sel_dra7xx(struct cpsw_priv *priv,
				 phy_interface_t phy_mode)
{
	u32 reg;
	u32 mask;
	u32 mode = 0;
	int slave = priv->data.active_slave;

	reg = readl(priv->data.gmii_sel);

	switch (phy_mode) {
	case PHY_INTERFACE_MODE_RMII:
		mode = AM33XX_GMII_SEL_MODE_RMII;
		break;

	case PHY_INTERFACE_MODE_RGMII:
	case PHY_INTERFACE_MODE_RGMII_ID:
	case PHY_INTERFACE_MODE_RGMII_RXID:
	case PHY_INTERFACE_MODE_RGMII_TXID:
		mode = AM33XX_GMII_SEL_MODE_RGMII;
		break;

	case PHY_INTERFACE_MODE_MII:
	default:
		mode = AM33XX_GMII_SEL_MODE_MII;
		break;
	};

	switch (slave) {
	case 0:
		mask = GMII_SEL_MODE_MASK;
		break;
	case 1:
		mask = GMII_SEL_MODE_MASK << 4;
		mode <<= 4;
		break;
	default:
		dev_err(priv->dev, "invalid slave number...\n");
		return;
	}

	if (priv->data.rmii_clock_external)
		dev_err(priv->dev, "RMII External clock is not supported\n");

	reg &= ~mask;
	reg |= mode;

	writel(reg, priv->data.gmii_sel);
}

static void cpsw_phy_sel(struct cpsw_priv *priv, const char *compat,
			 phy_interface_t phy_mode)
{
	if (!strcmp(compat, "ti,am3352-cpsw-phy-sel"))
		cpsw_gmii_sel_am3352(priv, phy_mode);
	if (!strcmp(compat, "ti,am43xx-cpsw-phy-sel"))
		cpsw_gmii_sel_am3352(priv, phy_mode);
	else if (!strcmp(compat, "ti,dra7xx-cpsw-phy-sel"))
		cpsw_gmii_sel_dra7xx(priv, phy_mode);
}

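/*
 * Populate platform data from the device tree: top-level properties
 * (cpdma_channels, slaves, ale_entries, bd_ram_size, mac_control,
 * active_slave), the "mdio" and "slave" subnodes, and the "cpsw-phy-sel"
 * node that supplies the gmii_sel register address and compatible string.
 */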
static int cpsw_eth_ofdata_to_platdata(struct udevice *dev)
{
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct cpsw_priv *priv = dev_get_priv(dev);
	struct gpio_desc *mode_gpios;
	const char *phy_mode;
	const char *phy_sel_compat = NULL;
	const void *fdt = gd->fdt_blob;
	int node = dev_of_offset(dev);
	int subnode;
	int slave_index = 0;
	int active_slave;
	int num_mode_gpios;
	int ret;

	pdata->iobase = dev_get_addr(dev);
	priv->data.version = CPSW_CTRL_VERSION_2;
	priv->data.bd_ram_ofs = CPSW_BD_OFFSET;
	priv->data.ale_reg_ofs = CPSW_ALE_OFFSET;
	priv->data.cpdma_reg_ofs = CPSW_CPDMA_OFFSET;
	priv->data.mdio_div = CPSW_MDIO_DIV;
	priv->data.host_port_reg_ofs = CPSW_HOST_PORT_OFFSET;

	pdata->phy_interface = -1;

	priv->data.cpsw_base = pdata->iobase;

	priv->data.channels = fdtdec_get_int(fdt, node, "cpdma_channels", -1);
	if (priv->data.channels <= 0) {
		printf("error: cpdma_channels not found in dt\n");
		return -ENOENT;
	}

	priv->data.slaves = fdtdec_get_int(fdt, node, "slaves", -1);
	if (priv->data.slaves <= 0) {
		printf("error: slaves not found in dt\n");
		return -ENOENT;
	}
	priv->data.slave_data = malloc(sizeof(struct cpsw_slave_data) *
				       priv->data.slaves);

	priv->data.ale_entries = fdtdec_get_int(fdt, node, "ale_entries", -1);
	if (priv->data.ale_entries <= 0) {
		printf("error: ale_entries not found in dt\n");
		return -ENOENT;
	}

	priv->data.bd_ram_ofs = fdtdec_get_int(fdt, node, "bd_ram_size", -1);
	if (priv->data.bd_ram_ofs <= 0) {
		printf("error: bd_ram_size not found in dt\n");
		return -ENOENT;
	}

	priv->data.mac_control = fdtdec_get_int(fdt, node, "mac_control", -1);
	if (priv->data.mac_control <= 0) {
		printf("error: mac_control not found in dt\n");
		return -ENOENT;
	}

	num_mode_gpios = gpio_get_list_count(dev, "mode-gpios");
	if (num_mode_gpios > 0) {
		mode_gpios = malloc(sizeof(struct gpio_desc) *
				    num_mode_gpios);
		gpio_request_list_by_name(dev, "mode-gpios", mode_gpios,
					  num_mode_gpios, GPIOD_IS_OUT);
		free(mode_gpios);
	}

	active_slave = fdtdec_get_int(fdt, node, "active_slave", 0);
	priv->data.active_slave = active_slave;

	fdt_for_each_subnode(subnode, fdt, node) {
		int len;
		const char *name;

		name = fdt_get_name(fdt, subnode, &len);

		if (!strncmp(name, "mdio", 4)) {
			u32 mdio_base;

			mdio_base = cpsw_get_addr_by_node(fdt, subnode);
			if (mdio_base == FDT_ADDR_T_NONE) {
				error("Not able to get MDIO address space\n");
				return -ENOENT;
			}
			priv->data.mdio_base = mdio_base;
		}

		if (!strncmp(name, "slave", 5)) {
			u32 phy_id[2];

			if (slave_index >= priv->data.slaves)
				continue;
			phy_mode = fdt_getprop(fdt, subnode, "phy-mode", NULL);
			if (phy_mode)
				priv->data.slave_data[slave_index].phy_if =
					phy_get_interface_by_name(phy_mode);

			priv->data.slave_data[slave_index].phy_of_handle =
				fdtdec_lookup_phandle(fdt, subnode,
						      "phy-handle");

			if (priv->data.slave_data[slave_index].phy_of_handle >= 0) {
				priv->data.slave_data[slave_index].phy_addr =
					fdtdec_get_int(gd->fdt_blob,
						       priv->data.slave_data[slave_index].phy_of_handle,
						       "reg", -1);
			} else {
				fdtdec_get_int_array(fdt, subnode, "phy_id",
						     phy_id, 2);
				priv->data.slave_data[slave_index].phy_addr =
						phy_id[1];
			}
			slave_index++;
		}

		if (!strncmp(name, "cpsw-phy-sel", 12)) {
			priv->data.gmii_sel = cpsw_get_addr_by_node(fdt,
								    subnode);
			if (priv->data.gmii_sel == FDT_ADDR_T_NONE) {
				error("Not able to get gmii_sel reg address\n");
				return -ENOENT;
			}

			if (fdt_get_property(fdt, subnode, "rmii-clock-ext",
					     NULL))
				priv->data.rmii_clock_external = true;

			phy_sel_compat = fdt_getprop(fdt, subnode, "compatible",
						     NULL);
			if (!phy_sel_compat) {
				error("Not able to get gmii_sel compatible\n");
				return -ENOENT;
			}
		}
	}

	priv->data.slave_data[0].slave_reg_ofs = CPSW_SLAVE0_OFFSET;
	priv->data.slave_data[0].sliver_reg_ofs = CPSW_SLIVER0_OFFSET;

	if (priv->data.slaves == 2) {
		priv->data.slave_data[1].slave_reg_ofs = CPSW_SLAVE1_OFFSET;
		priv->data.slave_data[1].sliver_reg_ofs = CPSW_SLIVER1_OFFSET;
	}

	ret = ti_cm_get_macid(dev, active_slave, pdata->enetaddr);
	if (ret < 0) {
		error("cpsw read efuse mac failed\n");
		return ret;
	}

	pdata->phy_interface = priv->data.slave_data[active_slave].phy_if;
	if (pdata->phy_interface == -1) {
		debug("%s: Invalid PHY interface '%s'\n", __func__, phy_mode);
		return -EINVAL;
	}

	/* Select phy interface in control module */
	cpsw_phy_sel(priv, phy_sel_compat, pdata->phy_interface);

	return 0;
}

static const struct udevice_id cpsw_eth_ids[] = {
	{ .compatible = "ti,cpsw" },
	{ .compatible = "ti,am335x-cpsw" },
	{ }
};

U_BOOT_DRIVER(eth_cpsw) = {
	.name	= "eth_cpsw",
	.id	= UCLASS_ETH,
	.of_match = cpsw_eth_ids,
	.ofdata_to_platdata = cpsw_eth_ofdata_to_platdata,
	.probe	= cpsw_eth_probe,
	.ops	= &cpsw_eth_ops,
	.priv_auto_alloc_size = sizeof(struct cpsw_priv),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};
#endif /* CONFIG_DM_ETH */