cpsw.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017
  1. /*
  2. * CPSW Ethernet Switch Driver
  3. *
  4. * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License as
  8. * published by the Free Software Foundation version 2.
  9. *
  10. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  11. * kind, whether express or implied; without even the implied warranty
  12. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <common.h>
  16. #include <command.h>
  17. #include <net.h>
  18. #include <miiphy.h>
  19. #include <malloc.h>
  20. #include <net.h>
  21. #include <netdev.h>
  22. #include <cpsw.h>
  23. #include <asm/errno.h>
  24. #include <asm/io.h>
  25. #include <phy.h>
  26. #include <asm/arch/cpu.h>
/* Mask with the low 'bits' bits set (BIT() comes from common U-Boot headers) */
#define BITMASK(bits) (BIT(bits) - 1)
#define PHY_REG_MASK 0x1f /* MDIO register numbers are 5 bits wide */
#define PHY_ID_MASK 0x1f /* MDIO phy addresses are 5 bits wide */
#define NUM_DESCS (PKTBUFSRX * 2) /* shared rx/tx descriptor pool size */
#define PKT_MIN 60 /* minimum frame length padded up to by cpdma_submit() */
#define PKT_MAX (1500 + 14 + 4 + 4) /* MTU + ethernet header + VLAN + FCS */
#define CLEAR_BIT 1 /* value written to self-clearing reset bits */
#define GIGABITEN BIT(7) /* sliver mac_control: gigabit enable */
#define FULLDUPLEXEN BIT(0) /* sliver mac_control: full duplex enable */
#define MIIEN BIT(15) /* sliver mac_control: MII enable */
/* DMA Registers (offsets into the CPDMA register block) */
#define CPDMA_TXCONTROL 0x004
#define CPDMA_RXCONTROL 0x014
#define CPDMA_SOFTRESET 0x01c
#define CPDMA_RXFREE 0x0e0
/* head-descriptor-pointer / completion-pointer offsets differ per IP version */
#define CPDMA_TXHDP_VER1 0x100
#define CPDMA_TXHDP_VER2 0x200
#define CPDMA_RXHDP_VER1 0x120
#define CPDMA_RXHDP_VER2 0x220
#define CPDMA_TXCP_VER1 0x140
#define CPDMA_TXCP_VER2 0x240
#define CPDMA_RXCP_VER1 0x160
#define CPDMA_RXCP_VER2 0x260
/* NOTE(review): hard-coded CPPI descriptor RAM base — assumes an
 * AM335x-class memory map; confirm for other SoCs */
#define CPDMA_RAM_ADDR 0x4a102000
/* Descriptor mode bits */
#define CPDMA_DESC_SOP BIT(31) /* start of packet */
#define CPDMA_DESC_EOP BIT(30) /* end of packet */
#define CPDMA_DESC_OWNER BIT(29) /* descriptor owned by hardware */
#define CPDMA_DESC_EOQ BIT(28) /* hardware reached end of queue */
/*
 * This timeout definition is a worst-case ultra defensive measure against
 * unexpected controller lock ups. Ideally, we should never ever hit this
 * scenario in practice.
 */
#define MDIO_TIMEOUT 100 /* msecs */
#define CPDMA_TIMEOUT 100 /* msecs */
/* MDIO controller register layout */
struct cpsw_mdio_regs {
	u32 version;
	u32 control;
#define CONTROL_IDLE BIT(31) /* state machine is idle */
#define CONTROL_ENABLE BIT(30) /* enable the MDIO state machine */
	u32 alive;
	u32 link; /* per-phy link status bits */
	u32 linkintraw;
	u32 linkintmasked;
	u32 __reserved_0[2];
	u32 userintraw;
	u32 userintmasked;
	u32 userintmaskset;
	u32 userintmaskclr;
	u32 __reserved_1[20];
	/* user access channels; zero-length array, only user[0] is used here */
	struct {
		u32 access;
		u32 physel;
#define USERACCESS_GO BIT(31) /* start transaction; stays set while busy */
#define USERACCESS_WRITE BIT(30)
#define USERACCESS_ACK BIT(29) /* phy acknowledged the transaction */
#define USERACCESS_READ (0)
#define USERACCESS_DATA (0xffff) /* 16-bit read/write data field */
	} user[0];
};
/* main switch module registers */
struct cpsw_regs {
	u32 id_ver;
	u32 control;
	u32 soft_reset;
	u32 stat_port_en;
	u32 ptype;
};

/* per-slave-port switch registers; the middle section differs per SoC */
struct cpsw_slave_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
#ifdef CONFIG_AM33XX
	u32 gap_thresh;
#elif defined(CONFIG_TI814X)
	u32 ts_ctl;
	u32 ts_seq_ltype;
	u32 ts_vlan;
#endif
	u32 sa_lo; /* port MAC address, bytes 4-5 (see mac_lo()) */
	u32 sa_hi; /* port MAC address, bytes 0-3 (see mac_hi()) */
};

/* host (CPU-facing) port registers */
struct cpsw_host_regs {
	u32 max_blks;
	u32 blk_cnt;
	u32 flow_thresh;
	u32 port_vlan;
	u32 tx_pri_map;
	u32 cpdma_tx_pri_map;
	u32 cpdma_rx_chan_map;
};

/* per-port ethernet MAC ("sliver") registers */
struct cpsw_sliver_regs {
	u32 id_ver;
	u32 mac_control;
	u32 mac_status;
	u32 soft_reset;
	u32 rx_maxlen;
	u32 __reserved_0;
	u32 rx_pause;
	u32 tx_pause;
	u32 __reserved_1;
	u32 rx_pri_map;
};
#define ALE_ENTRY_BITS 68 /* each address-table entry is 68 bits wide */
#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)

/* ALE Registers */
#define ALE_CONTROL 0x08
#define ALE_UNKNOWNVLAN 0x18
#define ALE_TABLE_CONTROL 0x20
#define ALE_TABLE 0x34
#define ALE_PORTCTL 0x40

#define ALE_TABLE_WRITE BIT(31) /* table-control: commit entry instead of read */

/* entry-type field values */
#define ALE_TYPE_FREE 0
#define ALE_TYPE_ADDR 1
#define ALE_TYPE_VLAN 2
#define ALE_TYPE_VLAN_ADDR 3

/* unicast entry sub-types */
#define ALE_UCAST_PERSISTANT 0
#define ALE_UCAST_UNTOUCHED 1
#define ALE_UCAST_OUI 2
#define ALE_UCAST_TOUCHED 3

/* multicast forwarding states */
#define ALE_MCAST_FWD 0
#define ALE_MCAST_BLOCK_LEARN_FWD 1
#define ALE_MCAST_FWD_LEARN 2
#define ALE_MCAST_FWD_2 3

/* per-port forwarding state, written to the low bits of ALE_PORTCTL */
enum cpsw_ale_port_state {
	ALE_PORT_STATE_DISABLE = 0x00,
	ALE_PORT_STATE_BLOCK = 0x01,
	ALE_PORT_STATE_LEARN = 0x02,
	ALE_PORT_STATE_FORWARD = 0x03,
};

/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
#define ALE_SECURE 1
#define ALE_BLOCKED 2
/* per-slave-port run-time state */
struct cpsw_slave {
	struct cpsw_slave_regs *regs;
	struct cpsw_sliver_regs *sliver;
	int slave_num;
	u32 mac_control; /* last value written to the sliver's mac_control */
	struct cpsw_slave_data *data;
};

/* CPPI descriptor as laid out in CPPI RAM */
struct cpdma_desc {
	/* hardware fields */
	u32 hw_next;
	u32 hw_buffer;
	u32 hw_len;
	u32 hw_mode;
	/* software fields */
	u32 sw_buffer;
	u32 sw_len;
};

/* one dma channel: software queue plus its hdp/cp/rxfree registers */
struct cpdma_chan {
	struct cpdma_desc *head, *tail;
	void *hdp, *cp, *rxfree;
};

/* raw-access helpers for descriptor fields and channel registers */
#define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
#define desc_read(desc, fld) __raw_readl(&(desc)->fld)
#define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))

#define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
#define chan_read(chan, fld) __raw_readl((chan)->fld)
#define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))

/* iterate over all configured slave ports */
#define for_each_slave(slave, priv) \
	for (slave = (priv)->slaves; slave != (priv)->slaves + \
	(priv)->data.slaves; slave++)

/* driver instance state */
struct cpsw_priv {
	struct eth_device *dev;
	struct cpsw_platform_data data; /* private copy of the platform data */
	int host_port;

	struct cpsw_regs *regs;
	void *dma_regs;
	struct cpsw_host_regs *host_port_regs;
	void *ale_regs;

	struct cpdma_desc *descs; /* descriptor pool in CPPI RAM */
	struct cpdma_desc *desc_free; /* head of the free-descriptor list */
	struct cpdma_chan rx_chan, tx_chan;

	struct cpsw_slave *slaves;
	struct phy_device *phydev;
	struct mii_dev *bus;

	u32 mdio_link; /* snapshot of mdio link bits from last update */
	u32 phy_mask; /* mdio link bits that belong to this switch's phys */
};
  209. static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
  210. {
  211. int idx;
  212. idx = start / 32;
  213. start -= idx * 32;
  214. idx = 2 - idx; /* flip */
  215. return (ale_entry[idx] >> start) & BITMASK(bits);
  216. }
  217. static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
  218. u32 value)
  219. {
  220. int idx;
  221. value &= BITMASK(bits);
  222. idx = start / 32;
  223. start -= idx * 32;
  224. idx = 2 - idx; /* flip */
  225. ale_entry[idx] &= ~(BITMASK(bits) << start);
  226. ale_entry[idx] |= (value << start);
  227. }
/*
 * Generate typed get/set accessors for a fixed-position bitfield of an
 * ALE table entry.
 */
#define DEFINE_ALE_FIELD(name, start, bits) \
static inline int cpsw_ale_get_##name(u32 *ale_entry) \
{ \
	return cpsw_ale_get_field(ale_entry, start, bits); \
} \
static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
{ \
	cpsw_ale_set_field(ale_entry, start, bits, value); \
}

DEFINE_ALE_FIELD(entry_type, 60, 2)
DEFINE_ALE_FIELD(mcast_state, 62, 2)
DEFINE_ALE_FIELD(port_mask, 66, 3)
DEFINE_ALE_FIELD(ucast_type, 62, 2)
DEFINE_ALE_FIELD(port_num, 66, 2)
DEFINE_ALE_FIELD(blocked, 65, 1)
DEFINE_ALE_FIELD(secure, 64, 1)
DEFINE_ALE_FIELD(mcast, 40, 1)
  245. /* The MAC address field in the ALE entry cannot be macroized as above */
  246. static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
  247. {
  248. int i;
  249. for (i = 0; i < 6; i++)
  250. addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
  251. }
  252. static inline void cpsw_ale_set_addr(u32 *ale_entry, u8 *addr)
  253. {
  254. int i;
  255. for (i = 0; i < 6; i++)
  256. cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
  257. }
  258. static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
  259. {
  260. int i;
  261. __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
  262. for (i = 0; i < ALE_ENTRY_WORDS; i++)
  263. ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
  264. return idx;
  265. }
  266. static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
  267. {
  268. int i;
  269. for (i = 0; i < ALE_ENTRY_WORDS; i++)
  270. __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
  271. __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
  272. return idx;
  273. }
  274. static int cpsw_ale_match_addr(struct cpsw_priv *priv, u8* addr)
  275. {
  276. u32 ale_entry[ALE_ENTRY_WORDS];
  277. int type, idx;
  278. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  279. u8 entry_addr[6];
  280. cpsw_ale_read(priv, idx, ale_entry);
  281. type = cpsw_ale_get_entry_type(ale_entry);
  282. if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
  283. continue;
  284. cpsw_ale_get_addr(ale_entry, entry_addr);
  285. if (memcmp(entry_addr, addr, 6) == 0)
  286. return idx;
  287. }
  288. return -ENOENT;
  289. }
  290. static int cpsw_ale_match_free(struct cpsw_priv *priv)
  291. {
  292. u32 ale_entry[ALE_ENTRY_WORDS];
  293. int type, idx;
  294. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  295. cpsw_ale_read(priv, idx, ale_entry);
  296. type = cpsw_ale_get_entry_type(ale_entry);
  297. if (type == ALE_TYPE_FREE)
  298. return idx;
  299. }
  300. return -ENOENT;
  301. }
  302. static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
  303. {
  304. u32 ale_entry[ALE_ENTRY_WORDS];
  305. int type, idx;
  306. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  307. cpsw_ale_read(priv, idx, ale_entry);
  308. type = cpsw_ale_get_entry_type(ale_entry);
  309. if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
  310. continue;
  311. if (cpsw_ale_get_mcast(ale_entry))
  312. continue;
  313. type = cpsw_ale_get_ucast_type(ale_entry);
  314. if (type != ALE_UCAST_PERSISTANT &&
  315. type != ALE_UCAST_OUI)
  316. return idx;
  317. }
  318. return -ENOENT;
  319. }
  320. static int cpsw_ale_add_ucast(struct cpsw_priv *priv, u8 *addr,
  321. int port, int flags)
  322. {
  323. u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
  324. int idx;
  325. cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
  326. cpsw_ale_set_addr(ale_entry, addr);
  327. cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
  328. cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
  329. cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
  330. cpsw_ale_set_port_num(ale_entry, port);
  331. idx = cpsw_ale_match_addr(priv, addr);
  332. if (idx < 0)
  333. idx = cpsw_ale_match_free(priv);
  334. if (idx < 0)
  335. idx = cpsw_ale_find_ageable(priv);
  336. if (idx < 0)
  337. return -ENOMEM;
  338. cpsw_ale_write(priv, idx, ale_entry);
  339. return 0;
  340. }
  341. static int cpsw_ale_add_mcast(struct cpsw_priv *priv, u8 *addr, int port_mask)
  342. {
  343. u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
  344. int idx, mask;
  345. idx = cpsw_ale_match_addr(priv, addr);
  346. if (idx >= 0)
  347. cpsw_ale_read(priv, idx, ale_entry);
  348. cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
  349. cpsw_ale_set_addr(ale_entry, addr);
  350. cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
  351. mask = cpsw_ale_get_port_mask(ale_entry);
  352. port_mask |= mask;
  353. cpsw_ale_set_port_mask(ale_entry, port_mask);
  354. if (idx < 0)
  355. idx = cpsw_ale_match_free(priv);
  356. if (idx < 0)
  357. idx = cpsw_ale_find_ageable(priv);
  358. if (idx < 0)
  359. return -ENOMEM;
  360. cpsw_ale_write(priv, idx, ale_entry);
  361. return 0;
  362. }
/* Read-modify-write a single bit of the ALE control register. */
static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
{
	u32 tmp, mask = BIT(bit);

	tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
	tmp &= ~mask;
	tmp |= val ? mask : 0;
	__raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
}

/* named wrappers for the control bits this driver touches */
#define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
#define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
#define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
  374. static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
  375. int val)
  376. {
  377. int offset = ALE_PORTCTL + 4 * port;
  378. u32 tmp, mask = 0x3;
  379. tmp = __raw_readl(priv->ale_regs + offset);
  380. tmp &= ~mask;
  381. tmp |= val & mask;
  382. __raw_writel(tmp, priv->ale_regs + offset);
  383. }
/* MDIO register block shared by the mii read/write callbacks below */
static struct cpsw_mdio_regs *mdio_regs;

/* wait until hardware is ready for another user access */
static inline u32 wait_for_user_access(void)
{
	u32 reg = 0;
	int timeout = MDIO_TIMEOUT;

	/* poll the GO bit: it stays set while a transaction is in flight */
	while (timeout-- &&
	((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
		udelay(10);

	if (timeout == -1) {
		printf("wait_for_user_access Timeout\n");
		/* NOTE(review): -ETIMEDOUT is returned through a u32 and
		 * callers only mask bits out of it, so the error is not
		 * really distinguishable from register contents — confirm */
		return -ETIMEDOUT;
	}
	return reg;
}
  399. /* wait until hardware state machine is idle */
  400. static inline void wait_for_idle(void)
  401. {
  402. int timeout = MDIO_TIMEOUT;
  403. while (timeout-- &&
  404. ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
  405. udelay(10);
  406. if (timeout == -1)
  407. printf("wait_for_idle Timeout\n");
  408. }
  409. static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
  410. int dev_addr, int phy_reg)
  411. {
  412. unsigned short data;
  413. u32 reg;
  414. if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
  415. return -EINVAL;
  416. wait_for_user_access();
  417. reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
  418. (phy_id << 16));
  419. __raw_writel(reg, &mdio_regs->user[0].access);
  420. reg = wait_for_user_access();
  421. data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
  422. return data;
  423. }
  424. static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
  425. int phy_reg, u16 data)
  426. {
  427. u32 reg;
  428. if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
  429. return -EINVAL;
  430. wait_for_user_access();
  431. reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
  432. (phy_id << 16) | (data & USERACCESS_DATA));
  433. __raw_writel(reg, &mdio_regs->user[0].access);
  434. wait_for_user_access();
  435. return 0;
  436. }
  437. static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
  438. {
  439. struct mii_dev *bus = mdio_alloc();
  440. mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
  441. /* set enable and clock divider */
  442. __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
  443. /*
  444. * wait for scan logic to settle:
  445. * the scan time consists of (a) a large fixed component, and (b) a
  446. * small component that varies with the mii bus frequency. These
  447. * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
  448. * silicon. Since the effect of (b) was found to be largely
  449. * negligible, we keep things simple here.
  450. */
  451. udelay(1000);
  452. bus->read = cpsw_mdio_read;
  453. bus->write = cpsw_mdio_write;
  454. sprintf(bus->name, name);
  455. mdio_register(bus);
  456. }
  457. /* Set a self-clearing bit in a register, and wait for it to clear */
  458. static inline void setbit_and_wait_for_clear32(void *addr)
  459. {
  460. __raw_writel(CLEAR_BIT, addr);
  461. while (__raw_readl(addr) & CLEAR_BIT)
  462. ;
  463. }
/* pack a 6-byte MAC address into the two sa_hi/sa_lo register values */
#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
		     ((mac)[2] << 16) | ((mac)[3] << 24))
#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))

/* Program the slave port's unicast MAC address from the eth device. */
static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	__raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
	__raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
}
/*
 * Poll the phy and translate its link/speed/duplex state into the
 * sliver's mac_control register.  *link is set to the phy link state so
 * the caller can aggregate it across slaves.  The register is only
 * rewritten (and the state printed) when something actually changed.
 */
static void cpsw_slave_update_link(struct cpsw_slave *slave,
				   struct cpsw_priv *priv, int *link)
{
	struct phy_device *phy = priv->phydev;
	u32 mac_control = 0;

	phy_startup(phy);
	*link = phy->link;

	if (*link) { /* link up */
		mac_control = priv->data.mac_control;
		if (phy->speed == 1000)
			mac_control |= GIGABITEN;
		if (phy->duplex == DUPLEX_FULL)
			mac_control |= FULLDUPLEXEN;
		if (phy->speed == 100)
			mac_control |= MIIEN;
		/* NOTE(review): a 10 Mbit link leaves MIIEN clear — confirm
		 * that is intended for this hardware */
	}

	if (mac_control == slave->mac_control)
		return; /* nothing changed since the last poll */

	if (mac_control) {
		printf("link up on port %d, speed %d, %s duplex\n",
		       slave->slave_num, phy->speed,
		       (phy->duplex == DUPLEX_FULL) ? "full" : "half");
	} else {
		printf("link down on port %d\n", slave->slave_num);
	}

	__raw_writel(mac_control, &slave->sliver->mac_control);
	slave->mac_control = mac_control;
}
  501. static int cpsw_update_link(struct cpsw_priv *priv)
  502. {
  503. int link = 0;
  504. struct cpsw_slave *slave;
  505. for_each_slave(slave, priv)
  506. cpsw_slave_update_link(slave, priv, &link);
  507. priv->mdio_link = readl(&mdio_regs->link);
  508. return link;
  509. }
  510. static int cpsw_check_link(struct cpsw_priv *priv)
  511. {
  512. u32 link = 0;
  513. link = __raw_readl(&mdio_regs->link) & priv->phy_mask;
  514. if ((link) && (link == priv->mdio_link))
  515. return 1;
  516. return cpsw_update_link(priv);
  517. }
  518. static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
  519. {
  520. if (priv->host_port == 0)
  521. return slave_num + 1;
  522. else
  523. return slave_num;
  524. }
/*
 * Reset and configure one slave port: priority maps, max packet size,
 * MAC address, ALE forwarding state, and broadcast membership.
 */
static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	u32 slave_port;

	setbit_and_wait_for_clear32(&slave->sliver->soft_reset);

	/* setup priority mapping */
	__raw_writel(0x76543210, &slave->sliver->rx_pri_map);
	__raw_writel(0x33221100, &slave->regs->tx_pri_map);

	/* setup max packet size, and mac address */
	__raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
	cpsw_set_slave_mac(slave, priv);

	slave->mac_control = 0; /* no link yet */

	/* enable forwarding */
	slave_port = cpsw_get_slave_port(priv, slave->slave_num);
	cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);

	/* broadcast frames go out this slave port too */
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << slave_port);

	/* record which mdio link bit belongs to this slave's phy */
	priv->phy_mask |= 1 << slave->data->phy_id;
}
  542. static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
  543. {
  544. struct cpdma_desc *desc = priv->desc_free;
  545. if (desc)
  546. priv->desc_free = desc_read_ptr(desc, hw_next);
  547. return desc;
  548. }
  549. static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
  550. {
  551. if (desc) {
  552. desc_write(desc, hw_next, priv->desc_free);
  553. priv->desc_free = desc;
  554. }
  555. }
/*
 * Queue one buffer on a dma channel.  The new descriptor is linked onto
 * the channel's software list, and the hardware queue is (re)started via
 * the head-descriptor-pointer register when it was empty or had stopped
 * at end-of-queue.  Returns 0 or -ENOMEM when no descriptor is free.
 */
static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
			void *buffer, int len)
{
	struct cpdma_desc *desc, *prev;
	u32 mode;

	desc = cpdma_desc_alloc(priv);
	if (!desc)
		return -ENOMEM;

	if (len < PKT_MIN)
		len = PKT_MIN; /* pad short frames up to the hardware minimum */

	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;

	desc_write(desc, hw_next, 0);
	desc_write(desc, hw_buffer, buffer);
	desc_write(desc, hw_len, len);
	desc_write(desc, hw_mode, mode | len);
	desc_write(desc, sw_buffer, buffer);
	desc_write(desc, sw_len, len);

	if (!chan->head) {
		/* simple case - first packet enqueued */
		chan->head = desc;
		chan->tail = desc;
		chan_write(chan, hdp, desc);
		goto done;
	}

	/* not the first packet - enqueue at the tail */
	prev = chan->tail;
	desc_write(prev, hw_next, desc);
	chan->tail = desc;

	/* next check if EOQ has been triggered already */
	if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
		chan_write(chan, hdp, desc);

done:
	if (chan->rxfree)
		chan_write(chan, rxfree, 1); /* return one rx buffer credit */
	return 0;
}
/*
 * Reap the oldest completed descriptor on a channel.  On success the
 * buffer pointer and packet length are returned through the optional out
 * parameters and the descriptor goes back to the free pool.  Returns
 * -ENOENT when the queue is empty and -EBUSY while the head descriptor
 * is still owned by the hardware.
 */
static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
			 void **buffer, int *len)
{
	struct cpdma_desc *desc = chan->head;
	u32 status;

	if (!desc)
		return -ENOENT;

	status = desc_read(desc, hw_mode);

	if (len)
		*len = status & 0x7ff; /* low 11 bits hold the packet length */

	if (buffer)
		*buffer = desc_read_ptr(desc, sw_buffer);

	if (status & CPDMA_DESC_OWNER) {
		/* hardware still owns it; rekick the queue if it stalled */
		if (chan_read(chan, hdp) == 0) {
			if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
				chan_write(chan, hdp, desc);
		}
		return -EBUSY;
	}

	chan->head = desc_read_ptr(desc, hw_next);
	chan_write(chan, cp, desc); /* acknowledge completion to hardware */
	cpdma_desc_free(priv, desc);
	return 0;
}
/*
 * Bring up the switch: reset the subsystems, program the ALE and
 * priority maps, initialize each slave and the descriptor pool, set up
 * both dma channels for the detected IP version, and prime the rx queue.
 * Always returns 0 (an rx submit failure is only reported, not fatal).
 */
static int cpsw_init(struct eth_device *dev, bd_t *bis)
{
	struct cpsw_priv *priv = dev->priv;
	struct cpsw_slave *slave;
	int i, ret;

	/* soft reset the controller and initialize priv */
	setbit_and_wait_for_clear32(&priv->regs->soft_reset);

	/* initialize and reset the address lookup engine */
	cpsw_ale_enable(priv, 1);
	cpsw_ale_clear(priv, 1);
	cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */

	/* setup host port priority mapping */
	__raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
	__raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);

	/* disable priority elevation and enable statistics on all ports */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection only on the host port */
	__raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);

	cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);

	/* our own address is secure; broadcast reaches the host port */
	cpsw_ale_add_ucast(priv, priv->dev->enetaddr, priv->host_port,
			   ALE_SECURE);
	cpsw_ale_add_mcast(priv, NetBcastAddr, 1 << priv->host_port);

	for_each_slave(slave, priv)
		cpsw_slave_init(slave, priv);

	cpsw_update_link(priv);

	/* init descriptor pool: chain all descriptors into the free list */
	for (i = 0; i < NUM_DESCS; i++) {
		desc_write(&priv->descs[i], hw_next,
			   (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
	}
	priv->desc_free = &priv->descs[0];

	/* initialize channels: register offsets differ per IP version */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
	} else {
		memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
		priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
		priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
		priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;

		memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
		priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
		priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
	}

	/* clear dma state */
	setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);

	/* zero every channel's hdp, cp and rx free-buffer registers */
	if (priv->data.version == CPSW_CTRL_VERSION_2) {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
					* i);
		}
	} else {
		for (i = 0; i < priv->data.channels; i++) {
			__raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
					* i);
			__raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
					* i);
		}
	}

	/* enable the tx and rx dma engines */
	__raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
	__raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);

	/* submit rx descs */
	for (i = 0; i < PKTBUFSRX; i++) {
		ret = cpdma_submit(priv, &priv->rx_chan, NetRxPackets[i],
				   PKTSIZE);
		if (ret < 0) {
			printf("error %d submitting rx desc\n", ret);
			break;
		}
	}

	return 0;
}
  707. static void cpsw_halt(struct eth_device *dev)
  708. {
  709. struct cpsw_priv *priv = dev->priv;
  710. writel(0, priv->dma_regs + CPDMA_TXCONTROL);
  711. writel(0, priv->dma_regs + CPDMA_RXCONTROL);
  712. /* soft reset the controller and initialize priv */
  713. setbit_and_wait_for_clear32(&priv->regs->soft_reset);
  714. /* clear dma state */
  715. setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
  716. priv->data.control(0);
  717. }
/*
 * Transmit one packet.  Completed tx descriptors are reaped first so
 * earlier sends cannot exhaust the descriptor pool.  Returns 0 on
 * success, -EIO when there is no link, -ETIMEDOUT when reaping stalls,
 * or cpdma_submit()'s error.
 */
static int cpsw_send(struct eth_device *dev, void *packet, int length)
{
	struct cpsw_priv *priv = dev->priv;
	void *buffer;
	int len;
	int timeout = CPDMA_TIMEOUT;

	if (!cpsw_check_link(priv))
		return -EIO;

	/* the dma engine reads from memory: write back the packet data */
	flush_dcache_range((unsigned long)packet,
			   (unsigned long)packet + length);

	/* first reap completed packets */
	while (timeout-- &&
	       (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
		;

	if (timeout == -1) {
		printf("cpdma_process timeout\n");
		return -ETIMEDOUT;
	}

	return cpdma_submit(priv, &priv->tx_chan, packet, length);
}
  738. static int cpsw_recv(struct eth_device *dev)
  739. {
  740. struct cpsw_priv *priv = dev->priv;
  741. void *buffer;
  742. int len;
  743. cpsw_update_link(priv);
  744. while (cpdma_process(priv, &priv->rx_chan, &buffer, &len) >= 0) {
  745. invalidate_dcache_range((unsigned long)buffer,
  746. (unsigned long)buffer + PKTSIZE_ALIGN);
  747. NetReceive(buffer, len);
  748. cpdma_submit(priv, &priv->rx_chan, buffer, PKTSIZE);
  749. }
  750. return 0;
  751. }
  752. static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
  753. struct cpsw_priv *priv)
  754. {
  755. void *regs = priv->regs;
  756. struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
  757. slave->slave_num = slave_num;
  758. slave->data = data;
  759. slave->regs = regs + data->slave_reg_ofs;
  760. slave->sliver = regs + data->sliver_reg_ofs;
  761. }
  762. static int cpsw_phy_init(struct eth_device *dev, struct cpsw_slave *slave)
  763. {
  764. struct cpsw_priv *priv = (struct cpsw_priv *)dev->priv;
  765. struct phy_device *phydev;
  766. u32 supported = (SUPPORTED_10baseT_Half |
  767. SUPPORTED_10baseT_Full |
  768. SUPPORTED_100baseT_Half |
  769. SUPPORTED_100baseT_Full |
  770. SUPPORTED_1000baseT_Full);
  771. phydev = phy_connect(priv->bus,
  772. CONFIG_PHY_ADDR,
  773. dev,
  774. slave->data->phy_if);
  775. phydev->supported &= supported;
  776. phydev->advertising = phydev->supported;
  777. priv->phydev = phydev;
  778. phy_config(phydev);
  779. return 1;
  780. }
/*
 * Entry point called from board code: allocate the eth device and driver
 * state, map the register sub-blocks, register with the network core,
 * bring up the MDIO bus and connect each slave's phy.  Returns 1 on
 * success or -ENOMEM on allocation failure.
 */
int cpsw_register(struct cpsw_platform_data *data)
{
	struct cpsw_priv *priv;
	struct cpsw_slave *slave;
	void *regs = (void *)data->cpsw_base;
	struct eth_device *dev;

	dev = calloc(sizeof(*dev), 1);
	if (!dev)
		return -ENOMEM;

	priv = calloc(sizeof(*priv), 1);
	if (!priv) {
		free(dev);
		return -ENOMEM;
	}

	priv->data = *data; /* private copy of the platform data */
	priv->dev = dev;

	priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
	if (!priv->slaves) {
		free(dev);
		free(priv);
		return -ENOMEM;
	}

	/* descriptors live in dedicated CPPI RAM, not heap memory */
	priv->descs = (void *)CPDMA_RAM_ADDR;
	priv->host_port = data->host_port_num;
	priv->regs = regs;
	priv->host_port_regs = regs + data->host_port_reg_ofs;
	priv->dma_regs = regs + data->cpdma_reg_ofs;
	priv->ale_regs = regs + data->ale_reg_ofs;

	int idx = 0;
	for_each_slave(slave, priv) {
		cpsw_slave_setup(slave, idx, priv);
		idx = idx + 1;
	}

	strcpy(dev->name, "cpsw");
	dev->iobase = 0;
	dev->init = cpsw_init;
	dev->halt = cpsw_halt;
	dev->send = cpsw_send;
	dev->recv = cpsw_recv;
	dev->priv = priv;

	eth_register(dev);

	/* the mii bus must exist before the phys can be connected */
	cpsw_mdio_init(dev->name, data->mdio_base, data->mdio_div);
	priv->bus = miiphy_get_dev_by_name(dev->name);
	for_each_slave(slave, priv)
		cpsw_phy_init(dev, slave);

	return 1;
}