cpsw.c

  1. /*
  2. * CPSW Ethernet Switch Driver
  3. *
  4. * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License as
  8. * published by the Free Software Foundation version 2.
  9. *
  10. * This program is distributed "as is" WITHOUT ANY WARRANTY of any
  11. * kind, whether express or implied; without even the implied warranty
  12. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. */
  15. #include <common.h>
  16. #include <command.h>
  17. #include <net.h>
  18. #include <miiphy.h>
  19. #include <malloc.h>
  20. #include <net.h>
  21. #include <netdev.h>
  22. #include <cpsw.h>
  23. #include <asm/errno.h>
  24. #include <asm/io.h>
  25. #include <phy.h>
  26. #include <asm/arch/cpu.h>
  27. #define BITMASK(bits) (BIT(bits) - 1)
  28. #define PHY_REG_MASK 0x1f
  29. #define PHY_ID_MASK 0x1f
  30. #define NUM_DESCS (PKTBUFSRX * 2)
  31. #define PKT_MIN 60
  32. #define PKT_MAX (1500 + 14 + 4 + 4)
  33. #define CLEAR_BIT 1
  34. #define GIGABITEN BIT(7)
  35. #define FULLDUPLEXEN BIT(0)
  36. #define MIIEN BIT(15)
  37. /* DMA Registers */
  38. #define CPDMA_TXCONTROL 0x004
  39. #define CPDMA_RXCONTROL 0x014
  40. #define CPDMA_SOFTRESET 0x01c
  41. #define CPDMA_RXFREE 0x0e0
  42. #define CPDMA_TXHDP_VER1 0x100
  43. #define CPDMA_TXHDP_VER2 0x200
  44. #define CPDMA_RXHDP_VER1 0x120
  45. #define CPDMA_RXHDP_VER2 0x220
  46. #define CPDMA_TXCP_VER1 0x140
  47. #define CPDMA_TXCP_VER2 0x240
  48. #define CPDMA_RXCP_VER1 0x160
  49. #define CPDMA_RXCP_VER2 0x260
  50. /* Descriptor mode bits */
  51. #define CPDMA_DESC_SOP BIT(31)
  52. #define CPDMA_DESC_EOP BIT(30)
  53. #define CPDMA_DESC_OWNER BIT(29)
  54. #define CPDMA_DESC_EOQ BIT(28)
  55. /*
  56. * These timeout definitions are worst-case, defensive measures against
  57. * unexpected controller lockups. Ideally, they should never be hit in
  58. * practice.
  59. */
  60. #define MDIO_TIMEOUT 100 /* msecs */
  61. #define CPDMA_TIMEOUT 100 /* msecs */
  62. struct cpsw_mdio_regs {
  63. u32 version;
  64. u32 control;
  65. #define CONTROL_IDLE BIT(31)
  66. #define CONTROL_ENABLE BIT(30)
  67. u32 alive;
  68. u32 link;
  69. u32 linkintraw;
  70. u32 linkintmasked;
  71. u32 __reserved_0[2];
  72. u32 userintraw;
  73. u32 userintmasked;
  74. u32 userintmaskset;
  75. u32 userintmaskclr;
  76. u32 __reserved_1[20];
  77. struct {
  78. u32 access;
  79. u32 physel;
  80. #define USERACCESS_GO BIT(31)
  81. #define USERACCESS_WRITE BIT(30)
  82. #define USERACCESS_ACK BIT(29)
  83. #define USERACCESS_READ (0)
  84. #define USERACCESS_DATA (0xffff)
  85. } user[0];
  86. };
  87. struct cpsw_regs {
  88. u32 id_ver;
  89. u32 control;
  90. u32 soft_reset;
  91. u32 stat_port_en;
  92. u32 ptype;
  93. };
  94. struct cpsw_slave_regs {
  95. u32 max_blks;
  96. u32 blk_cnt;
  97. u32 flow_thresh;
  98. u32 port_vlan;
  99. u32 tx_pri_map;
  100. #ifdef CONFIG_AM33XX
  101. u32 gap_thresh;
  102. #elif defined(CONFIG_TI814X)
  103. u32 ts_ctl;
  104. u32 ts_seq_ltype;
  105. u32 ts_vlan;
  106. #endif
  107. u32 sa_lo;
  108. u32 sa_hi;
  109. };
  110. struct cpsw_host_regs {
  111. u32 max_blks;
  112. u32 blk_cnt;
  113. u32 flow_thresh;
  114. u32 port_vlan;
  115. u32 tx_pri_map;
  116. u32 cpdma_tx_pri_map;
  117. u32 cpdma_rx_chan_map;
  118. };
  119. struct cpsw_sliver_regs {
  120. u32 id_ver;
  121. u32 mac_control;
  122. u32 mac_status;
  123. u32 soft_reset;
  124. u32 rx_maxlen;
  125. u32 __reserved_0;
  126. u32 rx_pause;
  127. u32 tx_pause;
  128. u32 __reserved_1;
  129. u32 rx_pri_map;
  130. };
  131. #define ALE_ENTRY_BITS 68
  132. #define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
  133. /* ALE Registers */
  134. #define ALE_CONTROL 0x08
  135. #define ALE_UNKNOWNVLAN 0x18
  136. #define ALE_TABLE_CONTROL 0x20
  137. #define ALE_TABLE 0x34
  138. #define ALE_PORTCTL 0x40
  139. #define ALE_TABLE_WRITE BIT(31)
  140. #define ALE_TYPE_FREE 0
  141. #define ALE_TYPE_ADDR 1
  142. #define ALE_TYPE_VLAN 2
  143. #define ALE_TYPE_VLAN_ADDR 3
  144. #define ALE_UCAST_PERSISTANT 0
  145. #define ALE_UCAST_UNTOUCHED 1
  146. #define ALE_UCAST_OUI 2
  147. #define ALE_UCAST_TOUCHED 3
  148. #define ALE_MCAST_FWD 0
  149. #define ALE_MCAST_BLOCK_LEARN_FWD 1
  150. #define ALE_MCAST_FWD_LEARN 2
  151. #define ALE_MCAST_FWD_2 3
  152. enum cpsw_ale_port_state {
  153. ALE_PORT_STATE_DISABLE = 0x00,
  154. ALE_PORT_STATE_BLOCK = 0x01,
  155. ALE_PORT_STATE_LEARN = 0x02,
  156. ALE_PORT_STATE_FORWARD = 0x03,
  157. };
  158. /* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
  159. #define ALE_SECURE 1
  160. #define ALE_BLOCKED 2
  161. struct cpsw_slave {
  162. struct cpsw_slave_regs *regs;
  163. struct cpsw_sliver_regs *sliver;
  164. int slave_num;
  165. u32 mac_control;
  166. struct cpsw_slave_data *data;
  167. };
  168. struct cpdma_desc {
  169. /* hardware fields */
  170. u32 hw_next;
  171. u32 hw_buffer;
  172. u32 hw_len;
  173. u32 hw_mode;
  174. /* software fields */
  175. u32 sw_buffer;
  176. u32 sw_len;
  177. };
  178. struct cpdma_chan {
  179. struct cpdma_desc *head, *tail;
  180. void *hdp, *cp, *rxfree;
  181. };
  182. #define desc_write(desc, fld, val) __raw_writel((u32)(val), &(desc)->fld)
  183. #define desc_read(desc, fld) __raw_readl(&(desc)->fld)
  184. #define desc_read_ptr(desc, fld) ((void *)__raw_readl(&(desc)->fld))
  185. #define chan_write(chan, fld, val) __raw_writel((u32)(val), (chan)->fld)
  186. #define chan_read(chan, fld) __raw_readl((chan)->fld)
  187. #define chan_read_ptr(chan, fld) ((void *)__raw_readl((chan)->fld))
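/*
 * for_active_slave operates only on the slave selected by
 * data.active_slave; for_each_slave walks every slave described in the
 * platform data.
 */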
  188. #define for_active_slave(slave, priv) \
  189. slave = (priv)->slaves + (priv)->data.active_slave; if (slave)
  190. #define for_each_slave(slave, priv) \
  191. for (slave = (priv)->slaves; slave != (priv)->slaves + \
  192. (priv)->data.slaves; slave++)
  193. struct cpsw_priv {
  194. struct eth_device *dev;
  195. struct cpsw_platform_data data;
  196. int host_port;
  197. struct cpsw_regs *regs;
  198. void *dma_regs;
  199. struct cpsw_host_regs *host_port_regs;
  200. void *ale_regs;
  201. struct cpdma_desc *descs;
  202. struct cpdma_desc *desc_free;
  203. struct cpdma_chan rx_chan, tx_chan;
  204. struct cpsw_slave *slaves;
  205. struct phy_device *phydev;
  206. struct mii_dev *bus;
  207. u32 phy_mask;
  208. };
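/*
 * ALE table entries are 68 bits wide and are transferred through three
 * 32-bit ALE_TABLE words.  The helpers below address fields by their bit
 * offset from the least significant bit of the entry; since ale_entry[0]
 * holds the most significant word, the word index is flipped ("2 - idx").
 */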
  209. static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
  210. {
  211. int idx;
  212. idx = start / 32;
  213. start -= idx * 32;
  214. idx = 2 - idx; /* flip */
  215. return (ale_entry[idx] >> start) & BITMASK(bits);
  216. }
  217. static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
  218. u32 value)
  219. {
  220. int idx;
  221. value &= BITMASK(bits);
  222. idx = start / 32;
  223. start -= idx * 32;
  224. idx = 2 - idx; /* flip */
  225. ale_entry[idx] &= ~(BITMASK(bits) << start);
  226. ale_entry[idx] |= (value << start);
  227. }
  228. #define DEFINE_ALE_FIELD(name, start, bits) \
  229. static inline int cpsw_ale_get_##name(u32 *ale_entry) \
  230. { \
  231. return cpsw_ale_get_field(ale_entry, start, bits); \
  232. } \
  233. static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
  234. { \
  235. cpsw_ale_set_field(ale_entry, start, bits, value); \
  236. }
  237. DEFINE_ALE_FIELD(entry_type, 60, 2)
  238. DEFINE_ALE_FIELD(mcast_state, 62, 2)
  239. DEFINE_ALE_FIELD(port_mask, 66, 3)
  240. DEFINE_ALE_FIELD(ucast_type, 62, 2)
  241. DEFINE_ALE_FIELD(port_num, 66, 2)
  242. DEFINE_ALE_FIELD(blocked, 65, 1)
  243. DEFINE_ALE_FIELD(secure, 64, 1)
  244. DEFINE_ALE_FIELD(mcast, 40, 1)
  245. /* The MAC address field in the ALE entry cannot be macroized as above */
  246. static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
  247. {
  248. int i;
  249. for (i = 0; i < 6; i++)
  250. addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
  251. }
  252. static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
  253. {
  254. int i;
  255. for (i = 0; i < 6; i++)
  256. cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
  257. }
  258. static int cpsw_ale_read(struct cpsw_priv *priv, int idx, u32 *ale_entry)
  259. {
  260. int i;
  261. __raw_writel(idx, priv->ale_regs + ALE_TABLE_CONTROL);
  262. for (i = 0; i < ALE_ENTRY_WORDS; i++)
  263. ale_entry[i] = __raw_readl(priv->ale_regs + ALE_TABLE + 4 * i);
  264. return idx;
  265. }
  266. static int cpsw_ale_write(struct cpsw_priv *priv, int idx, u32 *ale_entry)
  267. {
  268. int i;
  269. for (i = 0; i < ALE_ENTRY_WORDS; i++)
  270. __raw_writel(ale_entry[i], priv->ale_regs + ALE_TABLE + 4 * i);
  271. __raw_writel(idx | ALE_TABLE_WRITE, priv->ale_regs + ALE_TABLE_CONTROL);
  272. return idx;
  273. }
  274. static int cpsw_ale_match_addr(struct cpsw_priv *priv, const u8 *addr)
  275. {
  276. u32 ale_entry[ALE_ENTRY_WORDS];
  277. int type, idx;
  278. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  279. u8 entry_addr[6];
  280. cpsw_ale_read(priv, idx, ale_entry);
  281. type = cpsw_ale_get_entry_type(ale_entry);
  282. if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
  283. continue;
  284. cpsw_ale_get_addr(ale_entry, entry_addr);
  285. if (memcmp(entry_addr, addr, 6) == 0)
  286. return idx;
  287. }
  288. return -ENOENT;
  289. }
  290. static int cpsw_ale_match_free(struct cpsw_priv *priv)
  291. {
  292. u32 ale_entry[ALE_ENTRY_WORDS];
  293. int type, idx;
  294. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  295. cpsw_ale_read(priv, idx, ale_entry);
  296. type = cpsw_ale_get_entry_type(ale_entry);
  297. if (type == ALE_TYPE_FREE)
  298. return idx;
  299. }
  300. return -ENOENT;
  301. }
  302. static int cpsw_ale_find_ageable(struct cpsw_priv *priv)
  303. {
  304. u32 ale_entry[ALE_ENTRY_WORDS];
  305. int type, idx;
  306. for (idx = 0; idx < priv->data.ale_entries; idx++) {
  307. cpsw_ale_read(priv, idx, ale_entry);
  308. type = cpsw_ale_get_entry_type(ale_entry);
  309. if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
  310. continue;
  311. if (cpsw_ale_get_mcast(ale_entry))
  312. continue;
  313. type = cpsw_ale_get_ucast_type(ale_entry);
  314. if (type != ALE_UCAST_PERSISTANT &&
  315. type != ALE_UCAST_OUI)
  316. return idx;
  317. }
  318. return -ENOENT;
  319. }
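/*
 * When adding an entry, reuse an existing entry for the same address if
 * there is one, otherwise take a free slot, and as a last resort replace
 * an ageable (non-persistent, non-OUI unicast) entry.
 */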
  320. static int cpsw_ale_add_ucast(struct cpsw_priv *priv, const u8 *addr,
  321. int port, int flags)
  322. {
  323. u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
  324. int idx;
  325. cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
  326. cpsw_ale_set_addr(ale_entry, addr);
  327. cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
  328. cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
  329. cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
  330. cpsw_ale_set_port_num(ale_entry, port);
  331. idx = cpsw_ale_match_addr(priv, addr);
  332. if (idx < 0)
  333. idx = cpsw_ale_match_free(priv);
  334. if (idx < 0)
  335. idx = cpsw_ale_find_ageable(priv);
  336. if (idx < 0)
  337. return -ENOMEM;
  338. cpsw_ale_write(priv, idx, ale_entry);
  339. return 0;
  340. }
  341. static int cpsw_ale_add_mcast(struct cpsw_priv *priv, const u8 *addr,
  342. int port_mask)
  343. {
  344. u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
  345. int idx, mask;
  346. idx = cpsw_ale_match_addr(priv, addr);
  347. if (idx >= 0)
  348. cpsw_ale_read(priv, idx, ale_entry);
  349. cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
  350. cpsw_ale_set_addr(ale_entry, addr);
  351. cpsw_ale_set_mcast_state(ale_entry, ALE_MCAST_FWD_2);
  352. mask = cpsw_ale_get_port_mask(ale_entry);
  353. port_mask |= mask;
  354. cpsw_ale_set_port_mask(ale_entry, port_mask);
  355. if (idx < 0)
  356. idx = cpsw_ale_match_free(priv);
  357. if (idx < 0)
  358. idx = cpsw_ale_find_ageable(priv);
  359. if (idx < 0)
  360. return -ENOMEM;
  361. cpsw_ale_write(priv, idx, ale_entry);
  362. return 0;
  363. }
  364. static inline void cpsw_ale_control(struct cpsw_priv *priv, int bit, int val)
  365. {
  366. u32 tmp, mask = BIT(bit);
  367. tmp = __raw_readl(priv->ale_regs + ALE_CONTROL);
  368. tmp &= ~mask;
  369. tmp |= val ? mask : 0;
  370. __raw_writel(tmp, priv->ale_regs + ALE_CONTROL);
  371. }
  372. #define cpsw_ale_enable(priv, val) cpsw_ale_control(priv, 31, val)
  373. #define cpsw_ale_clear(priv, val) cpsw_ale_control(priv, 30, val)
  374. #define cpsw_ale_vlan_aware(priv, val) cpsw_ale_control(priv, 2, val)
  375. static inline void cpsw_ale_port_state(struct cpsw_priv *priv, int port,
  376. int val)
  377. {
  378. int offset = ALE_PORTCTL + 4 * port;
  379. u32 tmp, mask = 0x3;
  380. tmp = __raw_readl(priv->ale_regs + offset);
  381. tmp &= ~mask;
  382. tmp |= val & mask;
  383. __raw_writel(tmp, priv->ale_regs + offset);
  384. }
  385. static struct cpsw_mdio_regs *mdio_regs;
  386. /* wait until hardware is ready for another user access */
  387. static inline u32 wait_for_user_access(void)
  388. {
  389. u32 reg = 0;
  390. int timeout = MDIO_TIMEOUT;
  391. while (timeout-- &&
  392. ((reg = __raw_readl(&mdio_regs->user[0].access)) & USERACCESS_GO))
  393. udelay(10);
  394. if (timeout == -1) {
  395. printf("wait_for_user_access Timeout\n");
  396. return -ETIMEDOUT;
  397. }
  398. return reg;
  399. }
  400. /* wait until hardware state machine is idle */
  401. static inline void wait_for_idle(void)
  402. {
  403. int timeout = MDIO_TIMEOUT;
  404. while (timeout-- &&
  405. ((__raw_readl(&mdio_regs->control) & CONTROL_IDLE) == 0))
  406. udelay(10);
  407. if (timeout == -1)
  408. printf("wait_for_idle Timeout\n");
  409. }
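/*
 * An MDIO transaction is started by writing USERACCESS_GO together with
 * the register number (bits 25:21), the PHY address (bits 20:16) and, for
 * writes, the 16-bit data.  Completion is signalled by GO clearing; on
 * reads, ACK indicates that the PHY responded.
 */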
  410. static int cpsw_mdio_read(struct mii_dev *bus, int phy_id,
  411. int dev_addr, int phy_reg)
  412. {
  413. int data;
  414. u32 reg;
  415. if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
  416. return -EINVAL;
  417. wait_for_user_access();
  418. reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
  419. (phy_id << 16));
  420. __raw_writel(reg, &mdio_regs->user[0].access);
  421. reg = wait_for_user_access();
  422. data = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -1;
  423. return data;
  424. }
  425. static int cpsw_mdio_write(struct mii_dev *bus, int phy_id, int dev_addr,
  426. int phy_reg, u16 data)
  427. {
  428. u32 reg;
  429. if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
  430. return -EINVAL;
  431. wait_for_user_access();
  432. reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
  433. (phy_id << 16) | (data & USERACCESS_DATA));
  434. __raw_writel(reg, &mdio_regs->user[0].access);
  435. wait_for_user_access();
  436. return 0;
  437. }
  438. static void cpsw_mdio_init(char *name, u32 mdio_base, u32 div)
  439. {
  440. struct mii_dev *bus = mdio_alloc();
  441. mdio_regs = (struct cpsw_mdio_regs *)mdio_base;
  442. /* set enable and clock divider */
  443. __raw_writel(div | CONTROL_ENABLE, &mdio_regs->control);
  444. /*
  445. * wait for scan logic to settle:
  446. * the scan time consists of (a) a large fixed component, and (b) a
  447. * small component that varies with the mii bus frequency. These
  448. * were estimated using measurements at 1.1 and 2.2 MHz on tnetv107x
  449. * silicon. Since the effect of (b) was found to be largely
  450. * negligible, we keep things simple here.
  451. */
  452. udelay(1000);
  453. bus->read = cpsw_mdio_read;
  454. bus->write = cpsw_mdio_write;
  455. strcpy(bus->name, name);
  456. mdio_register(bus);
  457. }
  458. /* Set a self-clearing bit in a register, and wait for it to clear */
  459. static inline void setbit_and_wait_for_clear32(void *addr)
  460. {
  461. __raw_writel(CLEAR_BIT, addr);
  462. while (__raw_readl(addr) & CLEAR_BIT)
  463. ;
  464. }
  465. #define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
  466. ((mac)[2] << 16) | ((mac)[3] << 24))
  467. #define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
  468. static void cpsw_set_slave_mac(struct cpsw_slave *slave,
  469. struct cpsw_priv *priv)
  470. {
  471. __raw_writel(mac_hi(priv->dev->enetaddr), &slave->regs->sa_hi);
  472. __raw_writel(mac_lo(priv->dev->enetaddr), &slave->regs->sa_lo);
  473. }
  474. static void cpsw_slave_update_link(struct cpsw_slave *slave,
  475. struct cpsw_priv *priv, int *link)
  476. {
  477. struct phy_device *phy;
  478. u32 mac_control = 0;
  479. phy = priv->phydev;
  480. if (!phy)
  481. return;
  482. phy_startup(phy);
  483. *link = phy->link;
  484. if (*link) { /* link up */
  485. mac_control = priv->data.mac_control;
  486. if (phy->speed == 1000)
  487. mac_control |= GIGABITEN;
  488. if (phy->duplex == DUPLEX_FULL)
  489. mac_control |= FULLDUPLEXEN;
  490. if (phy->speed == 100)
  491. mac_control |= MIIEN;
  492. }
  493. if (mac_control == slave->mac_control)
  494. return;
  495. if (mac_control) {
  496. printf("link up on port %d, speed %d, %s duplex\n",
  497. slave->slave_num, phy->speed,
  498. (phy->duplex == DUPLEX_FULL) ? "full" : "half");
  499. } else {
  500. printf("link down on port %d\n", slave->slave_num);
  501. }
  502. __raw_writel(mac_control, &slave->sliver->mac_control);
  503. slave->mac_control = mac_control;
  504. }
  505. static int cpsw_update_link(struct cpsw_priv *priv)
  506. {
  507. int link = 0;
  508. struct cpsw_slave *slave;
  509. for_active_slave(slave, priv)
  510. cpsw_slave_update_link(slave, priv, &link);
  511. return link;
  512. }
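/*
 * Switch port numbering: when the host (CPU) port is port 0, slave n maps
 * to switch port n + 1.
 */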
  513. static inline u32 cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
  514. {
  515. if (priv->host_port == 0)
  516. return slave_num + 1;
  517. else
  518. return slave_num;
  519. }
  520. static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
  521. {
  522. u32 slave_port;
  523. setbit_and_wait_for_clear32(&slave->sliver->soft_reset);
  524. /* setup priority mapping */
  525. __raw_writel(0x76543210, &slave->sliver->rx_pri_map);
  526. __raw_writel(0x33221100, &slave->regs->tx_pri_map);
  527. /* setup max packet size, and mac address */
  528. __raw_writel(PKT_MAX, &slave->sliver->rx_maxlen);
  529. cpsw_set_slave_mac(slave, priv);
  530. slave->mac_control = 0; /* no link yet */
  531. /* enable forwarding */
  532. slave_port = cpsw_get_slave_port(priv, slave->slave_num);
  533. cpsw_ale_port_state(priv, slave_port, ALE_PORT_STATE_FORWARD);
  534. cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << slave_port);
  535. priv->phy_mask |= 1 << slave->data->phy_addr;
  536. }
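/*
 * The free descriptor pool is a singly linked list threaded through the
 * hw_next field.  The descriptors themselves live in the controller's
 * buffer descriptor RAM (bd_ram_ofs) and are chained together in
 * _cpsw_init().
 */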
  537. static struct cpdma_desc *cpdma_desc_alloc(struct cpsw_priv *priv)
  538. {
  539. struct cpdma_desc *desc = priv->desc_free;
  540. if (desc)
  541. priv->desc_free = desc_read_ptr(desc, hw_next);
  542. return desc;
  543. }
  544. static void cpdma_desc_free(struct cpsw_priv *priv, struct cpdma_desc *desc)
  545. {
  546. if (desc) {
  547. desc_write(desc, hw_next, priv->desc_free);
  548. priv->desc_free = desc;
  549. }
  550. }
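/*
 * Queue a buffer on a DMA channel: the new descriptor is appended to the
 * channel's software list, and if the hardware already flagged
 * end-of-queue (EOQ) on the previous tail, the head descriptor pointer is
 * rewritten to restart the channel.  For the RX channel, the RXFREE
 * register is bumped to advertise the additional free buffer.
 */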
  551. static int cpdma_submit(struct cpsw_priv *priv, struct cpdma_chan *chan,
  552. void *buffer, int len)
  553. {
  554. struct cpdma_desc *desc, *prev;
  555. u32 mode;
  556. desc = cpdma_desc_alloc(priv);
  557. if (!desc)
  558. return -ENOMEM;
  559. if (len < PKT_MIN)
  560. len = PKT_MIN;
  561. mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
  562. desc_write(desc, hw_next, 0);
  563. desc_write(desc, hw_buffer, buffer);
  564. desc_write(desc, hw_len, len);
  565. desc_write(desc, hw_mode, mode | len);
  566. desc_write(desc, sw_buffer, buffer);
  567. desc_write(desc, sw_len, len);
  568. if (!chan->head) {
  569. /* simple case - first packet enqueued */
  570. chan->head = desc;
  571. chan->tail = desc;
  572. chan_write(chan, hdp, desc);
  573. goto done;
  574. }
  575. /* not the first packet - enqueue at the tail */
  576. prev = chan->tail;
  577. desc_write(prev, hw_next, desc);
  578. chan->tail = desc;
  579. /* next check if EOQ has been triggered already */
  580. if (desc_read(prev, hw_mode) & CPDMA_DESC_EOQ)
  581. chan_write(chan, hdp, desc);
  582. done:
  583. if (chan->rxfree)
  584. chan_write(chan, rxfree, 1);
  585. return 0;
  586. }
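/*
 * Reap one completed descriptor from the head of a channel.  Returns
 * -ENOENT when the queue is empty and -EBUSY when the head descriptor is
 * still owned by the hardware (re-kicking the channel if it has gone idle
 * with descriptors still pending).
 */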
  587. static int cpdma_process(struct cpsw_priv *priv, struct cpdma_chan *chan,
  588. void **buffer, int *len)
  589. {
  590. struct cpdma_desc *desc = chan->head;
  591. u32 status;
  592. if (!desc)
  593. return -ENOENT;
  594. status = desc_read(desc, hw_mode);
  595. if (len)
  596. *len = status & 0x7ff;
  597. if (buffer)
  598. *buffer = desc_read_ptr(desc, sw_buffer);
  599. if (status & CPDMA_DESC_OWNER) {
  600. if (chan_read(chan, hdp) == 0) {
  601. if (desc_read(desc, hw_mode) & CPDMA_DESC_OWNER)
  602. chan_write(chan, hdp, desc);
  603. }
  604. return -EBUSY;
  605. }
  606. chan->head = desc_read_ptr(desc, hw_next);
  607. chan_write(chan, cp, desc);
  608. cpdma_desc_free(priv, desc);
  609. return 0;
  610. }
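/*
 * Bring-up sequence: soft-reset the switch, program the ALE and the host
 * port, initialize the active slave, build the free descriptor pool,
 * point the RX/TX channels at the version-specific CPDMA registers, and
 * pre-queue PKTBUFSRX receive buffers.
 */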
  611. static int _cpsw_init(struct cpsw_priv *priv, u8 *enetaddr)
  612. {
  613. struct cpsw_slave *slave;
  614. int i, ret;
  615. /* soft reset the controller and initialize priv */
  616. setbit_and_wait_for_clear32(&priv->regs->soft_reset);
  617. /* initialize and reset the address lookup engine */
  618. cpsw_ale_enable(priv, 1);
  619. cpsw_ale_clear(priv, 1);
  620. cpsw_ale_vlan_aware(priv, 0); /* vlan unaware mode */
  621. /* setup host port priority mapping */
  622. __raw_writel(0x76543210, &priv->host_port_regs->cpdma_tx_pri_map);
  623. __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
  624. /* disable priority elevation and enable statistics on all ports */
  625. __raw_writel(0, &priv->regs->ptype);
  626. /* enable statistics collection on the host port and both slave ports */
  627. __raw_writel(BIT(priv->host_port), &priv->regs->stat_port_en);
  628. __raw_writel(0x7, &priv->regs->stat_port_en);
  629. cpsw_ale_port_state(priv, priv->host_port, ALE_PORT_STATE_FORWARD);
  630. cpsw_ale_add_ucast(priv, enetaddr, priv->host_port, ALE_SECURE);
  631. cpsw_ale_add_mcast(priv, net_bcast_ethaddr, 1 << priv->host_port);
  632. for_active_slave(slave, priv)
  633. cpsw_slave_init(slave, priv);
  634. cpsw_update_link(priv);
  635. /* init descriptor pool */
  636. for (i = 0; i < NUM_DESCS; i++) {
  637. desc_write(&priv->descs[i], hw_next,
  638. (i == (NUM_DESCS - 1)) ? 0 : &priv->descs[i+1]);
  639. }
  640. priv->desc_free = &priv->descs[0];
  641. /* initialize channels */
  642. if (priv->data.version == CPSW_CTRL_VERSION_2) {
  643. memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
  644. priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER2;
  645. priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER2;
  646. priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
  647. memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
  648. priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER2;
  649. priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER2;
  650. } else {
  651. memset(&priv->rx_chan, 0, sizeof(struct cpdma_chan));
  652. priv->rx_chan.hdp = priv->dma_regs + CPDMA_RXHDP_VER1;
  653. priv->rx_chan.cp = priv->dma_regs + CPDMA_RXCP_VER1;
  654. priv->rx_chan.rxfree = priv->dma_regs + CPDMA_RXFREE;
  655. memset(&priv->tx_chan, 0, sizeof(struct cpdma_chan));
  656. priv->tx_chan.hdp = priv->dma_regs + CPDMA_TXHDP_VER1;
  657. priv->tx_chan.cp = priv->dma_regs + CPDMA_TXCP_VER1;
  658. }
  659. /* clear dma state */
  660. setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
  661. if (priv->data.version == CPSW_CTRL_VERSION_2) {
  662. for (i = 0; i < priv->data.channels; i++) {
  663. __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER2 + 4
  664. * i);
  665. __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
  666. * i);
  667. __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER2 + 4
  668. * i);
  669. __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER2 + 4
  670. * i);
  671. __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER2 + 4
  672. * i);
  673. }
  674. } else {
  675. for (i = 0; i < priv->data.channels; i++) {
  676. __raw_writel(0, priv->dma_regs + CPDMA_RXHDP_VER1 + 4
  677. * i);
  678. __raw_writel(0, priv->dma_regs + CPDMA_RXFREE + 4
  679. * i);
  680. __raw_writel(0, priv->dma_regs + CPDMA_RXCP_VER1 + 4
  681. * i);
  682. __raw_writel(0, priv->dma_regs + CPDMA_TXHDP_VER1 + 4
  683. * i);
  684. __raw_writel(0, priv->dma_regs + CPDMA_TXCP_VER1 + 4
  685. * i);
  686. }
  687. }
  688. __raw_writel(1, priv->dma_regs + CPDMA_TXCONTROL);
  689. __raw_writel(1, priv->dma_regs + CPDMA_RXCONTROL);
  690. /* submit rx descs */
  691. for (i = 0; i < PKTBUFSRX; i++) {
  692. ret = cpdma_submit(priv, &priv->rx_chan, net_rx_packets[i],
  693. PKTSIZE);
  694. if (ret < 0) {
  695. printf("error %d submitting rx desc\n", ret);
  696. break;
  697. }
  698. }
  699. return 0;
  700. }
  701. static void _cpsw_halt(struct cpsw_priv *priv)
  702. {
  703. writel(0, priv->dma_regs + CPDMA_TXCONTROL);
  704. writel(0, priv->dma_regs + CPDMA_RXCONTROL);
  705. /* soft reset the controller */
  706. setbit_and_wait_for_clear32(&priv->regs->soft_reset);
  707. /* clear dma state */
  708. setbit_and_wait_for_clear32(priv->dma_regs + CPDMA_SOFTRESET);
  709. }
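/*
 * Transmit path: flush the packet from the data cache so the DMA sees
 * current data, reap any already-completed TX descriptors, then queue the
 * buffer on the TX channel.
 */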
  710. static int _cpsw_send(struct cpsw_priv *priv, void *packet, int length)
  711. {
  712. void *buffer;
  713. int len;
  714. int timeout = CPDMA_TIMEOUT;
  715. flush_dcache_range((unsigned long)packet,
  716. (unsigned long)packet + length);
  717. /* first reap completed packets */
  718. while (timeout-- &&
  719. (cpdma_process(priv, &priv->tx_chan, &buffer, &len) >= 0))
  720. ;
  721. if (timeout == -1) {
  722. printf("cpdma_process timeout\n");
  723. return -ETIMEDOUT;
  724. }
  725. return cpdma_submit(priv, &priv->tx_chan, packet, length);
  726. }
  727. static int _cpsw_recv(struct cpsw_priv *priv, uchar **pkt)
  728. {
  729. void *buffer;
  730. int len;
  731. int ret = -EAGAIN;
  732. ret = cpdma_process(priv, &priv->rx_chan, &buffer, &len);
  733. if (ret < 0)
  734. return ret;
  735. invalidate_dcache_range((unsigned long)buffer,
  736. (unsigned long)buffer + PKTSIZE_ALIGN);
  737. *pkt = buffer;
  738. return len;
  739. }
  740. static void cpsw_slave_setup(struct cpsw_slave *slave, int slave_num,
  741. struct cpsw_priv *priv)
  742. {
  743. void *regs = priv->regs;
  744. struct cpsw_slave_data *data = priv->data.slave_data + slave_num;
  745. slave->slave_num = slave_num;
  746. slave->data = data;
  747. slave->regs = regs + data->slave_reg_ofs;
  748. slave->sliver = regs + data->sliver_reg_ofs;
  749. }
  750. static int cpsw_phy_init(struct cpsw_priv *priv, struct cpsw_slave *slave)
  751. {
  752. struct phy_device *phydev;
  753. u32 supported = PHY_GBIT_FEATURES;
  754. phydev = phy_connect(priv->bus,
  755. slave->data->phy_addr,
  756. priv->dev,
  757. slave->data->phy_if);
  758. if (!phydev)
  759. return -1;
  760. phydev->supported &= supported;
  761. phydev->advertising = phydev->supported;
  762. priv->phydev = phydev;
  763. phy_config(phydev);
  764. return 1;
  765. }
  766. int _cpsw_register(struct cpsw_priv *priv)
  767. {
  768. struct cpsw_slave *slave;
  769. struct cpsw_platform_data *data = &priv->data;
  770. void *regs = (void *)data->cpsw_base;
  771. priv->slaves = malloc(sizeof(struct cpsw_slave) * data->slaves);
  772. if (!priv->slaves) {
  773. return -ENOMEM;
  774. }
  775. priv->host_port = data->host_port_num;
  776. priv->regs = regs;
  777. priv->host_port_regs = regs + data->host_port_reg_ofs;
  778. priv->dma_regs = regs + data->cpdma_reg_ofs;
  779. priv->ale_regs = regs + data->ale_reg_ofs;
  780. priv->descs = (void *)regs + data->bd_ram_ofs;
  781. int idx = 0;
  782. for_each_slave(slave, priv) {
  783. cpsw_slave_setup(slave, idx, priv);
  784. idx = idx + 1;
  785. }
  786. cpsw_mdio_init(priv->dev->name, data->mdio_base, data->mdio_div);
  787. priv->bus = miiphy_get_dev_by_name(priv->dev->name);
  788. for_active_slave(slave, priv)
  789. cpsw_phy_init(priv, slave);
  790. return 0;
  791. }
  792. static int cpsw_init(struct eth_device *dev, bd_t *bis)
  793. {
  794. struct cpsw_priv *priv = dev->priv;
  795. return _cpsw_init(priv, dev->enetaddr);
  796. }
  797. static void cpsw_halt(struct eth_device *dev)
  798. {
  799. struct cpsw_priv *priv = dev->priv;
  800. return _cpsw_halt(priv);
  801. }
  802. static int cpsw_send(struct eth_device *dev, void *packet, int length)
  803. {
  804. struct cpsw_priv *priv = dev->priv;
  805. return _cpsw_send(priv, packet, length);
  806. }
  807. static int cpsw_recv(struct eth_device *dev)
  808. {
  809. struct cpsw_priv *priv = dev->priv;
  810. uchar *pkt = NULL;
  811. int len;
  812. len = _cpsw_recv(priv, &pkt);
  813. if (len > 0) {
  814. net_process_received_packet(pkt, len);
  815. cpdma_submit(priv, &priv->rx_chan, pkt, PKTSIZE);
  816. }
  817. return len;
  818. }
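/*
 * cpsw_register() is the public entry point: it allocates the eth_device
 * and driver state, registers the device with the network stack, and then
 * sets up the register pointers, MDIO bus and PHY via _cpsw_register(),
 * unwinding everything on failure.
 */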
  819. int cpsw_register(struct cpsw_platform_data *data)
  820. {
  821. struct cpsw_priv *priv;
  822. struct eth_device *dev;
  823. int ret;
  824. dev = calloc(sizeof(*dev), 1);
  825. if (!dev)
  826. return -ENOMEM;
  827. priv = calloc(sizeof(*priv), 1);
  828. if (!priv) {
  829. free(dev);
  830. return -ENOMEM;
  831. }
  832. priv->dev = dev;
  833. priv->data = *data;
  834. strcpy(dev->name, "cpsw");
  835. dev->iobase = 0;
  836. dev->init = cpsw_init;
  837. dev->halt = cpsw_halt;
  838. dev->send = cpsw_send;
  839. dev->recv = cpsw_recv;
  840. dev->priv = priv;
  841. eth_register(dev);
  842. ret = _cpsw_register(priv);
  843. if (ret < 0) {
  844. eth_unregister(dev);
  845. free(dev);
  846. free(priv);
  847. return ret;
  848. }
  849. return 1;
  850. }
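/*
 * Usage sketch (illustrative only, not tied to any particular board): a
 * board file fills in cpsw_platform_data with its SoC's register offsets
 * and calls cpsw_register() from its Ethernet init hook.  Every numeric
 * value below is a placeholder.
 *
 *	static struct cpsw_slave_data cpsw_slaves[] = {
 *		{
 *			.slave_reg_ofs	= 0x208,
 *			.sliver_reg_ofs	= 0xd80,
 *			.phy_addr	= 0,
 *			.phy_if		= PHY_INTERFACE_MODE_RGMII,
 *		},
 *	};
 *
 *	static struct cpsw_platform_data cpsw_data = {
 *		.mdio_base		= 0x4a101000,
 *		.cpsw_base		= 0x4a100000,
 *		.mdio_div		= 0xff,
 *		.channels		= 8,
 *		.cpdma_reg_ofs		= 0x800,
 *		.slaves			= 1,
 *		.slave_data		= cpsw_slaves,
 *		.active_slave		= 0,
 *		.ale_reg_ofs		= 0xd00,
 *		.ale_entries		= 1024,
 *		.host_port_reg_ofs	= 0x108,
 *		.bd_ram_ofs		= 0x2000,
 *		.mac_control		= 0x20,
 *		.host_port_num		= 0,
 *		.version		= CPSW_CTRL_VERSION_2,
 *	};
 *
 *	int board_eth_init(bd_t *bis)
 *	{
 *		return cpsw_register(&cpsw_data);
 *	}
 */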