ldpaa_eth.c

/*
 * Copyright (C) 2014 Freescale Semiconductor
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/types.h>
#include <malloc.h>
#include <net.h>
#include <hwconfig.h>
#include <phy.h>
#include <linux/compat.h>

#include "ldpaa_eth.h"

#undef CONFIG_PHYLIB
static int init_phy(struct eth_device *dev)
{
	/* TODO for external PHY */

	return 0;
}
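
/*
 * Process one received frame descriptor: when the frame annotation status
 * is valid, check it for Rx errors and unsupported features; hand good
 * frames to the U-Boot network stack. In all cases the buffer is released
 * back to the DPBP buffer pool afterwards.
 */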
static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
			 const struct dpaa_fd *fd)
{
	u64 fd_addr;
	uint16_t fd_offset;
	uint32_t fd_length;
	struct ldpaa_fas *fas;
	uint32_t status, err;
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	fd_addr = ldpaa_fd_get_addr(fd);
	fd_offset = ldpaa_fd_get_offset(fd);
	fd_length = ldpaa_fd_get_len(fd);

	debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);

	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
		/* Read the frame annotation status word and check for errors */
		fas = (struct ldpaa_fas *)
				((uint8_t *)(fd_addr) +
				priv->buf_layout.private_data_size);
		status = le32_to_cpu(fas->status);
		if (status & LDPAA_ETH_RX_ERR_MASK) {
			printf("Rx frame error(s): 0x%08x\n",
			       status & LDPAA_ETH_RX_ERR_MASK);
			goto error;
		} else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
			printf("Unsupported feature in bitmask: 0x%08x\n",
			       status & LDPAA_ETH_RX_UNSUPP_MASK);
			goto error;
		}
	}

	debug("Rx frame: To Upper layer\n");
	net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
				    fd_length);

error:
	flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
	} while (err == -EBUSY);

	return;
}
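
/*
 * Pull-mode dequeue of the default Rx frame queue through the QBMAN
 * software portal: issue a one-frame pull command, poll the DQRR (with a
 * bounded loop) for the response, process a valid frame via ldpaa_eth_rx()
 * and consume the DQRR entry.
 */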
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int i = 5, err = 0, status, loop = 20;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error:0x%08x\n", err);
			continue;
		}

		do {
			loop--;
			dq = qbman_swp_dqrr_next(swp);

			if (!loop)
				break;
		} while (!dq);

		if (dq) {
			/* Check for a valid frame. If none was delivered,
			 * just send the consume confirmation to QBMAN;
			 * otherwise process the frame and then consume the
			 * DQRR entry.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				break;
			}

			fd = ldpaa_dq_fd(dq);

			/* Obtain FD and process it */
			ldpaa_eth_rx(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		}
	}

	return err;
}
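
/*
 * Handle a Tx confirmation frame: report any errors flagged in the frame
 * annotation status and release the transmit buffer back to the DPBP pool.
 */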
static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
			      const struct dpaa_fd *fd)
{
	uint64_t fd_addr;
	struct ldpaa_fas *fas;
	uint32_t status, err;
	struct qbman_release_desc releasedesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	fd_addr = ldpaa_fd_get_addr(fd);

	debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);

	/* Check the status from the Frame Annotation */
	if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
		fas = (struct ldpaa_fas *)
				((uint8_t *)(fd_addr) +
				priv->buf_layout.private_data_size);
		status = le32_to_cpu(fas->status);
		if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
			printf("TxConf frame error(s): 0x%08x\n",
			       status & LDPAA_ETH_TXCONF_ERR_MASK);
		}
	}

	qbman_release_desc_clear(&releasedesc);
	qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
	do {
		/* Release buffer into the QBMAN */
		err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
	} while (err == -EBUSY);
}

static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
{
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int err = 0;
	int i = 5, status, loop = 20;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);

		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue TX conf frames error:0x%08x\n", err);
			continue;
		}

		do {
			loop--;
			dq = qbman_swp_dqrr_next(swp);

			if (!loop)
				break;
		} while (!dq);

		if (dq) {
			/* Check for a valid frame. If none was delivered,
			 * just send the consume confirmation to QBMAN;
			 * otherwise process the confirmation and then consume
			 * the DQRR entry.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue TX conf frames:");
				debug("No frame is delivered\n");

				qbman_swp_dqrr_consume(swp, dq);
				break;
			}

			fd = ldpaa_dq_fd(dq);

			ldpaa_eth_tx_conf(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		}
	}

	return err;
}
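
/*
 * Transmit a frame by copy: acquire a buffer from the DPBP pool, copy the
 * packet at the Tx data offset, build a frame descriptor and enqueue it
 * through the DPNI queuing destination (QDID/flow ID), then poll the Tx
 * confirmation queue so the buffer can be recycled.
 */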
static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpaa_fd fd;
	u64 buffer_start;
	int data_offset, err;
	struct qbman_swp *swp = dflt_dpio->sw_portal;
	struct qbman_eq_desc ed;

	/* Setup the FD fields */
	memset(&fd, 0, sizeof(fd));

	data_offset = priv->tx_data_offset;

	do {
		err = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					&buffer_start, 1);
	} while (err == -EBUSY);
	if (err < 0) {
		printf("qbman_swp_acquire() failed\n");
		return -ENOMEM;
	}

	debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);

	memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);
	flush_dcache_range(buffer_start, buffer_start +
					LDPAA_ETH_RX_BUFFER_SIZE);

	ldpaa_fd_set_addr(&fd, (u64)buffer_start);
	ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
	ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
	ldpaa_fd_set_len(&fd, len);

	fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
			 LDPAA_FD_CTRL_PTV1;

	qbman_eq_desc_clear(&ed);
	qbman_eq_desc_set_no_orp(&ed, 0);
	qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
	err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
	if (err < 0)
		printf("error enqueueing Tx frame\n");

	mdelay(1);

	err = ldpaa_eth_pull_dequeue_tx_conf(priv);
	if (err < 0)
		printf("error processing Tx Conf frame\n");

	return err;
}
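
/*
 * Bring the interface up: configure the DPNI, set up and seed the buffer
 * pool, bind the pool to the DPNI, fetch the primary MAC address, enable
 * the DPNI, and cache the Rx FQID, Tx QDID and Tx-confirmation FQID used
 * by the datapath routines above.
 */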
static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	struct dpni_queue_attr rx_queue_attr;
	struct dpni_tx_flow_attr tx_flow_attr;
	uint8_t mac_addr[6];
	int err;

	if (net_dev->state == ETH_STATE_ACTIVE)
		return 0;

	/* DPNI initialization */
	err = ldpaa_dpni_setup(priv);
	if (err < 0)
		goto err_dpni_setup;

	err = ldpaa_dpbp_setup();
	if (err < 0)
		goto err_dpbp_setup;

	/* DPNI binding DPBP */
	err = ldpaa_dpni_bind(priv);
	if (err)
		goto err_bind;

	err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
					mac_addr);
	if (err) {
		printf("dpni_get_primary_mac_addr() failed\n");
		return err;
	}

	memcpy(net_dev->enetaddr, mac_addr, 0x6);

	/* setup the MAC address */
	if (net_dev->enetaddr[0] & 0x01) {
		printf("%s: MAC address is a multicast address\n", __func__);
		return 1;
	}

#ifdef CONFIG_PHYLIB
	/* TODO Check this path */
	err = phy_startup(priv->phydev);
	if (err) {
		printf("%s: Could not initialize\n", priv->phydev->dev->name);
		return err;
	}
#else
	priv->phydev->speed = SPEED_1000;
	priv->phydev->link = 1;
	priv->phydev->duplex = DUPLEX_FULL;
#endif

	err = dpni_enable(dflt_mc_io, priv->dpni_handle);
	if (err < 0) {
		printf("dpni_enable() failed\n");
		return err;
	}

	/* TODO: support multiple Rx flows */
	err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
			       &rx_queue_attr);
	if (err) {
		printf("dpni_get_rx_flow() failed\n");
		goto err_rx_flow;
	}

	priv->rx_dflt_fqid = rx_queue_attr.fqid;

	err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
	if (err) {
		printf("dpni_get_qdid() failed\n");
		goto err_qdid;
	}

	err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
			       &tx_flow_attr);
	if (err) {
		printf("dpni_get_tx_flow() failed\n");
		goto err_tx_flow;
	}

	priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;

	if (!priv->phydev->link)
		printf("%s: No link.\n", priv->phydev->dev->name);

	return priv->phydev->link ? 0 : -1;

err_tx_flow:
err_qdid:
err_rx_flow:
	dpni_disable(dflt_mc_io, priv->dpni_handle);
err_bind:
	ldpaa_dpbp_free();
err_dpbp_setup:
	dpni_close(dflt_mc_io, priv->dpni_handle);
err_dpni_setup:
	return err;
}

static void ldpaa_eth_stop(struct eth_device *net_dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
	int err = 0;

	if ((net_dev->state == ETH_STATE_PASSIVE) ||
	    (net_dev->state == ETH_STATE_INIT))
		return;

	/* Stop Tx and Rx traffic */
	err = dpni_disable(dflt_mc_io, priv->dpni_handle);
	if (err < 0)
		printf("dpni_disable() failed\n");

#ifdef CONFIG_PHYLIB
	phy_shutdown(priv->phydev);
#endif

	ldpaa_dpbp_free();
	dpni_reset(dflt_mc_io, priv->dpni_handle);
	dpni_close(dflt_mc_io, priv->dpni_handle);
}
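
/*
 * Drain up to 'count' buffers from the buffer pool and free the backing
 * memory. Each QBMAN acquire in this driver handles at most seven buffers,
 * hence the BUG_ON() below.
 */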
static void ldpaa_dpbp_drain_cnt(int count)
{
	uint64_t buf_array[7];
	void *addr;
	int ret, i;

	BUG_ON(count > 7);

	do {
		ret = qbman_swp_acquire(dflt_dpio->sw_portal,
					dflt_dpbp->dpbp_attr.bpid,
					buf_array, count);
		if (ret < 0) {
			printf("qbman_swp_acquire() failed\n");
			return;
		}
		for (i = 0; i < ret; i++) {
			addr = (void *)buf_array[i];
			debug("Free: buffer addr =0x%p\n", addr);
			free(addr);
		}
	} while (ret);
}

static void ldpaa_dpbp_drain(void)
{
	int i;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
		ldpaa_dpbp_drain_cnt(7);
}
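
/*
 * Allocate, zero and flush seven Rx-sized buffers, then release them into
 * the buffer pool with a single QBMAN release command. Returns the number
 * of buffers actually seeded, which may be fewer than seven if an
 * allocation fails.
 */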
static int ldpaa_bp_add_7(uint16_t bpid)
{
	uint64_t buf_array[7];
	u8 *addr;
	int i;
	struct qbman_release_desc rd;

	for (i = 0; i < 7; i++) {
		addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
		if (!addr) {
			printf("addr allocation failed\n");
			goto err_alloc;
		}
		memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
		flush_dcache_range((u64)addr,
				   (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));

		buf_array[i] = (uint64_t)addr;
		debug("Release: buffer addr =0x%p\n", addr);
	}

release_bufs:
	/* In case the portal is busy, retry until successful.
	 * This function is guaranteed to succeed in a reasonable amount
	 * of time.
	 */
	do {
		mdelay(1);
		qbman_release_desc_clear(&rd);
		qbman_release_desc_set_bpid(&rd, bpid);
	} while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));

	return i;

err_alloc:
	if (i)
		goto release_bufs;

	return 0;
}

static int ldpaa_dpbp_seed(uint16_t bpid)
{
	int i;
	int count;

	for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
		count = ldpaa_bp_add_7(bpid);
		if (count < 7)
			printf("Buffer Seed= %d\n", count);
	}

	return 0;
}
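
/*
 * Open and enable the default DPBP object, read back its attributes (to
 * learn the buffer pool ID used by the QBMAN commands) and seed it with
 * Rx buffers.
 */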
static int ldpaa_dpbp_setup(void)
{
	int err;

	err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
			&dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_open() failed\n");
		goto err_open;
	}

	err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
	if (err) {
		printf("dpbp_enable() failed\n");
		goto err_enable;
	}

	err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
				  &dflt_dpbp->dpbp_attr);
	if (err) {
		printf("dpbp_get_attributes() failed\n");
		goto err_get_attr;
	}

	err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
	if (err) {
		printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
		       dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
		goto err_seed;
	}

	return 0;

err_seed:
err_get_attr:
	dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
err_enable:
	dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
err_open:
	return err;
}

static void ldpaa_dpbp_free(void)
{
	ldpaa_dpbp_drain();
	dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
	dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
	dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
}
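
/*
 * Open the DPNI and configure the Rx, Tx and Tx-confirmation buffer
 * layouts, then query the minimum Tx data offset and reserve extra room
 * in front of the frame data for the software annotation area.
 */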
static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
{
	int err;

	/* Get a handle for the DPNI this interface is associated with */
	err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
	if (err) {
		printf("dpni_open() failed\n");
		goto err_open;
	}

	err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
				  &priv->dpni_attrs);
	if (err) {
		printf("dpni_get_attributes() failed (err=%d)\n", err);
		goto err_get_attr;
	}

	/* Configure our buffers' layout */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	priv->buf_layout.pass_parser_result = true;
	priv->buf_layout.pass_frame_status = true;
	priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;

	/* ...rx, ... */
	err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
					&priv->buf_layout);
	if (err) {
		printf("dpni_set_rx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx, ... */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
	err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
					&priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* ... tx-confirm. */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
					     &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_conf_buffer_layout() failed\n");
		goto err_buf_layout;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
				      &priv->tx_data_offset);
	if (err) {
		printf("dpni_get_tx_data_offset() failed\n");
		goto err_data_offset;
	}

	/* Warn in case TX data offset is not a multiple of 64 bytes. */
	WARN_ON(priv->tx_data_offset % 64);

	/* Accommodate SWA space. */
	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);

	return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
	dpni_close(dflt_mc_io, priv->dpni_handle);
err_open:
	return err;
}
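
/*
 * Bind the default buffer pool to the DPNI and configure a default Tx
 * flow. DPNI_NEW_FLOW_ID requests a new flow; the assigned ID is returned
 * through priv->tx_flow_id and used for enqueues in ldpaa_eth_tx().
 */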
static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
{
	struct dpni_pools_cfg pools_params;
	struct dpni_tx_flow_cfg dflt_tx_flow;
	int err = 0;

	pools_params.num_dpbp = 1;
	pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
	pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
	err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
	if (err) {
		printf("dpni_set_pools() failed\n");
		return err;
	}

	priv->tx_flow_id = DPNI_NEW_FLOW_ID;
	memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));

	err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
			       &priv->tx_flow_id, &dflt_tx_flow);
	if (err) {
		printf("dpni_set_tx_flow() failed\n");
		return err;
	}

	return 0;
}

static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
{
	int err;
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;

	sprintf(net_dev->name, "DPNI%d", priv->dpni_id);
	net_dev->iobase = 0;
	net_dev->init = ldpaa_eth_open;
	net_dev->halt = ldpaa_eth_stop;
	net_dev->send = ldpaa_eth_tx;
	net_dev->recv = ldpaa_eth_pull_dequeue_rx;
	/*
	 * TODO: PHY MDIO information
	 * priv->bus = info->bus;
	 * priv->phyaddr = info->phy_addr;
	 * priv->enet_if = info->enet_if;
	 */

	if (init_phy(net_dev))
		return 0;

	err = eth_register(net_dev);
	if (err < 0) {
		printf("eth_register() = %d\n", err);
		return err;
	}

	return 0;
}
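
/*
 * Per-DPNI entry point: allocate the eth_device and driver private data,
 * record the DPNI object ID from the DPRC object descriptor and register
 * the interface with the U-Boot network core.
 */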
int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
{
	struct eth_device *net_dev = NULL;
	struct ldpaa_eth_priv *priv = NULL;
	int err = 0;

	/* Net device */
	net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
	if (!net_dev) {
		printf("eth_device malloc() failed\n");
		return -ENOMEM;
	}
	memset(net_dev, 0, sizeof(struct eth_device));

	/* alloc the ldpaa ethernet private struct */
	priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
	if (!priv) {
		printf("ldpaa_eth_priv malloc() failed\n");
		/* Don't leak the net device allocated above */
		free(net_dev);
		return -ENOMEM;
	}
	memset(priv, 0, sizeof(struct ldpaa_eth_priv));

	net_dev->priv = (void *)priv;
	priv->net_dev = (struct eth_device *)net_dev;
	priv->dpni_id = obj_desc.id;

	err = ldpaa_eth_netdev_init(net_dev);
	if (err)
		goto err_netdev_init;

	debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
	return 0;

err_netdev_init:
	free(priv);
	net_dev->priv = NULL;
	free(net_dev);

	return err;
}