ldpaa_eth.c 16 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711
  1. /*
  2. * Copyright (C) 2014 Freescale Semiconductor
  3. *
  4. * SPDX-License-Identifier: GPL-2.0+
  5. */
  6. #include <common.h>
  7. #include <asm/io.h>
  8. #include <asm/types.h>
  9. #include <malloc.h>
  10. #include <net.h>
  11. #include <hwconfig.h>
  12. #include <phy.h>
  13. #include <linux/compat.h>
  14. #include "ldpaa_eth.h"
  15. #undef CONFIG_PHYLIB
  16. static int init_phy(struct eth_device *dev)
  17. {
  18. /*TODO for external PHY */
  19. return 0;
  20. }
  21. static void ldpaa_eth_rx(struct ldpaa_eth_priv *priv,
  22. const struct dpaa_fd *fd)
  23. {
  24. u64 fd_addr;
  25. uint16_t fd_offset;
  26. uint32_t fd_length;
  27. struct ldpaa_fas *fas;
  28. uint32_t status, err;
  29. struct qbman_release_desc releasedesc;
  30. struct qbman_swp *swp = dflt_dpio->sw_portal;
  31. fd_addr = ldpaa_fd_get_addr(fd);
  32. fd_offset = ldpaa_fd_get_offset(fd);
  33. fd_length = ldpaa_fd_get_len(fd);
  34. debug("Rx frame:data addr=0x%p size=0x%x\n", (u64 *)fd_addr, fd_length);
  35. if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
  36. /* Read the frame annotation status word and check for errors */
  37. fas = (struct ldpaa_fas *)
  38. ((uint8_t *)(fd_addr) +
  39. priv->buf_layout.private_data_size);
  40. status = le32_to_cpu(fas->status);
  41. if (status & LDPAA_ETH_RX_ERR_MASK) {
  42. printf("Rx frame error(s): 0x%08x\n",
  43. status & LDPAA_ETH_RX_ERR_MASK);
  44. goto error;
  45. } else if (status & LDPAA_ETH_RX_UNSUPP_MASK) {
  46. printf("Unsupported feature in bitmask: 0x%08x\n",
  47. status & LDPAA_ETH_RX_UNSUPP_MASK);
  48. goto error;
  49. }
  50. }
  51. debug("Rx frame: To Upper layer\n");
  52. net_process_received_packet((uint8_t *)(fd_addr) + fd_offset,
  53. fd_length);
  54. error:
  55. flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
  56. qbman_release_desc_clear(&releasedesc);
  57. qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
  58. do {
  59. /* Release buffer into the QBMAN */
  60. err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
  61. } while (err == -EBUSY);
  62. return;
  63. }
/*
 * Receive entry point (net_dev->recv): issue a volatile dequeue on the
 * default Rx frame queue and, if a valid frame is returned, pass it to
 * ldpaa_eth_rx() and consume the DQRR entry.
 *
 * Returns the last return value of qbman_swp_pull() (0 or negative).
 */
static int ldpaa_eth_pull_dequeue_rx(struct eth_device *dev)
{
	struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)dev->priv;
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	/* i: retries of the pull command; loop: bound on DQRR polling */
	int i = 5, err = 0, status, loop = 20;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		/* Build a volatile dequeue command for a single frame */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->rx_dflt_fqid);
		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue frames error:0x%08x\n", err);
			continue;
		}
		/* Poll the DQRR until a response shows up or the polling
		 * budget ('loop') is exhausted.
		 */
		do {
			loop--;
			dq = qbman_swp_dqrr_next(swp);
			if (!loop)
				break;
		} while (!dq);
		if (dq) {
			/* Check for valid frame. If not sent a consume
			 * confirmation to QBMAN otherwise give it to NADK
			 * application and then send consume confirmation to
			 * QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue RX frames:");
				debug("No frame delivered\n");
				qbman_swp_dqrr_consume(swp, dq);
				break;
			}
			fd = ldpaa_dq_fd(dq);
			/* Obtain FD and process it */
			ldpaa_eth_rx(priv, fd);
			/* Hand the DQRR entry back only after the frame
			 * (and its buffer) has been fully processed.
			 */
			qbman_swp_dqrr_consume(swp, dq);
			break;
		}
	}
	return err;
}
  109. static void ldpaa_eth_tx_conf(struct ldpaa_eth_priv *priv,
  110. const struct dpaa_fd *fd)
  111. {
  112. uint64_t fd_addr;
  113. struct ldpaa_fas *fas;
  114. uint32_t status, err;
  115. struct qbman_release_desc releasedesc;
  116. struct qbman_swp *swp = dflt_dpio->sw_portal;
  117. fd_addr = ldpaa_fd_get_addr(fd);
  118. debug("TX Conf frame:data addr=0x%p\n", (u64 *)fd_addr);
  119. /* Check the status from the Frame Annotation */
  120. if (fd->simple.frc & LDPAA_FD_FRC_FASV) {
  121. fas = (struct ldpaa_fas *)
  122. ((uint8_t *)(fd_addr) +
  123. priv->buf_layout.private_data_size);
  124. status = le32_to_cpu(fas->status);
  125. if (status & LDPAA_ETH_TXCONF_ERR_MASK) {
  126. printf("TxConf frame error(s): 0x%08x\n",
  127. status & LDPAA_ETH_TXCONF_ERR_MASK);
  128. }
  129. }
  130. flush_dcache_range(fd_addr, fd_addr + LDPAA_ETH_RX_BUFFER_SIZE);
  131. qbman_release_desc_clear(&releasedesc);
  132. qbman_release_desc_set_bpid(&releasedesc, dflt_dpbp->dpbp_attr.bpid);
  133. do {
  134. /* Release buffer into the QBMAN */
  135. err = qbman_swp_release(swp, &releasedesc, &fd_addr, 1);
  136. } while (err == -EBUSY);
  137. }
/*
 * Poll the Tx-confirmation frame queue for the confirmation of a frame
 * just enqueued by ldpaa_eth_tx(); valid confirmations are handed to
 * ldpaa_eth_tx_conf(), which also recycles the Tx buffer.
 *
 * Returns the last return value of qbman_swp_pull() (0 or negative).
 */
static int ldpaa_eth_pull_dequeue_tx_conf(struct ldpaa_eth_priv *priv)
{
	const struct ldpaa_dq *dq;
	const struct dpaa_fd *fd;
	int err = 0;
	/* i: retries of the pull command; loop: bound on DQRR polling */
	int i = 5, status, loop = 20;
	static struct qbman_pull_desc pulldesc;
	struct qbman_swp *swp = dflt_dpio->sw_portal;

	while (--i) {
		/* Build a volatile dequeue command for a single frame */
		qbman_pull_desc_clear(&pulldesc);
		qbman_pull_desc_set_numframes(&pulldesc, 1);
		qbman_pull_desc_set_fq(&pulldesc, priv->tx_conf_fqid);
		err = qbman_swp_pull(swp, &pulldesc);
		if (err < 0) {
			printf("Dequeue TX conf frames error:0x%08x\n", err);
			continue;
		}
		/* Poll the DQRR until a response shows up or the polling
		 * budget ('loop') is exhausted.
		 */
		do {
			loop--;
			dq = qbman_swp_dqrr_next(swp);
			if (!loop)
				break;
		} while (!dq);
		if (dq) {
			/* Check for valid frame. If not sent a consume
			 * confirmation to QBMAN otherwise give it to NADK
			 * application and then send consume confirmation to
			 * QBMAN.
			 */
			status = (uint8_t)ldpaa_dq_flags(dq);
			if ((status & LDPAA_DQ_STAT_VALIDFRAME) == 0) {
				debug("Dequeue TX conf frames:");
				debug("No frame is delivered\n");
				qbman_swp_dqrr_consume(swp, dq);
				break;
			}
			fd = ldpaa_dq_fd(dq);
			/* Process the confirmation, then retire the DQRR
			 * entry.
			 */
			ldpaa_eth_tx_conf(priv, fd);
			qbman_swp_dqrr_consume(swp, dq);
			break;
		}
	}
	return err;
}
  182. static int ldpaa_eth_tx(struct eth_device *net_dev, void *buf, int len)
  183. {
  184. struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
  185. struct dpaa_fd fd;
  186. u64 buffer_start;
  187. int data_offset, err;
  188. struct qbman_swp *swp = dflt_dpio->sw_portal;
  189. struct qbman_eq_desc ed;
  190. /* Setup the FD fields */
  191. memset(&fd, 0, sizeof(fd));
  192. data_offset = priv->tx_data_offset;
  193. do {
  194. err = qbman_swp_acquire(dflt_dpio->sw_portal,
  195. dflt_dpbp->dpbp_attr.bpid,
  196. &buffer_start, 1);
  197. } while (err == -EBUSY);
  198. if (err < 0) {
  199. printf("qbman_swp_acquire() failed\n");
  200. return -ENOMEM;
  201. }
  202. debug("TX data: malloc buffer start=0x%p\n", (u64 *)buffer_start);
  203. memcpy(((uint8_t *)(buffer_start) + data_offset), buf, len);
  204. flush_dcache_range(buffer_start, buffer_start +
  205. LDPAA_ETH_RX_BUFFER_SIZE);
  206. ldpaa_fd_set_addr(&fd, (u64)buffer_start);
  207. ldpaa_fd_set_offset(&fd, (uint16_t)(data_offset));
  208. ldpaa_fd_set_bpid(&fd, dflt_dpbp->dpbp_attr.bpid);
  209. ldpaa_fd_set_len(&fd, len);
  210. fd.simple.ctrl = LDPAA_FD_CTRL_ASAL | LDPAA_FD_CTRL_PTA |
  211. LDPAA_FD_CTRL_PTV1;
  212. qbman_eq_desc_clear(&ed);
  213. qbman_eq_desc_set_no_orp(&ed, 0);
  214. qbman_eq_desc_set_qd(&ed, priv->tx_qdid, priv->tx_flow_id, 0);
  215. err = qbman_swp_enqueue(swp, &ed, (const struct qbman_fd *)(&fd));
  216. if (err < 0)
  217. printf("error enqueueing Tx frame\n");
  218. mdelay(1);
  219. err = ldpaa_eth_pull_dequeue_tx_conf(priv);
  220. if (err < 0)
  221. printf("error Tx Conf frame\n");
  222. return err;
  223. }
  224. static int ldpaa_eth_open(struct eth_device *net_dev, bd_t *bd)
  225. {
  226. struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
  227. struct dpni_queue_attr rx_queue_attr;
  228. struct dpni_tx_flow_attr tx_flow_attr;
  229. uint8_t mac_addr[6];
  230. int err;
  231. if (net_dev->state == ETH_STATE_ACTIVE)
  232. return 0;
  233. /* DPNI initialization */
  234. err = ldpaa_dpni_setup(priv);
  235. if (err < 0)
  236. goto err_dpni_setup;
  237. err = ldpaa_dpbp_setup();
  238. if (err < 0)
  239. goto err_dpbp_setup;
  240. /* DPNI binding DPBP */
  241. err = ldpaa_dpni_bind(priv);
  242. if (err)
  243. goto err_bind;
  244. err = dpni_get_primary_mac_addr(dflt_mc_io, priv->dpni_handle,
  245. mac_addr);
  246. if (err) {
  247. printf("dpni_get_primary_mac_addr() failed\n");
  248. return err;
  249. }
  250. memcpy(net_dev->enetaddr, mac_addr, 0x6);
  251. /* setup the MAC address */
  252. if (net_dev->enetaddr[0] & 0x01) {
  253. printf("%s: MacAddress is multcast address\n", __func__);
  254. return 1;
  255. }
  256. #ifdef CONFIG_PHYLIB
  257. /* TODO Check this path */
  258. err = phy_startup(priv->phydev);
  259. if (err) {
  260. printf("%s: Could not initialize\n", priv->phydev->dev->name);
  261. return err;
  262. }
  263. #else
  264. priv->phydev->speed = SPEED_1000;
  265. priv->phydev->link = 1;
  266. priv->phydev->duplex = DUPLEX_FULL;
  267. #endif
  268. err = dpni_enable(dflt_mc_io, priv->dpni_handle);
  269. if (err < 0) {
  270. printf("dpni_enable() failed\n");
  271. return err;
  272. }
  273. /* TODO: support multiple Rx flows */
  274. err = dpni_get_rx_flow(dflt_mc_io, priv->dpni_handle, 0, 0,
  275. &rx_queue_attr);
  276. if (err) {
  277. printf("dpni_get_rx_flow() failed\n");
  278. goto err_rx_flow;
  279. }
  280. priv->rx_dflt_fqid = rx_queue_attr.fqid;
  281. err = dpni_get_qdid(dflt_mc_io, priv->dpni_handle, &priv->tx_qdid);
  282. if (err) {
  283. printf("dpni_get_qdid() failed\n");
  284. goto err_qdid;
  285. }
  286. err = dpni_get_tx_flow(dflt_mc_io, priv->dpni_handle, priv->tx_flow_id,
  287. &tx_flow_attr);
  288. if (err) {
  289. printf("dpni_get_tx_flow() failed\n");
  290. goto err_tx_flow;
  291. }
  292. priv->tx_conf_fqid = tx_flow_attr.conf_err_attr.queue_attr.fqid;
  293. if (!priv->phydev->link)
  294. printf("%s: No link.\n", priv->phydev->dev->name);
  295. return priv->phydev->link ? 0 : -1;
  296. err_tx_flow:
  297. err_qdid:
  298. err_rx_flow:
  299. dpni_disable(dflt_mc_io, priv->dpni_handle);
  300. err_bind:
  301. ldpaa_dpbp_free();
  302. err_dpbp_setup:
  303. dpni_close(dflt_mc_io, priv->dpni_handle);
  304. err_dpni_setup:
  305. return err;
  306. }
  307. static void ldpaa_eth_stop(struct eth_device *net_dev)
  308. {
  309. struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
  310. int err = 0;
  311. if ((net_dev->state == ETH_STATE_PASSIVE) ||
  312. (net_dev->state == ETH_STATE_INIT))
  313. return;
  314. /* Stop Tx and Rx traffic */
  315. err = dpni_disable(dflt_mc_io, priv->dpni_handle);
  316. if (err < 0)
  317. printf("dpni_disable() failed\n");
  318. #ifdef CONFIG_PHYLIB
  319. phy_shutdown(priv->phydev);
  320. #endif
  321. ldpaa_dpbp_free();
  322. dpni_reset(dflt_mc_io, priv->dpni_handle);
  323. dpni_close(dflt_mc_io, priv->dpni_handle);
  324. }
  325. static void ldpaa_dpbp_drain_cnt(int count)
  326. {
  327. uint64_t buf_array[7];
  328. void *addr;
  329. int ret, i;
  330. BUG_ON(count > 7);
  331. do {
  332. ret = qbman_swp_acquire(dflt_dpio->sw_portal,
  333. dflt_dpbp->dpbp_attr.bpid,
  334. buf_array, count);
  335. if (ret < 0) {
  336. printf("qbman_swp_acquire() failed\n");
  337. return;
  338. }
  339. for (i = 0; i < ret; i++) {
  340. addr = (void *)buf_array[i];
  341. debug("Free: buffer addr =0x%p\n", addr);
  342. free(addr);
  343. }
  344. } while (ret);
  345. }
  346. static void ldpaa_dpbp_drain(void)
  347. {
  348. int i;
  349. for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7)
  350. ldpaa_dpbp_drain_cnt(7);
  351. }
  352. static int ldpaa_bp_add_7(uint16_t bpid)
  353. {
  354. uint64_t buf_array[7];
  355. u8 *addr;
  356. int i;
  357. struct qbman_release_desc rd;
  358. for (i = 0; i < 7; i++) {
  359. addr = memalign(L1_CACHE_BYTES, LDPAA_ETH_RX_BUFFER_SIZE);
  360. if (!addr) {
  361. printf("addr allocation failed\n");
  362. goto err_alloc;
  363. }
  364. memset(addr, 0x00, LDPAA_ETH_RX_BUFFER_SIZE);
  365. flush_dcache_range((u64)addr,
  366. (u64)(addr + LDPAA_ETH_RX_BUFFER_SIZE));
  367. buf_array[i] = (uint64_t)addr;
  368. debug("Release: buffer addr =0x%p\n", addr);
  369. }
  370. release_bufs:
  371. /* In case the portal is busy, retry until successful.
  372. * This function is guaranteed to succeed in a reasonable amount
  373. * of time.
  374. */
  375. do {
  376. mdelay(1);
  377. qbman_release_desc_clear(&rd);
  378. qbman_release_desc_set_bpid(&rd, bpid);
  379. } while (qbman_swp_release(dflt_dpio->sw_portal, &rd, buf_array, i));
  380. return i;
  381. err_alloc:
  382. if (i)
  383. goto release_bufs;
  384. return 0;
  385. }
  386. static int ldpaa_dpbp_seed(uint16_t bpid)
  387. {
  388. int i;
  389. int count;
  390. for (i = 0; i < LDPAA_ETH_NUM_BUFS; i += 7) {
  391. count = ldpaa_bp_add_7(bpid);
  392. if (count < 7)
  393. printf("Buffer Seed= %d\n", count);
  394. }
  395. return 0;
  396. }
  397. static int ldpaa_dpbp_setup(void)
  398. {
  399. int err;
  400. err = dpbp_open(dflt_mc_io, dflt_dpbp->dpbp_attr.id,
  401. &dflt_dpbp->dpbp_handle);
  402. if (err) {
  403. printf("dpbp_open() failed\n");
  404. goto err_open;
  405. }
  406. err = dpbp_enable(dflt_mc_io, dflt_dpbp->dpbp_handle);
  407. if (err) {
  408. printf("dpbp_enable() failed\n");
  409. goto err_enable;
  410. }
  411. err = dpbp_get_attributes(dflt_mc_io, dflt_dpbp->dpbp_handle,
  412. &dflt_dpbp->dpbp_attr);
  413. if (err) {
  414. printf("dpbp_get_attributes() failed\n");
  415. goto err_get_attr;
  416. }
  417. err = ldpaa_dpbp_seed(dflt_dpbp->dpbp_attr.bpid);
  418. if (err) {
  419. printf("Buffer seeding failed for DPBP %d (bpid=%d)\n",
  420. dflt_dpbp->dpbp_attr.id, dflt_dpbp->dpbp_attr.bpid);
  421. goto err_seed;
  422. }
  423. return 0;
  424. err_seed:
  425. err_get_attr:
  426. dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
  427. err_enable:
  428. dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
  429. err_open:
  430. return err;
  431. }
  432. static void ldpaa_dpbp_free(void)
  433. {
  434. ldpaa_dpbp_drain();
  435. dpbp_disable(dflt_mc_io, dflt_dpbp->dpbp_handle);
  436. dpbp_reset(dflt_mc_io, dflt_dpbp->dpbp_handle);
  437. dpbp_close(dflt_mc_io, dflt_dpbp->dpbp_handle);
  438. }
/*
 * Open the DPNI object bound to this interface and program its Rx, Tx
 * and Tx-confirmation buffer layouts, then compute the Tx data offset
 * used when building frame descriptors. The MC commands below are
 * order-dependent: layouts must be set before the Tx data offset can
 * be queried.
 */
static int ldpaa_dpni_setup(struct ldpaa_eth_priv *priv)
{
	int err;

	/* and get a handle for the DPNI this interface is associate with */
	err = dpni_open(dflt_mc_io, priv->dpni_id, &priv->dpni_handle);
	if (err) {
		printf("dpni_open() failed\n");
		goto err_open;
	}
	err = dpni_get_attributes(dflt_mc_io, priv->dpni_handle,
				  &priv->dpni_attrs);
	if (err) {
		printf("dpni_get_attributes() failed (err=%d)\n", err);
		goto err_get_attr;
	}

	/* Configure our buffers' layout */
	priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT |
				   DPNI_BUF_LAYOUT_OPT_FRAME_STATUS |
				   DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	priv->buf_layout.pass_parser_result = true;
	priv->buf_layout.pass_frame_status = true;
	priv->buf_layout.private_data_size = LDPAA_ETH_SWA_SIZE;
	/* ...rx, ... */
	err = dpni_set_rx_buffer_layout(dflt_mc_io, priv->dpni_handle,
					&priv->buf_layout);
	if (err) {
		printf("dpni_set_rx_buffer_layout() failed");
		goto err_buf_layout;
	}

	/* ... tx, ... */
	/* Tx reuses the Rx layout minus the parser-result option */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PARSER_RESULT;
	err = dpni_set_tx_buffer_layout(dflt_mc_io, priv->dpni_handle,
					&priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_buffer_layout() failed");
		goto err_buf_layout;
	}

	/* ... tx-confirm. */
	/* Tx-confirm additionally drops the private data area option */
	priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE;
	err = dpni_set_tx_conf_buffer_layout(dflt_mc_io, priv->dpni_handle,
					     &priv->buf_layout);
	if (err) {
		printf("dpni_set_tx_conf_buffer_layout() failed");
		goto err_buf_layout;
	}

	/* Now that we've set our tx buffer layout, retrieve the minimum
	 * required tx data offset.
	 */
	err = dpni_get_tx_data_offset(dflt_mc_io, priv->dpni_handle,
				      &priv->tx_data_offset);
	if (err) {
		printf("dpni_get_tx_data_offset() failed\n");
		goto err_data_offset;
	}

	/* Warn in case TX data offset is not multiple of 64 bytes. */
	WARN_ON(priv->tx_data_offset % 64);

	/* Accomodate SWA space. */
	priv->tx_data_offset += LDPAA_ETH_SWA_SIZE;
	debug("priv->tx_data_offset=%d\n", priv->tx_data_offset);

	return 0;

err_data_offset:
err_buf_layout:
err_get_attr:
	dpni_close(dflt_mc_io, priv->dpni_handle);
err_open:
	return err;
}
  506. static int ldpaa_dpni_bind(struct ldpaa_eth_priv *priv)
  507. {
  508. struct dpni_pools_cfg pools_params;
  509. struct dpni_tx_flow_cfg dflt_tx_flow;
  510. int err = 0;
  511. pools_params.num_dpbp = 1;
  512. pools_params.pools[0].dpbp_id = (uint16_t)dflt_dpbp->dpbp_attr.id;
  513. pools_params.pools[0].buffer_size = LDPAA_ETH_RX_BUFFER_SIZE;
  514. err = dpni_set_pools(dflt_mc_io, priv->dpni_handle, &pools_params);
  515. if (err) {
  516. printf("dpni_set_pools() failed\n");
  517. return err;
  518. }
  519. priv->tx_flow_id = DPNI_NEW_FLOW_ID;
  520. memset(&dflt_tx_flow, 0, sizeof(dflt_tx_flow));
  521. err = dpni_set_tx_flow(dflt_mc_io, priv->dpni_handle,
  522. &priv->tx_flow_id, &dflt_tx_flow);
  523. if (err) {
  524. printf("dpni_set_tx_flow() failed\n");
  525. return err;
  526. }
  527. return 0;
  528. }
  529. static int ldpaa_eth_netdev_init(struct eth_device *net_dev)
  530. {
  531. int err;
  532. struct ldpaa_eth_priv *priv = (struct ldpaa_eth_priv *)net_dev->priv;
  533. sprintf(net_dev->name, "DPNI%d", priv->dpni_id);
  534. net_dev->iobase = 0;
  535. net_dev->init = ldpaa_eth_open;
  536. net_dev->halt = ldpaa_eth_stop;
  537. net_dev->send = ldpaa_eth_tx;
  538. net_dev->recv = ldpaa_eth_pull_dequeue_rx;
  539. /*
  540. TODO: PHY MDIO information
  541. priv->bus = info->bus;
  542. priv->phyaddr = info->phy_addr;
  543. priv->enet_if = info->enet_if;
  544. */
  545. if (init_phy(net_dev))
  546. return 0;
  547. err = eth_register(net_dev);
  548. if (err < 0) {
  549. printf("eth_register() = %d\n", err);
  550. return err;
  551. }
  552. return 0;
  553. }
  554. int ldpaa_eth_init(struct dprc_obj_desc obj_desc)
  555. {
  556. struct eth_device *net_dev = NULL;
  557. struct ldpaa_eth_priv *priv = NULL;
  558. int err = 0;
  559. /* Net device */
  560. net_dev = (struct eth_device *)malloc(sizeof(struct eth_device));
  561. if (!net_dev) {
  562. printf("eth_device malloc() failed\n");
  563. return -ENOMEM;
  564. }
  565. memset(net_dev, 0, sizeof(struct eth_device));
  566. /* alloc the ldpaa ethernet private struct */
  567. priv = (struct ldpaa_eth_priv *)malloc(sizeof(struct ldpaa_eth_priv));
  568. if (!priv) {
  569. printf("ldpaa_eth_priv malloc() failed\n");
  570. return -ENOMEM;
  571. }
  572. memset(priv, 0, sizeof(struct ldpaa_eth_priv));
  573. net_dev->priv = (void *)priv;
  574. priv->net_dev = (struct eth_device *)net_dev;
  575. priv->dpni_id = obj_desc.id;
  576. err = ldpaa_eth_netdev_init(net_dev);
  577. if (err)
  578. goto err_netdev_init;
  579. debug("ldpaa ethernet: Probed interface %s\n", net_dev->name);
  580. return 0;
  581. err_netdev_init:
  582. free(priv);
  583. net_dev->priv = NULL;
  584. free(net_dev);
  585. return err;
  586. }