@@ -1025,6 +1025,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 	if (rxq->descs == NULL)
 		return -ENOMEM;
 
+	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));
+
 	rxq->last_desc = rxq->size - 1;
 
 	/* Set Rx descriptors queue starting address */
@@ -1061,6 +1063,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	if (txq->descs == NULL)
 		return -ENOMEM;
 
+	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));
+
 	txq->last_desc = txq->size - 1;
 
 	/* Set maximum bandwidth for enabled TXQs */
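For reference, PTR_ALIGN() rounds a pointer up to the next multiple of a power-of-two boundary, so each WARN_ON() above fires only if the descriptor base is not already ARCH_DMA_MINALIGN-aligned (the CPU cache-line size on most ARM cores, typically 64 bytes). Below is a minimal stand-alone sketch of the same predicate, with the helpers expanded by hand; the macro names and the 64-byte value are illustrative, not the driver's definitions:

#include <stdint.h>
#include <stdio.h>

/* Hand-expanded equivalents of the ALIGN()/PTR_ALIGN() helpers:
 * round x up to the next multiple of the power-of-two a. */
#define ALIGN_UP(x, a)     (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
#define PTR_ALIGN_UP(p, a) ((void *)ALIGN_UP((uintptr_t)(p), (a)))

#define DMA_MINALIGN 64 /* illustrative; ARCH_DMA_MINALIGN is per-arch */

int main(void)
{
	char buf[256];
	void *descs = buf + 3; /* deliberately misaligned */

	/* Same predicate as the WARN_ON() in the patch: a pointer is
	 * aligned iff rounding it up to the boundary changes nothing. */
	if (descs != PTR_ALIGN_UP(descs, DMA_MINALIGN))
		printf("descs %p is not %d-byte aligned\n", descs, DMA_MINALIGN);

	return 0;
}

A warning rather than a hard error seems appropriate here: the descriptor areas come out of the bd_space region that the probe() hunk below lays out, so the check documents and enforces an alignment assumption rather than handling a runtime failure.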
@@ -1694,18 +1698,20 @@ static int mvneta_probe(struct udevice *dev)
 	 * be active. Make this area DMA safe by disabling the D-cache
 	 */
 	if (!buffer_loc.tx_descs) {
+		u32 size;
+
 		/* Align buffer area for descs and rx_buffers to 1MiB */
 		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
 		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
 						DCACHE_OFF);
 		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
+		size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
+			       ARCH_DMA_MINALIGN);
 		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
-			((phys_addr_t)bd_space +
-			MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
-		buffer_loc.rx_buffers = (phys_addr_t)
-			(bd_space +
-			MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
-			MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
+			((phys_addr_t)bd_space + size);
+		size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
+				ARCH_DMA_MINALIGN);
+		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
 	}
 
 	pp->base = (void __iomem *)pdata->iobase;
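The probe() hunk is where the alignment is actually guaranteed. Previously rx_descs and rx_buffers started at raw byte offsets (MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc), and so on), which are cache-line aligned only if the descriptor counts and sizes happen to multiply out that way; rounding each region's size up to ARCH_DMA_MINALIGN makes every region start on a DMA-safe boundary regardless of the constants. A stand-alone sketch of the same layout arithmetic follows; all constants below are example values chosen for illustration, not the driver's:

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, a) (((x) + ((a) - 1)) & ~((uintptr_t)(a) - 1))

/* Example values only: the real MVNETA_MAX_TXD/MVNETA_MAX_RXD and
 * descriptor sizes come from the driver, and ARCH_DMA_MINALIGN is
 * per-architecture (64 on most ARM cores). */
#define MAX_TXD       10
#define MAX_RXD       32
#define TX_DESC_SIZE  24
#define RX_DESC_SIZE  24
#define DMA_MINALIGN  64

int main(void)
{
	uintptr_t bd_space = 0x100000; /* stand-in for the memalign()ed base */
	uintptr_t size;

	/* TX descriptors first, then RX descriptors, then the RX data
	 * buffers; each region's size is rounded up so the next region
	 * starts on a DMA-safe (cache-line) boundary. */
	size = ROUNDUP(MAX_TXD * TX_DESC_SIZE, DMA_MINALIGN);
	uintptr_t rx_descs = bd_space + size;

	size += ROUNDUP(MAX_RXD * RX_DESC_SIZE, DMA_MINALIGN);
	uintptr_t rx_buffers = bd_space + size;

	printf("tx_descs   @ 0x%lx\n", (unsigned long)bd_space);
	printf("rx_descs   @ 0x%lx\n", (unsigned long)rx_descs);   /* 0x100100 */
	printf("rx_buffers @ 0x%lx\n", (unsigned long)rx_buffers); /* 0x100400 */
	return 0;
}

With these example values, MAX_TXD * TX_DESC_SIZE is 240, so without the roundup the RX descriptor area would start mid cache line at offset 0xf0; the roundup moves it to 0x100. When a product is already a multiple of the alignment, the roundup is a no-op, which is why the old layout could appear to work for some descriptor counts.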