Browse code

mvebu: neta: align DMA buffers

This makes sure the DMA buffers are properly aligned for the
hardware.

Reviewed-by: Stefan Roese <sr@denx.de>
Signed-off-by: Jon Nettleton <jon@solid-run.com>
Signed-off-by: Baruch Siach <baruch@tkos.co.il>
Acked-by: Joe Hershberger <joe.hershberger@ni.com>
Jon Nettleton, 7 years ago
Parent
Current commit
199b27bb70
1 changed file with 12 additions and 6 deletions
  1. +12 −6
      drivers/net/mvneta.c

+ 12 - 6
drivers/net/mvneta.c

@@ -1025,6 +1025,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 	if (rxq->descs == NULL)
 		return -ENOMEM;
 
+	WARN_ON(rxq->descs != PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));
+
 	rxq->last_desc = rxq->size - 1;
 
 	/* Set Rx descriptors queue starting address */
@@ -1061,6 +1063,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	if (txq->descs == NULL)
 		return -ENOMEM;
 
+	WARN_ON(txq->descs != PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));
+
 	txq->last_desc = txq->size - 1;
 
 	/* Set maximum bandwidth for enabled TXQs */
@@ -1694,18 +1698,20 @@ static int mvneta_probe(struct udevice *dev)
 	 * be active. Make this area DMA safe by disabling the D-cache
 	 */
 	if (!buffer_loc.tx_descs) {
+		u32 size;
+
 		/* Align buffer area for descs and rx_buffers to 1MiB */
 		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
 		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
 						DCACHE_OFF);
 		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
+		size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
+				ARCH_DMA_MINALIGN);
 		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
-			((phys_addr_t)bd_space +
-			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
-		buffer_loc.rx_buffers = (phys_addr_t)
-			(bd_space +
-			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
-			 MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
+			((phys_addr_t)bd_space + size);
+		size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
+				ARCH_DMA_MINALIGN);
+		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
 	}
 
 	pp->base = (void __iomem *)pdata->iobase;