[U-Boot,1/2] mvebu: neta: align DMA buffers

Message ID: 63028ec52402c6473fc144ac9f2a89f5f0604784.1527489235.git.baruch@tkos.co.il
State: Superseded
Delegated to: Stefan Roese
Series: [U-Boot,1/2] mvebu: neta: align DMA buffers

Commit Message

Baruch Siach May 28, 2018, 6:33 a.m. UTC
From: Jon Nettleton <jon@solid-run.com>

This makes sure the DMA buffers are properly aligned for the
hardware.

Signed-off-by: Jon Nettleton <jon@solid-run.com>
Signed-off-by: Baruch Siach <baruch@tkos.co.il>
---
 drivers/net/mvneta.c | 20 ++++++++++++++------
 1 file changed, 14 insertions(+), 6 deletions(-)
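
To make the intent of the commit concrete, the standalone sketch below mimics the layout the patch sets up in mvneta_probe(): one large allocation is carved into TX descriptors, RX descriptors and RX buffers, with each sub-area rounded up to the DMA alignment so the next one starts on an aligned boundary. It is an illustration, not driver code; DMA_MINALIGN, the descriptor counts/sizes and the ALIGN_UP() helper are placeholder stand-ins for ARCH_DMA_MINALIGN, MVNETA_MAX_TXD/MVNETA_MAX_RXD, the real descriptor structs and roundup().

/*
 * Standalone sketch only -- not U-Boot code.  One big area is split into
 * TX descriptors, RX descriptors and RX buffers, rounding each sub-area
 * up to the DMA alignment so every piece starts on an aligned boundary.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DMA_MINALIGN	64u	/* stand-in for ARCH_DMA_MINALIGN */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

#define MAX_TXD		16u	/* stand-in for MVNETA_MAX_TXD */
#define MAX_RXD		16u	/* stand-in for MVNETA_MAX_RXD */
#define TX_DESC_SIZE	32u	/* pretend sizeof(struct mvneta_tx_desc) */
#define RX_DESC_SIZE	32u	/* pretend sizeof(struct mvneta_rx_desc) */

int main(void)
{
	/* One big aligned area, like bd_space from memalign() in the driver */
	uint8_t *bd_space = aligned_alloc(DMA_MINALIGN, 4096);
	uintptr_t base, tx_descs, rx_descs, rx_buffers;
	size_t size;

	if (!bd_space)
		return 1;
	base = (uintptr_t)bd_space;

	tx_descs = base;			/* TX ring at the start */
	size = ALIGN_UP(MAX_TXD * TX_DESC_SIZE, DMA_MINALIGN);
	rx_descs = base + size;			/* RX ring, rounded up */
	size += ALIGN_UP(MAX_RXD * RX_DESC_SIZE, DMA_MINALIGN);
	rx_buffers = base + size;		/* RX buffers after both rings */

	printf("tx_descs   %#lx\n", (unsigned long)tx_descs);
	printf("rx_descs   %#lx\n", (unsigned long)rx_descs);
	printf("rx_buffers %#lx\n", (unsigned long)rx_buffers);

	free(bd_space);
	return 0;
}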

Comments

Stefan Roese May 29, 2018, 6:17 a.m. UTC | #1
On 28.05.2018 08:33, Baruch Siach wrote:
> From: Jon Nettleton <jon@solid-run.com>
> 
> This makes sure the DMA buffers are properly aligned for the
> hardware.
> 
> Signed-off-by: Jon Nettleton <jon@solid-run.com>
> Signed-off-by: Baruch Siach <baruch@tkos.co.il>
> ---
>   drivers/net/mvneta.c | 20 ++++++++++++++------
>   1 file changed, 14 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/net/mvneta.c b/drivers/net/mvneta.c
> index 7036b517b445..fc16986a00d6 100644
> --- a/drivers/net/mvneta.c
> +++ b/drivers/net/mvneta.c
> @@ -1025,6 +1025,9 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
>   	if (rxq->descs == NULL)
>   		return -ENOMEM;
>   
> +	BUG_ON(rxq->descs !=
> +		PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));
> +

Not sure if this really "bugs" in U-Boot, but please use WARN_ON
instead. IIRC, the usage of BUG_ON is not recommended in Linux
anymore. And warning instead of completely crashing is definitely
better in this case.

Other than this:

Reviewed-by: Stefan Roese <sr@denx.de>

Thanks,
Stefan

>   	rxq->last_desc = rxq->size - 1;
>   
>   	/* Set Rx descriptors queue starting address */
> @@ -1061,6 +1064,9 @@ static int mvneta_txq_init(struct mvneta_port *pp,
>   	if (txq->descs == NULL)
>   		return -ENOMEM;
>   
> +	BUG_ON(txq->descs !=
> +		PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));
> +
>   	txq->last_desc = txq->size - 1;
>   
>   	/* Set maximum bandwidth for enabled TXQs */
> @@ -1694,18 +1700,20 @@ static int mvneta_probe(struct udevice *dev)
>   	 * be active. Make this area DMA safe by disabling the D-cache
>   	 */
>   	if (!buffer_loc.tx_descs) {
> +		u32 size;
> +
>   		/* Align buffer area for descs and rx_buffers to 1MiB */
>   		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
>   		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
>   						DCACHE_OFF);
>   		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
> +		size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
> +				ARCH_DMA_MINALIGN);
>   		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
> -			((phys_addr_t)bd_space +
> -			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
> -		buffer_loc.rx_buffers = (phys_addr_t)
> -			(bd_space +
> -			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
> -			 MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
> +			((phys_addr_t)bd_space + size);
> +		size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
> +				ARCH_DMA_MINALIGN);
> +		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
>   	}
>   
>   	pp->base = (void __iomem *)pdata->iobase;
>
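
For readers following the review: below is a minimal sketch of what Stefan's suggestion would look like, assuming the driver keeps the same alignment check but switches BUG_ON() to WARN_ON(). The macros are stubbed locally so the example compiles outside U-Boot; it illustrates the suggestion, not the actual superseding revision of the patch. Warning instead of halting lets the board keep running even if the descriptor rings come back misaligned, which is the point of the review comment.

/*
 * Sketch of the suggested change: warn rather than halt when the
 * descriptor ring is misaligned.  WARN_ON(), PTR_ALIGN() and
 * ARCH_DMA_MINALIGN are stubbed so the snippet builds on its own; in the
 * driver the U-Boot definitions would be used and the line simply
 * replaces the BUG_ON() added by the patch.
 */
#include <stdint.h>
#include <stdio.h>

#define ARCH_DMA_MINALIGN	64u
#define PTR_ALIGN(p, a)		((void *)(((uintptr_t)(p) + (a) - 1) & \
					  ~((uintptr_t)(a) - 1)))
#define WARN_ON(cond) \
	do { \
		if (cond) \
			fprintf(stderr, "WARN at %s:%d: %s\n", \
				__FILE__, __LINE__, #cond); \
	} while (0)

int main(void)
{
	/* Pretend this is the ring returned by the descriptor allocation */
	static uint8_t ring[256] __attribute__((aligned(64)));
	void *descs = ring;

	/* The rxq_init()/txq_init() check, with WARN_ON instead of BUG_ON */
	WARN_ON(descs != PTR_ALIGN(descs, ARCH_DMA_MINALIGN));

	return 0;
}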

Patch

diff --git a/drivers/net/mvneta.c b/drivers/net/mvneta.c
index 7036b517b445..fc16986a00d6 100644
--- a/drivers/net/mvneta.c
+++ b/drivers/net/mvneta.c
@@ -1025,6 +1025,9 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 	if (rxq->descs == NULL)
 		return -ENOMEM;
 
+	BUG_ON(rxq->descs !=
+		PTR_ALIGN(rxq->descs, ARCH_DMA_MINALIGN));
+
 	rxq->last_desc = rxq->size - 1;
 
 	/* Set Rx descriptors queue starting address */
@@ -1061,6 +1064,9 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	if (txq->descs == NULL)
 		return -ENOMEM;
 
+	BUG_ON(txq->descs !=
+		PTR_ALIGN(txq->descs, ARCH_DMA_MINALIGN));
+
 	txq->last_desc = txq->size - 1;
 
 	/* Set maximum bandwidth for enabled TXQs */
@@ -1694,18 +1700,20 @@ static int mvneta_probe(struct udevice *dev)
 	 * be active. Make this area DMA safe by disabling the D-cache
 	 */
 	if (!buffer_loc.tx_descs) {
+		u32 size;
+
 		/* Align buffer area for descs and rx_buffers to 1MiB */
 		bd_space = memalign(1 << MMU_SECTION_SHIFT, BD_SPACE);
 		mmu_set_region_dcache_behaviour((phys_addr_t)bd_space, BD_SPACE,
 						DCACHE_OFF);
 		buffer_loc.tx_descs = (struct mvneta_tx_desc *)bd_space;
+		size = roundup(MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc),
+				ARCH_DMA_MINALIGN);
 		buffer_loc.rx_descs = (struct mvneta_rx_desc *)
-			((phys_addr_t)bd_space +
-			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc));
-		buffer_loc.rx_buffers = (phys_addr_t)
-			(bd_space +
-			 MVNETA_MAX_TXD * sizeof(struct mvneta_tx_desc) +
-			 MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc));
+			((phys_addr_t)bd_space + size);
+		size += roundup(MVNETA_MAX_RXD * sizeof(struct mvneta_rx_desc),
+				ARCH_DMA_MINALIGN);
+		buffer_loc.rx_buffers = (phys_addr_t)(bd_space + size);
 	}
 
 	pp->base = (void __iomem *)pdata->iobase;