[net-next,04/16] qlge: Remove bq_desc.maplen

Message ID: 20190617074858.32467-4-bpoirier@suse.com
State: Changes Requested
Delegated to: David Miller
Series: [net-next,01/16] qlge: Remove irq_cnt

Commit Message

Benjamin Poirier June 17, 2019, 7:48 a.m. UTC
The size of the mapping is known statically in all cases; there's no need
to save it at runtime. Remove this member.

Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
---
 drivers/net/ethernet/qlogic/qlge/qlge.h      |  1 -
 drivers/net/ethernet/qlogic/qlge/qlge_main.c | 43 +++++++-------------
 2 files changed, 15 insertions(+), 29 deletions(-)
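
For readers unfamiliar with the dma-unmap-state helpers, here is a minimal sketch of the pattern being removed. It is not code from qlge: struct rx_buf, rx_buf_unmap() and RX_BUF_SIZE are made-up names, and it uses the generic DMA API rather than the pci_* wrappers the driver calls. DEFINE_DMA_UNMAP_LEN() only earns its keep when the mapping length is not otherwise available at unmap time; when the length is a known buffer size, as in this driver, it can be passed directly.

#include <linux/dma-mapping.h>

#define RX_BUF_SIZE 2048	/* stand-in for a fixed rx buffer size such as sbq_buf_size */

/* Hypothetical receive-buffer descriptor, analogous to struct bq_desc. */
struct rx_buf {
	void *data;
	DEFINE_DMA_UNMAP_ADDR(mapaddr);
	/*
	 * DEFINE_DMA_UNMAP_LEN(maplen) would only be needed if the length
	 * of the mapping could not be recovered some other way at unmap
	 * time; with a fixed buffer size there is nothing to store.
	 */
};

static void rx_buf_unmap(struct device *dev, struct rx_buf *buf)
{
	/*
	 * The length passed here must match the one used when mapping,
	 * but it may come from a constant or a per-ring field instead of
	 * dma_unmap_len(buf, maplen).
	 */
	dma_unmap_single(dev, dma_unmap_addr(buf, mapaddr),
			 RX_BUF_SIZE, DMA_FROM_DEVICE);
}

On configurations without CONFIG_NEED_DMA_MAP_STATE these helpers compile away entirely, so the change mainly buys simpler code, plus a smaller bq_desc on platforms that do track unmap state.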

Comments

Manish Chopra June 26, 2019, 9:31 a.m. UTC | #1
> -----Original Message-----
> From: Benjamin Poirier <bpoirier@suse.com>
> Sent: Monday, June 17, 2019 1:19 PM
> To: Manish Chopra <manishc@marvell.com>; GR-Linux-NIC-Dev <GR-Linux-NIC-Dev@marvell.com>; netdev@vger.kernel.org
> Subject: [PATCH net-next 04/16] qlge: Remove bq_desc.maplen
> 
> The size of the mapping is known statically in all cases; there's no need to
> save it at runtime. Remove this member.
> 
> Signed-off-by: Benjamin Poirier <bpoirier@suse.com>
> [...]

Acked-by: Manish Chopra <manishc@marvell.com>

Patch

diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index ba61b4559dd6..f32da8c7679f 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -1373,7 +1373,6 @@  struct bq_desc {
 	__le64 *addr;
 	u32 index;
 	DEFINE_DMA_UNMAP_ADDR(mapaddr);
-	DEFINE_DMA_UNMAP_LEN(maplen);
 };
 
 #define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 9df06ad3fb93..25dbaa9cc55d 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1108,8 +1108,6 @@  static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 			map = lbq_desc->p.pg_chunk.map +
 				lbq_desc->p.pg_chunk.offset;
 			dma_unmap_addr_set(lbq_desc, mapaddr, map);
-			dma_unmap_len_set(lbq_desc, maplen,
-					  qdev->lbq_buf_size);
 			*lbq_desc->addr = cpu_to_le64(map);
 
 			pci_dma_sync_single_for_device(qdev->pdev, map,
@@ -1177,8 +1175,6 @@  static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
 					return;
 				}
 				dma_unmap_addr_set(sbq_desc, mapaddr, map);
-				dma_unmap_len_set(sbq_desc, maplen,
-						  rx_ring->sbq_buf_size);
 				*sbq_desc->addr = cpu_to_le64(map);
 			}
 
@@ -1598,14 +1594,14 @@  static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
 
 	pci_dma_sync_single_for_cpu(qdev->pdev,
 				    dma_unmap_addr(sbq_desc, mapaddr),
-				    dma_unmap_len(sbq_desc, maplen),
+				    rx_ring->sbq_buf_size,
 				    PCI_DMA_FROMDEVICE);
 
 	skb_put_data(new_skb, skb->data, length);
 
 	pci_dma_sync_single_for_device(qdev->pdev,
 				       dma_unmap_addr(sbq_desc, mapaddr),
-				       dma_unmap_len(sbq_desc, maplen),
+				       rx_ring->sbq_buf_size,
 				       PCI_DMA_FROMDEVICE);
 	skb = new_skb;
 
@@ -1727,8 +1723,7 @@  static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				dma_unmap_addr(sbq_desc, mapaddr),
-				dma_unmap_len(sbq_desc, maplen),
-				PCI_DMA_FROMDEVICE);
+				rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
 		skb = sbq_desc->p.skb;
 		ql_realign_skb(skb, hdr_len);
 		skb_put(skb, hdr_len);
@@ -1758,19 +1753,15 @@  static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			 */
 			sbq_desc = ql_get_curr_sbuf(rx_ring);
 			pci_dma_sync_single_for_cpu(qdev->pdev,
-						    dma_unmap_addr
-						    (sbq_desc, mapaddr),
-						    dma_unmap_len
-						    (sbq_desc, maplen),
+						    dma_unmap_addr(sbq_desc,
+								   mapaddr),
+						    rx_ring->sbq_buf_size,
 						    PCI_DMA_FROMDEVICE);
 			skb_put_data(skb, sbq_desc->p.skb->data, length);
 			pci_dma_sync_single_for_device(qdev->pdev,
-						       dma_unmap_addr
-						       (sbq_desc,
-							mapaddr),
-						       dma_unmap_len
-						       (sbq_desc,
-							maplen),
+						       dma_unmap_addr(sbq_desc,
+								      mapaddr),
+						       rx_ring->sbq_buf_size,
 						       PCI_DMA_FROMDEVICE);
 		} else {
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1781,10 +1772,8 @@  static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 			ql_realign_skb(skb, length);
 			skb_put(skb, length);
 			pci_unmap_single(qdev->pdev,
-					 dma_unmap_addr(sbq_desc,
-							mapaddr),
-					 dma_unmap_len(sbq_desc,
-						       maplen),
+					 dma_unmap_addr(sbq_desc, mapaddr),
+					 rx_ring->sbq_buf_size,
 					 PCI_DMA_FROMDEVICE);
 			sbq_desc->p.skb = NULL;
 		}
@@ -1822,9 +1811,8 @@  static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 				return NULL;
 			}
 			pci_unmap_page(qdev->pdev,
-				       dma_unmap_addr(lbq_desc,
-						      mapaddr),
-				       dma_unmap_len(lbq_desc, maplen),
+				       dma_unmap_addr(lbq_desc, mapaddr),
+				       qdev->lbq_buf_size,
 				       PCI_DMA_FROMDEVICE);
 			skb_reserve(skb, NET_IP_ALIGN);
 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1858,8 +1846,7 @@  static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
 		sbq_desc = ql_get_curr_sbuf(rx_ring);
 		pci_unmap_single(qdev->pdev,
 				 dma_unmap_addr(sbq_desc, mapaddr),
-				 dma_unmap_len(sbq_desc, maplen),
-				 PCI_DMA_FROMDEVICE);
+				 rx_ring->sbq_buf_size, PCI_DMA_FROMDEVICE);
 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
 			/*
 			 * This is an non TCP/UDP IP frame, so
@@ -2820,7 +2807,7 @@  static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
 		if (sbq_desc->p.skb) {
 			pci_unmap_single(qdev->pdev,
 					 dma_unmap_addr(sbq_desc, mapaddr),
-					 dma_unmap_len(sbq_desc, maplen),
+					 rx_ring->sbq_buf_size,
 					 PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(sbq_desc->p.skb);
 			sbq_desc->p.skb = NULL;