
[net-next,1/5] ionic: clean up page handling code

Message ID: 20200831233558.71417-2-snelson@pensando.io
State: Changes Requested
Delegated to: David Miller
Series: ionic: struct cleanups

Commit Message

Shannon Nelson Aug. 31, 2020, 11:35 p.m. UTC
The internal page handling can be cleaned up by passing our
local page struct rather than DMA addresses, and by putting
more of the page management code into the alloc and free routines.

Signed-off-by: Neel Patel <neel@pensando.io>
Signed-off-by: Shannon Nelson <snelson@pensando.io>
---
 .../net/ethernet/pensando/ionic/ionic_txrx.c  | 73 +++++++++++--------
 1 file changed, 42 insertions(+), 31 deletions(-)
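
For reference, the ionic_page_info structure that the patch now passes
around in place of a bare dma_addr_t pairs the page pointer with its
DMA mapping. Roughly (as defined in the driver's ionic_dev.h, shown
here for context):

	struct ionic_page_info {
		struct page *page;
		dma_addr_t dma_addr;
	};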

Comments

David Miller Sept. 1, 2020, 12:14 a.m. UTC | #1
From: Shannon Nelson <snelson@pensando.io>
Date: Mon, 31 Aug 2020 16:35:54 -0700

> @@ -100,6 +100,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
>  		frag_len = min(len, (u16)PAGE_SIZE);
>  		len -= frag_len;
>  
> +		dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
> +					len, DMA_FROM_DEVICE);
>  		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
>  			       PAGE_SIZE, DMA_FROM_DEVICE);
>  		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,

The unmap operation performs a sync, if necessary, for you.

That's the pattern of usage:

	map();
	device read/write memory
	unmap();

That's it, no more, no less.

The time to use sync is when you want to maintain the mapping and keep
using it.
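
In driver terms, a minimal sketch of the two patterns being contrasted
(illustrative only; declarations and error handling omitted):

	/* One-shot streaming DMA: the sync is implied by the unmap */
	dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... device DMAs into the page ... */
	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
	/* ... CPU may now safely read the page ... */

	/* Long-lived mapping: explicit syncs pass ownership back and
	 * forth while the mapping stays alive
	 */
	dma_sync_single_for_cpu(dev, dma_addr, len, DMA_FROM_DEVICE);
	/* ... CPU reads the buffer ... */
	dma_sync_single_for_device(dev, dma_addr, len, DMA_FROM_DEVICE);
	/* ... device may DMA into it again ... */
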
Shannon Nelson Sept. 1, 2020, 4:19 a.m. UTC | #2
On 8/31/20 5:14 PM, David Miller wrote:
> From: Shannon Nelson <snelson@pensando.io>
> Date: Mon, 31 Aug 2020 16:35:54 -0700
>
>> @@ -100,6 +100,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
>>   		frag_len = min(len, (u16)PAGE_SIZE);
>>   		len -= frag_len;
>>   
>> +		dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
>> +					len, DMA_FROM_DEVICE);
>>   		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
>>   			       PAGE_SIZE, DMA_FROM_DEVICE);
>>   		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
> The unmap operation performs a sync, if necessary, for you.
>
> That's the pattern of usage:
>
> 	map();
> 	device read/write memory
> 	unmap();
>
> That's it, no more, no less.
>
> The time to use sync is when you want to maintain the mapping and keep
> using it.

Thanks, I'll drop that part.
sln

Patch

diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index c3291decd4c3..efc02b366e73 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -100,6 +100,8 @@  static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
 		frag_len = min(len, (u16)PAGE_SIZE);
 		len -= frag_len;
 
+		dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
+					len, DMA_FROM_DEVICE);
 		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
 			       PAGE_SIZE, DMA_FROM_DEVICE);
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
@@ -266,40 +268,49 @@  void ionic_rx_flush(struct ionic_cq *cq)
 				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
 }
 
-static struct page *ionic_rx_page_alloc(struct ionic_queue *q,
-					dma_addr_t *dma_addr)
+static int ionic_rx_page_alloc(struct ionic_queue *q,
+			       struct ionic_page_info *page_info)
 {
 	struct ionic_lif *lif = q->lif;
 	struct ionic_rx_stats *stats;
 	struct net_device *netdev;
 	struct device *dev;
-	struct page *page;
 
 	netdev = lif->netdev;
 	dev = lif->ionic->dev;
 	stats = q_to_rx_stats(q);
-	page = alloc_page(GFP_ATOMIC);
-	if (unlikely(!page)) {
-		net_err_ratelimited("%s: Page alloc failed on %s!\n",
+
+	if (unlikely(!page_info)) {
+		net_err_ratelimited("%s: %s invalid page_info in alloc\n",
+				    netdev->name, q->name);
+		return -EINVAL;
+	}
+
+	page_info->page = dev_alloc_page();
+	if (unlikely(!page_info->page)) {
+		net_err_ratelimited("%s: %s page alloc failed\n",
 				    netdev->name, q->name);
 		stats->alloc_err++;
-		return NULL;
+		return -ENOMEM;
 	}
 
-	*dma_addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(dev, *dma_addr))) {
-		__free_page(page);
-		net_err_ratelimited("%s: DMA single map failed on %s!\n",
+	page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
+					   DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
+		put_page(page_info->page);
+		page_info->dma_addr = 0;
+		page_info->page = NULL;
+		net_err_ratelimited("%s: %s dma map failed\n",
 				    netdev->name, q->name);
 		stats->dma_map_err++;
-		return NULL;
+		return -EIO;
 	}
 
-	return page;
+	return 0;
 }
 
-static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
-			       dma_addr_t dma_addr)
+static void ionic_rx_page_free(struct ionic_queue *q,
+			       struct ionic_page_info *page_info)
 {
 	struct ionic_lif *lif = q->lif;
 	struct net_device *netdev;
@@ -308,15 +319,23 @@  static void ionic_rx_page_free(struct ionic_queue *q, struct page *page,
 	netdev = lif->netdev;
 	dev = lif->ionic->dev;
 
-	if (unlikely(!page)) {
-		net_err_ratelimited("%s: Trying to free unallocated buffer on %s!\n",
+	if (unlikely(!page_info)) {
+		net_err_ratelimited("%s: %s invalid page_info in free\n",
 				    netdev->name, q->name);
 		return;
 	}
 
-	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (unlikely(!page_info->page)) {
+		net_err_ratelimited("%s: %s invalid page in free\n",
+				    netdev->name, q->name);
+		return;
+	}
 
-	__free_page(page);
+	dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);
+
+	put_page(page_info->page);
+	page_info->dma_addr = 0;
+	page_info->page = NULL;
 }
 
 void ionic_rx_fill(struct ionic_queue *q)
@@ -352,8 +371,7 @@  void ionic_rx_fill(struct ionic_queue *q)
 		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
 					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
 		desc_info->npages = nfrags;
-		page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
-		if (unlikely(!page_info->page)) {
+		if (unlikely(ionic_rx_page_alloc(q, page_info))) {
 			desc->addr = 0;
 			desc->len = 0;
 			return;
@@ -370,8 +388,7 @@  void ionic_rx_fill(struct ionic_queue *q)
 				continue;
 
 			sg_elem = &sg_desc->elems[j];
-			page_info->page = ionic_rx_page_alloc(q, &page_info->dma_addr);
-			if (unlikely(!page_info->page)) {
+			if (unlikely(ionic_rx_page_alloc(q, page_info))) {
 				sg_elem->addr = 0;
 				sg_elem->len = 0;
 				return;
@@ -409,14 +426,8 @@  void ionic_rx_empty(struct ionic_queue *q)
 		desc->addr = 0;
 		desc->len = 0;
 
-		for (i = 0; i < desc_info->npages; i++) {
-			if (likely(desc_info->pages[i].page)) {
-				ionic_rx_page_free(q, desc_info->pages[i].page,
-						   desc_info->pages[i].dma_addr);
-				desc_info->pages[i].page = NULL;
-				desc_info->pages[i].dma_addr = 0;
-			}
-		}
+		for (i = 0; i < desc_info->npages; i++)
+			ionic_rx_page_free(q, &desc_info->pages[i]);
 
 		desc_info->cb_arg = NULL;
 		idx = (idx + 1) & (q->num_descs - 1);