From patchwork Sat Jan 18 02:30:04 2014
X-Patchwork-Submitter: "Brown, Aaron F"
X-Patchwork-Id: 312251
X-Patchwork-Delegate: davem@davemloft.net
From: Aaron Brown
To: davem@davemloft.net
Cc: Emil Tantilov, netdev@vger.kernel.org, gospo@redhat.com,
	sassmann@redhat.com, Alexander Duyck, Aaron Brown
Subject: [net-next 6/7] ixgbevf: redo dma mapping using the tx buffer info
Date: Fri, 17 Jan 2014 18:30:04 -0800
Message-Id: <1390012205-21995-7-git-send-email-aaron.f.brown@intel.com>
X-Mailer: git-send-email 1.8.5.GIT
In-Reply-To: <1390012205-21995-1-git-send-email-aaron.f.brown@intel.com>
References: <1390012205-21995-1-git-send-email-aaron.f.brown@intel.com>
X-Mailing-List: netdev@vger.kernel.org

From: Emil Tantilov

This patch takes advantage of the fact that the DMA buffer for the skb
header is always present in the first descriptor and mapped as single.
As such we can always call dma_unmap_single() for it and no longer need
a flag recording how each buffer was mapped in ixgbevf_clean_tx_irq().
In addition, this patch makes use of the dma_unmap_addr/dma_unmap_len
macros from the DMA API.
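For readers unfamiliar with the dma_unmap_addr/len bookkeeping this patch
switches to, a minimal sketch of the pattern follows. The struct and
helper names (tx_meta, example_map_head, example_unmap_head) are
illustrative only and not part of the patch; the DEFINE_DMA_UNMAP_*()
macros and dma_unmap_*() helpers are real kernel API from
<linux/dma-mapping.h>, and they compile to nothing on architectures that
do not need to carry unmap state.

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>

	struct tx_meta {
		/* expand to real fields only when the arch needs unmap info */
		DEFINE_DMA_UNMAP_ADDR(dma);
		DEFINE_DMA_UNMAP_LEN(len);
	};

	static int example_map_head(struct device *dev, struct tx_meta *meta,
				    void *data, unsigned int size)
	{
		dma_addr_t dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);

		if (dma_mapping_error(dev, dma))
			return -ENOMEM;

		/* record the mapping so cleanup can unmap unconditionally */
		dma_unmap_addr_set(meta, dma, dma);
		dma_unmap_len_set(meta, len, size);
		return 0;
	}

	static void example_unmap_head(struct device *dev, struct tx_meta *meta)
	{
		/* a zeroed length doubles as "nothing mapped" */
		if (dma_unmap_len(meta, len)) {
			dma_unmap_single(dev, dma_unmap_addr(meta, dma),
					 dma_unmap_len(meta, len),
					 DMA_TO_DEVICE);
			dma_unmap_len_set(meta, len, 0);
		}
	}

Because the length is zeroed after each unmap, len == 0 serves as the
"nothing mapped" marker; this is what lets the patch below drop the
IXGBE_TX_FLAGS_MAPPED_AS_PAGE flag entirely.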
Signed-off-by: Emil Tantilov
Signed-off-by: Alexander Duyck
Tested-by: Phil Schmitt
Signed-off-by: Aaron Brown
---
 drivers/net/ethernet/intel/ixgbevf/ixgbevf.h      |  5 +-
 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 94 ++++++++++++++---------
 2 files changed, 59 insertions(+), 40 deletions(-)

diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index bad3219..5482932 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -52,9 +52,9 @@ struct ixgbevf_tx_buffer {
 	unsigned int bytecount;
 	unsigned short gso_segs;
 	__be16 protocol;
-	dma_addr_t dma;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(len);
 	u32 tx_flags;
-	u16 length;
 };
 
 struct ixgbevf_rx_buffer {
@@ -147,7 +147,6 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE	(u32)(1 << 4)
 #define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT	16
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 61425f8..0fc0433 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -145,28 +145,25 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 }
 
 static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_ring *tx_ring,
-					       struct ixgbevf_tx_buffer
-					       *tx_buffer_info)
-{
-	if (tx_buffer_info->dma) {
-		if (tx_buffer_info->tx_flags & IXGBE_TX_FLAGS_MAPPED_AS_PAGE)
-			dma_unmap_page(tx_ring->dev,
-				       tx_buffer_info->dma,
-				       tx_buffer_info->length,
-				       DMA_TO_DEVICE);
-		else
+					       struct ixgbevf_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		dev_kfree_skb_any(tx_buffer->skb);
+		if (dma_unmap_len(tx_buffer, len))
 			dma_unmap_single(tx_ring->dev,
-					 tx_buffer_info->dma,
-					 tx_buffer_info->length,
+					 dma_unmap_addr(tx_buffer, dma),
+					 dma_unmap_len(tx_buffer, len),
 					 DMA_TO_DEVICE);
-		tx_buffer_info->dma = 0;
-	}
-	if (tx_buffer_info->skb) {
-		dev_kfree_skb_any(tx_buffer_info->skb);
-		tx_buffer_info->skb = NULL;
+	} else if (dma_unmap_len(tx_buffer, len)) {
+		dma_unmap_page(tx_ring->dev,
+			       dma_unmap_addr(tx_buffer, dma),
+			       dma_unmap_len(tx_buffer, len),
+			       DMA_TO_DEVICE);
 	}
-	tx_buffer_info->time_stamp = 0;
-	/* tx_buffer_info must be completely set up in the transmit path */
+	tx_buffer->next_to_watch = NULL;
+	tx_buffer->skb = NULL;
+	dma_unmap_len_set(tx_buffer, len, 0);
+	/* tx_buffer must be completely set up in the transmit path */
 }
 
 #define IXGBE_MAX_TXD_PWR	14
@@ -221,8 +218,18 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 		total_bytes += tx_buffer->bytecount;
 		total_packets += tx_buffer->gso_segs;
 
+		/* free the skb */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+
 		/* clear tx_buffer data */
-		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+		tx_buffer->skb = NULL;
+		dma_unmap_len_set(tx_buffer, len, 0);
 
 		/* unmap remaining buffers */
 		while (tx_desc != eop_desc) {
@@ -237,7 +244,14 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
 				tx_desc = IXGBEVF_TX_DESC(tx_ring, 0);
 			}
 
-			ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+			/* unmap any remaining paged data */
+			if (dma_unmap_len(tx_buffer, len)) {
+				dma_unmap_page(tx_ring->dev,
+					       dma_unmap_addr(tx_buffer, dma),
+					       dma_unmap_len(tx_buffer, len),
+					       DMA_TO_DEVICE);
+				dma_unmap_len_set(tx_buffer, len, 0);
+			}
 		}
 
 		tx_desc->wb.status = 0;
@@ -2904,6 +2918,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			  struct ixgbevf_tx_buffer *first)
 {
+	dma_addr_t dma;
 	struct sk_buff *skb = first->skb;
 	struct ixgbevf_tx_buffer *tx_buffer_info;
 	unsigned int len;
@@ -2921,14 +2936,16 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
-		tx_buffer_info->length = size;
 		tx_buffer_info->tx_flags = first->tx_flags;
-		tx_buffer_info->dma = dma_map_single(tx_ring->dev,
-						     skb->data + offset,
-						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(tx_ring->dev, tx_buffer_info->dma))
+		dma = dma_map_single(tx_ring->dev, skb->data + offset,
+				     size, DMA_TO_DEVICE);
+		if (dma_mapping_error(tx_ring->dev, dma))
 			goto dma_error;
 
+		/* record length, and DMA address */
+		dma_unmap_len_set(tx_buffer_info, len, size);
+		dma_unmap_addr_set(tx_buffer_info, dma, dma);
+
 		len -= size;
 		total -= size;
 		offset += size;
@@ -2949,16 +2966,15 @@ static int ixgbevf_tx_map(struct ixgbevf_ring *tx_ring,
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
 			size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
 
-			tx_buffer_info->length = size;
-			tx_buffer_info->dma =
-				skb_frag_dma_map(tx_ring->dev, frag,
-						 offset, size, DMA_TO_DEVICE);
-			tx_buffer_info->tx_flags |=
-				IXGBE_TX_FLAGS_MAPPED_AS_PAGE;
-			if (dma_mapping_error(tx_ring->dev,
-					      tx_buffer_info->dma))
+			dma = skb_frag_dma_map(tx_ring->dev, frag,
+					       offset, size, DMA_TO_DEVICE);
+			if (dma_mapping_error(tx_ring->dev, dma))
 				goto dma_error;
 
+			/* record length, and DMA address */
+			dma_unmap_len_set(tx_buffer_info, len, size);
+			dma_unmap_addr_set(tx_buffer_info, dma, dma);
+
 			len -= size;
 			total -= size;
 			offset += size;
@@ -3043,11 +3059,15 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring,
 	i = tx_ring->next_to_use;
 	while (count--) {
+		dma_addr_t dma;
+		unsigned int len;
+
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
+		dma = dma_unmap_addr(tx_buffer_info, dma);
+		len = dma_unmap_len(tx_buffer_info, len);
 		tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
-		tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
-		tx_desc->read.cmd_type_len =
-			cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+		tx_desc->read.buffer_addr = cpu_to_le64(dma);
+		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type_len | len);
 		tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
 		i++;
 		if (i == tx_ring->count)