[net-next,5/6] drivers: net: xgene-v2: Add transmit and receive

Message ID 1485889401-13909-6-git-send-email-isubramanian@apm.com
State Changes Requested, archived
Delegated to: David Miller
Headers show

Commit Message

Iyappan Subramanian Jan. 31, 2017, 7:03 p.m. UTC
This patch adds,
    - Transmit
    - Transmit completion poll
    - Receive poll
    - NAPI handler

and enables the driver.

Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
Signed-off-by: Keyur Chudgar <kchudgar@apm.com>
---
 drivers/net/ethernet/apm/Kconfig           |   1 +
 drivers/net/ethernet/apm/Makefile          |   1 +
 drivers/net/ethernet/apm/xgene-v2/Kconfig  |  11 ++
 drivers/net/ethernet/apm/xgene-v2/Makefile |   6 +
 drivers/net/ethernet/apm/xgene-v2/main.c   | 200 ++++++++++++++++++++++++++++-
 5 files changed, 218 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/ethernet/apm/xgene-v2/Kconfig
 create mode 100644 drivers/net/ethernet/apm/xgene-v2/Makefile

Comments

Florian Fainelli Jan. 31, 2017, 8:33 p.m. UTC | #1
On 01/31/2017 11:03 AM, Iyappan Subramanian wrote:
> This patch adds,
>     - Transmit
>     - Transmit completion poll
>     - Receive poll
>     - NAPI handler
> 
> and enables the driver.
> 
> Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
> Signed-off-by: Keyur Chudgar <kchudgar@apm.com>
> ---

> +
> +	tx_ring = pdata->tx_ring;
> +	tail = tx_ring->tail;
> +	len = skb_headlen(skb);
> +	raw_desc = &tx_ring->raw_desc[tail];
> +
> +	/* Tx descriptor not available */
> +	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
> +	    GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
> +		return NETDEV_TX_BUSY;
> +
> +	/* Packet buffers should be 64B aligned */
> +	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
> +				     GFP_ATOMIC);
> +	if (unlikely(!pkt_buf))
> +		goto out;

Can't you obtain a DMA-API mapping for skb->data and pass it down to the
hardware? This copy here is inefficient.
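
For reference, a zero-copy TX path along those lines would look roughly as
below (a minimal sketch, assuming the MAC could accept an arbitrarily
aligned buffer, which the author later says it cannot; it reuses the
driver's SET_BITS/PKT_* descriptor fields):

	dma_addr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* hand the mapped skb data straight to the descriptor, no memcpy */
	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
				   SET_BITS(PKT_SIZE, len) |
				   SET_BITS(E, 0));
	/* remember the skb and dma_unmap_single() it in the completion path */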

> +
> +	memcpy(pkt_buf, skb->data, len);
> +
> +	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
> +	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
> +	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
> +				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
> +				   SET_BITS(PKT_ADDRH,
> +					    dma_addr >> PKT_ADDRL_LEN));
> +
> +	dma_wmb();
> +
> +	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
> +				   SET_BITS(PKT_SIZE, len) |
> +				   SET_BITS(E, 0));
> +
> +	skb_tx_timestamp(skb);
> +	xge_wr_csr(pdata, DMATXCTRL, 1);
> +
> +	pdata->stats.tx_packets++;
> +	pdata->stats.tx_bytes += skb->len;

This is both racy and incorrect. Racy because after you write DMATXCTRL,
your TX completion can run, presumably interrupting this CPU, and free
the SKB, so you would be accessing a freed SKB here (or you would be, if
the completion handler freed the SKB as it should). It is also incorrect
because, until you are signaled a TX completion, there is no guarantee
that the packets actually made it through; you must update your stats in
the TX completion handler.
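
Concretely, the accounting would move next to where the buffer is reclaimed
in xge_txc_poll() (a sketch, assuming the skb pointer is kept until
completion as suggested below):

	skb = tx_ring->skbs[head];
	pdata->stats.tx_packets++;
	pdata->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	tx_ring->skbs[head] = NULL;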

> +
> +	tx_ring->skbs[tail] = skb;
> +	tx_ring->pkt_bufs[tail] = pkt_buf;
> +	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
> +
> +out:
> +	dev_kfree_skb_any(skb);

Don't do this; remember a pointer to the SKB and free it in the TX
completion handler, preferably in NAPI context.
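
On the xmit side that amounts to freeing only on the allocation-failure
path and otherwise leaving the skb for the completion handler to consume
(sketch, not the author's final code):

	tx_ring->skbs[tail] = skb;		/* reclaimed in xge_txc_poll() */
	tx_ring->pkt_bufs[tail] = pkt_buf;
	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);

	return NETDEV_TX_OK;

out:
	dev_kfree_skb_any(skb);			/* dma_alloc_coherent() failed */

	return NETDEV_TX_OK;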

> +
> +	return NETDEV_TX_OK;
> +}
> +
> +static void xge_txc_poll(struct net_device *ndev, unsigned int budget)
> +{
> +	struct xge_pdata *pdata = netdev_priv(ndev);
> +	struct device *dev = &pdata->pdev->dev;
> +	struct xge_desc_ring *tx_ring;
> +	struct xge_raw_desc *raw_desc;
> +	u64 addr_lo, addr_hi;
> +	dma_addr_t dma_addr;
> +	void *pkt_buf;
> +	bool pktsent;
> +	u32 data;
> +	u8 head;
> +	int i;
> +
> +	tx_ring = pdata->tx_ring;
> +	head = tx_ring->head;
> +
> +	data = xge_rd_csr(pdata, DMATXSTATUS);
> +	pktsent = data & TX_PKT_SENT;
> +	if (unlikely(!pktsent))
> +		return;
> +
> +	for (i = 0; i < budget; i++) {

TX completion handlers should run unbound and free the entire TX ring;
don't make them obey an upper bound.
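
That is, loop until the ring is drained rather than until the budget is
exhausted, for example (sketch; it assumes a reclaimed slot is recognised
by PKT_SIZE == 0, the same test xge_start_xmit() uses):

	for (;;) {
		raw_desc = &tx_ring->raw_desc[head];

		/* stop at a descriptor still owned by the MAC (E == 0)
		 * or at an already-clean slot (PKT_SIZE == 0)
		 */
		if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
		    !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
			break;

		/* ... free pkt_buf, consume the skb, update stats ... */

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}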

> +		raw_desc = &tx_ring->raw_desc[head];
> +
> +		if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
> +			break;
> +
> +		dma_rmb();
> +
> +		addr_hi = GET_BITS(PKT_ADDRH, le64_to_cpu(raw_desc->m1));
> +		addr_lo = GET_BITS(PKT_ADDRL, le64_to_cpu(raw_desc->m0));
> +		dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
> +
> +		pkt_buf = tx_ring->pkt_bufs[head];
> +
> +		/* clear pktstart address and pktsize */
> +		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
> +					   SET_BITS(PKT_SIZE, 0));
> +		xge_wr_csr(pdata, DMATXSTATUS, 1);
> +
> +		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
> +
> +		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
> +	}
> +
> +	tx_ring->head = head;
> +}
> +
> +static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
> +{
> +	struct xge_pdata *pdata = netdev_priv(ndev);
> +	struct device *dev = &pdata->pdev->dev;
> +	dma_addr_t addr_hi, addr_lo, dma_addr;
> +	struct xge_desc_ring *rx_ring;
> +	struct xge_raw_desc *raw_desc;
> +	struct sk_buff *skb;
> +	int i, npkts, ret = 0;
> +	bool pktrcvd;
> +	u32 data;
> +	u8 head;
> +	u16 len;
> +
> +	rx_ring = pdata->rx_ring;
> +	head = rx_ring->head;
> +
> +	data = xge_rd_csr(pdata, DMARXSTATUS);
> +	pktrcvd = data & RXSTATUS_RXPKTRCVD;
> +
> +	if (unlikely(!pktrcvd))
> +		return 0;
> +
> +	npkts = 0;
> +	for (i = 0; i < budget; i++) {

So pktrcvd is not an indication of the number of packets produced, just
that there are packets; that's not very convenient, and it's redundant
with the very fact of being interrupted.

> +		raw_desc = &rx_ring->raw_desc[head];
> +
> +		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
> +			break;
> +
> +		dma_rmb();
> +
> +		addr_hi = GET_BITS(PKT_ADDRH, le64_to_cpu(raw_desc->m1));
> +		addr_lo = GET_BITS(PKT_ADDRL, le64_to_cpu(raw_desc->m0));
> +		dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
> +		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));

Isn't there some kind of additional status that would indicate whether the
packet is possibly invalid (oversize, undersize, etc.)?
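
If the descriptor does carry such a status, the poll loop would normally
drop bad frames before handing them to the stack, along these lines (a
sketch; the PKT_ERR field and rx_errors counter are hypothetical, the real
bit layout is hardware specific):

	skb = rx_ring->skbs[head];
	if (unlikely(GET_BITS(PKT_ERR, le64_to_cpu(raw_desc->m0)))) {
		/* hypothetical error bit: count and drop the frame */
		pdata->stats.rx_errors++;
		dev_kfree_skb_any(skb);
	} else {
		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);
		napi_gro_receive(&pdata->napi, skb);
	}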

> +
> +		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
> +				 DMA_FROM_DEVICE);
> +
> +		skb = rx_ring->skbs[head];
> +		skb_put(skb, len);
> +
> +		skb->protocol = eth_type_trans(skb, ndev);
> +
> +		pdata->stats.rx_packets++;
> +		pdata->stats.rx_bytes += len;
> +		napi_gro_receive(&pdata->napi, skb);
> +		npkts++;
kernel test robot Jan. 31, 2017, 8:44 p.m. UTC | #2
Hi Iyappan,

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Iyappan-Subramanian/drivers-net-xgene-v2-Add-RGMII-based-1G-driver/20170201-034317
config: i386-allmodconfig (attached as .config)
compiler: gcc-6 (Debian 6.2.0-3) 6.2.0 20160901
reproduce:
        # save the attached .config to linux build tree
        make ARCH=i386 

All warnings (new ones prefixed by >>):

   In file included from include/linux/byteorder/little_endian.h:4:0,
                    from arch/x86/include/uapi/asm/byteorder.h:4,
                    from include/asm-generic/bitops/le.h:5,
                    from arch/x86/include/asm/bitops.h:517,
                    from include/linux/bitops.h:36,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/resource_ext.h:17,
                    from include/linux/acpi.h:26,
                    from drivers/net/ethernet/apm/xgene-v2/main.h:25,
                    from drivers/net/ethernet/apm/xgene-v2/main.c:22:
   drivers/net/ethernet/apm/xgene-v2/main.c: In function 'xge_refill_buffers':
>> drivers/net/ethernet/apm/xgene-v2/main.c:162:20: warning: right shift count >= width of type [-Wshift-count-overflow]
              dma_addr >> PKT_ADDRL_LEN));
                       ^
   include/uapi/linux/byteorder/little_endian.h:30:51: note: in definition of macro '__cpu_to_le64'
    #define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
                                                      ^
>> drivers/net/ethernet/apm/xgene-v2/main.c:161:9: note: in expansion of macro 'SET_BITS'
            SET_BITS(PKT_ADDRH,
            ^~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c: In function 'xge_start_xmit':
   drivers/net/ethernet/apm/xgene-v2/main.c:346:19: warning: right shift count >= width of type [-Wshift-count-overflow]
             dma_addr >> PKT_ADDRL_LEN));
                      ^
   include/uapi/linux/byteorder/little_endian.h:30:51: note: in definition of macro '__cpu_to_le64'
    #define __cpu_to_le64(x) ((__force __le64)(__u64)(x))
                                                      ^
   drivers/net/ethernet/apm/xgene-v2/main.c:345:8: note: in expansion of macro 'SET_BITS'
           SET_BITS(PKT_ADDRH,
           ^~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c: In function 'xge_rx_poll':
>> drivers/net/ethernet/apm/xgene-v2/main.c:453:23: warning: left shift count >= width of type [-Wshift-count-overflow]
      dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
                          ^~
--
   drivers/net/ethernet/apm/xgene-v2/ring.c: In function 'xge_setup_desc':
>> drivers/net/ethernet/apm/xgene-v2/ring.c:40:20: warning: right shift count >= width of type [-Wshift-count-overflow]
      dma_h = next_dma >> NEXT_DESC_ADDRL_LEN;
                       ^~
   drivers/net/ethernet/apm/xgene-v2/ring.c: In function 'xge_update_tx_desc_addr':
   drivers/net/ethernet/apm/xgene-v2/ring.c:51:42: warning: right shift count >= width of type [-Wshift-count-overflow]
     xge_wr_csr(pdata, DMATXDESCH, (dma_addr >> NEXT_DESC_ADDRL_LEN));
                                             ^~
   drivers/net/ethernet/apm/xgene-v2/ring.c: In function 'xge_update_rx_desc_addr':
   drivers/net/ethernet/apm/xgene-v2/ring.c:59:42: warning: right shift count >= width of type [-Wshift-count-overflow]
     xge_wr_csr(pdata, DMARXDESCH, (dma_addr >> NEXT_DESC_ADDRL_LEN));
                                             ^~

vim +162 drivers/net/ethernet/apm/xgene-v2/main.c

90db21d34 Iyappan Subramanian 2017-01-31   16   * GNU General Public License for more details.
90db21d34 Iyappan Subramanian 2017-01-31   17   *
90db21d34 Iyappan Subramanian 2017-01-31   18   * You should have received a copy of the GNU General Public License
90db21d34 Iyappan Subramanian 2017-01-31   19   * along with this program.  If not, see <http://www.gnu.org/licenses/>.
90db21d34 Iyappan Subramanian 2017-01-31   20   */
90db21d34 Iyappan Subramanian 2017-01-31   21  
90db21d34 Iyappan Subramanian 2017-01-31  @22  #include "main.h"
90db21d34 Iyappan Subramanian 2017-01-31   23  
90db21d34 Iyappan Subramanian 2017-01-31   24  static const struct acpi_device_id xge_acpi_match[];
90db21d34 Iyappan Subramanian 2017-01-31   25  
90db21d34 Iyappan Subramanian 2017-01-31   26  static int xge_get_resources(struct xge_pdata *pdata)
90db21d34 Iyappan Subramanian 2017-01-31   27  {
90db21d34 Iyappan Subramanian 2017-01-31   28  	struct platform_device *pdev;
90db21d34 Iyappan Subramanian 2017-01-31   29  	struct net_device *ndev;
90db21d34 Iyappan Subramanian 2017-01-31   30  	struct device *dev;
90db21d34 Iyappan Subramanian 2017-01-31   31  	struct resource *res;
90db21d34 Iyappan Subramanian 2017-01-31   32  	int phy_mode, ret = 0;
90db21d34 Iyappan Subramanian 2017-01-31   33  
90db21d34 Iyappan Subramanian 2017-01-31   34  	pdev = pdata->pdev;
90db21d34 Iyappan Subramanian 2017-01-31   35  	dev = &pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31   36  	ndev = pdata->ndev;
90db21d34 Iyappan Subramanian 2017-01-31   37  
90db21d34 Iyappan Subramanian 2017-01-31   38  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
90db21d34 Iyappan Subramanian 2017-01-31   39  	if (!res) {
90db21d34 Iyappan Subramanian 2017-01-31   40  		dev_err(dev, "Resource enet_csr not defined\n");
90db21d34 Iyappan Subramanian 2017-01-31   41  		return -ENODEV;
90db21d34 Iyappan Subramanian 2017-01-31   42  	}
90db21d34 Iyappan Subramanian 2017-01-31   43  
90db21d34 Iyappan Subramanian 2017-01-31   44  	pdata->resources.base_addr = devm_ioremap(dev, res->start,
90db21d34 Iyappan Subramanian 2017-01-31   45  						  resource_size(res));
90db21d34 Iyappan Subramanian 2017-01-31   46  	if (!pdata->resources.base_addr) {
90db21d34 Iyappan Subramanian 2017-01-31   47  		dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
90db21d34 Iyappan Subramanian 2017-01-31   48  		return -ENOMEM;
90db21d34 Iyappan Subramanian 2017-01-31   49  	}
90db21d34 Iyappan Subramanian 2017-01-31   50  
90db21d34 Iyappan Subramanian 2017-01-31   51  	if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
90db21d34 Iyappan Subramanian 2017-01-31   52  		eth_hw_addr_random(ndev);
90db21d34 Iyappan Subramanian 2017-01-31   53  
90db21d34 Iyappan Subramanian 2017-01-31   54  	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
90db21d34 Iyappan Subramanian 2017-01-31   55  
90db21d34 Iyappan Subramanian 2017-01-31   56  	phy_mode = device_get_phy_mode(dev);
90db21d34 Iyappan Subramanian 2017-01-31   57  	if (phy_mode < 0) {
90db21d34 Iyappan Subramanian 2017-01-31   58  		dev_err(dev, "Unable to get phy-connection-type\n");
90db21d34 Iyappan Subramanian 2017-01-31   59  		return phy_mode;
90db21d34 Iyappan Subramanian 2017-01-31   60  	}
90db21d34 Iyappan Subramanian 2017-01-31   61  	pdata->resources.phy_mode = phy_mode;
90db21d34 Iyappan Subramanian 2017-01-31   62  
90db21d34 Iyappan Subramanian 2017-01-31   63  	if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
90db21d34 Iyappan Subramanian 2017-01-31   64  		dev_err(dev, "Incorrect phy-connection-type specified\n");
90db21d34 Iyappan Subramanian 2017-01-31   65  		return -ENODEV;
90db21d34 Iyappan Subramanian 2017-01-31   66  	}
90db21d34 Iyappan Subramanian 2017-01-31   67  
90db21d34 Iyappan Subramanian 2017-01-31   68  	ret = platform_get_irq(pdev, 0);
90db21d34 Iyappan Subramanian 2017-01-31   69  	if (ret <= 0) {
90db21d34 Iyappan Subramanian 2017-01-31   70  		dev_err(dev, "Unable to get ENET IRQ\n");
90db21d34 Iyappan Subramanian 2017-01-31   71  		ret = ret ? : -ENXIO;
90db21d34 Iyappan Subramanian 2017-01-31   72  		return ret;
90db21d34 Iyappan Subramanian 2017-01-31   73  	}
90db21d34 Iyappan Subramanian 2017-01-31   74  	pdata->resources.irq = ret;
90db21d34 Iyappan Subramanian 2017-01-31   75  
90db21d34 Iyappan Subramanian 2017-01-31   76  	return 0;
90db21d34 Iyappan Subramanian 2017-01-31   77  }
90db21d34 Iyappan Subramanian 2017-01-31   78  
90db21d34 Iyappan Subramanian 2017-01-31   79  static void xge_delete_desc_rings(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31   80  {
90db21d34 Iyappan Subramanian 2017-01-31   81  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31   82  	struct device *dev = &pdata->pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31   83  	struct xge_desc_ring *ring;
90db21d34 Iyappan Subramanian 2017-01-31   84  
90db21d34 Iyappan Subramanian 2017-01-31   85  	ring = pdata->tx_ring;
90db21d34 Iyappan Subramanian 2017-01-31   86  	if (ring) {
90db21d34 Iyappan Subramanian 2017-01-31   87  		if (ring->skbs)
90db21d34 Iyappan Subramanian 2017-01-31   88  			devm_kfree(dev, ring->skbs);
90db21d34 Iyappan Subramanian 2017-01-31   89  		if (ring->pkt_bufs)
90db21d34 Iyappan Subramanian 2017-01-31   90  			devm_kfree(dev, ring->pkt_bufs);
90db21d34 Iyappan Subramanian 2017-01-31   91  		devm_kfree(dev, ring);
90db21d34 Iyappan Subramanian 2017-01-31   92  	}
90db21d34 Iyappan Subramanian 2017-01-31   93  
90db21d34 Iyappan Subramanian 2017-01-31   94  	ring = pdata->rx_ring;
90db21d34 Iyappan Subramanian 2017-01-31   95  	if (ring) {
90db21d34 Iyappan Subramanian 2017-01-31   96  		if (ring->skbs)
90db21d34 Iyappan Subramanian 2017-01-31   97  			devm_kfree(dev, ring->skbs);
90db21d34 Iyappan Subramanian 2017-01-31   98  		devm_kfree(dev, ring);
90db21d34 Iyappan Subramanian 2017-01-31   99  	}
90db21d34 Iyappan Subramanian 2017-01-31  100  }
90db21d34 Iyappan Subramanian 2017-01-31  101  
90db21d34 Iyappan Subramanian 2017-01-31  102  static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  103  {
90db21d34 Iyappan Subramanian 2017-01-31  104  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  105  	struct device *dev = &pdata->pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31  106  	struct xge_desc_ring *ring;
90db21d34 Iyappan Subramanian 2017-01-31  107  	u16 size;
90db21d34 Iyappan Subramanian 2017-01-31  108  
90db21d34 Iyappan Subramanian 2017-01-31  109  	ring = devm_kzalloc(dev, sizeof(struct xge_desc_ring), GFP_KERNEL);
90db21d34 Iyappan Subramanian 2017-01-31  110  	if (!ring)
90db21d34 Iyappan Subramanian 2017-01-31  111  		return NULL;
90db21d34 Iyappan Subramanian 2017-01-31  112  
90db21d34 Iyappan Subramanian 2017-01-31  113  	ring->ndev = ndev;
90db21d34 Iyappan Subramanian 2017-01-31  114  
90db21d34 Iyappan Subramanian 2017-01-31  115  	size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
90db21d34 Iyappan Subramanian 2017-01-31  116  	ring->desc_addr = dmam_alloc_coherent(dev, size, &ring->dma_addr,
90db21d34 Iyappan Subramanian 2017-01-31  117  					      GFP_KERNEL | __GFP_ZERO);
90db21d34 Iyappan Subramanian 2017-01-31  118  	if (!ring->desc_addr) {
90db21d34 Iyappan Subramanian 2017-01-31  119  		devm_kfree(dev, ring);
90db21d34 Iyappan Subramanian 2017-01-31  120  		return NULL;
90db21d34 Iyappan Subramanian 2017-01-31  121  	}
90db21d34 Iyappan Subramanian 2017-01-31  122  
90db21d34 Iyappan Subramanian 2017-01-31  123  	xge_setup_desc(ring);
90db21d34 Iyappan Subramanian 2017-01-31  124  
90db21d34 Iyappan Subramanian 2017-01-31  125  	return ring;
90db21d34 Iyappan Subramanian 2017-01-31  126  }
90db21d34 Iyappan Subramanian 2017-01-31  127  
90db21d34 Iyappan Subramanian 2017-01-31  128  static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
90db21d34 Iyappan Subramanian 2017-01-31  129  {
90db21d34 Iyappan Subramanian 2017-01-31  130  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  131  	struct xge_desc_ring *ring = pdata->rx_ring;
90db21d34 Iyappan Subramanian 2017-01-31  132  	const u8 slots = XGENE_ENET_NUM_DESC - 1;
90db21d34 Iyappan Subramanian 2017-01-31  133  	struct device *dev = &pdata->pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31  134  	struct xge_raw_desc *raw_desc;
90db21d34 Iyappan Subramanian 2017-01-31  135  	u64 addr_lo, addr_hi;
90db21d34 Iyappan Subramanian 2017-01-31  136  	u8 tail = ring->tail;
90db21d34 Iyappan Subramanian 2017-01-31  137  	struct sk_buff *skb;
90db21d34 Iyappan Subramanian 2017-01-31  138  	dma_addr_t dma_addr;
90db21d34 Iyappan Subramanian 2017-01-31  139  	u16 len;
90db21d34 Iyappan Subramanian 2017-01-31  140  	int i;
90db21d34 Iyappan Subramanian 2017-01-31  141  
90db21d34 Iyappan Subramanian 2017-01-31  142  	for (i = 0; i < nbuf; i++) {
90db21d34 Iyappan Subramanian 2017-01-31  143  		raw_desc = &ring->raw_desc[tail];
90db21d34 Iyappan Subramanian 2017-01-31  144  
90db21d34 Iyappan Subramanian 2017-01-31  145  		len = XGENE_ENET_STD_MTU;
90db21d34 Iyappan Subramanian 2017-01-31  146  		skb = netdev_alloc_skb(ndev, len);
90db21d34 Iyappan Subramanian 2017-01-31  147  		if (unlikely(!skb))
90db21d34 Iyappan Subramanian 2017-01-31  148  			return -ENOMEM;
90db21d34 Iyappan Subramanian 2017-01-31  149  
90db21d34 Iyappan Subramanian 2017-01-31  150  		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
90db21d34 Iyappan Subramanian 2017-01-31  151  		if (dma_mapping_error(dev, dma_addr)) {
90db21d34 Iyappan Subramanian 2017-01-31  152  			netdev_err(ndev, "DMA mapping error\n");
90db21d34 Iyappan Subramanian 2017-01-31  153  			dev_kfree_skb_any(skb);
90db21d34 Iyappan Subramanian 2017-01-31  154  			return -EINVAL;
90db21d34 Iyappan Subramanian 2017-01-31  155  		}
90db21d34 Iyappan Subramanian 2017-01-31  156  
90db21d34 Iyappan Subramanian 2017-01-31  157  		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
90db21d34 Iyappan Subramanian 2017-01-31  158  		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
90db21d34 Iyappan Subramanian 2017-01-31  159  		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
90db21d34 Iyappan Subramanian 2017-01-31  160  					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
90db21d34 Iyappan Subramanian 2017-01-31 @161  					   SET_BITS(PKT_ADDRH,
90db21d34 Iyappan Subramanian 2017-01-31 @162  						    dma_addr >> PKT_ADDRL_LEN));
90db21d34 Iyappan Subramanian 2017-01-31  163  
90db21d34 Iyappan Subramanian 2017-01-31  164  		dma_wmb();
90db21d34 Iyappan Subramanian 2017-01-31  165  		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
90db21d34 Iyappan Subramanian 2017-01-31  166  					   SET_BITS(E, 1));
dde456a0a Iyappan Subramanian 2017-01-31  167  
90db21d34 Iyappan Subramanian 2017-01-31  168  		ring->skbs[tail] = skb;
90db21d34 Iyappan Subramanian 2017-01-31  169  		tail = (tail + 1) & slots;
90db21d34 Iyappan Subramanian 2017-01-31  170  	}
dde456a0a Iyappan Subramanian 2017-01-31  171  	xge_wr_csr(pdata, DMARXCTRL, 1);
90db21d34 Iyappan Subramanian 2017-01-31  172  
90db21d34 Iyappan Subramanian 2017-01-31  173  	ring->tail = tail;
90db21d34 Iyappan Subramanian 2017-01-31  174  
90db21d34 Iyappan Subramanian 2017-01-31  175  	return 0;
90db21d34 Iyappan Subramanian 2017-01-31  176  }
90db21d34 Iyappan Subramanian 2017-01-31  177  
90db21d34 Iyappan Subramanian 2017-01-31  178  static int xge_create_desc_rings(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  179  {
90db21d34 Iyappan Subramanian 2017-01-31  180  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  181  	struct device *dev = &pdata->pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31  182  	struct xge_desc_ring *ring;
90db21d34 Iyappan Subramanian 2017-01-31  183  	int ret;
90db21d34 Iyappan Subramanian 2017-01-31  184  
90db21d34 Iyappan Subramanian 2017-01-31  185  	/* create tx ring */
90db21d34 Iyappan Subramanian 2017-01-31  186  	ring = xge_create_desc_ring(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  187  	if (!ring)
90db21d34 Iyappan Subramanian 2017-01-31  188  		return -ENOMEM;
90db21d34 Iyappan Subramanian 2017-01-31  189  
90db21d34 Iyappan Subramanian 2017-01-31  190  	ring->skbs = devm_kcalloc(dev, XGENE_ENET_NUM_DESC,
90db21d34 Iyappan Subramanian 2017-01-31  191  				  sizeof(struct sk_buff *), GFP_KERNEL);
90db21d34 Iyappan Subramanian 2017-01-31  192  	if (!ring->skbs)
90db21d34 Iyappan Subramanian 2017-01-31  193  		goto err;
90db21d34 Iyappan Subramanian 2017-01-31  194  
90db21d34 Iyappan Subramanian 2017-01-31  195  	ring->pkt_bufs = devm_kcalloc(dev, XGENE_ENET_NUM_DESC,
90db21d34 Iyappan Subramanian 2017-01-31  196  				  sizeof(void *), GFP_KERNEL);
90db21d34 Iyappan Subramanian 2017-01-31  197  	if (!ring->pkt_bufs)
90db21d34 Iyappan Subramanian 2017-01-31  198  		goto err;
90db21d34 Iyappan Subramanian 2017-01-31  199  
90db21d34 Iyappan Subramanian 2017-01-31  200  	pdata->tx_ring = ring;
90db21d34 Iyappan Subramanian 2017-01-31  201  	xge_update_tx_desc_addr(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  202  
90db21d34 Iyappan Subramanian 2017-01-31  203  	/* create rx ring */
90db21d34 Iyappan Subramanian 2017-01-31  204  	ring = xge_create_desc_ring(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  205  	if (!ring)
90db21d34 Iyappan Subramanian 2017-01-31  206  		goto err;
90db21d34 Iyappan Subramanian 2017-01-31  207  
90db21d34 Iyappan Subramanian 2017-01-31  208  	ring->skbs = devm_kcalloc(dev, XGENE_ENET_NUM_DESC,
90db21d34 Iyappan Subramanian 2017-01-31  209  				  sizeof(struct sk_buff *), GFP_KERNEL);
90db21d34 Iyappan Subramanian 2017-01-31  210  	if (!ring->skbs)
90db21d34 Iyappan Subramanian 2017-01-31  211  		goto err;
90db21d34 Iyappan Subramanian 2017-01-31  212  
90db21d34 Iyappan Subramanian 2017-01-31  213  	pdata->rx_ring = ring;
90db21d34 Iyappan Subramanian 2017-01-31  214  	xge_update_rx_desc_addr(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  215  
90db21d34 Iyappan Subramanian 2017-01-31  216  	ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
90db21d34 Iyappan Subramanian 2017-01-31  217  	if (!ret)
90db21d34 Iyappan Subramanian 2017-01-31  218  		return 0;
90db21d34 Iyappan Subramanian 2017-01-31  219  
90db21d34 Iyappan Subramanian 2017-01-31  220  err:
90db21d34 Iyappan Subramanian 2017-01-31  221  	xge_delete_desc_rings(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  222  
90db21d34 Iyappan Subramanian 2017-01-31  223  	return -ENOMEM;
90db21d34 Iyappan Subramanian 2017-01-31  224  }
90db21d34 Iyappan Subramanian 2017-01-31  225  
90db21d34 Iyappan Subramanian 2017-01-31  226  static int xge_init_hw(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  227  {
90db21d34 Iyappan Subramanian 2017-01-31  228  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  229  	int ret;
90db21d34 Iyappan Subramanian 2017-01-31  230  
90db21d34 Iyappan Subramanian 2017-01-31  231  	ret = xge_port_reset(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  232  	if (ret)
90db21d34 Iyappan Subramanian 2017-01-31  233  		return ret;
90db21d34 Iyappan Subramanian 2017-01-31  234  
90db21d34 Iyappan Subramanian 2017-01-31  235  	xge_create_desc_rings(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  236  	xge_port_init(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  237  	pdata->nbufs = NUM_BUFS;
90db21d34 Iyappan Subramanian 2017-01-31  238  
90db21d34 Iyappan Subramanian 2017-01-31  239  	return 0;
90db21d34 Iyappan Subramanian 2017-01-31  240  }
90db21d34 Iyappan Subramanian 2017-01-31  241  
90db21d34 Iyappan Subramanian 2017-01-31  242  static irqreturn_t xge_irq(const int irq, void *data)
90db21d34 Iyappan Subramanian 2017-01-31  243  {
90db21d34 Iyappan Subramanian 2017-01-31  244  	struct xge_pdata *pdata = data;
90db21d34 Iyappan Subramanian 2017-01-31  245  
90db21d34 Iyappan Subramanian 2017-01-31  246  	if (napi_schedule_prep(&pdata->napi)) {
90db21d34 Iyappan Subramanian 2017-01-31  247  		xge_intr_disable(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  248  		__napi_schedule(&pdata->napi);
90db21d34 Iyappan Subramanian 2017-01-31  249  	}
90db21d34 Iyappan Subramanian 2017-01-31  250  
90db21d34 Iyappan Subramanian 2017-01-31  251  	return IRQ_HANDLED;
90db21d34 Iyappan Subramanian 2017-01-31  252  }
90db21d34 Iyappan Subramanian 2017-01-31  253  
90db21d34 Iyappan Subramanian 2017-01-31  254  static int xge_request_irq(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  255  {
90db21d34 Iyappan Subramanian 2017-01-31  256  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  257  	struct device *dev = &pdata->pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31  258  	int ret;
90db21d34 Iyappan Subramanian 2017-01-31  259  
90db21d34 Iyappan Subramanian 2017-01-31  260  	snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
90db21d34 Iyappan Subramanian 2017-01-31  261  
90db21d34 Iyappan Subramanian 2017-01-31  262  	ret = devm_request_irq(dev, pdata->resources.irq, xge_irq,
90db21d34 Iyappan Subramanian 2017-01-31  263  			       0, pdata->irq_name, pdata);
90db21d34 Iyappan Subramanian 2017-01-31  264  	if (ret)
90db21d34 Iyappan Subramanian 2017-01-31  265  		netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
90db21d34 Iyappan Subramanian 2017-01-31  266  
90db21d34 Iyappan Subramanian 2017-01-31  267  	return ret;
90db21d34 Iyappan Subramanian 2017-01-31  268  }
90db21d34 Iyappan Subramanian 2017-01-31  269  
90db21d34 Iyappan Subramanian 2017-01-31  270  static void xge_free_irq(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  271  {
90db21d34 Iyappan Subramanian 2017-01-31  272  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  273  	struct device *dev = &pdata->pdev->dev;
90db21d34 Iyappan Subramanian 2017-01-31  274  
90db21d34 Iyappan Subramanian 2017-01-31  275  	devm_free_irq(dev, pdata->resources.irq, pdata);
90db21d34 Iyappan Subramanian 2017-01-31  276  }
90db21d34 Iyappan Subramanian 2017-01-31  277  
90db21d34 Iyappan Subramanian 2017-01-31  278  static int xge_open(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  279  {
90db21d34 Iyappan Subramanian 2017-01-31  280  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  281  	int ret;
90db21d34 Iyappan Subramanian 2017-01-31  282  
dde456a0a Iyappan Subramanian 2017-01-31  283  	napi_enable(&pdata->napi);
dde456a0a Iyappan Subramanian 2017-01-31  284  
90db21d34 Iyappan Subramanian 2017-01-31  285  	ret = xge_request_irq(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  286  	if (ret)
90db21d34 Iyappan Subramanian 2017-01-31  287  		return ret;
90db21d34 Iyappan Subramanian 2017-01-31  288  
90db21d34 Iyappan Subramanian 2017-01-31  289  	xge_intr_enable(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  290  
90db21d34 Iyappan Subramanian 2017-01-31  291  	xge_mac_enable(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  292  	netif_start_queue(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  293  
90db21d34 Iyappan Subramanian 2017-01-31  294  	return 0;
90db21d34 Iyappan Subramanian 2017-01-31  295  }
90db21d34 Iyappan Subramanian 2017-01-31  296  
90db21d34 Iyappan Subramanian 2017-01-31  297  static int xge_close(struct net_device *ndev)
90db21d34 Iyappan Subramanian 2017-01-31  298  {
90db21d34 Iyappan Subramanian 2017-01-31  299  	struct xge_pdata *pdata = netdev_priv(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  300  
90db21d34 Iyappan Subramanian 2017-01-31  301  	netif_stop_queue(ndev);
90db21d34 Iyappan Subramanian 2017-01-31  302  	xge_mac_disable(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  303  
dde456a0a Iyappan Subramanian 2017-01-31  304  	xge_intr_disable(pdata);
90db21d34 Iyappan Subramanian 2017-01-31  305  	xge_free_irq(ndev);
dde456a0a Iyappan Subramanian 2017-01-31  306  	napi_disable(&pdata->napi);
90db21d34 Iyappan Subramanian 2017-01-31  307  
90db21d34 Iyappan Subramanian 2017-01-31  308  	return 0;
90db21d34 Iyappan Subramanian 2017-01-31  309  }
90db21d34 Iyappan Subramanian 2017-01-31  310  
dde456a0a Iyappan Subramanian 2017-01-31  311  static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
dde456a0a Iyappan Subramanian 2017-01-31  312  {
dde456a0a Iyappan Subramanian 2017-01-31  313  	struct xge_pdata *pdata = netdev_priv(ndev);
dde456a0a Iyappan Subramanian 2017-01-31  314  	struct device *dev = &pdata->pdev->dev;
dde456a0a Iyappan Subramanian 2017-01-31  315  	static dma_addr_t dma_addr;
dde456a0a Iyappan Subramanian 2017-01-31  316  	struct xge_desc_ring *tx_ring;
dde456a0a Iyappan Subramanian 2017-01-31  317  	struct xge_raw_desc *raw_desc;
dde456a0a Iyappan Subramanian 2017-01-31  318  	u64 addr_lo, addr_hi;
dde456a0a Iyappan Subramanian 2017-01-31  319  	void *pkt_buf;
dde456a0a Iyappan Subramanian 2017-01-31  320  	u8 tail;
dde456a0a Iyappan Subramanian 2017-01-31  321  	u16 len;
dde456a0a Iyappan Subramanian 2017-01-31  322  
dde456a0a Iyappan Subramanian 2017-01-31  323  	tx_ring = pdata->tx_ring;
dde456a0a Iyappan Subramanian 2017-01-31  324  	tail = tx_ring->tail;
dde456a0a Iyappan Subramanian 2017-01-31  325  	len = skb_headlen(skb);
dde456a0a Iyappan Subramanian 2017-01-31  326  	raw_desc = &tx_ring->raw_desc[tail];
dde456a0a Iyappan Subramanian 2017-01-31  327  
dde456a0a Iyappan Subramanian 2017-01-31  328  	/* Tx descriptor not available */
dde456a0a Iyappan Subramanian 2017-01-31  329  	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
dde456a0a Iyappan Subramanian 2017-01-31  330  	    GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
dde456a0a Iyappan Subramanian 2017-01-31  331  		return NETDEV_TX_BUSY;
dde456a0a Iyappan Subramanian 2017-01-31  332  
dde456a0a Iyappan Subramanian 2017-01-31  333  	/* Packet buffers should be 64B aligned */
dde456a0a Iyappan Subramanian 2017-01-31  334  	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
dde456a0a Iyappan Subramanian 2017-01-31  335  				     GFP_ATOMIC);
dde456a0a Iyappan Subramanian 2017-01-31  336  	if (unlikely(!pkt_buf))
dde456a0a Iyappan Subramanian 2017-01-31  337  		goto out;
dde456a0a Iyappan Subramanian 2017-01-31  338  
dde456a0a Iyappan Subramanian 2017-01-31  339  	memcpy(pkt_buf, skb->data, len);
dde456a0a Iyappan Subramanian 2017-01-31  340  
dde456a0a Iyappan Subramanian 2017-01-31  341  	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
dde456a0a Iyappan Subramanian 2017-01-31  342  	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
dde456a0a Iyappan Subramanian 2017-01-31  343  	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
dde456a0a Iyappan Subramanian 2017-01-31  344  				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
dde456a0a Iyappan Subramanian 2017-01-31 @345  				   SET_BITS(PKT_ADDRH,
dde456a0a Iyappan Subramanian 2017-01-31  346  					    dma_addr >> PKT_ADDRL_LEN));
dde456a0a Iyappan Subramanian 2017-01-31  347  
dde456a0a Iyappan Subramanian 2017-01-31  348  	dma_wmb();

:::::: The code at line 162 was first introduced by commit
:::::: 90db21d344b12f41c93e821a69a96a60453e9dd8 drivers: net: xgene-v2: Add base driver

:::::: TO: Iyappan Subramanian <isubramanian@apm.com>
:::::: CC: 0day robot <fengguang.wu@intel.com>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
kernel test robot Jan. 31, 2017, 8:49 p.m. UTC | #3
Hi Iyappan,

[auto build test WARNING on net-next/master]

url:    https://github.com/0day-ci/linux/commits/Iyappan-Subramanian/drivers-net-xgene-v2-Add-RGMII-based-1G-driver/20170201-034317
config: parisc-allyesconfig (attached as .config)
compiler: hppa-linux-gnu-gcc (Debian 6.1.1-9) 6.1.1 20160705
reproduce:
        wget https://git.kernel.org/cgit/linux/kernel/git/wfg/lkp-tests.git/plain/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # save the attached .config to linux build tree
        make.cross ARCH=parisc 

All warnings (new ones prefixed by >>):

   In file included from include/linux/swab.h:4:0,
                    from include/uapi/linux/byteorder/big_endian.h:12,
                    from include/linux/byteorder/big_endian.h:4,
                    from arch/parisc/include/uapi/asm/byteorder.h:4,
                    from arch/parisc/include/asm/bitops.h:10,
                    from include/linux/bitops.h:36,
                    from include/linux/kernel.h:10,
                    from include/linux/list.h:8,
                    from include/linux/resource_ext.h:17,
                    from include/linux/acpi.h:26,
                    from drivers/net/ethernet/apm/xgene-v2/main.h:25,
                    from drivers/net/ethernet/apm/xgene-v2/main.c:22:
   drivers/net/ethernet/apm/xgene-v2/main.c: In function 'xge_refill_buffers':
   drivers/net/ethernet/apm/xgene-v2/main.c:162:20: warning: right shift count >= width of type [-Wshift-count-overflow]
              dma_addr >> PKT_ADDRL_LEN));
                       ^
   include/uapi/linux/swab.h:129:32: note: in definition of macro '__swab64'
     (__builtin_constant_p((__u64)(x)) ? \
                                   ^
>> include/linux/byteorder/generic.h:85:21: note: in expansion of macro '__cpu_to_le64'
    #define cpu_to_le64 __cpu_to_le64
                        ^~~~~~~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c:161:9: note: in expansion of macro 'SET_BITS'
            SET_BITS(PKT_ADDRH,
            ^~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c:162:20: warning: right shift count >= width of type [-Wshift-count-overflow]
              dma_addr >> PKT_ADDRL_LEN));
                       ^
   include/uapi/linux/swab.h:131:12: note: in definition of macro '__swab64'
     __fswab64(x))
               ^
>> include/linux/byteorder/generic.h:85:21: note: in expansion of macro '__cpu_to_le64'
    #define cpu_to_le64 __cpu_to_le64
                        ^~~~~~~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c:161:9: note: in expansion of macro 'SET_BITS'
            SET_BITS(PKT_ADDRH,
            ^~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c: In function 'xge_start_xmit':
   drivers/net/ethernet/apm/xgene-v2/main.c:346:19: warning: right shift count >= width of type [-Wshift-count-overflow]
             dma_addr >> PKT_ADDRL_LEN));
                      ^
   include/uapi/linux/swab.h:129:32: note: in definition of macro '__swab64'
     (__builtin_constant_p((__u64)(x)) ? \
                                   ^
>> include/linux/byteorder/generic.h:85:21: note: in expansion of macro '__cpu_to_le64'
    #define cpu_to_le64 __cpu_to_le64
                        ^~~~~~~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c:345:8: note: in expansion of macro 'SET_BITS'
           SET_BITS(PKT_ADDRH,
           ^~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c:346:19: warning: right shift count >= width of type [-Wshift-count-overflow]
             dma_addr >> PKT_ADDRL_LEN));
                      ^
   include/uapi/linux/swab.h:131:12: note: in definition of macro '__swab64'
     __fswab64(x))
               ^
>> include/linux/byteorder/generic.h:85:21: note: in expansion of macro '__cpu_to_le64'
    #define cpu_to_le64 __cpu_to_le64
                        ^~~~~~~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c:345:8: note: in expansion of macro 'SET_BITS'
           SET_BITS(PKT_ADDRH,
           ^~~~~~~~
   drivers/net/ethernet/apm/xgene-v2/main.c: In function 'xge_rx_poll':
   drivers/net/ethernet/apm/xgene-v2/main.c:453:23: warning: left shift count >= width of type [-Wshift-count-overflow]
      dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
                          ^~

vim +/__cpu_to_le64 +85 include/linux/byteorder/generic.h

^1da177e Linus Torvalds 2005-04-16  69   *	cpu_to_[bl]eXX(__uXX x)
^1da177e Linus Torvalds 2005-04-16  70   *	[bl]eXX_to_cpu(__uXX x)
^1da177e Linus Torvalds 2005-04-16  71   *
^1da177e Linus Torvalds 2005-04-16  72   * The same, but takes a pointer to the value to convert
^1da177e Linus Torvalds 2005-04-16  73   *	cpu_to_[bl]eXXp(__uXX x)
^1da177e Linus Torvalds 2005-04-16  74   *	[bl]eXX_to_cpup(__uXX x)
^1da177e Linus Torvalds 2005-04-16  75   *
^1da177e Linus Torvalds 2005-04-16  76   * The same, but change in situ
^1da177e Linus Torvalds 2005-04-16  77   *	cpu_to_[bl]eXXs(__uXX x)
^1da177e Linus Torvalds 2005-04-16  78   *	[bl]eXX_to_cpus(__uXX x)
^1da177e Linus Torvalds 2005-04-16  79   *
^1da177e Linus Torvalds 2005-04-16  80   * See asm-foo/byteorder.h for examples of how to provide
^1da177e Linus Torvalds 2005-04-16  81   * architecture-optimized versions
^1da177e Linus Torvalds 2005-04-16  82   *
^1da177e Linus Torvalds 2005-04-16  83   */
^1da177e Linus Torvalds 2005-04-16  84  
^1da177e Linus Torvalds 2005-04-16 @85  #define cpu_to_le64 __cpu_to_le64
^1da177e Linus Torvalds 2005-04-16  86  #define le64_to_cpu __le64_to_cpu
^1da177e Linus Torvalds 2005-04-16  87  #define cpu_to_le32 __cpu_to_le32
^1da177e Linus Torvalds 2005-04-16  88  #define le32_to_cpu __le32_to_cpu
^1da177e Linus Torvalds 2005-04-16  89  #define cpu_to_le16 __cpu_to_le16
^1da177e Linus Torvalds 2005-04-16  90  #define le16_to_cpu __le16_to_cpu
^1da177e Linus Torvalds 2005-04-16  91  #define cpu_to_be64 __cpu_to_be64
^1da177e Linus Torvalds 2005-04-16  92  #define be64_to_cpu __be64_to_cpu
^1da177e Linus Torvalds 2005-04-16  93  #define cpu_to_be32 __cpu_to_be32

:::::: The code at line 85 was first introduced by commit
:::::: 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Linux-2.6.12-rc2

:::::: TO: Linus Torvalds <torvalds@ppc970.osdl.org>
:::::: CC: Linus Torvalds <torvalds@ppc970.osdl.org>

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
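
The -Wshift-count-overflow warnings above come from shifting a dma_addr_t,
which is only 32 bits wide on these configs, by PKT_ADDRL_LEN. The usual
fix is to do the arithmetic in 64 bits, for example (a sketch of the
common pattern, not the author's follow-up):

	/* xge_refill_buffers() / xge_start_xmit(): widen before shifting */
	SET_BITS(PKT_ADDRH, (u64)dma_addr >> PKT_ADDRL_LEN)

	/* xge_rx_poll(): build the address in a u64, then narrow it */
	u64 addr = ((u64)addr_hi << PKT_ADDRL_LEN) | addr_lo;
	dma_addr = (dma_addr_t)addr;
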
David Laight Feb. 1, 2017, 11:09 a.m. UTC | #4
From Florian Fainelli
> Sent: 31 January 2017 20:33
> On 01/31/2017 11:03 AM, Iyappan Subramanian wrote:
> > This patch adds,
> >     - Transmit
> >     - Transmit completion poll
> >     - Receive poll
> >     - NAPI handler
> >
> > and enables the driver.
> >
> > Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
> > Signed-off-by: Keyur Chudgar <kchudgar@apm.com>
> > ---
>
> > +
> > +	tx_ring = pdata->tx_ring;
> > +	tail = tx_ring->tail;
> > +	len = skb_headlen(skb);
> > +	raw_desc = &tx_ring->raw_desc[tail];
> > +
> > +	/* Tx descriptor not available */
> > +	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
> > +	    GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
> > +		return NETDEV_TX_BUSY;

Aren't you supposed to detect 'ring full' and stop the code
giving you packets to transmit.

> > +
> > +	/* Packet buffers should be 64B aligned */

Is that really a requirement of the hardware?
Almost all ethernet frames are 4n+2 aligned.

> > +	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
> > +				     GFP_ATOMIC);
> > +	if (unlikely(!pkt_buf))
> > +		goto out;
>
> Can't you obtain a DMA-API mapping for skb->data and pass it down to the
> hardware? This copy here is inefficient.
>
> > +
> > +	memcpy(pkt_buf, skb->data, len);

You really need to verify that the len <= XGENE_ENET_STD_MTU.

Isn't this code only transmitting the 'head' of the packet?
What about the fragments??
...
	David
Iyappan Subramanian Feb. 27, 2017, 5:08 a.m. UTC | #5
Hi Florian,

On Tue, Jan 31, 2017 at 12:33 PM, Florian Fainelli <f.fainelli@gmail.com> wrote:
> On 01/31/2017 11:03 AM, Iyappan Subramanian wrote:
>> This patch adds,
>>     - Transmit
>>     - Transmit completion poll
>>     - Receive poll
>>     - NAPI handler
>>
>> and enables the driver.
>>
>> Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
>> Signed-off-by: Keyur Chudgar <kchudgar@apm.com>
>> ---
>
>> +
>> +     tx_ring = pdata->tx_ring;
>> +     tail = tx_ring->tail;
>> +     len = skb_headlen(skb);
>> +     raw_desc = &tx_ring->raw_desc[tail];
>> +
>> +     /* Tx descriptor not available */
>> +     if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
>> +         GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
>> +             return NETDEV_TX_BUSY;
>> +
>> +     /* Packet buffers should be 64B aligned */
>> +     pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
>> +                                  GFP_ATOMIC);
>> +     if (unlikely(!pkt_buf))
>> +             goto out;
>
> Can't you obtain a DMA-API mapping for skb->data and pass it down to the
> hardware? This copy here is inefficient.

This hardware requires 64-byte alignment.

>
>> +
>> +     memcpy(pkt_buf, skb->data, len);
>> +
>> +     addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
>> +     addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
>> +     raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
>> +                                SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
>> +                                SET_BITS(PKT_ADDRH,
>> +                                         dma_addr >> PKT_ADDRL_LEN));
>> +
>> +     dma_wmb();
>> +
>> +     raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
>> +                                SET_BITS(PKT_SIZE, len) |
>> +                                SET_BITS(E, 0));
>> +
>> +     skb_tx_timestamp(skb);
>> +     xge_wr_csr(pdata, DMATXCTRL, 1);
>> +
>> +     pdata->stats.tx_packets++;
>> +     pdata->stats.tx_bytes += skb->len;
>
> This is both racy and incorrect. Racy because after you write DMATXCTRL,
> your TX completion can run, presumably interrupting this CPU, and free
> the SKB, so you would be accessing a freed SKB here (or you would be, if
> the completion handler freed the SKB as it should). It is also incorrect
> because, until you are signaled a TX completion, there is no guarantee
> that the packets actually made it through; you must update your stats in
> the TX completion handler.

Thanks.  I'll move the tx stats part to Tx completion.

>
>> +
>> +     tx_ring->skbs[tail] = skb;
>> +     tx_ring->pkt_bufs[tail] = pkt_buf;
>> +     tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
>> +
>> +out:
>> +     dev_kfree_skb_any(skb);
>
> Don't do this; remember a pointer to the SKB and free it in the TX
> completion handler, preferably in NAPI context.

I'll implement this.

>
>> +
>> +     return NETDEV_TX_OK;
>> +}
>> +
>> +static void xge_txc_poll(struct net_device *ndev, unsigned int budget)
>> +{
>> +     struct xge_pdata *pdata = netdev_priv(ndev);
>> +     struct device *dev = &pdata->pdev->dev;
>> +     struct xge_desc_ring *tx_ring;
>> +     struct xge_raw_desc *raw_desc;
>> +     u64 addr_lo, addr_hi;
>> +     dma_addr_t dma_addr;
>> +     void *pkt_buf;
>> +     bool pktsent;
>> +     u32 data;
>> +     u8 head;
>> +     int i;
>> +
>> +     tx_ring = pdata->tx_ring;
>> +     head = tx_ring->head;
>> +
>> +     data = xge_rd_csr(pdata, DMATXSTATUS);
>> +     pktsent = data & TX_PKT_SENT;
>> +     if (unlikely(!pktsent))
>> +             return;
>> +
>> +     for (i = 0; i < budget; i++) {
>
> TX completion handlers should run unbound and free the entire TX ring;
> don't make them obey an upper bound.

I'll do as suggested.

>
>> +             raw_desc = &tx_ring->raw_desc[head];
>> +
>> +             if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
>> +                     break;
>> +
>> +             dma_rmb();
>> +
>> +             addr_hi = GET_BITS(PKT_ADDRH, le64_to_cpu(raw_desc->m1));
>> +             addr_lo = GET_BITS(PKT_ADDRL, le64_to_cpu(raw_desc->m0));
>> +             dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
>> +
>> +             pkt_buf = tx_ring->pkt_bufs[head];
>> +
>> +             /* clear pktstart address and pktsize */
>> +             raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
>> +                                        SET_BITS(PKT_SIZE, 0));
>> +             xge_wr_csr(pdata, DMATXSTATUS, 1);
>> +
>> +             dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
>> +
>> +             head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
>> +     }
>> +
>> +     tx_ring->head = head;
>> +}
>> +
>> +static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
>> +{
>> +     struct xge_pdata *pdata = netdev_priv(ndev);
>> +     struct device *dev = &pdata->pdev->dev;
>> +     dma_addr_t addr_hi, addr_lo, dma_addr;
>> +     struct xge_desc_ring *rx_ring;
>> +     struct xge_raw_desc *raw_desc;
>> +     struct sk_buff *skb;
>> +     int i, npkts, ret = 0;
>> +     bool pktrcvd;
>> +     u32 data;
>> +     u8 head;
>> +     u16 len;
>> +
>> +     rx_ring = pdata->rx_ring;
>> +     head = rx_ring->head;
>> +
>> +     data = xge_rd_csr(pdata, DMARXSTATUS);
>> +     pktrcvd = data & RXSTATUS_RXPKTRCVD;
>> +
>> +     if (unlikely(!pktrcvd))
>> +             return 0;
>> +
>> +     npkts = 0;
>> +     for (i = 0; i < budget; i++) {
>
> So pktrcvd is not an indication of the number of packets produced, just
> that there are packets; that's not very convenient, and it's redundant
> with the very fact of being interrupted.

Agree, but since the interrupt is common to Tx completion and Rx, this
check is still required.

>
>> +             raw_desc = &rx_ring->raw_desc[head];
>> +
>> +             if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
>> +                     break;
>> +
>> +             dma_rmb();
>> +
>> +             addr_hi = GET_BITS(PKT_ADDRH, le64_to_cpu(raw_desc->m1));
>> +             addr_lo = GET_BITS(PKT_ADDRL, le64_to_cpu(raw_desc->m0));
>> +             dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
>> +             len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
>
> Isn't there some kind of additional status that would indicate whether the
> packet is possibly invalid (oversize, undersize, etc.)?

I'll add error checking.

>
>> +
>> +             dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
>> +                              DMA_FROM_DEVICE);
>> +
>> +             skb = rx_ring->skbs[head];
>> +             skb_put(skb, len);
>> +
>> +             skb->protocol = eth_type_trans(skb, ndev);
>> +
>> +             pdata->stats.rx_packets++;
>> +             pdata->stats.rx_bytes += len;
>> +             napi_gro_receive(&pdata->napi, skb);
>> +             npkts++;
>
> --
> Florian
Iyappan Subramanian Feb. 27, 2017, 5:11 a.m. UTC | #6
On Wed, Feb 1, 2017 at 3:09 AM, David Laight <David.Laight@aculab.com> wrote:
> From Florian Fainelli
>> Sent: 31 January 2017 20:33
>> On 01/31/2017 11:03 AM, Iyappan Subramanian wrote:
>> > This patch adds,
>> >     - Transmit
>> >     - Transmit completion poll
>> >     - Receive poll
>> >     - NAPI handler
>> >
>> > and enables the driver.
>> >
>> > Signed-off-by: Iyappan Subramanian <isubramanian@apm.com>
>> > Signed-off-by: Keyur Chudgar <kchudgar@apm.com>
>> > ---
>>
>> > +
>> > +   tx_ring = pdata->tx_ring;
>> > +   tail = tx_ring->tail;
>> > +   len = skb_headlen(skb);
>> > +   raw_desc = &tx_ring->raw_desc[tail];
>> > +
>> > +   /* Tx descriptor not available */
>> > +   if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
>> > +       GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
>> > +           return NETDEV_TX_BUSY;
>
> Aren't you supposed to detect 'ring full' and stop the code
> giving you packets to transmit.

I'll add stop queue and wake queue.
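
The usual pattern is to stop the queue from the xmit path when the next
slot is not free and to wake it from TX completion once descriptors have
been reclaimed (a minimal sketch, not the final patch):

	/* xge_start_xmit(): instead of just returning NETDEV_TX_BUSY */
	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
	    GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0))) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	/* xge_txc_poll(): after reclaiming descriptors */
	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);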

>
>> > +
>> > +   /* Packet buffers should be 64B aligned */
>
> Is that really a requirement of the hardware?
> Almost all ethernet frames are 4n+2 aligned.

Yes, it's a hardware requirement.

>
>> > +   pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
>> > +                                GFP_ATOMIC);
>> > +   if (unlikely(!pkt_buf))
>> > +           goto out;
>>
>> Can't you obtain a DMA-API mapping for skb->data and pass it down to the
>> hardware? This copy here is inefficient.
>>
>> > +
>> > +   memcpy(pkt_buf, skb->data, len);
>
> You really need to verify that the len <= XGENE_ENET_STD_MTU.

This version of the driver doesn't support jumbo frames, so the
check is not required.

>
> Isn't this code only transmitting the 'head' of the packet?
> What about the fragments??

This driver doesn't enable SG yet.

> ...
>         David
>
Patch

diff --git a/drivers/net/ethernet/apm/Kconfig b/drivers/net/ethernet/apm/Kconfig
index ec63d70..59efe5b 100644
--- a/drivers/net/ethernet/apm/Kconfig
+++ b/drivers/net/ethernet/apm/Kconfig
@@ -1 +1,2 @@ 
 source "drivers/net/ethernet/apm/xgene/Kconfig"
+source "drivers/net/ethernet/apm/xgene-v2/Kconfig"
diff --git a/drivers/net/ethernet/apm/Makefile b/drivers/net/ethernet/apm/Makefile
index 65ce32a..946b2a4 100644
--- a/drivers/net/ethernet/apm/Makefile
+++ b/drivers/net/ethernet/apm/Makefile
@@ -3,3 +3,4 @@ 
 #
 
 obj-$(CONFIG_NET_XGENE) += xgene/
+obj-$(CONFIG_NET_XGENE_V2) += xgene-v2/
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
new file mode 100644
index 0000000..1205861
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -0,0 +1,11 @@ 
+config NET_XGENE_V2
+	tristate "APM X-Gene SoC Ethernet-v2 Driver"
+	depends on HAS_DMA
+	depends on ARCH_XGENE || COMPILE_TEST
+	help
+	  This is the Ethernet driver for the on-chip ethernet interface
+	  which uses a linked list of DMA descriptor architecture (v2) for
+	  APM X-Gene SoCs.
+
+	  To compile this driver as a module, choose M here. This module will
+	  be called xgene-enet-v2.
diff --git a/drivers/net/ethernet/apm/xgene-v2/Makefile b/drivers/net/ethernet/apm/xgene-v2/Makefile
new file mode 100644
index 0000000..735309c
--- /dev/null
+++ b/drivers/net/ethernet/apm/xgene-v2/Makefile
@@ -0,0 +1,6 @@ 
+#
+# Makefile for APM X-Gene Ethernet v2 driver
+#
+
+xgene-enet-v2-objs := main.o mac.o enet.o ring.o
+obj-$(CONFIG_NET_XGENE_V2) += xgene-enet-v2.o
diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c
index 3881f27..fc90298 100644
--- a/drivers/net/ethernet/apm/xgene-v2/main.c
+++ b/drivers/net/ethernet/apm/xgene-v2/main.c
@@ -164,9 +164,11 @@  static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
 		dma_wmb();
 		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
 					   SET_BITS(E, 1));
+
 		ring->skbs[tail] = skb;
 		tail = (tail + 1) & slots;
 	}
+	xge_wr_csr(pdata, DMARXCTRL, 1);
 
 	ring->tail = tail;
 
@@ -278,13 +280,14 @@  static int xge_open(struct net_device *ndev)
 	struct xge_pdata *pdata = netdev_priv(ndev);
 	int ret;
 
+	napi_enable(&pdata->napi);
+
 	ret = xge_request_irq(ndev);
 	if (ret)
 		return ret;
 
 	xge_intr_enable(pdata);
 
-	xge_wr_csr(pdata, DMARXCTRL, 1);
 	xge_mac_enable(pdata);
 	netif_start_queue(ndev);
 
@@ -298,11 +301,204 @@  static int xge_close(struct net_device *ndev)
 	netif_stop_queue(ndev);
 	xge_mac_disable(pdata);
 
+	xge_intr_disable(pdata);
 	xge_free_irq(ndev);
+	napi_disable(&pdata->napi);
 
 	return 0;
 }
 
+static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	static dma_addr_t dma_addr;
+	struct xge_desc_ring *tx_ring;
+	struct xge_raw_desc *raw_desc;
+	u64 addr_lo, addr_hi;
+	void *pkt_buf;
+	u8 tail;
+	u16 len;
+
+	tx_ring = pdata->tx_ring;
+	tail = tx_ring->tail;
+	len = skb_headlen(skb);
+	raw_desc = &tx_ring->raw_desc[tail];
+
+	/* Tx descriptor not available */
+	if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)) ||
+	    GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
+		return NETDEV_TX_BUSY;
+
+	/* Packet buffers should be 64B aligned */
+	pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
+				     GFP_ATOMIC);
+	if (unlikely(!pkt_buf))
+		goto out;
+
+	memcpy(pkt_buf, skb->data, len);
+
+	addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
+	addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
+	raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
+				   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
+				   SET_BITS(PKT_ADDRH,
+					    dma_addr >> PKT_ADDRL_LEN));
+
+	dma_wmb();
+
+	raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
+				   SET_BITS(PKT_SIZE, len) |
+				   SET_BITS(E, 0));
+
+	skb_tx_timestamp(skb);
+	xge_wr_csr(pdata, DMATXCTRL, 1);
+
+	pdata->stats.tx_packets++;
+	pdata->stats.tx_bytes += skb->len;
+
+	tx_ring->skbs[tail] = skb;
+	tx_ring->pkt_bufs[tail] = pkt_buf;
+	tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
+
+out:
+	dev_kfree_skb_any(skb);
+
+	return NETDEV_TX_OK;
+}
+
+static void xge_txc_poll(struct net_device *ndev, unsigned int budget)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	struct xge_desc_ring *tx_ring;
+	struct xge_raw_desc *raw_desc;
+	u64 addr_lo, addr_hi;
+	dma_addr_t dma_addr;
+	void *pkt_buf;
+	bool pktsent;
+	u32 data;
+	u8 head;
+	int i;
+
+	tx_ring = pdata->tx_ring;
+	head = tx_ring->head;
+
+	data = xge_rd_csr(pdata, DMATXSTATUS);
+	pktsent = data & TX_PKT_SENT;
+	if (unlikely(!pktsent))
+		return;
+
+	for (i = 0; i < budget; i++) {
+		raw_desc = &tx_ring->raw_desc[head];
+
+		if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+			break;
+
+		dma_rmb();
+
+		addr_hi = GET_BITS(PKT_ADDRH, le64_to_cpu(raw_desc->m1));
+		addr_lo = GET_BITS(PKT_ADDRL, le64_to_cpu(raw_desc->m0));
+		dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
+
+		pkt_buf = tx_ring->pkt_bufs[head];
+
+		/* clear pktstart address and pktsize */
+		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
+					   SET_BITS(PKT_SIZE, 0));
+		xge_wr_csr(pdata, DMATXSTATUS, 1);
+
+		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
+
+		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+	}
+
+	tx_ring->head = head;
+}
+
+static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
+{
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	struct device *dev = &pdata->pdev->dev;
+	dma_addr_t addr_hi, addr_lo, dma_addr;
+	struct xge_desc_ring *rx_ring;
+	struct xge_raw_desc *raw_desc;
+	struct sk_buff *skb;
+	int i, npkts, ret = 0;
+	bool pktrcvd;
+	u32 data;
+	u8 head;
+	u16 len;
+
+	rx_ring = pdata->rx_ring;
+	head = rx_ring->head;
+
+	data = xge_rd_csr(pdata, DMARXSTATUS);
+	pktrcvd = data & RXSTATUS_RXPKTRCVD;
+
+	if (unlikely(!pktrcvd))
+		return 0;
+
+	npkts = 0;
+	for (i = 0; i < budget; i++) {
+		raw_desc = &rx_ring->raw_desc[head];
+
+		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
+			break;
+
+		dma_rmb();
+
+		addr_hi = GET_BITS(PKT_ADDRH, le64_to_cpu(raw_desc->m1));
+		addr_lo = GET_BITS(PKT_ADDRL, le64_to_cpu(raw_desc->m0));
+		dma_addr = (addr_hi << PKT_ADDRL_LEN) | addr_lo;
+		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
+
+		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
+				 DMA_FROM_DEVICE);
+
+		skb = rx_ring->skbs[head];
+		skb_put(skb, len);
+
+		skb->protocol = eth_type_trans(skb, ndev);
+
+		pdata->stats.rx_packets++;
+		pdata->stats.rx_bytes += len;
+		napi_gro_receive(&pdata->napi, skb);
+		npkts++;
+
+		ret = xge_refill_buffers(ndev, 1);
+		xge_wr_csr(pdata, DMARXSTATUS, 1);
+
+		if (ret)
+			break;
+
+		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
+	}
+
+	rx_ring->head = head;
+
+	return npkts;
+}
+
+static int xge_napi(struct napi_struct *napi, const int budget)
+{
+	struct net_device *ndev = napi->dev;
+	struct xge_pdata *pdata = netdev_priv(ndev);
+	int processed;
+
+	pdata = netdev_priv(ndev);
+
+	xge_txc_poll(ndev, budget);
+	processed = xge_rx_poll(ndev, budget);
+
+	if (processed < budget) {
+		napi_complete(napi);
+		xge_intr_enable(pdata);
+	}
+
+	return processed;
+}
+
 static int xge_set_mac_addr(struct net_device *ndev, void *addr)
 {
 	struct xge_pdata *pdata = netdev_priv(ndev);
@@ -345,6 +541,7 @@  static void xge_get_stats64(struct net_device *ndev,
 static const struct net_device_ops xgene_ndev_ops = {
 	.ndo_open = xge_open,
 	.ndo_stop = xge_close,
+	.ndo_start_xmit = xge_start_xmit,
 	.ndo_set_mac_address = xge_set_mac_addr,
 	.ndo_tx_timeout = xge_timeout,
 	.ndo_get_stats64 = xge_get_stats64,
@@ -388,6 +585,7 @@  static int xge_probe(struct platform_device *pdev)
 	if (ret)
 		goto err;
 
+	netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
 	ret = register_netdev(ndev);
 	if (ret) {
 		netdev_err(ndev, "Failed to register netdev\n");