From patchwork Mon Dec 10 15:46:32 2012
X-Patchwork-Submitter: Ariel Elior
X-Patchwork-Id: 204962
From: "Ariel Elior"
To: "David Miller"
Cc: netdev, "Ariel Elior", "Eilon Greenstein"
Subject: [PATCH net-next v3 08/22] bnx2x: VF fastpath
Date: Mon, 10 Dec 2012 17:46:32 +0200
Message-ID: <1355154406-10855-9-git-send-email-ariele@broadcom.com>
In-Reply-To: <1355154406-10855-1-git-send-email-ariele@broadcom.com>
References: <1355154406-10855-1-git-send-email-ariele@broadcom.com>

When a VF driver is transmitting, it must supply the correct MAC address in
the parsing BD. The firmware uses this address for validation and
enforcement, and also for tx-switching.

Refactor the interrupt ack flow to allow for the different BAR addresses of
the hardware in the PF BAR vs. the VF BAR.
Signed-off-by: Ariel Elior
Signed-off-by: Eilon Greenstein
---
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  | 18 +++++-
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h  | 68 +++++++++++-----------
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 13 +----
 3 files changed, 50 insertions(+), 49 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 4e87eec..e1bf928 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3460,8 +3460,19 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			cpu_to_le16(vlan_tx_tag_get(skb));
 		tx_start_bd->bd_flags.as_bitfield |=
 			(X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
-	} else
-		tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+	} else {
+
+		/* when transmitting in a vf, start bd must hold the ethertype
+		 * for fw to enforce it
+		 */
+		if (IS_VF(bp)) {
+			tx_start_bd->vlan_or_ethertype =
+				cpu_to_le16(ntohs(eth->h_proto));
+		} else {
+			/* used by FW for packet accounting */
+			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
+		}
+	}
 
 	/* turn on parsing and get a BD */
 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
@@ -3477,7 +3488,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
 						     &pbd_e2_parsing_data,
 						     xmit_type);
-		if (IS_MF_SI(bp)) {
+
+		if (IS_MF_SI(bp) || IS_VF(bp)) {
 			/*
 			 * fill in the MAC addresses in the PBD - for local
 			 * switching
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 91e432d..6b0add2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -499,6 +499,39 @@ int bnx2x_setup_tc(struct net_device *dev, u8 num_tc);
 /* select_queue callback */
 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
 
+static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
+					struct bnx2x_fastpath *fp,
+					u16 bd_prod, u16 rx_comp_prod,
+					u16 rx_sge_prod)
+{
+	struct ustorm_eth_rx_producers rx_prods = {0};
+	u32 i;
+
+	/* Update producers */
+	rx_prods.bd_prod = bd_prod;
+	rx_prods.cqe_prod = rx_comp_prod;
+	rx_prods.sge_prod = rx_sge_prod;
+
+	/* Make sure that the BD and SGE data is updated before updating the
+	 * producers since FW might read the BD/SGE right after the producer
+	 * is updated.
+	 * This is only applicable for weak-ordered memory model archs such
+	 * as IA-64. The following barrier is also mandatory since FW will
+	 * assumes BDs must have buffers.
+	 */
+	wmb();
+
+	for (i = 0; i < sizeof(rx_prods)/4; i++)
+		REG_WR(bp, fp->ustorm_rx_prods_offset + i*4,
+		       ((u32 *)&rx_prods)[i]);
+
+	mmiowb(); /* keep prod updates ordered */
+
+	DP(NETIF_MSG_RX_STATUS,
+	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
+	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
+}
+
 /* reload helper */
 int bnx2x_reload_if_running(struct net_device *dev);
 
@@ -507,9 +540,6 @@ int bnx2x_change_mac_addr(struct net_device *dev, void *p);
 /* NAPI poll Rx part */
 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
 
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod);
-
 /* NAPI poll Tx part */
 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata);
 
@@ -612,38 +642,6 @@ static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
 	fp->fp_hc_idx = fp->sb_running_index[SM_RX_ID];
 }
 
-static inline void bnx2x_update_rx_prod_gen(struct bnx2x *bp,
-			struct bnx2x_fastpath *fp, u16 bd_prod,
-			u16 rx_comp_prod, u16 rx_sge_prod, u32 start)
-{
-	struct ustorm_eth_rx_producers rx_prods = {0};
-	u32 i;
-
-	/* Update producers */
-	rx_prods.bd_prod = bd_prod;
-	rx_prods.cqe_prod = rx_comp_prod;
-	rx_prods.sge_prod = rx_sge_prod;
-
-	/*
-	 * Make sure that the BD and SGE data is updated before updating the
-	 * producers since FW might read the BD/SGE right after the producer
-	 * is updated.
-	 * This is only applicable for weak-ordered memory model archs such
-	 * as IA-64. The following barrier is also mandatory since FW will
-	 * assumes BDs must have buffers.
-	 */
-	wmb();
-
-	for (i = 0; i < sizeof(rx_prods)/4; i++)
-		REG_WR(bp, start + i*4, ((u32 *)&rx_prods)[i]);
-
-	mmiowb(); /* keep prod updates ordered */
-
-	DP(NETIF_MSG_RX_STATUS,
-	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
-	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
-}
-
 static inline void bnx2x_igu_ack_sb_gen(struct bnx2x *bp, u8 igu_sb_id,
 					u8 segment, u16 index, u8 op,
 					u8 update, u32 igu_addr)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 6ac79ba..977d06a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -1697,15 +1697,6 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
 	return;
 }
 
-void bnx2x_update_rx_prod(struct bnx2x *bp, struct bnx2x_fastpath *fp,
-			  u16 bd_prod, u16 rx_comp_prod, u16 rx_sge_prod)
-{
-	u32 start = BAR_USTRORM_INTMEM + fp->ustorm_rx_prods_offset;
-
-	bnx2x_update_rx_prod_gen(bp, fp, bd_prod, rx_comp_prod, rx_sge_prod,
-				 start);
-}
-
 irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 {
 	struct bnx2x *bp = netdev_priv(dev_instance);
@@ -4620,8 +4611,8 @@ static void bnx2x_attn_int(struct bnx2x *bp)
 void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
 		      u16 index, u8 op, u8 update)
 {
-	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
-
+	u32 igu_addr = bp->igu_base_addr;
+	igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
 	bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
 			     igu_addr);
 }
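
For readers following the interrupt ack refactor, below is a small, stand-alone C sketch of the addressing idea behind the bnx2x_igu_ack_sb() change: rather than hard-coding the PF-only BAR_IGU_INTMEM base, the ack path adds the per-status-block offset to a base (bp->igu_base_addr) chosen once at init time, so a VF can point it at the location of the same registers inside its own BAR. This is not driver code: the FAKE_* constants and fake_* names are invented for illustration, and only the base + (ack_base + sb_id) * 8 arithmetic mirrors the patch.

/* Illustrative user-space model only -- not bnx2x driver code.
 * The constants below are placeholders, not the real register map.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PF_IGU_BASE   0x00130000u  /* stands in for BAR_IGU_INTMEM */
#define FAKE_VF_IGU_BASE   0x00000200u  /* stands in for the VF BAR location */
#define FAKE_INT_ACK_BASE  0x00000000u  /* stands in for IGU_CMD_INT_ACK_BASE */

struct fake_bp {
	bool is_vf;
	uint32_t igu_base_addr;  /* chosen once, like bp->igu_base_addr */
};

static void fake_init(struct fake_bp *bp, bool is_vf)
{
	bp->is_vf = is_vf;
	/* The point of the refactor: pick the base per function type at
	 * init time instead of hard-coding the PF constant in the ack path.
	 */
	bp->igu_base_addr = is_vf ? FAKE_VF_IGU_BASE : FAKE_PF_IGU_BASE;
}

static uint32_t fake_igu_ack_addr(const struct fake_bp *bp, uint8_t igu_sb_id)
{
	/* Same shape as the patched bnx2x_igu_ack_sb() address computation */
	return bp->igu_base_addr + (FAKE_INT_ACK_BASE + igu_sb_id) * 8;
}

int main(void)
{
	struct fake_bp pf, vf;

	fake_init(&pf, false);
	fake_init(&vf, true);

	printf("PF ack addr for sb 5: 0x%08x\n",
	       (unsigned int)fake_igu_ack_addr(&pf, 5));
	printf("VF ack addr for sb 5: 0x%08x\n",
	       (unsigned int)fake_igu_ack_addr(&vf, 5));
	return 0;
}

The new inline bnx2x_update_rx_prod() appears to follow the same pattern: it no longer adds BAR_USTRORM_INTMEM itself, so fp->ustorm_rx_prods_offset is presumably expected to already hold the producer address appropriate for the PF or VF BAR.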