From patchwork Sat Feb 16 01:34:47 2019
Content-Type: text/plain; charset="utf-8"
MIME-Version: 1.0
Content-Transfer-Encoding: 7bit
X-Patchwork-Submitter: Saeed Mahameed
X-Patchwork-Id: 1043351
X-Patchwork-Delegate: davem@davemloft.net
From: Saeed Mahameed
To: "David S. Miller"
Cc: netdev@vger.kernel.org, Bodong Wang, Or Gerlitz, Saeed Mahameed
Subject: [net-next 08/13] net/mlx5: E-Switch, Support load/unload reps of specific vport types
Date: Fri, 15 Feb 2019 17:34:47 -0800
Message-Id: <20190216013452.21131-9-saeedm@mellanox.com>
X-Mailer: git-send-email 2.20.1
In-Reply-To: <20190216013452.21131-1-saeedm@mellanox.com>
References: <20190216013452.21131-1-saeedm@mellanox.com>
Sender: netdev-owner@vger.kernel.org
Precedence: bulk
X-Mailing-List: netdev@vger.kernel.org

From: Bodong Wang

Currently the driver loads and unloads all reps as one indivisible group.
However, with the ECPF, the reps of special vports such as the uplink and
the host PF should always be loaded in switchdev mode, whereas the reps of
VFs will be loaded and unloaded on demand. This patch is a pre-step for
that change and doesn't change any functionality.
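
For illustration only, a minimal sketch of how the VF-only entry points added
below could be driven on demand, assuming the mlx5 eswitch internals are in
scope; the esw_vfs_changed() handler and its arguments are hypothetical and
not part of this series:

/* Hypothetical caller of the new VF-only helpers: react to a change in the
 * number of VFs without touching the uplink (and, later, host PF) reps,
 * which stay loaded for as long as switchdev mode is enabled.
 */
static int esw_vfs_changed(struct mlx5_eswitch *esw,
			   int old_num_vfs, int new_num_vfs)
{
	/* Unload the reps of VFs that are going away ... */
	esw_offloads_unload_vf_reps(esw, old_num_vfs);

	/* ... and load reps only for the VFs that exist now. */
	return esw_offloads_load_vf_reps(esw, new_num_vfs);
}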
Signed-off-by: Bodong Wang
Reviewed-by: Or Gerlitz
Signed-off-by: Saeed Mahameed
---
 .../mellanox/mlx5/core/eswitch_offloads.c     | 109 ++++++++++++++----
 1 file changed, 87 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index c6c9dad69ba8..7131d41796fb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1291,25 +1291,47 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
 	rep->rep_if[rep_type].state = REP_REGISTERED;
 }
 
-static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
-					  u8 rep_type)
+static void __unload_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
-	int vport;
-
-	mlx5_esw_for_each_vf_rep_reverse(esw, vport, rep, nvports)
-		__esw_offloads_unload_rep(esw, rep, rep_type);
 
 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
 	__esw_offloads_unload_rep(esw, rep, rep_type);
 }
 
-static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
+static void __unload_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
+				   u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int i;
+
+	mlx5_esw_for_each_vf_rep_reverse(esw, i, rep, nvports)
+		__esw_offloads_unload_rep(esw, rep, rep_type);
+}
+
+static void esw_offloads_unload_vf_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = NUM_REP_TYPES;
+
+	while (rep_type-- > 0)
+		__unload_reps_vf_vport(esw, nvports, rep_type);
+}
+
+static void __unload_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
+				    u8 rep_type)
+{
+	__unload_reps_vf_vport(esw, nvports, rep_type);
+
+	/* Special vports must be the last to unload. */
+	__unload_reps_special_vport(esw, rep_type);
+}
+
+static void esw_offloads_unload_all_reps(struct mlx5_eswitch *esw, int nvports)
 {
 	u8 rep_type = NUM_REP_TYPES;
 
 	while (rep_type-- > 0)
-		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+		__unload_reps_all_vport(esw, nvports, rep_type);
 }
 
 static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
@@ -1329,39 +1351,82 @@ static int __esw_offloads_load_rep(struct mlx5_eswitch *esw,
 	return 0;
 }
 
-static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
-				       u8 rep_type)
+static int __load_reps_special_vport(struct mlx5_eswitch *esw, u8 rep_type)
 {
 	struct mlx5_eswitch_rep *rep;
-	int vport;
 	int err;
 
 	rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
 	err = __esw_offloads_load_rep(esw, rep, rep_type);
-	if (err)
-		goto out;
+	return err;
+}
 
-	mlx5_esw_for_each_vf_rep(esw, vport, rep, nvports) {
+static int __load_reps_vf_vport(struct mlx5_eswitch *esw, int nvports,
+				u8 rep_type)
+{
+	struct mlx5_eswitch_rep *rep;
+	int err, i;
+
+	mlx5_esw_for_each_vf_rep(esw, i, rep, nvports) {
 		err = __esw_offloads_load_rep(esw, rep, rep_type);
 		if (err)
-			goto err_reps;
+			goto err_vf;
 	}
 
 	return 0;
 
+err_vf:
+	__unload_reps_vf_vport(esw, --i, rep_type);
+	return err;
+}
+
+static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
+{
+	u8 rep_type = 0;
+	int err;
+
+	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
+		err = __load_reps_vf_vport(esw, nvports, rep_type);
+		if (err)
+			goto err_reps;
+	}
+
+	return err;
+
 err_reps:
-	esw_offloads_unload_reps_type(esw, --vport, rep_type);
-out:
+	while (rep_type-- > 0)
+		__unload_reps_vf_vport(esw, nvports, rep_type);
+	return err;
+}
+
+static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
+				 u8 rep_type)
+{
+	int err;
+
+	/* Special vports must be loaded first. */
+	err = __load_reps_special_vport(esw, rep_type);
+	if (err)
+		return err;
+
+	err = __load_reps_vf_vport(esw, nvports, rep_type);
+	if (err)
+		goto err_vfs;
+
+	return 0;
+
+err_vfs:
+	__unload_reps_special_vport(esw, rep_type);
 	return err;
 }
 
-static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
 {
 	u8 rep_type = 0;
 	int err;
 
 	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
-		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
+		err = __load_reps_all_vport(esw, nvports, rep_type);
 		if (err)
 			goto err_reps;
 	}
@@ -1370,7 +1435,7 @@ static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
 
 err_reps:
 	while (rep_type-- > 0)
-		esw_offloads_unload_reps_type(esw, nvports, rep_type);
+		__unload_reps_all_vport(esw, nvports, rep_type);
 
 	return err;
 }
@@ -1520,7 +1585,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 	if (err)
 		return err;
 
-	err = esw_offloads_load_reps(esw, vf_nvports);
+	err = esw_offloads_load_all_reps(esw, vf_nvports);
 	if (err)
 		goto err_reps;
 
@@ -1556,7 +1621,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 	u16 num_vfs = esw->dev->priv.sriov.num_vfs;
 
 	esw_offloads_devcom_cleanup(esw);
-	esw_offloads_unload_reps(esw, num_vfs);
+	esw_offloads_unload_all_reps(esw, num_vfs);
 	esw_offloads_steering_cleanup(esw);
 }