@@ -200,6 +200,8 @@ enum ice_feature {
ICE_F_PTP_EXTTS,
ICE_F_SMA_CTRL,
ICE_F_GNSS,
+ ICE_F_ROCE_LAG,
+ ICE_F_SRIOV_LAG,
ICE_F_MAX
};
@@ -567,6 +569,7 @@ struct ice_pf {
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
struct mutex adev_mutex; /* lock to protect aux device access */
+ struct mutex lag_mutex; /* protect ice_lag struct in PF */
u32 msg_enable;
struct ice_ptp ptp;
struct gnss_serial *gnss_serial;
@@ -636,6 +639,8 @@ struct ice_pf {
struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
};
+extern struct workqueue_struct *ice_lag_wq;
+
struct ice_netdev_priv {
struct ice_vsi *vsi;
struct ice_repr *repr;
@@ -120,6 +120,9 @@ struct ice_aqc_list_caps_elem {
#define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE 0x0076
#define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077
#define ICE_AQC_CAPS_NVM_MGMT 0x0080
+#define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092
+#define ICE_AQC_BIT_ROCEV2_LAG 0x01
+#define ICE_AQC_BIT_SRIOV_LAG 0x02
u8 major_ver;
u8 minor_ver;
@@ -2241,6 +2241,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
"%s: reset_restrict_support = %d\n", prefix,
caps->reset_restrict_support);
break;
+ case ICE_AQC_CAPS_FW_LAG_SUPPORT:
+ caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+ ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
+ prefix, caps->roce_lag);
+ caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+ ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
+ prefix, caps->sriov_lag);
+ break;
default:
/* Not one of the recognized common capabilities */
found = false;
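
The new capability case above follows the same decode pattern as the other common capabilities: firmware reports LAG support in the element's number word, and each bit is collapsed to a 0/1 flag with the double-negation idiom. A minimal standalone sketch of that decode, reusing the masks defined in the admin queue header hunk (the capability word below is made up for illustration):

    #include <stdio.h>

    #define ICE_AQC_BIT_ROCEV2_LAG 0x01
    #define ICE_AQC_BIT_SRIOV_LAG  0x02

    int main(void)
    {
            /* Hypothetical capability word as reported by firmware. */
            unsigned int number = ICE_AQC_BIT_ROCEV2_LAG | ICE_AQC_BIT_SRIOV_LAG;

            /* !! collapses any non-zero masked value to 1, matching the u8 caps flags. */
            printf("roce_lag = %d\n", !!(number & ICE_AQC_BIT_ROCEV2_LAG));
            printf("sriov_lag = %d\n", !!(number & ICE_AQC_BIT_SRIOV_LAG));
            return 0;
    }
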
@@ -4,8 +4,12 @@
/* Link Aggregation code */
#include "ice.h"
+#include "ice_lib.h"
#include "ice_lag.h"
+#define ICE_LAG_RES_SHARED BIT(14)
+#define ICE_LAG_RES_VALID BIT(15)
+
/**
* ice_lag_set_primary - set PF LAG state as Primary
* @lag: LAG info struct
@@ -225,6 +229,26 @@ static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
lag->role = ICE_LAG_NONE;
}
+/**
+ * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * @pf: PF struct
+ */
+static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
+{
+ struct ice_hw_common_caps *caps;
+
+ caps = &pf->hw.dev_caps.common_cap;
+ if (caps->roce_lag)
+ ice_set_feature_support(pf, ICE_F_ROCE_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
+
+ if (caps->sriov_lag)
+ ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
+ else
+ ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+}
+
/**
* ice_lag_changeupper_event - handle LAG changeupper event
* @lag: LAG info struct
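
Once ice_lag_init_feature_support_flag() has mirrored the capability bits into the PF feature flags, LAG-specific code paths can be gated with the existing feature helpers. A hedged sketch of such a check, assuming a hypothetical caller named ice_lag_example_gate() that is not part of this patch:

    /* Sketch only: gate an SR-IOV LAG path on the new feature flag. */
    static bool ice_lag_example_gate(struct ice_pf *pf)
    {
            if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
                    return false;   /* firmware did not advertise SR-IOV LAG */

            return true;
    }
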
@@ -264,26 +288,6 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
ice_display_lag_info(lag);
}
-/**
- * ice_lag_changelower_event - handle LAG changelower event
- * @lag: LAG info struct
- * @ptr: opaque data pointer
- *
- * ptr to be cast to netdev_notifier_changelowerstate_info
- */
-static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
-{
- struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-
- if (netdev != lag->netdev)
- return;
-
- netdev_dbg(netdev, "bonding info\n");
-
- if (!netif_is_lag_port(netdev))
- netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
-}
-
/**
* ice_lag_event_handler - handle LAG events from netdev
* @notif_blk: notifier block registered by this netdev
@@ -310,9 +314,6 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
case NETDEV_CHANGEUPPER:
ice_lag_changeupper_event(lag, ptr);
break;
- case NETDEV_CHANGELOWERSTATE:
- ice_lag_changelower_event(lag, ptr);
- break;
case NETDEV_BONDING_INFO:
ice_lag_info_event(lag, ptr);
break;
@@ -379,6 +380,8 @@ int ice_init_lag(struct ice_pf *pf)
struct ice_vsi *vsi;
int err;
+ ice_lag_init_feature_support_flag(pf);
+
pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
if (!pf->lag)
return -ENOMEM;
@@ -435,9 +438,7 @@ void ice_deinit_lag(struct ice_pf *pf)
if (lag->pf)
ice_unregister_lag_handler(lag);
- dev_put(lag->upper_netdev);
-
- dev_put(lag->peer_netdev);
+ flush_workqueue(ice_lag_wq);
kfree(lag);
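
With the dev_put() calls removed, the remaining concern at teardown is that no queued LAG work still references the lag struct once it is freed; flushing ice_lag_wq before kfree() provides that ordering. A small generic sketch of the drain-before-free pattern (the helper name and context pointer are illustrative, not from the patch):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    /* Illustrative teardown: drain pending/running work before freeing its context. */
    static void example_lag_teardown(struct workqueue_struct *wq, void *ctx)
    {
            flush_workqueue(wq);    /* wait for all queued work items to complete */
            kfree(ctx);             /* now safe: no work item can touch ctx anymore */
    }
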
@@ -3970,7 +3970,7 @@ bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
* @pf: pointer to the struct ice_pf instance
* @f: feature enum to set
*/
-static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
{
if (f < 0 || f >= ICE_F_MAX)
return;
@@ -162,6 +162,7 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
void ice_init_feature_support(struct ice_pf *pf);
bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
@@ -64,6 +64,7 @@ struct device *ice_hw_to_dev(struct ice_hw *hw)
}
static struct workqueue_struct *ice_wq;
+struct workqueue_struct *ice_lag_wq;
static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
@@ -3795,6 +3796,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
+ mutex_destroy(&pf->lag_mutex);
mutex_destroy(&pf->adev_mutex);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
@@ -3875,6 +3877,7 @@ static int ice_init_pf(struct ice_pf *pf)
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
mutex_init(&pf->adev_mutex);
+ mutex_init(&pf->lag_mutex);
INIT_HLIST_HEAD(&pf->aq_wait_list);
spin_lock_init(&pf->aq_wait_lock);
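
The new lag_mutex serializes access to the ice_lag struct hanging off the PF, per its comment in struct ice_pf. A hedged sketch of the expected locking pattern, with a hypothetical accessor name that is not part of the patch:

    /* Illustrative only: hold pf->lag_mutex around pf->lag accesses. */
    static void example_touch_lag(struct ice_pf *pf)
    {
            mutex_lock(&pf->lag_mutex);
            if (pf->lag)
                    pf->lag->role = ICE_LAG_NONE;   /* example protected access */
            mutex_unlock(&pf->lag_mutex);
    }
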
@@ -5571,7 +5574,7 @@ static struct pci_driver ice_driver = {
*/
static int __init ice_module_init(void)
{
- int status;
+ int status = -ENOMEM;
pr_info("%s\n", ice_driver_string);
pr_info("%s\n", ice_copyright);
@@ -5579,15 +5582,27 @@ static int __init ice_module_init(void)
ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
if (!ice_wq) {
pr_err("Failed to create workqueue\n");
- return -ENOMEM;
+ return status;
+ }
+
+ ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
+ if (!ice_lag_wq) {
+ pr_err("Failed to create LAG workqueue\n");
+ goto err_dest_wq;
}
status = pci_register_driver(&ice_driver);
if (status) {
pr_err("failed to register PCI driver, err %d\n", status);
- destroy_workqueue(ice_wq);
+ goto err_dest_lag_wq;
}
+ return 0;
+
+err_dest_lag_wq:
+ destroy_workqueue(ice_lag_wq);
+err_dest_wq:
+ destroy_workqueue(ice_wq);
return status;
}
module_init(ice_module_init);
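
ice_lag_wq is created with alloc_ordered_workqueue(), so LAG work items execute one at a time in queueing order, and the error path unwinds the two workqueues in reverse order of allocation. A short hedged sketch of the allocate/queue/destroy lifecycle for an ordered workqueue, as a self-contained dummy module (all names here are made up for illustration):

    #include <linux/module.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *example_lag_wq;

    static void example_lag_work_fn(struct work_struct *work)
    {
            /* An ordered workqueue runs at most one work item at a time,
             * in the order the items were queued.
             */
    }

    static DECLARE_WORK(example_lag_work, example_lag_work_fn);

    static int __init example_init(void)
    {
            example_lag_wq = alloc_ordered_workqueue("example_lag_wq", 0);
            if (!example_lag_wq)
                    return -ENOMEM;

            queue_work(example_lag_wq, &example_lag_work);
            return 0;
    }

    static void __exit example_exit(void)
    {
            /* destroy_workqueue() drains remaining work and frees the workqueue. */
            destroy_workqueue(example_lag_wq);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");
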
@@ -5602,6 +5617,7 @@ static void __exit ice_module_exit(void)
{
pci_unregister_driver(&ice_driver);
destroy_workqueue(ice_wq);
+ destroy_workqueue(ice_lag_wq);
pr_info("module unloaded\n");
}
module_exit(ice_module_exit);
@@ -277,6 +277,8 @@ struct ice_hw_common_caps {
u8 dcb;
u8 ieee_1588;
u8 rdma;
+ u8 roce_lag;
+ u8 sriov_lag;
bool nvm_update_pending_nvm;
bool nvm_update_pending_orom;