@@ -623,6 +623,11 @@ struct rtl8169_private {
} wk;
unsigned supports_gmii:1;
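+ /* Dynamic ASPM state: last chip-side setting, the periodic toggle work,
+ * the per-interval packet counter, and a mutex serializing the Cfg9346
+ * config unlock/lock window (taken on unlock, released on lock).
+ */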
+ unsigned rtl_aspm_enabled:1;
+ struct delayed_work aspm_toggle;
+ atomic_t aspm_packet_count;
+ struct mutex config_lock;
+
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@@ -671,12 +676,14 @@ static inline struct device *tp_to_dev(struct rtl8169_private *tp)
static void rtl_lock_config_regs(struct rtl8169_private *tp)
{
RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ mutex_unlock(&tp->config_lock);
}
static void rtl_unlock_config_regs(struct rtl8169_private *tp)
{
+ mutex_lock(&tp->config_lock);
RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
}
static void rtl_pci_commit(struct rtl8169_private *tp)
@@ -2680,6 +2687,8 @@ static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable)
if (!pcie_aspm_support_enabled() || !pcie_aspm_capable(pdev))
return;
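+ /* Remember the chip-side state so the toggle work only rewrites the
+ * config registers when it actually changes.
+ */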
+ tp->rtl_aspm_enabled = enable;
+
if (enable) {
RTL_W8(tp, Config5, RTL_R8(tp, Config5) | ASPM_en);
RTL_W8(tp, Config2, RTL_R8(tp, Config2) | ClkReqEn);
@@ -4430,6 +4439,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp,
dirty_tx = tp->dirty_tx;
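+ /* account pending Tx descriptors towards the dynamic ASPM traffic counter */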
+ atomic_add(tp->cur_tx - dirty_tx, &tp->aspm_packet_count);
while (READ_ONCE(tp->cur_tx) != dirty_tx) {
unsigned int entry = dirty_tx % NUM_TX_DESC;
u32 status;
@@ -4574,6 +4584,8 @@ static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, int budget
rtl8169_mark_to_asic(desc);
}
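+ /* account the packets received in this poll towards the dynamic ASPM traffic counter */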
+ atomic_add(count, &tp->aspm_packet_count);
+
return count;
}
@@ -4681,8 +4693,39 @@ static int r8169_phy_connect(struct rtl8169_private *tp)
return 0;
}
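+ /* Dynamic ASPM heuristic: every ASPM_TOGGLE_INTERVAL milliseconds the
+ * toggle work checks how much traffic was handled in the last interval.
+ * Chip-side ASPM stays enabled while at most ASPM_PACKET_THRESHOLD
+ * packets were seen and is disabled under heavier load.
+ */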
+#define ASPM_PACKET_THRESHOLD 10
+#define ASPM_TOGGLE_INTERVAL 1000
+
+static void rtl8169_aspm_toggle(struct work_struct *work)
+{
+ struct rtl8169_private *tp = container_of(work, struct rtl8169_private,
+ aspm_toggle.work);
+ int packet_count;
+ bool enable;
+
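+ /* atomically fetch and reset the packets counted since the last run */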
+ packet_count = atomic_xchg(&tp->aspm_packet_count, 0);
+
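+ /* Toggle the chip-side bits only while the PCIe link still has ASPM
+ * enabled; if ASPM was disabled elsewhere, make sure the chip side is
+ * switched off as well.
+ */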
+ if (pcie_aspm_enabled(tp->pci_dev)) {
+ enable = packet_count <= ASPM_PACKET_THRESHOLD;
+
+ if (tp->rtl_aspm_enabled != enable) {
+ rtl_unlock_config_regs(tp);
+ rtl_hw_aspm_clkreq_enable(tp, enable);
+ rtl_lock_config_regs(tp);
+ }
+ } else if (tp->rtl_aspm_enabled) {
+ rtl_unlock_config_regs(tp);
+ rtl_hw_aspm_clkreq_enable(tp, false);
+ rtl_lock_config_regs(tp);
+ }
+
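+ /* re-arm the periodic check */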
+ schedule_delayed_work(&tp->aspm_toggle, msecs_to_jiffies(ASPM_TOGGLE_INTERVAL));
+}
+
static void rtl8169_down(struct rtl8169_private *tp)
{
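+ /* stop the ASPM toggle work before tearing the device down */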
+ cancel_delayed_work_sync(&tp->aspm_toggle);
+
/* Clear all task flags */
bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
@@ -4709,6 +4752,10 @@ static void rtl8169_up(struct rtl8169_private *tp)
rtl_reset_work(tp);
phy_start(tp->phydev);
+
+ /* pcie_aspm_capable may change after system resume */
+ if (pcie_aspm_support_enabled() && pcie_aspm_capable(tp->pci_dev))
+ schedule_delayed_work(&tp->aspm_toggle, 0);
}
static int rtl8169_close(struct net_device *dev)
@@ -5297,11 +5344,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* Disable ASPM completely as that cause random device stop working
- * problems as well as full system hangs for some PCIe devices users.
- */
- pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
-
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
@@ -5331,6 +5373,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
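+ /* protects the config-register unlock/lock window, see rtl_unlock_config_regs() */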
+ mutex_init(&tp->config_lock);
+
tp->mmio_addr = pcim_iomap_table(pdev)[region];
xid = (RTL_R32(tp, TxConfig) >> 20) & 0xfcf;
@@ -5368,6 +5412,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&tp->wk.work, rtl_task);
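+ /* dynamic ASPM: set up the toggle work and start with an empty traffic counter */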
+ INIT_DELAYED_WORK(&tp->aspm_toggle, rtl8169_aspm_toggle);
+
+ atomic_set(&tp->aspm_packet_count, 0);
+
rtl_init_mac_address(tp);
dev->ethtool_ops = &rtl8169_ethtool_ops;