
[Oracular/Unstable,3/4] UBUNTU: SAUCE: nvme-tcp: delay error recovery until the next KATO interval

Message ID 20240621154442.2004665-4-aleksandr.mikhalitsyn@canonical.com
State New
Series UBUNTU: SAUCE: nvme-tcp: fix retry logic

Commit Message

Aleksandr Mikhalitsyn June 21, 2024, 3:44 p.m. UTC
From: Hannes Reinecke <hare@suse.de>

Section 3.9 of the NVMe base spec states that:

  If a Keep Alive Timer expires:
    a) the controller shall ...

       and

    b) the host assumes all outstanding commands are not completed
       and re-issues commands as appropriate.

which means that we should _not_ retry any commands until KATO has
expired (or the equivalent of the default KATO timeout if KATO
is not active).

So delay error recovery until a KATO timeout triggers.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Acked-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Alexander Mikhalitsyn <aleksandr.mikhalitsyn@canonical.com>
Tested-by: Julian Pelizäus <julian.pelizaeus@canonical.com>
---
 drivers/nvme/host/core.c |  3 ++-
 drivers/nvme/host/nvme.h |  1 +
 drivers/nvme/host/tcp.c  | 12 ++++++++++--
 3 files changed, 13 insertions(+), 3 deletions(-)
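
For reference, the delay applied in nvme_tcp_error_recovery() below is
whatever nvme_keep_alive_work_period() returns: half the KATO converted
to jiffies, halved again when Traffic Based Keep Alive is in use. A
minimal userspace sketch of that arithmetic (HZ, the kato value and the
tbkas flag here are illustrative stand-ins for the kernel's HZ,
ctrl->kato and the controller's TBKAS attribute, not driver code):

  #include <stdbool.h>
  #include <stdio.h>

  #define HZ 250  /* example CONFIG_HZ value */

  /*
   * Mirrors the arithmetic in nvme_keep_alive_work_period() below: half
   * the KATO converted to jiffies, halved again when Traffic Based Keep
   * Alive (TBKAS) is active.
   */
  static unsigned long keep_alive_work_period(unsigned int kato, bool tbkas)
  {
          unsigned long delay = kato * HZ / 2;

          if (tbkas)
                  delay /= 2;
          return delay;
  }

  int main(void)
  {
          unsigned int kato = 5;  /* KATO in seconds */
          unsigned long delay = keep_alive_work_period(kato, false);

          /* err_work was previously queued with a delay of 0; with this
           * patch it is queued this many jiffies later instead. */
          printf("error recovery delayed by %lu jiffies (~%lu s)\n",
                 delay, delay / HZ);
          return 0;
  }

With kato = 5 seconds and HZ = 250 this prints a delay of 625 jiffies
(about 2 seconds): err_work now fires roughly half a keep-alive period
after the error instead of immediately.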

Patch

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 782090ce0bc1..bb15ce275184 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1228,7 +1228,7 @@  EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
  *   The host should send Keep Alive commands at half of the Keep Alive Timeout
  *   accounting for transport roundtrip times [..].
  */
-static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
 {
 	unsigned long delay = ctrl->kato * HZ / 2;
 
@@ -1242,6 +1242,7 @@  static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
 		delay /= 2;
 	return delay;
 }
+EXPORT_SYMBOL_GPL(nvme_keep_alive_work_period);
 
 static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 {
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index f3a41133ac3f..970cd5d9d3a4 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -820,6 +820,7 @@  void nvme_unfreeze(struct nvme_ctrl *ctrl);
 void nvme_wait_freeze(struct nvme_ctrl *ctrl);
 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
 void nvme_start_freeze(struct nvme_ctrl *ctrl);
+unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl);
 
 static inline enum req_op nvme_req_op(struct nvme_command *cmd)
 {
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 1d67b8b1e98b..35db9de22939 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -571,13 +571,21 @@  static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
 	queue->ddgst_remaining = 0;
 }
 
+/*
+ * Error recovery needs to be started after KATO expired,
+ * always delay until the next KATO interval before
+ * starting error recovery.
+ */
 static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
 {
+	unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
 	if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
 		return;
 
-	dev_warn(ctrl->device, "starting error recovery\n");
-	queue_delayed_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work, 0);
+	dev_warn(ctrl->device, "starting error recovery in %lu seconds\n",
+		 delay / HZ);
+	queue_delayed_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work, delay);
 }
 
 static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,