@@ -1228,7 +1228,7 @@ EXPORT_SYMBOL_NS_GPL(nvme_passthru_end, NVME_TARGET_PASSTHRU);
* The host should send Keep Alive commands at half of the Keep Alive Timeout
* accounting for transport roundtrip times [..].
*/
-static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
+unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
{
unsigned long delay = ctrl->kato * HZ / 2;
@@ -1242,6 +1242,7 @@ static unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl)
delay /= 2;
return delay;
}
+EXPORT_SYMBOL_GPL(nvme_keep_alive_work_period);
static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
{
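For context: the helper now being exported returns half of the Keep Alive Timeout converted to jiffies, and the delay /= 2 visible in the second hunk halves it again (the guarding condition falls outside the hunk; upstream it checks the controller's Traffic Based Keep Alive attribute). A minimal userspace sketch of the same arithmetic, with HZ, the kato value, and the TBKAS flag all assumed for illustration:

#include <stdio.h>

#define HZ 250	/* assumed tick rate, for illustration only */

/* Mirrors the kernel helper: half the KATO in jiffies, halved
 * again when traffic-based keep alive is in effect. */
static unsigned long keep_alive_work_period(unsigned int kato, int tbkas)
{
	unsigned long delay = kato * HZ / 2;

	if (tbkas)
		delay /= 2;
	return delay;
}

int main(void)
{
	/* kato = 5s: 625 jiffies (2.5s); 312 jiffies (~1.25s) with TBKAS */
	printf("%lu %lu\n", keep_alive_work_period(5, 0),
	       keep_alive_work_period(5, 1));
	return 0;
}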
@@ -820,6 +820,7 @@ void nvme_unfreeze(struct nvme_ctrl *ctrl);
void nvme_wait_freeze(struct nvme_ctrl *ctrl);
int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
void nvme_start_freeze(struct nvme_ctrl *ctrl);
+unsigned long nvme_keep_alive_work_period(struct nvme_ctrl *ctrl);
static inline enum req_op nvme_req_op(struct nvme_command *cmd)
{
@@ -571,13 +571,21 @@ static void nvme_tcp_init_recv_ctx(struct nvme_tcp_queue *queue)
queue->ddgst_remaining = 0;
}
+/*
+ * Error recovery needs to be started after the KATO has expired,
+ * so always delay until the next KATO interval before starting
+ * error recovery.
+ */
static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
{
+ unsigned long delay = nvme_keep_alive_work_period(ctrl);
+
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return;
- dev_warn(ctrl->device, "starting error recovery\n");
- queue_delayed_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work, 0);
+ dev_warn(ctrl->device, "starting error recovery in %lu seconds\n",
+ delay / HZ);
+ queue_delayed_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work, delay);
}
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
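With this change err_work is no longer kicked off immediately; it is queued with the keep-alive work period as its delay so that, per the comment above, recovery only begins once the KATO has had a chance to expire. With a kato of 5 seconds and no TBKAS, for example, delay is 2.5 s worth of jiffies and the integer division in the dev_warn prints "starting error recovery in 2 seconds". A reduced, self-contained sketch of the delayed-work pattern follows; the demo_* names and the fixed 2 * HZ delay are hypothetical, and the real code queues onto nvme_reset_wq rather than the system workqueue:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

/* Stands in for nvme_tcp_error_recovery_work() in this sketch. */
static void demo_recover(struct work_struct *work)
{
	pr_info("demo: starting error recovery now\n");
}

static DECLARE_DELAYED_WORK(demo_err_work, demo_recover);

static int __init demo_init(void)
{
	/* Defer the handler by 2 s worth of jiffies instead of
	 * running it at once, mirroring the change from a delay of
	 * 0 to queue_delayed_work(..., delay) above. */
	schedule_delayed_work(&demo_err_work, 2 * HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_err_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");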