[PATCH 5/6] nvme-rdma: fix timeout handler

Sagi Grimberg sagi at grimberg.me
Wed Aug 5 04:17:00 EDT 2020


>> How is it not safe? When flush_work returns, the work is guaranteed
>> to have finished execution, and we only do that for states
>> RESETTING/CONNECTING, which means that it has either already started
>> or already finished.
> 
> Even though the state is NVME_CTRL_RESETTING, that does not mean the work
> has already been queued (started) or finished. There is a window between
> the state change and queuing the work.
> 
> Like this:
> static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
> {
>      if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
>          return;
> --------------------------------
> A hard interrupt may fire here, and the timeout handler may then call
> flush_work at this point. As a result, error recovery and
> nvme_rdma_complete_timed_out may race to stop the queue: error recovery
> may cancel the request, or nvme_rdma_complete_timed_out may complete it,
> while the queue has not actually been stopped. That leads to abnormal
> behavior.
> --------------------------------
>      queue_work(nvme_reset_wq, &ctrl->err_work);
> }
> 
> Also, although the probability is very low, reset work and
> nvme_rdma_complete_timed_out may likewise race to stop the queue, which
> can cause the same kind of problem.

I see your point.
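
IOW, the problematic interleaving would be something like this (just a
rough illustration of the window you describe, not an actual trace):

  timeout path                          error recovery path
  ------------                          -------------------
                                        nvme_change_ctrl_state(RESETTING)
  flush_work(&ctrl->err_work)
    (returns immediately, the work
     is not queued yet)
  nvme_rdma_stop_queue(queue)
  complete/cancel the request
                                        queue_work(..., &ctrl->err_work)
                                        err_work tears down the queues and
                                        cancels requests again

so flush_work() alone does not fence the error recovery path.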

We can serialize ctrl teardown with a lock (similar to
dev->shutdown_lock that we have in pci).

Something like:
--
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 96fa3185d123..8c8f7492cab4 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1168,11 +1168,13 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
         struct nvme_rdma_ctrl *ctrl = container_of(work,
                         struct nvme_rdma_ctrl, err_work);

+       mutex_lock(&ctrl->shutdown_lock);
         nvme_stop_keep_alive(&ctrl->ctrl);
         nvme_rdma_teardown_io_queues(ctrl, false);
         nvme_start_queues(&ctrl->ctrl);
         nvme_rdma_teardown_admin_queue(ctrl, false);
         blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+       mutex_unlock(&ctrl->shutdown_lock);

         if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                 /* state change failure is ok if we started ctrl delete */
@@ -1964,7 +1966,9 @@ static void nvme_rdma_complete_timed_out(struct request *rq)

         /* fence other contexts that may complete the command */
         flush_work(&ctrl->err_work);
+       mutex_lock(&ctrl->shutdown_lock);
         nvme_rdma_stop_queue(queue);
+       mutex_unlock(&ctrl->shutdown_lock);
         if (blk_mq_request_completed(rq))
                 return;
         nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
@@ -2226,6 +2230,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
         cancel_work_sync(&ctrl->err_work);
         cancel_delayed_work_sync(&ctrl->reconnect_work);

+       mutex_lock(&ctrl->shutdown_lock);
         nvme_rdma_teardown_io_queues(ctrl, shutdown);
         blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
         if (shutdown)
@@ -2233,6 +2238,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
         else
                 nvme_disable_ctrl(&ctrl->ctrl);
         nvme_rdma_teardown_admin_queue(ctrl, shutdown);
+       mutex_unlock(&ctrl->shutdown_lock);
  }

  static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl)
--
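
Note the sketch above also assumes the lock itself is added to
struct nvme_rdma_ctrl and initialized before the ctrl goes live,
e.g. in nvme_rdma_create_ctrl() (untested, just to complete the sketch):
--
 struct nvme_rdma_ctrl {
         ...
+        struct mutex            shutdown_lock;
 };

 static struct nvme_ctrl *nvme_rdma_create_ctrl(...)
 {
         ...
+        mutex_init(&ctrl->shutdown_lock);
         ...
 }
--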


