[PATCH 5/6] nvme-rdma: fix timeout handler
Chao Leng
lengchao at huawei.com
Mon Aug 3 06:25:05 EDT 2020
On 2020/8/3 14:58, Sagi Grimberg wrote:
> Currently we check if the controller state != LIVE, and
> we directly fail the command under the assumption that this
> is the connect command or an admin command within the
> controller initialization sequence.
>
> This is wrong; we need to check whether the request, if left
> uncompleted, risks blocking controller setup/teardown, and only
> then fail it.
>
> The logic should be:
> - RESETTING, only fail fabrics/admin commands, otherwise
>   controller teardown will block; for anything else reset the timer
>   and come back again.
> - CONNECTING, if this is a connect (or an admin command), we fail
> right away (unblock controller initialization), otherwise we
> treat it like anything else.
> - otherwise trigger error recovery and reset the timer (the
> error handler will take care of completing/delaying it).
>
> Signed-off-by: Sagi Grimberg <sagi at grimberg.me>
> ---
> drivers/nvme/host/rdma.c | 67 +++++++++++++++++++++++++++++-----------
> 1 file changed, 49 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
> index 44c76ffbb264..a58c6deaf691 100644
> --- a/drivers/nvme/host/rdma.c
> +++ b/drivers/nvme/host/rdma.c
> @@ -1180,6 +1180,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
> if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
> return;
>
> + dev_warn(ctrl->ctrl.device, "starting error recovery\n");
> queue_work(nvme_reset_wq, &ctrl->err_work);
> }
>
> @@ -1946,6 +1947,22 @@ static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
> return 0;
> }
>
> +static void nvme_rdma_complete_timed_out(struct request *rq)
> +{
> + struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
> + struct nvme_rdma_queue *queue = req->queue;
> + struct nvme_rdma_ctrl *ctrl = queue->ctrl;
> +
> + /* fence other contexts that may complete the command */
> + flush_work(&ctrl->err_work);
> + nvme_rdma_stop_queue(queue);
This may run concurrently with error recovery, which can cause trouble:
nvme_rdma_stop_queue may return while the queue is not actually stopped
yet, because error recovery is still in the middle of stopping it.
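
For example, the stop-queue path today looks roughly like this
(simplified sketch, not a patch):

static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	rdma_disconnect(queue->cm_id);
	ib_drain_qp(queue->qp);
}

static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	/*
	 * Only the context that clears the LIVE bit does the disconnect
	 * and drain; any concurrent caller returns right away, even if
	 * the other context has not finished draining the queue yet.
	 */
	if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
		return;
	__nvme_rdma_stop_queue(queue);
}

So if the teardown path has already cleared NVME_RDMA_Q_LIVE and is
still inside __nvme_rdma_stop_queue(), the call here returns early and
the blk_mq_request_completed() check below can race with the drain
completing the command. One possible way to close this (just an idea,
the queue_lock below does not exist today) is to serialize stop_queue
with a per-queue mutex, so the timeout path only returns once the queue
is really stopped:

static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue)
{
	mutex_lock(&queue->queue_lock);		/* hypothetical lock */
	if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags))
		__nvme_rdma_stop_queue(queue);
	mutex_unlock(&queue->queue_lock);
}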
> + if (blk_mq_request_completed(rq))
> + return;
> + nvme_req(rq)->flags |= NVME_REQ_CANCELLED;
> + nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
> + blk_mq_complete_request(rq);
> +}
> +
> static enum blk_eh_timer_return
> nvme_rdma_timeout(struct request *rq, bool reserved)
> {
> @@ -1956,29 +1973,43 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
> dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n",
> rq->tag, nvme_rdma_queue_idx(queue));
>
> - /*
> - * Restart the timer if a controller reset is already scheduled. Any
> - * timed out commands would be handled before entering the connecting
> - * state.
> - */
> - if (ctrl->ctrl.state == NVME_CTRL_RESETTING)
> + switch (ctrl->ctrl.state) {
> + case NVME_CTRL_RESETTING:
> + if (!nvme_rdma_queue_idx(queue)) {
> + /*
> + * if we are in teardown we must complete immediately
> + * because we may block the teardown sequence (e.g.
> + * nvme_disable_ctrl timed out).
> + */
> + nvme_rdma_complete_timed_out(rq);
> + return BLK_EH_DONE;
> + }
> + /*
> + * Restart the timer if a controller reset is already scheduled.
> + * Any timed out commands would be handled before entering the
> + * connecting state.
> + */
> return BLK_EH_RESET_TIMER;
> -
> - if (ctrl->ctrl.state != NVME_CTRL_LIVE) {
> + case NVME_CTRL_CONNECTING:
> + if (reserved || !nvme_rdma_queue_idx(queue)) {
> + /*
> + * if we are connecting we must complete immediately
> + * connect (reserved) or admin requests because we may
> + * block controller setup sequence.
> + */
> + nvme_rdma_complete_timed_out(rq);
> + return BLK_EH_DONE;
> + }
> + /* fallthru */
> + default:
> /*
> - * Teardown immediately if controller times out while starting
> - * or we are already started error recovery. all outstanding
> - * requests are completed on shutdown, so we return BLK_EH_DONE.
> + * every other state should trigger the error recovery
> + * which will be handled by the flow and controller state
> + * machine
> */
> - flush_work(&ctrl->err_work);
> - nvme_rdma_teardown_io_queues(ctrl, false);
> - nvme_rdma_teardown_admin_queue(ctrl, false);
> - return BLK_EH_DONE;
> + nvme_rdma_error_recovery(ctrl);
> }
>
> - dev_warn(ctrl->ctrl.device, "starting error recovery\n");
> - nvme_rdma_error_recovery(ctrl);
> -
> return BLK_EH_RESET_TIMER;
> }
>
>