[PATCH V2 3/4] nvme: tcp: complete non-IO requests atomically
Chao Leng
lengchao at huawei.com
Tue Oct 20 05:04:29 EDT 2020
On 2020/10/20 16:53, Ming Lei wrote:
> During the controller's CONNECTING state, admin/fabrics/connect requests
> are submitted to recover the controller, and we allow these requests to
> be aborted directly in the timeout handler so they do not block the
> setup procedure.
>
> So a timeout vs. normal completion race exists on these requests, since
> the admin/fabrics/connect queues won't be shut down before the timeout
> is handled during the CONNECTING state.
>
> Add atomic completion for requests from the connect/fabrics/admin queues
> to avoid the race.
>
> CC: Chao Leng <lengchao at huawei.com>
> Cc: Sagi Grimberg <sagi at grimberg.me>
> Reported-by: Yi Zhang <yi.zhang at redhat.com>
> Tested-by: Yi Zhang <yi.zhang at redhat.com>
> Signed-off-by: Ming Lei <ming.lei at redhat.com>
> ---
> drivers/nvme/host/tcp.c | 40 +++++++++++++++++++++++++++++++++++++---
> 1 file changed, 37 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
> index d6a3e1487354..7e85bd4a8d1b 100644
> --- a/drivers/nvme/host/tcp.c
> +++ b/drivers/nvme/host/tcp.c
> @@ -30,6 +30,8 @@ static int so_priority;
> module_param(so_priority, int, 0644);
> MODULE_PARM_DESC(so_priority, "nvme tcp socket optimize priority");
>
> +#define REQ_STATE_COMPLETE 0
> +
> enum nvme_tcp_send_state {
> NVME_TCP_SEND_CMD_PDU = 0,
> NVME_TCP_SEND_H2C_PDU,
> @@ -56,6 +58,8 @@ struct nvme_tcp_request {
> size_t offset;
> size_t data_sent;
> enum nvme_tcp_send_state state;
> +
> + unsigned long comp_state;
I do not think adding another state field is a good idea; it is
redundant with rq->state.
In the teardown process, deleting the timer and canceling the timeout
work after the queues are quiesced may be a better option.
I will send the patch later.
That patch has already been tested with RoCE for more than a week.
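Roughly like this (an untested sketch of the idea, with a placeholder
name; blk_sync_queue() already pairs del_timer_sync() on the queue's
timeout timer with cancel_work_sync() on its timeout work):

static void nvme_sync_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	/* quiesce the timeout machinery of every namespace queue */
	down_read(&ctrl->namespaces_rwsem);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_sync_queue(ns->queue);
	up_read(&ctrl->namespaces_rwsem);

	/* ditto for the admin queue; fabrics_q/connect_q would need the same */
	blk_sync_queue(ctrl->admin_q);
}

Called from the teardown/error-recovery path right after the queues are
quiesced, this guarantees that no timeout handler is still running and
none can fire before the outstanding requests are cancelled, so the
normal completion path and the timeout path can no longer race.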
> };
>
> enum nvme_tcp_queue_flags {
> @@ -469,6 +473,33 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
> queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
> }
>
> +/*
> + * Requests originating from the admin, fabrics and connect_q queues have
> + * to be completed atomically because we don't otherwise cover the race
> + * between timeout and normal completion for these queues.
> + */
> +static inline bool nvme_tcp_need_atomic_complete(struct request *rq)
> +{
> + return !rq->rq_disk;
> +}
> +
> +static inline void nvme_tcp_clear_rq_complete(struct request *rq)
> +{
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> + if (unlikely(nvme_tcp_need_atomic_complete(rq)))
> + clear_bit(REQ_STATE_COMPLETE, &req->comp_state);
> +}
> +
> +static inline bool nvme_tcp_mark_rq_complete(struct request *rq)
> +{
> + struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq);
> +
> + if (unlikely(nvme_tcp_need_atomic_complete(rq)))
> + return !test_and_set_bit(REQ_STATE_COMPLETE, &req->comp_state);
> + return true;
> +}
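
For reference, this is the ownership rule the two helpers above set up,
condensed (just restating the hunks, not new behavior):

	/* re-armed per dispatch in queue_rq(), before blk_mq_start_request() */
	clear_bit(REQ_STATE_COMPLETE, &req->comp_state);

	/*
	 * In both the receive path and the timeout handler:
	 * test_and_set_bit() is an atomic read-modify-write, so exactly one
	 * of the racing contexts observes the 0 -> 1 transition and owns the
	 * completion; the loser must not touch the request again.
	 */
	if (!test_and_set_bit(REQ_STATE_COMPLETE, &req->comp_state))
		/* winner: complete the request */;
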
> +
> static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
> struct nvme_completion *cqe)
> {
> @@ -483,7 +514,8 @@ static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
> return -EINVAL;
> }
>
> - if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
> + if (nvme_tcp_mark_rq_complete(rq) &&
> + !nvme_try_complete_req(rq, cqe->status, cqe->result))
> nvme_complete_rq(rq);
> queue->nr_cqe++;
>
> @@ -674,7 +706,8 @@ static inline void nvme_tcp_end_request(struct request *rq, u16 status)
> {
> union nvme_result res = {};
>
> - if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
> + if (nvme_tcp_mark_rq_complete(rq) &&
> + !nvme_try_complete_req(rq, cpu_to_le16(status << 1), res))
> nvme_complete_rq(rq);
> }
>
> @@ -2174,7 +2207,7 @@ static void nvme_tcp_complete_timed_out(struct request *rq)
> /* fence other contexts that may complete the command */
> mutex_lock(&to_tcp_ctrl(ctrl)->teardown_lock);
> nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));
> - if (!blk_mq_request_completed(rq)) {
> + if (nvme_tcp_mark_rq_complete(rq) && !blk_mq_request_completed(rq)) {
> nvme_req(rq)->status = NVME_SC_HOST_ABORTED_CMD;
> blk_mq_complete_request(rq);
> }
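
My reading of why the ordering in this hunk is safe, annotated:

	/* tear down the socket first: after this the receive path can no
	 * longer deliver a normal completion for the request */
	nvme_tcp_stop_queue(ctrl, nvme_tcp_queue_id(req->queue));

	/* then claim the request; if the receive path already won the bit,
	 * the mark and blk_mq_request_completed() keep us from completing
	 * it a second time */
	if (nvme_tcp_mark_rq_complete(rq) && !blk_mq_request_completed(rq))
		blk_mq_complete_request(rq);
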
> @@ -2315,6 +2348,7 @@ static blk_status_t nvme_tcp_queue_rq(struct blk_mq_hw_ctx *hctx,
> if (unlikely(ret))
> return ret;
>
> + nvme_tcp_clear_rq_complete(rq);
> blk_mq_start_request(rq);
>
> nvme_tcp_queue_request(req, true, bd->last);
>