IRQ/nvme_pci_complete_rq: NULL pointer dereference yet again
Alex G.
mr.nuke.me at gmail.com
Thu Apr 5 16:44:21 PDT 2018
On 04/05/2018 06:39 PM, Alex G. wrote:
> On 04/05/2018 06:05 PM, Keith Busch wrote:
>> Just trying to confirm a suspicion, could you retry with the following?
>
> Started the test. Will let you know the result tomorrow.
Actually, it crashed very fast. [1]
[1] http://gtech.myftp.org/~mrnuke/nvme_logs/log-20180405-1838.log
>
> Alex
>> ---
>> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
>> index b6f43b738f03..f9847a9f2973 100644
>> --- a/drivers/nvme/host/pci.c
>> +++ b/drivers/nvme/host/pci.c
>> @@ -890,12 +890,23 @@ static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
>> if (unlikely(nvmeq->cq_vector < 0)) {
>> ret = BLK_STS_IOERR;
>> spin_unlock_irq(&nvmeq->q_lock);
>> - goto out_cleanup_iod;
>> + goto out_unmap_iod;
>> }
>> __nvme_submit_cmd(nvmeq, &cmnd);
>> nvme_process_cq(nvmeq);
>> spin_unlock_irq(&nvmeq->q_lock);
>> return BLK_STS_OK;
>> +
>> +out_unmap_iod:
>> + nvme_unmap_data(dev, req);
>> + {
>> + struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
>> + iod->nents = 0;
>> + iod->sg = NULL;
>> + iod->npages = -1;
>> + }
>> + return ret;
>> +
>> out_cleanup_iod:
>> nvme_free_iod(dev, req);
>> out_free_cmd:
>> --
>>
More information about the Linux-nvme
mailing list