[PATCH V1] nvme-pci: Fix NULL pointer dereference in nvme_pci_prp_iter_next

Leon Romanovsky leon at kernel.org
Tue Feb 3 01:42:47 PST 2026


On Mon, Feb 02, 2026 at 11:59:04AM -0700, Keith Busch wrote:
> On Mon, Feb 02, 2026 at 06:36:24PM +0100, Christoph Hellwig wrote:
> > diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> > index 2a52cf46d960..f944b747e1bd 100644
> > --- a/drivers/nvme/host/pci.c
> > +++ b/drivers/nvme/host/pci.c
> > @@ -816,6 +816,22 @@ static void nvme_unmap_data(struct request *req)
> >  		nvme_free_descriptors(req);
> >  }
> >  
> > +static bool nvme_pci_alloc_dma_vecs(struct request *req,
> > +		struct blk_dma_iter *iter)
> > +{
> > +	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> > +	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
> > +
> > +	iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
> > +			GFP_ATOMIC);
> > +	if (!iod->dma_vecs)
> > +		return false;
> > +	iod->dma_vecs[0].addr = iter->addr;
> > +	iod->dma_vecs[0].len = iter->len;
> > +	iod->nr_dma_vecs = 1;
> > +	return true;
> > +}
> > +
> >  static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
> >  		struct blk_dma_iter *iter)
> >  {
> > @@ -826,6 +842,8 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
> >  	if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
> >  		return false;
> >  	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
> > +		if (!iod->nr_dma_vecs && !nvme_pci_alloc_dma_vecs(req, iter))
> > +			return false;
> 
> In the case where this iteration caused dma_need_unmap() to toggle to
> true, this is the iteration that allocates the dma_vecs, and it
> initializes the first entry to this iter. But the next lines proceed to
> save this iter in the next index as well, so it is accounted for twice
> and will get unmapped twice on completion.
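
If I read the first hunk right, the toggling iteration ends up doing
roughly this (only a sketch of the flow, not the literal code):

	/* dma_need_unmap() just became true, iod->nr_dma_vecs is still 0 */
	nvme_pci_alloc_dma_vecs(req, iter);	/* dma_vecs[0] = iter, nr_dma_vecs = 1 */

	/* ...and the pre-existing lines below the hunk then run as well */
	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;	/* dma_vecs[1] = same iter */
	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
	iod->nr_dma_vecs++;					/* nr_dma_vecs = 2 */

i.e. the same DMA range is recorded in both slot 0 and slot 1.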
> 
> Also, if the allocation fails, we should set iter->status to
> BLK_STS_RESOURCE so the caller knows why the iteration can't continue.
> Otherwise, the caller will think the request is badly formed if you
> return false from here without setting iter->status.
> 
> Here's my quick take. Boot tested with swiotlb enabled, but I haven't
> tried to test the scenario where dma_need_unmap() changes mid-request.
> ---
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index 9fc4a60280a07..233378faab9bd 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -816,6 +816,28 @@ static void nvme_unmap_data(struct request *req)
>  		nvme_free_descriptors(req);
>  }
>  
> +static bool nvme_pci_prp_save_mapping(struct blk_dma_iter *iter,
> +				      struct request *req)
> +{
> +	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
> +
> +	if (!iod->dma_vecs) {
> +		struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
> +
> +		iod->dma_vecs = mempool_alloc(nvmeq->dev->dmavec_mempool,
> +				GFP_ATOMIC);
> +		if (!iod->dma_vecs) {
> +			iter->status = BLK_STS_RESOURCE;
> +			return false;
> +		}
> +	}
> +
> +	iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
> +	iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
> +	iod->nr_dma_vecs++;
> +	return true;
> +}
> +
>  static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
>  		struct blk_dma_iter *iter)
>  {
> @@ -825,11 +847,8 @@ static bool nvme_pci_prp_iter_next(struct request *req, struct device *dma_dev,
>  		return true;
>  	if (!blk_rq_dma_map_iter_next(req, dma_dev, iter))
>  		return false;
> -	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev)) {
> -		iod->dma_vecs[iod->nr_dma_vecs].addr = iter->addr;
> -		iod->dma_vecs[iod->nr_dma_vecs].len = iter->len;
> -		iod->nr_dma_vecs++;
> -	}
> +	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev))

Can dev->dma_skip_sync be modified in parallel with this check?
If so, dma_need_unmap() may return different results depending on
when it is invoked.
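
To make the concern concrete, something like the following interleaving
would be possible if dma_skip_sync can change while a request is being
mapped (again only a sketch, assuming dma_need_unmap() keys off
dev->dma_skip_sync):

	/* mapping the first segments of a request */
	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev))
		/* false: nothing is saved, iod->dma_vecs stays NULL */
		;

	/* dev->dma_skip_sync changes underneath us */

	/* mapping a later segment of the same request */
	if (!dma_use_iova(&iod->dma_state) && dma_need_unmap(dma_dev))
		/* now true: without the allocation above this is the
		 * NULL iod->dma_vecs dereference from $SUBJECT */
		;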

> +		return nvme_pci_prp_save_mapping(iter, req);

Thanks


