[PATCH 2/2] nvme-pci: fix error unwind in nvme_map_data

Marc Orr marcorr at google.com
Wed Jan 20 10:23:39 EST 2021


On Wed, Jan 20, 2021 at 1:49 AM Christoph Hellwig <hch at lst.de> wrote:
>
> Properly unwind step by step using refactored helpers from nvme_unmap_data
> to avoid a potential double dma_unmap on a mapping failure.
>
> Reported-by: Marc Orr <marcorr at google.com>
> Signed-off-by: Christoph Hellwig <hch at lst.de>

Should these patches go to stable?

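For anyone reading along who is less familiar with the idiom: the patch
switches these error paths to the usual staged goto-unwind ladder, where
each label undoes only the steps that have already succeeded, so nothing
is unmapped or freed twice. Below is a minimal, made-up sketch of the
general pattern -- not the nvme code; foo_ctx, foo_setup and register_foo
are illustrative names only:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct foo_ctx {
	void		*buf;
	dma_addr_t	dma;
	size_t		len;
};

/* Stand-in for "the next step that can fail"; not a real kernel API. */
static int register_foo(struct foo_ctx *ctx);

static int foo_setup(struct device *dev, struct foo_ctx *ctx, size_t len)
{
	int ret = -ENOMEM;

	ctx->len = len;
	ctx->buf = kmalloc(len, GFP_KERNEL);
	if (!ctx->buf)
		return -ENOMEM;

	ctx->dma = dma_map_single(dev, ctx->buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, ctx->dma))
		goto out_free_buf;	/* only the allocation succeeded */

	ret = register_foo(ctx);
	if (ret)
		goto out_unmap;		/* unmap first, then free */
	return 0;

out_unmap:
	dma_unmap_single(dev, ctx->dma, len, DMA_TO_DEVICE);
out_free_buf:
	kfree(ctx->buf);
	return ret;
}
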
> ---
>  drivers/nvme/host/pci.c | 28 ++++++++++++++++++----------
>  1 file changed, 18 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
> index e29ece9e4d4b8e..4e93d1b52df202 100644
> --- a/drivers/nvme/host/pci.c
> +++ b/drivers/nvme/host/pci.c
> @@ -683,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
>                         __le64 *old_prp_list = prp_list;
>                         prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma);
>                         if (!prp_list)
> -                               return BLK_STS_RESOURCE;
> +                               goto free_prps;
>                         list[iod->npages++] = prp_list;
>                         prp_list[0] = old_prp_list[i - 1];
>                         old_prp_list[i - 1] = cpu_to_le64(prp_dma);
> @@ -703,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev,
>                 dma_addr = sg_dma_address(sg);
>                 dma_len = sg_dma_len(sg);
>         }
> -
>  done:
>         cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
>         cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma);
> -
>         return BLK_STS_OK;
> -
> - bad_sgl:
> +free_prps:
> +       nvme_free_prps(dev, req);
> +       return BLK_STS_RESOURCE;
> +bad_sgl:
>         WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents),
>                         "Invalid SGL for payload:%d nents:%d\n",
>                         blk_rq_payload_bytes(req), iod->nents);
> @@ -782,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
>
>                         sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma);
>                         if (!sg_list)
> -                               return BLK_STS_RESOURCE;
> +                               goto free_sgls;
>
>                         i = 0;
>                         nvme_pci_iod_list(req)[iod->npages++] = sg_list;
> @@ -795,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev,
>         } while (--entries > 0);
>
>         return BLK_STS_OK;
> +free_sgls:
> +       nvme_free_sgls(dev, req);
> +       return BLK_STS_RESOURCE;
>  }
>
>  static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev,
> @@ -863,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
>         sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
>         iod->nents = blk_rq_map_sg(req->q, req, iod->sg);
>         if (!iod->nents)
> -               goto out;
> +               goto out_free_sg;
>
>         if (is_pci_p2pdma_page(sg_page(iod->sg)))
>                 nr_mapped = pci_p2pdma_map_sg_attrs(dev->dev, iod->sg,
> @@ -872,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
>                 nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents,
>                                              rq_dma_dir(req), DMA_ATTR_NO_WARN);
>         if (!nr_mapped)
> -               goto out;
> +               goto out_free_sg;
>
>         iod->use_sgl = nvme_pci_use_sgls(dev, req);
>         if (iod->use_sgl)
>                 ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped);
>         else
>                 ret = nvme_pci_setup_prps(dev, req, &cmnd->rw);
> -out:
>         if (ret != BLK_STS_OK)
> -               nvme_unmap_data(dev, req);
> +               goto out_dma_unmap;
> +       return BLK_STS_OK;
> +
> +out_dma_unmap:

nit: Naming this label out_sg_unmap or out_unmap_sg might be a little
clearer, since the line below it doesn't mention dma anywhere.
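i.e. the tail of the function would then read something like (these are
just the quoted lines below with the label renamed):

out_unmap_sg:
	nvme_unmap_sg(dev, req);
out_free_sg:
	mempool_free(iod->sg, dev->iod_mempool);
	return ret;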

> +       nvme_unmap_sg(dev, req);
> +out_free_sg:
> +       mempool_free(iod->sg, dev->iod_mempool);
>         return ret;
>  }

Reviewed-by: Marc Orr <marcorr at google.com>

>
> --
> 2.29.2
>
