[PATCH v9 2/8] iov_iter: introduce iov_iter_get_pages_[alloc_]flags()

John Hubbard jhubbard at nvidia.com
Mon Sep 5 16:21:13 PDT 2022


On 8/25/22 08:24, Logan Gunthorpe wrote:
> Add iov_iter_get_pages_flags() and iov_iter_get_pages_alloc_flags()
> which take a flags argument that is passed to get_user_pages_fast().
> 
> This is so that FOLL_PCI_P2PDMA can be passed when appropriate.
> 
> Signed-off-by: Logan Gunthorpe <logang at deltatee.com>
> ---
>  include/linux/uio.h |  6 ++++++
>  lib/iov_iter.c      | 40 ++++++++++++++++++++++++++++++++++++----
>  2 files changed, 42 insertions(+), 4 deletions(-)
> 

Yes.
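
To make the intended use concrete, a P2PDMA-aware caller of the alloc
variant would look roughly like the sketch below (the function name and
the DMA-mapping step are hypothetical; only the helper itself and
FOLL_PCI_P2PDMA come from this series):

static ssize_t map_user_iter_p2pdma(struct iov_iter *iter, size_t maxsize)
{
	struct page **pages;
	size_t offset;
	ssize_t bytes;
	long i;

	/* Allow P2PDMA pages in addition to ordinary user memory. */
	bytes = iov_iter_get_pages_alloc_flags(iter, &pages, maxsize,
					       &offset, FOLL_PCI_P2PDMA);
	if (bytes <= 0)
		return bytes ? bytes : -EFAULT;

	/* ... set up the DMA mapping from pages[], offset and bytes ... */

	/* Drop the gup references and the array the helper allocated. */
	for (i = 0; i < DIV_ROUND_UP(bytes + offset, PAGE_SIZE); i++)
		put_page(pages[i]);
	kvfree(pages);

	return bytes;
}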

I also don't object to Christoph's additional request to refactor and
rename around iov_iter_get_pages2() and such, but this version here is
perfectly good, so please feel free to add:

Reviewed-by: John Hubbard <jhubbard at nvidia.com>

Looking ahead, interestingly enough, this approach to the wrappers turns
out to be pretty close to what I posted in patch 4/7 of my "convert most
filesystems to pin_user_pages_fast()" series [1]. There I tried to avoid
passing gup_flags through, because FOLL_PIN is, as a mild design
preference, supposed to remain internal to gup.c. But given that you
need to pass the flags anyway, I think I can simply build on top of your
approach and pass FOLL_PIN in via your new gup_flags arg.
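
Concretely (and very tentatively), the caller side of that would look
something like the fragment below. Note that get_user_pages_fast()
rejects FOLL_PIN today, so the gup call inside
__iov_iter_get_pages_alloc() would first have to learn to honor it, for
example by switching to pin_user_pages_fast() when FOLL_PIN is set:

	/*
	 * Tentative sketch only: assumes the iov_iter/gup side is later
	 * taught to accept FOLL_PIN via the new gup_flags argument.
	 */
	bytes = iov_iter_get_pages_alloc_flags(iter, &pages, maxsize,
					       &offset, FOLL_PIN);
	...
	/* Pinned pages are released with unpin_user_pages(), not put_page(). */
	unpin_user_pages(pages, DIV_ROUND_UP(bytes + offset, PAGE_SIZE));
	kvfree(pages);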

Along those lines, I've copied you in on "New topic branch for block +
gup work?"; let's see what happens over there.

[1] https://lore.kernel.org/r/20220831041843.973026-1-jhubbard@nvidia.com


thanks,

-- 
John Hubbard
NVIDIA
> diff --git a/include/linux/uio.h b/include/linux/uio.h
> index 5896af36199c..76ba69edb8c5 100644
> --- a/include/linux/uio.h
> +++ b/include/linux/uio.h
> @@ -247,8 +247,14 @@ void iov_iter_pipe(struct iov_iter *i, unsigned int direction, struct pipe_inode
>  void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count);
>  void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray,
>  		     loff_t start, size_t count);
> +ssize_t iov_iter_get_pages_flags(struct iov_iter *i, struct page **pages,
> +		size_t maxsize, unsigned maxpages, size_t *start,
> +		unsigned int gup_flags);
>  ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages,
>  			size_t maxsize, unsigned maxpages, size_t *start);
> +ssize_t iov_iter_get_pages_alloc_flags(struct iov_iter *i,
> +		struct page ***pages, size_t maxsize, size_t *start,
> +		unsigned int gup_flags);
>  ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, struct page ***pages,
>  			size_t maxsize, size_t *start);
>  int iov_iter_npages(const struct iov_iter *i, int maxpages);
> diff --git a/lib/iov_iter.c b/lib/iov_iter.c
> index 4b7fce72e3e5..dedb78f3c655 100644
> --- a/lib/iov_iter.c
> +++ b/lib/iov_iter.c
> @@ -1427,7 +1427,8 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
>  
>  static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
>  		   struct page ***pages, size_t maxsize,
> -		   unsigned int maxpages, size_t *start)
> +		   unsigned int maxpages, size_t *start,
> +		   unsigned int gup_flags)
>  {
>  	unsigned int n;
>  
> @@ -1439,7 +1440,6 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
>  		maxsize = MAX_RW_COUNT;
>  
>  	if (likely(user_backed_iter(i))) {
> -		unsigned int gup_flags = 0;
>  		unsigned long addr;
>  		int res;
>  
> @@ -1497,10 +1497,24 @@ ssize_t iov_iter_get_pages2(struct iov_iter *i,
>  		return 0;
>  	BUG_ON(!pages);
>  
> -	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start);
> +	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
> +					  start, 0);
>  }
>  EXPORT_SYMBOL(iov_iter_get_pages2);
>  
> +ssize_t iov_iter_get_pages_flags(struct iov_iter *i, struct page **pages,
> +		size_t maxsize, unsigned maxpages, size_t *start,
> +		unsigned int gup_flags)
> +{
> +	if (!maxpages)
> +		return 0;
> +	BUG_ON(!pages);
> +
> +	return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages,
> +					  start, gup_flags);
> +}
> +EXPORT_SYMBOL_GPL(iov_iter_get_pages_flags);
> +
>  ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
>  		   struct page ***pages, size_t maxsize,
>  		   size_t *start)
> @@ -1509,7 +1523,7 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
>  
>  	*pages = NULL;
>  
> -	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start);
> +	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start, 0);
>  	if (len <= 0) {
>  		kvfree(*pages);
>  		*pages = NULL;
> @@ -1518,6 +1532,24 @@ ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i,
>  }
>  EXPORT_SYMBOL(iov_iter_get_pages_alloc2);
>  
> +ssize_t iov_iter_get_pages_alloc_flags(struct iov_iter *i,
> +		struct page ***pages, size_t maxsize,
> +		size_t *start, unsigned int gup_flags)
> +{
> +	ssize_t len;
> +
> +	*pages = NULL;
> +
> +	len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start,
> +					 gup_flags);
> +	if (len <= 0) {
> +		kvfree(*pages);
> +		*pages = NULL;
> +	}
> +	return len;
> +}
> +EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc_flags);
> +
>  size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
>  			       struct iov_iter *i)
>  {




