[PATCH 19/26] cifs: Remove support for ITER_KVEC/BVEC/FOLIOQ from smb_extract_iter_to_rdma()
Stefan Metzmacher
metze at samba.org
Sun Mar 29 03:39:46 PDT 2026
Hi David,
in ksmbd-for-next, smbdirect_map_sges_from_iter() is also called
in the server from smb_direct_writev() =>
smbdirect_connection_send_iter() =>
smbdirect_connection_send_single_iter()
So we still need ITER_KVEC.
Thanks!
metze
Am 26.03.26 um 11:45 schrieb David Howells:
> netfslib now only presents a bvecq queue and an associated ITER_BVECQ
> iterator to the filesystem, so it isn't going to see ITER_KVEC, ITER_BVEC
> or ITER_FOLIOQ iterators. So remove that code.
>
> Signed-off-by: David Howells <dhowells at redhat.com>
> cc: Steve French <sfrench at samba.org>
> cc: Paulo Alcantara <pc at manguebit.org>
> cc: Shyam Prasad N <sprasad at microsoft.com>
> cc: Tom Talpey <tom at talpey.com>
> cc: linux-cifs at vger.kernel.org
> cc: netfs at lists.linux.dev
> cc: linux-fsdevel at vger.kernel.org
> ---
> fs/smb/client/smbdirect.c | 165 --------------------------------------
> 1 file changed, 165 deletions(-)
>
> diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
> index f8a6be83db98..d9e026d5e9f9 100644
> --- a/fs/smb/client/smbdirect.c
> +++ b/fs/smb/client/smbdirect.c
> @@ -3142,162 +3142,6 @@ static bool smb_set_sge(struct smb_extract_to_rdma *rdma,
> return true;
> }
>
> -/*
> - * Extract page fragments from a BVEC-class iterator and add them to an RDMA
> - * element list. The pages are not pinned.
> - */
> -static ssize_t smb_extract_bvec_to_rdma(struct iov_iter *iter,
> - struct smb_extract_to_rdma *rdma,
> - ssize_t maxsize)
> -{
> - const struct bio_vec *bv = iter->bvec;
> - unsigned long start = iter->iov_offset;
> - unsigned int i;
> - ssize_t ret = 0;
> -
> - for (i = 0; i < iter->nr_segs; i++) {
> - size_t off, len;
> -
> - len = bv[i].bv_len;
> - if (start >= len) {
> - start -= len;
> - continue;
> - }
> -
> - len = min_t(size_t, maxsize, len - start);
> - off = bv[i].bv_offset + start;
> -
> - if (!smb_set_sge(rdma, bv[i].bv_page, off, len))
> - return -EIO;
> -
> - ret += len;
> - maxsize -= len;
> - if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
> - break;
> - start = 0;
> - }
> -
> - if (ret > 0)
> - iov_iter_advance(iter, ret);
> - return ret;
> -}
> -
> -/*
> - * Extract fragments from a KVEC-class iterator and add them to an RDMA list.
> - * This can deal with vmalloc'd buffers as well as kmalloc'd or static buffers.
> - * The pages are not pinned.
> - */
> -static ssize_t smb_extract_kvec_to_rdma(struct iov_iter *iter,
> - struct smb_extract_to_rdma *rdma,
> - ssize_t maxsize)
> -{
> - const struct kvec *kv = iter->kvec;
> - unsigned long start = iter->iov_offset;
> - unsigned int i;
> - ssize_t ret = 0;
> -
> - for (i = 0; i < iter->nr_segs; i++) {
> - struct page *page;
> - unsigned long kaddr;
> - size_t off, len, seg;
> -
> - len = kv[i].iov_len;
> - if (start >= len) {
> - start -= len;
> - continue;
> - }
> -
> - kaddr = (unsigned long)kv[i].iov_base + start;
> - off = kaddr & ~PAGE_MASK;
> - len = min_t(size_t, maxsize, len - start);
> - kaddr &= PAGE_MASK;
> -
> - maxsize -= len;
> - do {
> - seg = min_t(size_t, len, PAGE_SIZE - off);
> -
> - if (is_vmalloc_or_module_addr((void *)kaddr))
> - page = vmalloc_to_page((void *)kaddr);
> - else
> - page = virt_to_page((void *)kaddr);
> -
> - if (!smb_set_sge(rdma, page, off, seg))
> - return -EIO;
> -
> - ret += seg;
> - len -= seg;
> - kaddr += PAGE_SIZE;
> - off = 0;
> - } while (len > 0 && rdma->nr_sge < rdma->max_sge);
> -
> - if (rdma->nr_sge >= rdma->max_sge || maxsize <= 0)
> - break;
> - start = 0;
> - }
> -
> - if (ret > 0)
> - iov_iter_advance(iter, ret);
> - return ret;
> -}
> -
> -/*
> - * Extract folio fragments from a FOLIOQ-class iterator and add them to an RDMA
> - * list. The folios are not pinned.
> - */
> -static ssize_t smb_extract_folioq_to_rdma(struct iov_iter *iter,
> - struct smb_extract_to_rdma *rdma,
> - ssize_t maxsize)
> -{
> - const struct folio_queue *folioq = iter->folioq;
> - unsigned int slot = iter->folioq_slot;
> - ssize_t ret = 0;
> - size_t offset = iter->iov_offset;
> -
> - BUG_ON(!folioq);
> -
> - if (slot >= folioq_nr_slots(folioq)) {
> - folioq = folioq->next;
> - if (WARN_ON_ONCE(!folioq))
> - return -EIO;
> - slot = 0;
> - }
> -
> - do {
> - struct folio *folio = folioq_folio(folioq, slot);
> - size_t fsize = folioq_folio_size(folioq, slot);
> -
> - if (offset < fsize) {
> - size_t part = umin(maxsize, fsize - offset);
> -
> - if (!smb_set_sge(rdma, folio_page(folio, 0), offset, part))
> - return -EIO;
> -
> - offset += part;
> - ret += part;
> - maxsize -= part;
> - }
> -
> - if (offset >= fsize) {
> - offset = 0;
> - slot++;
> - if (slot >= folioq_nr_slots(folioq)) {
> - if (!folioq->next) {
> - WARN_ON_ONCE(ret < iter->count);
> - break;
> - }
> - folioq = folioq->next;
> - slot = 0;
> - }
> - }
> - } while (rdma->nr_sge < rdma->max_sge && maxsize > 0);
> -
> - iter->folioq = folioq;
> - iter->folioq_slot = slot;
> - iter->iov_offset = offset;
> - iter->count -= ret;
> - return ret;
> -}
> -
> /*
> * Extract memory fragments from a BVECQ-class iterator and add them to an RDMA
> * list. The folios are not pinned.
> @@ -3373,15 +3217,6 @@ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
> int before = rdma->nr_sge;
>
> switch (iov_iter_type(iter)) {
> - case ITER_BVEC:
> - ret = smb_extract_bvec_to_rdma(iter, rdma, len);
> - break;
> - case ITER_KVEC:
> - ret = smb_extract_kvec_to_rdma(iter, rdma, len);
> - break;
> - case ITER_FOLIOQ:
> - ret = smb_extract_folioq_to_rdma(iter, rdma, len);
> - break;
> case ITER_BVECQ:
> ret = smb_extract_bvecq_to_rdma(iter, rdma, len);
> break;
>
More information about the linux-afs
mailing list