[RFC PATCH 08/17] cifs: Use a bvecq for buffering instead of a folioq
David Howells
dhowells at redhat.com
Wed Mar 4 06:03:15 PST 2026
Use a bvecq rather than a folio_queue for the internal buffering done for
crypto purposes, so that the folio_queue type can eventually be phased out.
Signed-off-by: David Howells <dhowells at redhat.com>
cc: Paulo Alcantara <pc at manguebit.org>
cc: Matthew Wilcox <willy at infradead.org>
cc: Christoph Hellwig <hch at infradead.org>
cc: Steve French <sfrench at samba.org>
cc: linux-cifs at vger.kernel.org
cc: netfs at lists.linux.dev
cc: linux-fsdevel at vger.kernel.org
---
fs/smb/client/cifsglob.h | 2 +-
fs/smb/client/smb2ops.c | 70 +++++++++++++++++++---------------------
2 files changed, 34 insertions(+), 38 deletions(-)
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 080ea601c209..12202d9537e0 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -290,7 +290,7 @@ struct smb_rqst {
struct kvec *rq_iov; /* array of kvecs */
unsigned int rq_nvec; /* number of kvecs in array */
struct iov_iter rq_iter; /* Data iterator */
- struct folio_queue *rq_buffer; /* Buffer for encryption */
+ struct bvecq *rq_buffer; /* Buffer for encryption */
};
struct mid_q_entry;
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index fea9a35caa57..76baf21404df 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -4517,19 +4517,17 @@ crypt_message(struct TCP_Server_Info *server, int num_rqst,
}
/*
- * Copy data from an iterator to the folios in a folio queue buffer.
+ * Copy data from an iterator to the pages in a bvec queue buffer.
*/
-static bool cifs_copy_iter_to_folioq(struct iov_iter *iter, size_t size,
- struct folio_queue *buffer)
+static bool cifs_copy_iter_to_bvecq(struct iov_iter *iter, size_t size,
+ struct bvecq *buffer)
{
for (; buffer; buffer = buffer->next) {
- for (int s = 0; s < folioq_count(buffer); s++) {
- struct folio *folio = folioq_folio(buffer, s);
- size_t part = folioq_folio_size(buffer, s);
+ for (int s = 0; s < buffer->nr_segs; s++) {
+ struct bio_vec *bv = &buffer->bv[s];
+ size_t part = umin(bv->bv_len, size);
- part = umin(part, size);
-
- if (copy_folio_from_iter(folio, 0, part, iter) != part)
+ if (copy_page_from_iter(bv->bv_page, 0, part, iter) != part)
return false;
size -= part;
}
@@ -4541,7 +4539,7 @@ void
smb3_free_compound_rqst(int num_rqst, struct smb_rqst *rqst)
{
for (int i = 0; i < num_rqst; i++)
- netfs_free_folioq_buffer(rqst[i].rq_buffer);
+ netfs_free_bvecq_buffer(rqst[i].rq_buffer);
}
/*
@@ -4568,7 +4566,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
for (int i = 1; i < num_rqst; i++) {
struct smb_rqst *old = &old_rq[i - 1];
struct smb_rqst *new = &new_rq[i];
- struct folio_queue *buffer = NULL;
+ struct bvecq *buffer = NULL;
size_t size = iov_iter_count(&old->rq_iter);
orig_len += smb_rqst_len(server, old);
@@ -4576,17 +4574,16 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, int num_rqst,
new->rq_nvec = old->rq_nvec;
if (size > 0) {
- size_t cur_size = 0;
- rc = netfs_alloc_folioq_buffer(NULL, &buffer, &cur_size,
- size, GFP_NOFS);
- if (rc < 0)
+ rc = -ENOMEM;
+ buffer = netfs_alloc_bvecq_buffer(size, 0, GFP_NOFS);
+ if (!buffer)
goto err_free;
new->rq_buffer = buffer;
- iov_iter_folio_queue(&new->rq_iter, ITER_SOURCE,
- buffer, 0, 0, size);
+ iov_iter_bvec_queue(&new->rq_iter, ITER_SOURCE,
+ buffer, 0, 0, size);
- if (!cifs_copy_iter_to_folioq(&old->rq_iter, size, buffer)) {
+ if (!cifs_copy_iter_to_bvecq(&old->rq_iter, size, buffer)) {
rc = smb_EIO1(smb_eio_trace_tx_copy_iter_to_buf, size);
goto err_free;
}
@@ -4676,16 +4673,15 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
}
static int
-cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
- size_t skip, struct iov_iter *iter)
+cifs_copy_bvecq_to_iter(struct bvecq *bq, size_t data_size,
+ size_t skip, struct iov_iter *iter)
{
- for (; folioq; folioq = folioq->next) {
- for (int s = 0; s < folioq_count(folioq); s++) {
- struct folio *folio = folioq_folio(folioq, s);
- size_t fsize = folio_size(folio);
- size_t n, len = umin(fsize - skip, data_size);
+ for (; bq; bq = bq->next) {
+ for (int s = 0; s < bq->nr_segs; s++) {
+ struct bio_vec *bv = &bq->bv[s];
+ size_t n, len = umin(bv->bv_len - skip, data_size);
- n = copy_folio_to_iter(folio, skip, len, iter);
+ n = copy_page_to_iter(bv->bv_page, bv->bv_offset + skip, len, iter);
if (n != len) {
cifs_dbg(VFS, "%s: something went wrong\n", __func__);
return smb_EIO2(smb_eio_trace_rx_copy_to_iter,
@@ -4701,7 +4697,7 @@ cifs_copy_folioq_to_iter(struct folio_queue *folioq, size_t data_size,
static int
handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
- char *buf, unsigned int buf_len, struct folio_queue *buffer,
+ char *buf, unsigned int buf_len, struct bvecq *buffer,
unsigned int buffer_len, bool is_offloaded)
{
unsigned int data_offset;
@@ -4810,8 +4806,8 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
}
/* Copy the data to the output I/O iterator. */
- rdata->result = cifs_copy_folioq_to_iter(buffer, buffer_len,
- cur_off, &rdata->subreq.io_iter);
+ rdata->result = cifs_copy_bvecq_to_iter(buffer, buffer_len,
+ cur_off, &rdata->subreq.io_iter);
if (rdata->result != 0) {
if (is_offloaded)
mid->mid_state = MID_RESPONSE_MALFORMED;
@@ -4849,7 +4845,7 @@ handle_read_data(struct TCP_Server_Info *server, struct mid_q_entry *mid,
struct smb2_decrypt_work {
struct work_struct decrypt;
struct TCP_Server_Info *server;
- struct folio_queue *buffer;
+ struct bvecq *buffer;
char *buf;
unsigned int len;
};
@@ -4863,7 +4859,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
struct mid_q_entry *mid;
struct iov_iter iter;
- iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
+ iov_iter_bvec_queue(&iter, ITER_DEST, dw->buffer, 0, 0, dw->len);
rc = decrypt_raw_data(dw->server, dw->buf, dw->server->vals->read_rsp_size,
&iter, true);
if (rc) {
@@ -4912,7 +4908,7 @@ static void smb2_decrypt_offload(struct work_struct *work)
}
free_pages:
- netfs_free_folioq_buffer(dw->buffer);
+ netfs_free_bvecq_buffer(dw->buffer);
cifs_small_buf_release(dw->buf);
kfree(dw);
}
@@ -4950,12 +4946,12 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
dw->len = len;
len = round_up(dw->len, PAGE_SIZE);
- size_t cur_size = 0;
- rc = netfs_alloc_folioq_buffer(NULL, &dw->buffer, &cur_size, len, GFP_NOFS);
- if (rc < 0)
+ rc = -ENOMEM;
+ dw->buffer = netfs_alloc_bvecq_buffer(len, 0, GFP_NOFS);
+ if (!dw->buffer)
goto discard_data;
- iov_iter_folio_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
+ iov_iter_bvec_queue(&iter, ITER_DEST, dw->buffer, 0, 0, len);
/* Read the data into the buffer and clear excess bufferage. */
rc = cifs_read_iter_from_socket(server, &iter, dw->len);
@@ -5013,7 +5009,7 @@ receive_encrypted_read(struct TCP_Server_Info *server, struct mid_q_entry **mid,
}
free_pages:
- netfs_free_folioq_buffer(dw->buffer);
+ netfs_free_bvecq_buffer(dw->buffer);
free_dw:
kfree(dw);
return rc;
More information about the linux-afs
mailing list