[RFC v2 16/21] vfio/mlx5: Explicitly store page list
Leon Romanovsky
leon at kernel.org
Thu Sep 12 04:15:51 PDT 2024
From: Leon Romanovsky <leonro at nvidia.com>
As a preparation for removing the scatter-gather table and unifying
the receive and send lists, explicitly store the page list.
Signed-off-by: Leon Romanovsky <leonro at nvidia.com>
---
drivers/vfio/pci/mlx5/cmd.c | 29 ++++++++++++-----------------
drivers/vfio/pci/mlx5/cmd.h | 1 +
2 files changed, 13 insertions(+), 17 deletions(-)
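
With this hunk applied, mlx5vf_add_migration_pages() ends up reading
roughly like the sketch below: the buffer owns a single page array and
every bulk allocation appends at the buf->npages offset, so the
per-iteration memset() of a scratch array is no longer needed. This is
an illustrative sketch only, assuming a freshly allocated buffer
(buf->npages == 0 on entry); the sketch_add_pages() name and the
kvcalloc() sizing over all requested pages are assumptions made to keep
the example self-contained and are not taken from the posted diff.

static int sketch_add_pages(struct mlx5_vhca_data_buffer *buf,
			    unsigned int npages)
{
	unsigned int to_alloc = npages;
	unsigned long filled;
	unsigned int to_fill;
	int ret;

	/*
	 * Sketch assumption: size the array for every requested page up
	 * front so the appends below stay in bounds.
	 */
	buf->page_list = kvcalloc(npages, sizeof(*buf->page_list),
				  GFP_KERNEL_ACCOUNT);
	if (!buf->page_list)
		return -ENOMEM;

	to_fill = min_t(unsigned int, to_alloc,
			PAGE_SIZE / sizeof(*buf->page_list));
	do {
		/* Append freshly allocated pages after those already stored. */
		filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
						buf->page_list + buf->npages);
		if (!filled)
			return -ENOMEM;

		ret = sg_alloc_append_table_from_pages(
			&buf->table, buf->page_list + buf->npages, filled, 0,
			filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
			GFP_KERNEL_ACCOUNT);
		if (ret)
			return ret;

		buf->npages += filled;
		to_alloc -= filled;
		to_fill = min_t(unsigned int, to_alloc,
				PAGE_SIZE / sizeof(*buf->page_list));
	} while (to_alloc > 0);

	return 0;
}

The error paths return directly; cleanup appears to be left to
mlx5vf_free_data_buffer(), which (per the first hunk below) walks the
sg table to release the pages and now also kvfree()s buf->page_list.
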
diff --git a/drivers/vfio/pci/mlx5/cmd.c b/drivers/vfio/pci/mlx5/cmd.c
index 1832a6c1f35d..34ae3e299a9e 100644
--- a/drivers/vfio/pci/mlx5/cmd.c
+++ b/drivers/vfio/pci/mlx5/cmd.c
@@ -422,6 +422,7 @@ void mlx5vf_free_data_buffer(struct mlx5_vhca_data_buffer *buf)
for_each_sgtable_page(&buf->table.sgt, &sg_iter, 0)
__free_page(sg_page_iter_page(&sg_iter));
sg_free_append_table(&buf->table);
+ kvfree(buf->page_list);
kfree(buf);
}
@@ -434,39 +435,33 @@ static int mlx5vf_add_migration_pages(struct mlx5_vhca_data_buffer *buf,
unsigned int to_fill;
int ret;
- to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*page_list));
- page_list = kvzalloc(to_fill * sizeof(*page_list), GFP_KERNEL_ACCOUNT);
+ to_fill = min_t(unsigned int, npages, PAGE_SIZE / sizeof(*buf->page_list));
+ page_list = kvzalloc(to_fill * sizeof(*buf->page_list), GFP_KERNEL_ACCOUNT);
if (!page_list)
return -ENOMEM;
+ buf->page_list = page_list;
+
do {
filled = alloc_pages_bulk_array(GFP_KERNEL_ACCOUNT, to_fill,
- page_list);
- if (!filled) {
- ret = -ENOMEM;
- goto err;
- }
+ buf->page_list + buf->npages);
+ if (!filled)
+ return -ENOMEM;
+
to_alloc -= filled;
ret = sg_alloc_append_table_from_pages(
- &buf->table, page_list, filled, 0,
+ &buf->table, buf->page_list + buf->npages, filled, 0,
filled << PAGE_SHIFT, UINT_MAX, SG_MAX_SINGLE_ALLOC,
GFP_KERNEL_ACCOUNT);
if (ret)
- goto err;
+ return ret;
buf->npages += filled;
- /* clean input for another bulk allocation */
- memset(page_list, 0, filled * sizeof(*page_list));
to_fill = min_t(unsigned int, to_alloc,
- PAGE_SIZE / sizeof(*page_list));
+ PAGE_SIZE / sizeof(*buf->page_list));
} while (to_alloc > 0);
- kvfree(page_list);
return 0;
-
-err:
- kvfree(page_list);
- return ret;
}
struct mlx5_vhca_data_buffer *
diff --git a/drivers/vfio/pci/mlx5/cmd.h b/drivers/vfio/pci/mlx5/cmd.h
index 25dd6ff54591..5b764199db53 100644
--- a/drivers/vfio/pci/mlx5/cmd.h
+++ b/drivers/vfio/pci/mlx5/cmd.h
@@ -53,6 +53,7 @@ struct mlx5_vf_migration_header {
};
struct mlx5_vhca_data_buffer {
+ struct page **page_list;
struct sg_append_table table;
loff_t start_pos;
u64 length;
--
2.46.0