[RFC v2 10/11] io_uring/rsrc: add dmabuf-backed buffer registration
Ming Lei
ming.lei at redhat.com
Sat Jan 3 17:46:53 PST 2026
On Sun, Nov 23, 2025 at 10:51:30PM +0000, Pavel Begunkov wrote:
> Add an ability to register a dmabuf-backed io_uring buffer. It also
> needs to know which device to use for attachment; for that it takes a
> target_fd and extracts the device through the new file op. Unlike
> normal buffers, it also retains the target file so that any imports
> from ineligible requests can be rejected in later patches.
>
> Suggested-by: Vishal Verma <vishal1.verma at intel.com>
> Suggested-by: David Wei <dw at davidwei.uk>
> Signed-off-by: Pavel Begunkov <asml.silence at gmail.com>
> ---
> io_uring/rsrc.c | 106 +++++++++++++++++++++++++++++++++++++++++++++++-
> io_uring/rsrc.h | 1 +
> 2 files changed, 106 insertions(+), 1 deletion(-)
>
> diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c
> index 691f9645d04c..7dfebf459dd0 100644
> --- a/io_uring/rsrc.c
> +++ b/io_uring/rsrc.c
> @@ -10,6 +10,8 @@
> #include <linux/compat.h>
> #include <linux/io_uring.h>
> #include <linux/io_uring/cmd.h>
> +#include <linux/dma-buf.h>
> +#include <linux/dma_token.h>
>
> #include <uapi/linux/io_uring.h>
>
> @@ -802,6 +804,106 @@ bool io_check_coalesce_buffer(struct page **page_array, int nr_pages,
> return true;
> }
>
> +struct io_regbuf_dma {
> + struct dma_token *token;
> + struct file *target_file;
> + struct dma_buf *dmabuf;
> +};
> +
> +static void io_release_reg_dmabuf(void *priv)
> +{
> + struct io_regbuf_dma *db = priv;
> +
> + dma_token_release(db->token);
> + dma_buf_put(db->dmabuf);
> + fput(db->target_file);
> + kfree(db);
> +}
> +
> +static struct io_rsrc_node *io_register_dmabuf(struct io_ring_ctx *ctx,
> + struct io_uring_reg_buffer *rb,
> + struct iovec *iov)
> +{
> + struct dma_token_params params = {};
> + struct io_rsrc_node *node = NULL;
> + struct io_mapped_ubuf *imu = NULL;
> + struct io_regbuf_dma *regbuf = NULL;
> + struct file *target_file = NULL;
> + struct dma_buf *dmabuf = NULL;
> + struct dma_token *token;
> + int ret;
> +
> + if (iov->iov_base || iov->iov_len)
> + return ERR_PTR(-EFAULT);
> +
> + node = io_rsrc_node_alloc(ctx, IORING_RSRC_BUFFER);
> + if (!node) {
> + ret = -ENOMEM;
> + goto err;
> + }
> +
> + imu = io_alloc_imu(ctx, 0);
> + if (!imu) {
> + ret = -ENOMEM;
> + goto err;
> + }
> +
> + regbuf = kzalloc(sizeof(*regbuf), GFP_KERNEL);
> + if (!regbuf) {
> + ret = -ENOMEM;
> + goto err;
> + }
> +
> + target_file = fget(rb->target_fd);
> + if (!target_file) {
> + ret = -EBADF;
> + goto err;
> + }
> +
> + dmabuf = dma_buf_get(rb->dmabuf_fd);
> + if (IS_ERR(dmabuf)) {
> + ret = PTR_ERR(dmabuf);
> + dmabuf = NULL;
> + goto err;
> + }
> +
> + params.dmabuf = dmabuf;
> + params.dir = DMA_BIDIRECTIONAL;
> + token = dma_token_create(target_file, &params);
> + if (IS_ERR(token)) {
> + ret = PTR_ERR(token);
> + goto err;
> + }
> +
This approach looks less flexible: for example, the same dma-buf may be
used for I/Os to multiple disks, and then it has to be registered once
for each target file.
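To make that concrete, a rough userspace-view sketch of what the
per-target binding implies (reg_dmabuf() and the fd setup below are
hypothetical; only the dmabuf_fd/target_fd pairing mirrors struct
io_uring_reg_buffer from the patch above):

	/*
	 * Hypothetical helper that fills struct io_uring_reg_buffer for
	 * fixed-buffer slot 'slot' and registers it with the ring.
	 */
	int reg_dmabuf(struct io_uring *ring, int slot,
		       int dmabuf_fd, int target_fd);

	int dmabuf_fd = export_dmabuf();		/* one dma-buf ...  */
	int nvme0 = open("/dev/nvme0n1", O_RDWR);	/* ... doing I/O to */
	int nvme1 = open("/dev/nvme1n1", O_RDWR);	/* two target disks */

	/*
	 * Because the token is bound to a single target file, the same
	 * dma-buf has to be registered twice, creating a separate
	 * attachment/mapping per disk.
	 */
	reg_dmabuf(&ring, 0, dmabuf_fd, nvme0);
	reg_dmabuf(&ring, 1, dmabuf_fd, nvme1);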
Thanks,
Ming