[PATCH v7 04/26] virtio_ring: split: extract the logic of creating vring
Xuan Zhuo
xuanzhuo at linux.alibaba.com
Wed Mar 9 01:20:03 PST 2022
I agree with all of the comments below.
Thanks.
On Wed, 9 Mar 2022 14:46:01 +0800, Jason Wang <jasowang at redhat.com> wrote:
>
> 在 2022/3/8 下午8:34, Xuan Zhuo 写道:
> > Extract the logic of creating the vring for the split virtqueue.
> >
> > For the convenience of passing parameters, add a structure
> > vring_split.
> >
> > This is required by the subsequent virtqueue reset support, which needs
> > to re-create the vring.
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> > ---
> > drivers/virtio/virtio_ring.c | 74 +++++++++++++++++++++++++-----------
> > 1 file changed, 51 insertions(+), 23 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > index b87130c8f312..d32793615451 100644
> > --- a/drivers/virtio/virtio_ring.c
> > +++ b/drivers/virtio/virtio_ring.c
> > @@ -85,6 +85,13 @@ struct vring_desc_extra {
> > u16 next; /* The next desc state in a list. */
> > };
> >
> > +struct vring_split {
> > + void *queue;
> > + dma_addr_t dma_addr;
> > + size_t queue_size_in_bytes;
> > + struct vring vring;
> > +};
>
>
> So this structure will only be used in vring_create_vring_split(), which
> doesn't seem that useful.
>
> More see below.
>
>
> > +
> > struct vring_virtqueue {
> > struct virtqueue vq;
> >
> > @@ -915,28 +922,21 @@ static void *virtqueue_detach_unused_buf_split(struct virtqueue *_vq)
> > return NULL;
> > }
> >
> > -static struct virtqueue *vring_create_virtqueue_split(
> > - unsigned int index,
> > - unsigned int num,
> > - unsigned int vring_align,
> > - struct virtio_device *vdev,
> > - bool weak_barriers,
> > - bool may_reduce_num,
> > - bool context,
> > - bool (*notify)(struct virtqueue *),
> > - void (*callback)(struct virtqueue *),
> > - const char *name)
> > +static int vring_create_vring_split(struct vring_split *vring,
> > + struct virtio_device *vdev,
> > + unsigned int vring_align,
> > + bool weak_barriers,
> > + bool may_reduce_num,
> > + u32 num)
>
>
> I'd rename this as vring_alloc_queue_split() and let it simply return
> the address of the queue, like vring_alloc_queue().
>
> And let it simply accept a dma_addr_t *dma_addr instead of vring_split.
>
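
OK. If I understand the suggestion correctly, the allocator would end up
roughly like below. This is only my reading of it: the function name comes
from your suggestion, and passing num by pointer (so the caller sees the
possibly reduced size) is my guess at the signature, to be confirmed in the
next version:

static void *vring_alloc_queue_split(struct virtio_device *vdev,
				     dma_addr_t *dma_addr,
				     u32 *num,
				     unsigned int vring_align,
				     bool may_reduce_num)
{
	void *queue = NULL;

	/* We assume num is a power of 2. */
	if (*num & (*num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", *num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; *num && vring_size(*num, vring_align) > PAGE_SIZE; *num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(*num, vring_align),
					  dma_addr,
					  GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
		if (queue)
			break;
		if (!may_reduce_num)
			return NULL;
	}

	if (!*num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(*num, vring_align),
					  dma_addr, GFP_KERNEL | __GFP_ZERO);
	}

	return queue;
}
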
>
> > {
> > - struct virtqueue *vq;
> > void *queue = NULL;
> > dma_addr_t dma_addr;
> > size_t queue_size_in_bytes;
> > - struct vring vring;
> >
> > /* We assume num is a power of 2. */
> > if (num & (num - 1)) {
> > dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
> > - return NULL;
> > + return -EINVAL;
> > }
> >
> > /* TODO: allocate each queue chunk individually */
> > @@ -947,11 +947,11 @@ static struct virtqueue *vring_create_virtqueue_split(
> > if (queue)
> > break;
> > if (!may_reduce_num)
> > - return NULL;
> > + return -ENOMEM;
> > }
> >
> > if (!num)
> > - return NULL;
> > + return -ENOMEM;
> >
> > if (!queue) {
> > /* Try to get a single page. You are my only hope! */
> > @@ -959,21 +959,49 @@ static struct virtqueue *vring_create_virtqueue_split(
> > &dma_addr, GFP_KERNEL|__GFP_ZERO);
> > }
> > if (!queue)
> > - return NULL;
> > + return -ENOMEM;
> >
> > queue_size_in_bytes = vring_size(num, vring_align);
> > - vring_init(&vring, num, queue, vring_align);
> > + vring_init(&vring->vring, num, queue, vring_align);
>
>
> It's better to move this to its caller (vring_create_virtqueue_split()),
> so that we end up with rather simple logic like below:
>
>
>
> > +
> > + vring->dma_addr = dma_addr;
> > + vring->queue = queue;
> > + vring->queue_size_in_bytes = queue_size_in_bytes;
> > +
> > + return 0;
> > +}
> > +
> > +static struct virtqueue *vring_create_virtqueue_split(
> > + unsigned int index,
> > + unsigned int num,
> > + unsigned int vring_align,
> > + struct virtio_device *vdev,
> > + bool weak_barriers,
> > + bool may_reduce_num,
> > + bool context,
> > + bool (*notify)(struct virtqueue *),
> > + void (*callback)(struct virtqueue *),
> > + const char *name)
> > +{
> > + struct vring_split vring;
> > + struct virtqueue *vq;
> > + int err;
> > +
> > + err = vring_create_vring_split(&vring, vdev, vring_align, weak_barriers,
> > + may_reduce_num, num);
> > + if (err)
> > + return NULL;
>
>
> queue = vring_alloc_queue_split(vdev, &dma_addr, ...);
> if (!queue)
>         return NULL;
>
> vring_init();
>
> ...
>
> Thanks
>
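
So with struct vring_split dropped, vring_create_virtqueue_split() would
become something like the sketch below. It builds on the assumed
vring_alloc_queue_split() helper from above (name and signature are still my
guess), just to confirm I got the idea right before posting the next version:

static struct virtqueue *vring_create_virtqueue_split(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	size_t queue_size_in_bytes;
	struct virtqueue *vq;
	dma_addr_t dma_addr;
	struct vring vring;
	void *queue;

	/* Allocate the ring pages; num may be reduced on failure. */
	queue = vring_alloc_queue_split(vdev, &dma_addr, &num,
					vring_align, may_reduce_num);
	if (!queue)
		return NULL;

	/* Lay out the vring inside the allocated pages, here in the caller. */
	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue, dma_addr);
		return NULL;
	}

	to_vvq(vq)->split.queue_dma_addr = dma_addr;
	to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
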
>
> >
> > - vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
> > + vq = __vring_new_virtqueue(index, vring.vring, vdev, weak_barriers, context,
> > notify, callback, name);
> > if (!vq) {
> > - vring_free_queue(vdev, queue_size_in_bytes, queue,
> > - dma_addr);
> > + vring_free_queue(vdev, vring.queue_size_in_bytes, vring.queue,
> > + vring.dma_addr);
> > return NULL;
> > }
> >
> > - to_vvq(vq)->split.queue_dma_addr = dma_addr;
> > - to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
> > + to_vvq(vq)->split.queue_dma_addr = vring.dma_addr;
> > + to_vvq(vq)->split.queue_size_in_bytes = vring.queue_size_in_bytes;
> > to_vvq(vq)->we_own_ring = true;
> >
> > return vq;
>