[PATCH v11 08/40] virtio_ring: split: extract the logic of alloc queue
Xuan Zhuo
xuanzhuo at linux.alibaba.com
Fri Jul 1 01:45:58 PDT 2022
On Fri, 1 Jul 2022 16:26:25 +0800, Jason Wang <jasowang at redhat.com> wrote:
>
> On 2022/6/29 14:56, Xuan Zhuo wrote:
> > Separate out the logic that allocates the split vring's queue into its
> > own helper.
> >
> > This is required by the subsequent support for resetting a virtqueue's
> > vring (see the sketch of a possible later caller after the quoted diff
> > below).
> >
> > Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> > ---
> > drivers/virtio/virtio_ring.c | 68 ++++++++++++++++++++++--------------
> > 1 file changed, 42 insertions(+), 26 deletions(-)
> >
> > diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
> > index 49d61e412dc6..a9ceb9c16c54 100644
> > --- a/drivers/virtio/virtio_ring.c
> > +++ b/drivers/virtio/virtio_ring.c
> > @@ -949,28 +949,19 @@ static void vring_free_split(struct vring_virtqueue_split *vring,
> > kfree(vring->desc_extra);
> > }
> >
> > -static struct virtqueue *vring_create_virtqueue_split(
> > - unsigned int index,
> > - unsigned int num,
> > - unsigned int vring_align,
> > - struct virtio_device *vdev,
> > - bool weak_barriers,
> > - bool may_reduce_num,
> > - bool context,
> > - bool (*notify)(struct virtqueue *),
> > - void (*callback)(struct virtqueue *),
> > - const char *name)
> > +static int vring_alloc_queue_split(struct vring_virtqueue_split *vring,
> > + struct virtio_device *vdev,
> > + u32 num,
> > + unsigned int vring_align,
> > + bool may_reduce_num)
> > {
> > - struct virtqueue *vq;
> > void *queue = NULL;
> > dma_addr_t dma_addr;
> > - size_t queue_size_in_bytes;
> > - struct vring vring;
> >
> > /* We assume num is a power of 2. */
> > if (num & (num - 1)) {
> > dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
> > - return NULL;
> > + return -EINVAL;
> > }
> >
> > /* TODO: allocate each queue chunk individually */
> > @@ -981,11 +972,11 @@ static struct virtqueue *vring_create_virtqueue_split(
> > if (queue)
> > break;
> > if (!may_reduce_num)
> > - return NULL;
> > + return -ENOMEM;
> > }
> >
> > if (!num)
> > - return NULL;
> > + return -ENOMEM;
> >
> > if (!queue) {
> > /* Try to get a single page. You are my only hope! */
> > @@ -993,21 +984,46 @@ static struct virtqueue *vring_create_virtqueue_split(
> > &dma_addr, GFP_KERNEL|__GFP_ZERO);
> > }
> > if (!queue)
> > - return NULL;
> > + return -ENOMEM;
> > +
> > + vring_init(&vring->vring, num, queue, vring_align);
> >
> > - queue_size_in_bytes = vring_size(num, vring_align);
> > - vring_init(&vring, num, queue, vring_align);
> > + vring->queue_dma_addr = dma_addr;
> > + vring->queue_size_in_bytes = vring_size(num, vring_align);
> > +
> > + return 0;
> > +}
> > +
> > +static struct virtqueue *vring_create_virtqueue_split(
> > + unsigned int index,
> > + unsigned int num,
> > + unsigned int vring_align,
> > + struct virtio_device *vdev,
> > + bool weak_barriers,
> > + bool may_reduce_num,
> > + bool context,
> > + bool (*notify)(struct virtqueue *),
> > + void (*callback)(struct virtqueue *),
> > + const char *name)
> > +{
> > + struct vring_virtqueue_split vring = {};
> > + struct virtqueue *vq;
> > + int err;
> > +
> > + err = vring_alloc_queue_split(&vring, vdev, num, vring_align,
> > + may_reduce_num);
> > + if (err)
> > + return NULL;
> >
> > - vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
> > - notify, callback, name);
> > + vq = __vring_new_virtqueue(index, vring.vring, vdev, weak_barriers,
> > + context, notify, callback, name);
> > if (!vq) {
> > - vring_free_queue(vdev, queue_size_in_bytes, queue,
> > - dma_addr);
> > + vring_free_split(&vring, vdev);
> > return NULL;
> > }
> >
> > - to_vvq(vq)->split.queue_dma_addr = dma_addr;
> > - to_vvq(vq)->split.queue_size_in_bytes = queue_size_in_bytes;
> > + to_vvq(vq)->split.queue_dma_addr = vring.queue_dma_addr;
>
>
> Nit: having two queue_dma_addr seems redundant (so does queue_size_in_bytes).
Two? I don't understand where the problem is, could you explain?

Thanks.
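
For context, a minimal sketch of the two places a queue_dma_addr (and a
queue_size_in_bytes) would live after this patch. The layouts below are
abbreviated from what the diff implies and are an assumption, not a quote of
the real definitions:

/* New allocation-state struct introduced by this series (abbreviated). */
struct vring_virtqueue_split {
	struct vring vring;
	dma_addr_t queue_dma_addr;		/* filled by vring_alloc_queue_split() */
	size_t queue_size_in_bytes;
	/* ... desc_state, desc_extra, ... */
};

/* Existing virtqueue struct (abbreviated). */
struct vring_virtqueue {
	struct virtqueue vq;
	/* ... */
	struct {
		/* ... */
		dma_addr_t queue_dma_addr;	/* copied from the struct above */
		size_t queue_size_in_bytes;
		/* ... */
	} split;
	/* ... */
};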
>
> Thanks
>
>
> > + to_vvq(vq)->split.queue_size_in_bytes = vring.queue_size_in_bytes;
> > to_vvq(vq)->we_own_ring = true;
> >
> > return vq;
>
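
On the motivation stated in the commit message, a hypothetical sketch (not
part of this patch) of how a later reset/resize path could reuse the new
helper. The function name, the alignment value and the attach step are
assumptions; vring_free_split() is the counterpart shown in the diff above.

static int vring_reset_vring_split_sketch(struct vring_virtqueue *vq, u32 num)
{
	struct virtio_device *vdev = vq->vq.vdev;
	struct vring_virtqueue_split vring = {};
	int err;

	/* Build a replacement ring with the new size first ... */
	err = vring_alloc_queue_split(&vring, vdev, num,
				      SMP_CACHE_BYTES /* assumed alignment */,
				      true /* may_reduce_num */);
	if (err)
		return err;

	/* ... then detach and free the old ring and attach @vring to @vq.
	 * On any later failure, vring_free_split(&vring, vdev) undoes the
	 * allocation above.
	 */

	return 0;
}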