[PATCH v9 31/32] virtio_net: support rx/tx queue resize

Xuan Zhuo xuanzhuo at linux.alibaba.com
Mon Apr 18 01:48:25 PDT 2022


On Mon, 18 Apr 2022 15:49:29 +0800, Jason Wang <jasowang at redhat.com> wrote:
> On Mon, Apr 18, 2022 at 11:24 AM Xuan Zhuo <xuanzhuo at linux.alibaba.com> wrote:
> >
> > On Wed, 13 Apr 2022 16:00:18 +0800, Jason Wang <jasowang at redhat.com> wrote:
> > >
> > > On 2022/4/6 11:43 AM, Xuan Zhuo wrote:
> > > > This patch implements the resize function for the rx and tx queues.
> > > > Based on this function, it is possible to modify the ring num (size)
> > > > of a queue.
> > > >
> > > > An exception may occur during the resize process: the resize may
> > > > fail, or the vq may no longer be usable. Either way, we must execute
> > > > napi_enable(). Because napi_disable() acts like a lock, every call to
> > > > napi_disable() must be paired with a later call to napi_enable().
> > > >
> > > > Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> > > > ---
> > > >   drivers/net/virtio_net.c | 81 ++++++++++++++++++++++++++++++++++++++++
> > > >   1 file changed, 81 insertions(+)
> > > >
> > > > diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> > > > index b8bf00525177..ba6859f305f7 100644
> > > > --- a/drivers/net/virtio_net.c
> > > > +++ b/drivers/net/virtio_net.c
> > > > @@ -251,6 +251,9 @@ struct padded_vnet_hdr {
> > > >     char padding[4];
> > > >   };
> > > >
> > > > +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > +static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> > > > +
> > > >   static bool is_xdp_frame(void *ptr)
> > > >   {
> > > >     return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> > > > @@ -1369,6 +1372,15 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> > > >   {
> > > >     napi_enable(napi);
> > > >
> > > > +   /* Check if vq is in reset state. The normal reset/resize process is
> > > > +    * protected by napi: napi is disabled while the operation runs and
> > > > +    * re-enabled once it completes. If re-enabling the vq fails during
> > > > +    * the process, the vq remains unavailable in the reset state.
> > > > +    */
> > > > +   if (vq->reset)
> > > > +           return;
> > >
> > >
> > > I don't get when we could hit this condition.
> > >
> > >
> > > > +
> > > >     /* If all buffers were filled by other side before we napi_enabled, we
> > > >      * won't get another interrupt, so process any outstanding packets now.
> > > >      * Call local_bh_enable after to trigger softIRQ processing.
> > > > @@ -1413,6 +1425,15 @@ static void refill_work(struct work_struct *work)
> > > >             struct receive_queue *rq = &vi->rq[i];
> > > >
> > > >             napi_disable(&rq->napi);
> > > > +
> > > > +           /* Check if vq is in reset state. See the comment in
> > > > +            * virtnet_napi_enable().
> > > > +            */
> > > > +           if (rq->vq->reset) {
> > > > +                   virtnet_napi_enable(rq->vq, &rq->napi);
> > > > +                   continue;
> > > > +           }
> > >
> > >
> > > Can we do something similar in virtnet_close() by canceling the work?
> > >
> > >
> > > > +
> > > >             still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
> > > >             virtnet_napi_enable(rq->vq, &rq->napi);
> > > >
> > > > @@ -1523,6 +1544,10 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> > > >     if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
> > > >             return;
> > > >
> > > > +   /* Check if vq is in reset state. See the comment in virtnet_napi_enable(). */
> > > > +   if (sq->vq->reset)
> > > > +           return;
> > >
> > >
> > > We've disabled TX napi, any chance we can still hit this?
> >
> >
> > static int virtnet_poll(struct napi_struct *napi, int budget)
> > {
> >         struct receive_queue *rq =
> >                 container_of(napi, struct receive_queue, napi);
> >         struct virtnet_info *vi = rq->vq->vdev->priv;
> >         struct send_queue *sq;
> >         unsigned int received;
> >         unsigned int xdp_xmit = 0;
> >
> >         virtnet_poll_cleantx(rq);
> > ...
> > }
> >
> > This is called from rx poll. Although it handles tx logic, it is not
> > driven by tx napi; it is invoked from the rx poll path.
>
> Ok, but we need to guarantee the memory ordering in this case. Disabling
> RX napi could be a solution for this.

Yes, I have realized this too. I have two solutions: disable rx napi during
the resize (see the sketch below), or the diff that follows.
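
For the first option, a minimal sketch of what I mean (illustration only, not
code posted in this thread; it assumes the virtqueue_resize() API added
earlier in this series and reuses the driver's existing napi helpers):

/* Sketch: also disable the paired rx napi, so that virtnet_poll() ->
 * virtnet_poll_cleantx() cannot touch sq->vq while it is being reset.
 */
static int virtnet_tx_resize(struct virtnet_info *vi,
			     struct send_queue *sq, u32 ring_num)
{
	struct receive_queue *rq;
	struct netdev_queue *txq;
	int err, qindex;

	qindex = sq - vi->sq;
	rq = &vi->rq[qindex];
	txq = netdev_get_tx_queue(vi->dev, qindex);

	virtnet_napi_tx_disable(&sq->napi);
	napi_disable(&rq->napi);	/* blocks the rx poll path */

	/* Stop the xmit path; start_xmit() runs under the tx lock. */
	__netif_tx_lock_bh(txq);
	netif_stop_subqueue(vi->dev, qindex);
	__netif_tx_unlock_bh(txq);

	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
	if (err)
		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n",
			   qindex, err);

	netif_start_subqueue(vi->dev, qindex);
	virtnet_napi_enable(rq->vq, &rq->napi);
	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
	return err;
}

The tradeoff is that rx processing on the queue pair stalls for the whole
resize; the diff below avoids that with a per-queue reset flag checked under
the tx lock.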

Thanks.


diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9bf1b6530b38..7764d1dcb831 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -135,6 +135,7 @@ struct send_queue {
 	struct virtnet_sq_stats stats;

 	struct napi_struct napi;
+	bool reset;
 };

 /* Internal representation of a receive virtqueue */
@@ -1583,6 +1587,11 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
 		return;

 	if (__netif_tx_trylock(txq)) {
+		if (sq->reset) {
+			__netif_tx_unlock(txq);
+			return;
+		}
+
 		do {
 			virtqueue_disable_cb(sq->vq);
 			free_old_xmit_skbs(sq, true);
@@ -1828,6 +1837,56 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }

+static int virtnet_tx_resize(struct virtnet_info *vi,
+			     struct send_queue *sq, u32 ring_num)
+{
+	struct netdev_queue *txq;
+	int err, qindex;
+
+	qindex = sq - vi->sq;
+
+	virtnet_napi_tx_disable(&sq->napi);
+
+	txq = netdev_get_tx_queue(vi->dev, qindex);
+
+	__netif_tx_lock_bh(txq);
+	netif_stop_subqueue(vi->dev, qindex);
+	sq->reset = true;
+	__netif_tx_unlock_bh(txq);
+
+	err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
+	if (err)
+		netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
+
+	__netif_tx_lock_bh(txq);
+	sq->reset = false;
+	netif_start_subqueue(vi->dev, qindex);
+	__netif_tx_unlock_bh(txq);
+
+	virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
+	return err;
+}
+
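
The rx side of this patch is truncated above; it follows the same pattern.
Roughly, as a sketch rather than the exact patch text (it reuses
try_fill_recv() and the vi->refill delayed work that already exist in the
driver, plus the virtnet_rq_free_unused_buf() helper declared earlier in this
patch):

static int virtnet_rx_resize(struct virtnet_info *vi,
			     struct receive_queue *rq, u32 ring_num)
{
	int err, qindex;

	qindex = rq - vi->rq;

	napi_disable(&rq->napi);

	err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
	if (err)
		netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n",
			   qindex, err);

	/* Refill the resized ring; defer to the refill worker on failure. */
	if (!try_fill_recv(vi, rq, GFP_KERNEL))
		schedule_delayed_work(&vi->refill, 0);

	virtnet_napi_enable(rq->vq, &rq->napi);
	return err;
}

If virtqueue_resize() fails and leaves the vq in the reset state, the
rq->vq->reset check added to virtnet_napi_enable() at the top of this patch
keeps the napi poll loop from touching the dead ring.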



