[PATCH v9 31/32] virtio_net: support rx/tx queue resize
Jason Wang
jasowang at redhat.com
Wed Apr 13 01:00:18 PDT 2022
On 2022/4/6 11:43 AM, Xuan Zhuo wrote:
> This patch implements the resize function of the rx, tx queues.
> Based on this function, it is possible to modify the ring num of the
> queue.
>
> The resize process may hit errors: the resize itself may fail, or the
> vq may become unusable afterwards. Either way, we must still call
> napi_enable(), because napi_disable() behaves like taking a lock and
> every napi_disable() must be paired with a napi_enable().
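So the invariant being relied on is, roughly (a sketch using the helpers
from this file):

    napi_disable(&rq->napi);                 /* like lock(): waits for any
                                              * in-flight poll to finish */
    err = virtqueue_resize(rq->vq, ...);     /* may fail */

    /* on success and on failure alike: */
    virtnet_napi_enable(rq->vq, &rq->napi);  /* like unlock(): must pair with
                                              * the disable above */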
>
> Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
> ---
> drivers/net/virtio_net.c | 81 ++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 81 insertions(+)
>
> diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
> index b8bf00525177..ba6859f305f7 100644
> --- a/drivers/net/virtio_net.c
> +++ b/drivers/net/virtio_net.c
> @@ -251,6 +251,9 @@ struct padded_vnet_hdr {
> char padding[4];
> };
>
> +static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf);
> +static void virtnet_rq_free_unused_buf(struct virtqueue *vq, void *buf);
> +
> static bool is_xdp_frame(void *ptr)
> {
> return (unsigned long)ptr & VIRTIO_XDP_FLAG;
> @@ -1369,6 +1372,15 @@ static void virtnet_napi_enable(struct virtqueue *vq, struct napi_struct *napi)
> {
> napi_enable(napi);
>
> + /* Check if vq is in reset state. The normal reset/resize process is
> + * protected by napi: napi is disabled while the operation runs and
> + * re-enabled once it completes. If re-enabling the vq fails along
> + * the way, the vq stays unavailable in reset state.
> + */
> + if (vq->reset)
> + return;
I don't get when we could hit this condition.
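Is it only via the error paths below, where virtqueue_resize() fails and
we still re-enable napi? I.e.:

    virtnet_rx_resize()
        napi_disable(&rq->napi);
        err = virtqueue_resize(...);   /* fails, vq stays in reset state */
        virtnet_napi_enable(...);      /* must not poll the vq then */

If so, it would be worth saying that explicitly in the comment.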
> +
> /* If all buffers were filled by other side before we napi_enabled, we
> * won't get another interrupt, so process any outstanding packets now.
> * Call local_bh_enable after to trigger softIRQ processing.
> @@ -1413,6 +1425,15 @@ static void refill_work(struct work_struct *work)
> struct receive_queue *rq = &vi->rq[i];
>
> napi_disable(&rq->napi);
> +
> + /* Check if vq is in reset state. See the comment in
> + * virtnet_napi_enable().
> + */
> + if (rq->vq->reset) {
> + virtnet_napi_enable(rq->vq, &rq->napi);
> + continue;
> + }
Can we do something similar in virtnet_close() by canceling the work?
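I.e. have the resize path keep refill_work away, the way virtnet_close()
does, instead of teaching refill_work about the reset state. Untested
sketch:

    static int virtnet_rx_resize(struct virtnet_info *vi,
                                 struct receive_queue *rq, u32 ring_num)
    {
            /* make sure refill_work doesn't race with the resize */
            cancel_delayed_work_sync(&vi->refill);

            napi_disable(&rq->napi);
            ...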
> +
> still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
> virtnet_napi_enable(rq->vq, &rq->napi);
>
> @@ -1523,6 +1544,10 @@ static void virtnet_poll_cleantx(struct receive_queue *rq)
> if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
> return;
>
> + /* Check if vq is in reset state. See the comment in virtnet_napi_enable(). */
> + if (sq->vq->reset)
> + return;
We've disabled TX napi here; any chance we can still hit this?
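Or can virtnet_poll_cleantx() still run from the RX napi handler while
only TX napi is disabled? I.e.:

    virtnet_poll()                /* RX napi, still enabled during tx resize */
        virtnet_poll_cleantx(rq)  /* touches the paired sq->vq */

If that's the race this check is for, spelling it out in the comment
would help.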
> +
> if (__netif_tx_trylock(txq)) {
> do {
> virtqueue_disable_cb(sq->vq);
> @@ -1769,6 +1794,62 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
> return NETDEV_TX_OK;
> }
>
> +static int virtnet_rx_resize(struct virtnet_info *vi,
> + struct receive_queue *rq, u32 ring_num)
> +{
> + int err;
> +
> + napi_disable(&rq->napi);
> +
> + err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_free_unused_buf);
> + if (err)
> + goto err;
> +
> + if (!try_fill_recv(vi, rq, GFP_KERNEL))
> + schedule_delayed_work(&vi->refill, 0);
> +
> + virtnet_napi_enable(rq->vq, &rq->napi);
> + return 0;
> +
> +err:
> + netdev_err(vi->dev,
> + "rx vq resize failed: rx queue index: %td err: %d\n",
> + rq - vi->rq, err);
> + virtnet_napi_enable(rq->vq, &rq->napi);
> + return err;
> +}
> +
> +static int virtnet_tx_resize(struct virtnet_info *vi,
> + struct send_queue *sq, u32 ring_num)
> +{
> + struct netdev_queue *txq;
> + int err, qindex;
> +
> + qindex = sq - vi->sq;
> +
> + virtnet_napi_tx_disable(&sq->napi);
> +
> + txq = netdev_get_tx_queue(vi->dev, qindex);
> + __netif_tx_lock_bh(txq);
> + netif_stop_subqueue(vi->dev, qindex);
> + __netif_tx_unlock_bh(txq);
> +
> + err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf);
> + if (err)
> + goto err;
> +
> + netif_start_subqueue(vi->dev, qindex);
> + virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> + return 0;
> +
> +err:
I guess we can still start the queue in this case? (Since we don't
change the queue if resize fails).
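Something like this, I mean (untested):

    err:
            netdev_err(vi->dev, ...);
            /* virtqueue_resize() leaves the old ring intact on failure,
             * so the queue can keep running on it
             */
            netif_start_subqueue(vi->dev, qindex);
            virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
            return err;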
> + netdev_err(vi->dev,
> + "tx vq resize failed: tx queue index: %td err: %d\n",
> + sq - vi->sq, err);
> + virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
> + return err;
> +}
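For context, I assume the caller (the ethtool .set_ringparam path in the
next patch?) ends up looking roughly like this; the names here are my
guess, not taken from this series:

    static int virtnet_set_ringparam(struct net_device *dev,
                                     struct ethtool_ringparam *ring)
    {
            struct virtnet_info *vi = netdev_priv(dev);
            int i, err;

            for (i = 0; i < vi->curr_queue_pairs; i++) {
                    err = virtnet_rx_resize(vi, &vi->rq[i], ring->rx_pending);
                    if (err)
                            return err;

                    err = virtnet_tx_resize(vi, &vi->sq[i], ring->tx_pending);
                    if (err)
                            return err;
            }

            return 0;
    }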
> +
> /*
> * Send command via the control virtqueue and check status. Commands
> * supported by the hypervisor, as indicated by feature bits, should