[PATCH v7 17/26] virtio_pci: queue_reset: support VIRTIO_F_RING_RESET
Jason Wang
jasowang at redhat.com
Thu Mar 10 21:05:51 PST 2022
On 2022/3/9 5:32 PM, Xuan Zhuo wrote:
> On Wed, 9 Mar 2022 16:54:10 +0800, Jason Wang <jasowang at redhat.com> wrote:
>> On 2022/3/8 8:35 PM, Xuan Zhuo wrote:
>>> This patch implements virtio pci support for QUEUE RESET.
>>>
>>> Performing reset on a queue is divided into these steps:
>>>
>>> 1. virtio_reset_vq() - notify the device to reset the queue
>>> 2. virtqueue_detach_unused_buf() - recycle the buffers already submitted
>>> 3. virtqueue_reset_vring() - reset the vring (may re-alloc)
>>> 4. virtio_enable_resetq() - map the vring to the device, and enable the queue
>>>
>>> This patch implements virtio_reset_vq() and virtio_enable_resetq() for the
>>> PCI transport.
>>>
>>> Signed-off-by: Xuan Zhuo <xuanzhuo at linux.alibaba.com>
>>> ---
>>> drivers/virtio/virtio_pci_common.c | 8 +--
>>> drivers/virtio/virtio_pci_modern.c | 83 ++++++++++++++++++++++++++++++
>>> 2 files changed, 88 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
>>> index fdbde1db5ec5..863d3a8a0956 100644
>>> --- a/drivers/virtio/virtio_pci_common.c
>>> +++ b/drivers/virtio/virtio_pci_common.c
>>> @@ -248,9 +248,11 @@ static void vp_del_vq(struct virtqueue *vq)
>>> struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
>>> unsigned long flags;
>>>
>>> - spin_lock_irqsave(&vp_dev->lock, flags);
>>> - list_del(&info->node);
>>> - spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> + if (!vq->reset) {
>>> + spin_lock_irqsave(&vp_dev->lock, flags);
>>> + list_del(&info->node);
>>> + spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> + }
>>>
>>> vp_dev->del_vq(info);
>>> kfree(info);
>>> diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c
>>> index 49a4493732cf..3c67d3607802 100644
>>> --- a/drivers/virtio/virtio_pci_modern.c
>>> +++ b/drivers/virtio/virtio_pci_modern.c
>>> @@ -34,6 +34,9 @@ static void vp_transport_features(struct virtio_device *vdev, u64 features)
>>> if ((features & BIT_ULL(VIRTIO_F_SR_IOV)) &&
>>> pci_find_ext_capability(pci_dev, PCI_EXT_CAP_ID_SRIOV))
>>> __virtio_set_bit(vdev, VIRTIO_F_SR_IOV);
>>> +
>>> + if (features & BIT_ULL(VIRTIO_F_RING_RESET))
>>> + __virtio_set_bit(vdev, VIRTIO_F_RING_RESET);
>>> }
>>>
>>> /* virtio config->finalize_features() implementation */
>>> @@ -199,6 +202,82 @@ static int vp_active_vq(struct virtqueue *vq, u16 msix_vec)
>>> return 0;
>>> }
>>>
>>> +static int vp_modern_reset_vq(struct virtqueue *vq)
>>> +{
>>> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>>> + struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
>>> + struct virtio_pci_vq_info *info;
>>> + unsigned long flags;
>>> + unsigned int irq;
>>> +
>>> + if (!virtio_has_feature(vq->vdev, VIRTIO_F_RING_RESET))
>>> + return -ENOENT;
>>> +
>>> + vp_modern_set_queue_reset(mdev, vq->index);
>>> +
>>> + info = vp_dev->vqs[vq->index];
>>> +
>>> + /* delete vq from irq handler */
>>> + spin_lock_irqsave(&vp_dev->lock, flags);
>>> + list_del(&info->node);
>>> + spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> +
>>> + INIT_LIST_HEAD(&info->node);
>>> +
>>> + vq->reset = VIRTIO_VQ_RESET_STEP_DEVICE;
>>> +
>>> + /* sync irq callback. */
>>> + if (vp_dev->intx_enabled) {
>>> + irq = vp_dev->pci_dev->irq;
>>> +
>>> + } else {
>>> + if (info->msix_vector == VIRTIO_MSI_NO_VECTOR)
>>> + return 0;
>>> +
>>> + irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
>>> + }
>>> +
>>> + synchronize_irq(irq);
>>
>> synchronize_irq() is not sufficient here, since it breaks the interrupt
>> hardening effort done by commits:
>>
>> 080cd7c3ac87 virtio-pci: harden INTX interrupts
>> 9e35276a5344 virtio_pci: harden MSI-X interrupts
>>
>> Unfortunately, 080cd7c3ac87 introduces an issue where disable_irq() is
>> used for an affinity-managed irq, but we're discussing a fix.
> I need to understand it first.
>
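(For reference, the distinction at issue, as a minimal generic sketch — this is
not the virtio-pci code, and the flag and function names below are
hypothetical: synchronize_irq() only waits for handlers that are already
running, it does not stop the next interrupt from invoking the handler again,
so the hardening pattern pairs it with a software "enabled" check in the
handler itself.)

#include <linux/interrupt.h>

static bool vq_irq_enabled;		/* hypothetical per-queue state */

static irqreturn_t vq_irq_handler(int irq, void *data)
{
	/* Hardened handler: ignore interrupts while the queue is quiesced. */
	if (!READ_ONCE(vq_irq_enabled))
		return IRQ_NONE;

	/* ... process used buffers ... */
	return IRQ_HANDLED;
}

static void vq_quiesce(int irq)
{
	/* Stop new work first, then wait for handlers already in flight. */
	WRITE_ONCE(vq_irq_enabled, false);
	synchronize_irq(irq);
	/*
	 * After this point the handler may still fire, but it will not
	 * touch the queue, which is what the hardening relies on.
	 */
}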
>>
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static int vp_modern_enable_reset_vq(struct virtqueue *vq)
>>> +{
>>> + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
>>> + struct virtio_pci_modern_device *mdev = &vp_dev->mdev;
>>> + struct virtio_pci_vq_info *info;
>>> + unsigned long flags, index;
>>> + int err;
>>> +
>>> + if (vq->reset != VIRTIO_VQ_RESET_STEP_VRING_ATTACH)
>>> + return -EBUSY;
>>> +
>>> + index = vq->index;
>>> + info = vp_dev->vqs[index];
>>> +
>>> + /* check queue reset status */
>>> + if (vp_modern_get_queue_reset(mdev, index) != 1)
>>> + return -EBUSY;
>>> +
>>> + err = vp_active_vq(vq, info->msix_vector);
>>> + if (err)
>>> + return err;
>>> +
>>> + if (vq->callback) {
>>> + spin_lock_irqsave(&vp_dev->lock, flags);
>>> + list_add(&info->node, &vp_dev->virtqueues);
>>> + spin_unlock_irqrestore(&vp_dev->lock, flags);
>>> + } else {
>>> + INIT_LIST_HEAD(&info->node);
>>> + }
>>> +
>>> + vp_modern_set_queue_enable(&vp_dev->mdev, index, true);
>>
>> Any reason we need to check queue_enable() here?
> The purpose of this function is to enable a queue that has been reset, so we
> call queue_enable() to activate it.
Ok, this is what the spec mandates.
Thanks
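(For context, the commit message's four-step flow as a minimal driver-side
sketch. Apart from virtqueue_detach_unused_buf(), which is an existing API,
the signatures below are assumptions based on the description in this series,
and resize_vq() itself is a hypothetical caller.)

#include <linux/slab.h>
#include <linux/virtio.h>

static int resize_vq(struct virtqueue *vq, u32 new_num)
{
	void *buf;
	int err;

	/* 1. Ask the device to reset this queue. */
	err = virtio_reset_vq(vq);
	if (err)
		return err;

	/* 2. Reclaim the buffers the device will no longer use. */
	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		kfree(buf);	/* or return them to the driver's pool */

	/* 3. Reset the vring, possibly re-allocating it with a new size. */
	err = virtqueue_reset_vring(vq, new_num);
	if (err)
		return err;

	/* 4. Map the new vring to the device and re-enable the queue. */
	return virtio_enable_resetq(vq);
}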
>
> Thanks.
>
>> Thanks
>>
>>
>>> + vq->reset = VIRTIO_VQ_RESET_STEP_NONE;
>>> +
>>> + return 0;
>>> +}
>>> +
>>> static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector)
>>> {
>>> return vp_modern_config_vector(&vp_dev->mdev, vector);
>>> @@ -407,6 +486,8 @@ static const struct virtio_config_ops virtio_pci_config_nodev_ops = {
>>> .set_vq_affinity = vp_set_vq_affinity,
>>> .get_vq_affinity = vp_get_vq_affinity,
>>> .get_shm_region = vp_get_shm_region,
>>> + .reset_vq = vp_modern_reset_vq,
>>> + .enable_reset_vq = vp_modern_enable_reset_vq,
>>> };
>>>
>>> static const struct virtio_config_ops virtio_pci_config_ops = {
>>> @@ -425,6 +506,8 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
>>> .set_vq_affinity = vp_set_vq_affinity,
>>> .get_vq_affinity = vp_get_vq_affinity,
>>> .get_shm_region = vp_get_shm_region,
>>> + .reset_vq = vp_modern_reset_vq,
>>> + .enable_reset_vq = vp_modern_enable_reset_vq,
>>> };
>>>
>>> /* the PCI probing function */