The spinlock we use to protect the state of the simulator is sometimes
held for a long time (for example, when devices handle requests).
This also prevents us from calling functions that might sleep (such as
kthread_flush_work() in the next patch), and thus having to release
and retake the lock.
For these reasons, let's replace the spinlock with a mutex that gives
us more flexibility.
Suggested-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
---
drivers/vdpa/vdpa_sim/vdpa_sim.h | 4 ++--
drivers/vdpa/vdpa_sim/vdpa_sim.c | 34 ++++++++++++++--------------
drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 4 ++--
drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 4 ++--
4 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
index ce83f9130a5d..4774292fba8c 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
@@ -60,8 +60,8 @@ struct vdpasim {
struct kthread_worker *worker;
struct kthread_work work;
struct vdpasim_dev_attr dev_attr;
- /* spinlock to synchronize virtqueue state */
- spinlock_t lock;
+ /* mutex to synchronize virtqueue state */
+ struct mutex mutex;
/* virtio config according to device type */
void *config;
struct vhost_iotlb *iommu;
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index 6feb29726c2a..a28103a67ae7 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -166,7 +166,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
if (IS_ERR(vdpasim->worker))
goto err_iommu;
- spin_lock_init(&vdpasim->lock);
+ mutex_init(&vdpasim->mutex);
spin_lock_init(&vdpasim->iommu_lock);
dev = &vdpasim->vdpa.dev;
@@ -275,13 +275,13 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
bool old_ready;
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
old_ready = vq->ready;
vq->ready = ready;
if (vq->ready && !old_ready) {
vdpasim_queue_ready(vdpasim, idx);
}
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
}
static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
@@ -299,9 +299,9 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
struct vringh *vrh = &vq->vring;
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
vrh->last_avail_idx = state->split.avail_index;
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
return 0;
}
@@ -398,9 +398,9 @@ static u8 vdpasim_get_status(struct vdpa_device *vdpa)
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
u8 status;
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
status = vdpasim->status;
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
return status;
}
@@ -409,19 +409,19 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
vdpasim->status = status;
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
}
static int vdpasim_reset(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
vdpasim->status = 0;
vdpasim_do_reset(vdpasim);
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
return 0;
}
@@ -430,9 +430,9 @@ static int vdpasim_suspend(struct vdpa_device *vdpa)
{
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
vdpasim->running = false;
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
return 0;
}
@@ -442,7 +442,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
int i;
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
vdpasim->running = true;
if (vdpasim->pending_kick) {
@@ -453,7 +453,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
vdpasim->pending_kick = false;
}
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
return 0;
}
@@ -525,14 +525,14 @@ static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
iommu = &vdpasim->iommu[asid];
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
if (vdpasim_get_vq_group(vdpa, i) == group)
vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
&vdpasim->iommu_lock);
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
return 0;
}
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
index eb4897c8541e..568119e1553f 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
@@ -290,7 +290,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
bool reschedule = false;
int i;
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
goto out;
@@ -321,7 +321,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
}
}
out:
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
if (reschedule)
vdpasim_schedule_work(vdpasim);
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
index e61a9ecbfafe..7ab434592bfe 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
@@ -201,7 +201,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
u64 rx_drops = 0, rx_overruns = 0, rx_errors = 0, tx_errors = 0;
int err;
- spin_lock(&vdpasim->lock);
+ mutex_lock(&vdpasim->mutex);
if (!vdpasim->running)
goto out;
@@ -264,7 +264,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
}
out:
- spin_unlock(&vdpasim->lock);
+ mutex_unlock(&vdpasim->mutex);
u64_stats_update_begin(&net->tx_stats.syncp);
net->tx_stats.pkts += tx_pkts;
--
2.39.2
On Thu, Mar 2, 2023 at 7:35 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> The spinlock we use to protect the state of the simulator is sometimes
> held for a long time (for example, when devices handle requests).
>
> This also prevents us from calling functions that might sleep (such as
> kthread_flush_work() in the next patch), and thus having to release
> and retake the lock.
>
> For these reasons, let's replace the spinlock with a mutex that gives
> us more flexibility.
>
> Suggested-by: Jason Wang <jasowang@redhat.com>
> Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
Thanks
> ---
> drivers/vdpa/vdpa_sim/vdpa_sim.h | 4 ++--
> drivers/vdpa/vdpa_sim/vdpa_sim.c | 34 ++++++++++++++--------------
> drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 4 ++--
> drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 4 ++--
> 4 files changed, 23 insertions(+), 23 deletions(-)
>
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
> index ce83f9130a5d..4774292fba8c 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
> @@ -60,8 +60,8 @@ struct vdpasim {
> struct kthread_worker *worker;
> struct kthread_work work;
> struct vdpasim_dev_attr dev_attr;
> - /* spinlock to synchronize virtqueue state */
> - spinlock_t lock;
> + /* mutex to synchronize virtqueue state */
> + struct mutex mutex;
> /* virtio config according to device type */
> void *config;
> struct vhost_iotlb *iommu;
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> index 6feb29726c2a..a28103a67ae7 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> @@ -166,7 +166,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
> if (IS_ERR(vdpasim->worker))
> goto err_iommu;
>
> - spin_lock_init(&vdpasim->lock);
> + mutex_init(&vdpasim->mutex);
> spin_lock_init(&vdpasim->iommu_lock);
>
> dev = &vdpasim->vdpa.dev;
> @@ -275,13 +275,13 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
> struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
> bool old_ready;
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> old_ready = vq->ready;
> vq->ready = ready;
> if (vq->ready && !old_ready) {
> vdpasim_queue_ready(vdpasim, idx);
> }
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
> }
>
> static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
> @@ -299,9 +299,9 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
> struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
> struct vringh *vrh = &vq->vring;
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> vrh->last_avail_idx = state->split.avail_index;
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> return 0;
> }
> @@ -398,9 +398,9 @@ static u8 vdpasim_get_status(struct vdpa_device *vdpa)
> struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> u8 status;
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> status = vdpasim->status;
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> return status;
> }
> @@ -409,19 +409,19 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
> {
> struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> vdpasim->status = status;
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
> }
>
> static int vdpasim_reset(struct vdpa_device *vdpa)
> {
> struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> vdpasim->status = 0;
> vdpasim_do_reset(vdpasim);
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> return 0;
> }
> @@ -430,9 +430,9 @@ static int vdpasim_suspend(struct vdpa_device *vdpa)
> {
> struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> vdpasim->running = false;
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> return 0;
> }
> @@ -442,7 +442,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
> struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> int i;
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
> vdpasim->running = true;
>
> if (vdpasim->pending_kick) {
> @@ -453,7 +453,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
> vdpasim->pending_kick = false;
> }
>
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> return 0;
> }
> @@ -525,14 +525,14 @@ static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
>
> iommu = &vdpasim->iommu[asid];
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
>
> for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
> if (vdpasim_get_vq_group(vdpa, i) == group)
> vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
> &vdpasim->iommu_lock);
>
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> return 0;
> }
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> index eb4897c8541e..568119e1553f 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> @@ -290,7 +290,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
> bool reschedule = false;
> int i;
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
>
> if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
> goto out;
> @@ -321,7 +321,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
> }
> }
> out:
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> if (reschedule)
> vdpasim_schedule_work(vdpasim);
> diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> index e61a9ecbfafe..7ab434592bfe 100644
> --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> @@ -201,7 +201,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
> u64 rx_drops = 0, rx_overruns = 0, rx_errors = 0, tx_errors = 0;
> int err;
>
> - spin_lock(&vdpasim->lock);
> + mutex_lock(&vdpasim->mutex);
>
> if (!vdpasim->running)
> goto out;
> @@ -264,7 +264,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
> }
>
> out:
> - spin_unlock(&vdpasim->lock);
> + mutex_unlock(&vdpasim->mutex);
>
> u64_stats_update_begin(&net->tx_stats.syncp);
> net->tx_stats.pkts += tx_pkts;
> --
> 2.39.2
>
On Tue, Mar 14, 2023 at 1:29 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Mar 2, 2023 at 7:35 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >
> > The spinlock we use to protect the state of the simulator is sometimes
> > held for a long time (for example, when devices handle requests).
> >
> > This also prevents us from calling functions that might sleep (such as
> > kthread_flush_work() in the next patch), and thus having to release
> > and retake the lock.
> >
> > For these reasons, let's replace the spinlock with a mutex that gives
> > us more flexibility.
> >
> > Suggested-by: Jason Wang <jasowang@redhat.com>
> > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
>
> Acked-by: Jason Wang <jasowang@redhat.com>
>
> Thanks
Btw, though it looks fine, we'd better double-confirm that virtio_vdpa works well.
(I think so since there's transport that might sleep).
Thanks
>
> > ---
> > drivers/vdpa/vdpa_sim/vdpa_sim.h | 4 ++--
> > drivers/vdpa/vdpa_sim/vdpa_sim.c | 34 ++++++++++++++--------------
> > drivers/vdpa/vdpa_sim/vdpa_sim_blk.c | 4 ++--
> > drivers/vdpa/vdpa_sim/vdpa_sim_net.c | 4 ++--
> > 4 files changed, 23 insertions(+), 23 deletions(-)
> >
> > diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.h b/drivers/vdpa/vdpa_sim/vdpa_sim.h
> > index ce83f9130a5d..4774292fba8c 100644
> > --- a/drivers/vdpa/vdpa_sim/vdpa_sim.h
> > +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.h
> > @@ -60,8 +60,8 @@ struct vdpasim {
> > struct kthread_worker *worker;
> > struct kthread_work work;
> > struct vdpasim_dev_attr dev_attr;
> > - /* spinlock to synchronize virtqueue state */
> > - spinlock_t lock;
> > + /* mutex to synchronize virtqueue state */
> > + struct mutex mutex;
> > /* virtio config according to device type */
> > void *config;
> > struct vhost_iotlb *iommu;
> > diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> > index 6feb29726c2a..a28103a67ae7 100644
> > --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
> > +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
> > @@ -166,7 +166,7 @@ struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
> > if (IS_ERR(vdpasim->worker))
> > goto err_iommu;
> >
> > - spin_lock_init(&vdpasim->lock);
> > + mutex_init(&vdpasim->mutex);
> > spin_lock_init(&vdpasim->iommu_lock);
> >
> > dev = &vdpasim->vdpa.dev;
> > @@ -275,13 +275,13 @@ static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
> > struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
> > bool old_ready;
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > old_ready = vq->ready;
> > vq->ready = ready;
> > if (vq->ready && !old_ready) {
> > vdpasim_queue_ready(vdpasim, idx);
> > }
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> > }
> >
> > static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
> > @@ -299,9 +299,9 @@ static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
> > struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
> > struct vringh *vrh = &vq->vring;
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > vrh->last_avail_idx = state->split.avail_index;
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > return 0;
> > }
> > @@ -398,9 +398,9 @@ static u8 vdpasim_get_status(struct vdpa_device *vdpa)
> > struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> > u8 status;
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > status = vdpasim->status;
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > return status;
> > }
> > @@ -409,19 +409,19 @@ static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
> > {
> > struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > vdpasim->status = status;
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> > }
> >
> > static int vdpasim_reset(struct vdpa_device *vdpa)
> > {
> > struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > vdpasim->status = 0;
> > vdpasim_do_reset(vdpasim);
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > return 0;
> > }
> > @@ -430,9 +430,9 @@ static int vdpasim_suspend(struct vdpa_device *vdpa)
> > {
> > struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > vdpasim->running = false;
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > return 0;
> > }
> > @@ -442,7 +442,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
> > struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
> > int i;
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> > vdpasim->running = true;
> >
> > if (vdpasim->pending_kick) {
> > @@ -453,7 +453,7 @@ static int vdpasim_resume(struct vdpa_device *vdpa)
> > vdpasim->pending_kick = false;
> > }
> >
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > return 0;
> > }
> > @@ -525,14 +525,14 @@ static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
> >
> > iommu = &vdpasim->iommu[asid];
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> >
> > for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
> > if (vdpasim_get_vq_group(vdpa, i) == group)
> > vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
> > &vdpasim->iommu_lock);
> >
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > return 0;
> > }
> > diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> > index eb4897c8541e..568119e1553f 100644
> > --- a/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> > +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_blk.c
> > @@ -290,7 +290,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
> > bool reschedule = false;
> > int i;
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> >
> > if (!(vdpasim->status & VIRTIO_CONFIG_S_DRIVER_OK))
> > goto out;
> > @@ -321,7 +321,7 @@ static void vdpasim_blk_work(struct vdpasim *vdpasim)
> > }
> > }
> > out:
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > if (reschedule)
> > vdpasim_schedule_work(vdpasim);
> > diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> > index e61a9ecbfafe..7ab434592bfe 100644
> > --- a/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> > +++ b/drivers/vdpa/vdpa_sim/vdpa_sim_net.c
> > @@ -201,7 +201,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
> > u64 rx_drops = 0, rx_overruns = 0, rx_errors = 0, tx_errors = 0;
> > int err;
> >
> > - spin_lock(&vdpasim->lock);
> > + mutex_lock(&vdpasim->mutex);
> >
> > if (!vdpasim->running)
> > goto out;
> > @@ -264,7 +264,7 @@ static void vdpasim_net_work(struct vdpasim *vdpasim)
> > }
> >
> > out:
> > - spin_unlock(&vdpasim->lock);
> > + mutex_unlock(&vdpasim->mutex);
> >
> > u64_stats_update_begin(&net->tx_stats.syncp);
> > net->tx_stats.pkts += tx_pkts;
> > --
> > 2.39.2
> >
On Tue, Mar 14, 2023 at 01:31:25PM +0800, Jason Wang wrote:
>On Tue, Mar 14, 2023 at 1:29 PM Jason Wang <jasowang@redhat.com> wrote:
>>
>> On Thu, Mar 2, 2023 at 7:35 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
>> >
>> > The spinlock we use to protect the state of the simulator is sometimes
>> > held for a long time (for example, when devices handle requests).
>> >
>> > This also prevents us from calling functions that might sleep (such as
>> > kthread_flush_work() in the next patch), and thus having to release
>> > and retake the lock.
>> >
>> > For these reasons, let's replace the spinlock with a mutex that gives
>> > us more flexibility.
>> >
>> > Suggested-by: Jason Wang <jasowang@redhat.com>
>> > Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
>>
>> Acked-by: Jason Wang <jasowang@redhat.com>
>>
>> Thanks
>
>Btw, though it looks fine but we'd better double confirm virtio_vdpa works well.

I tested it, but I will do it more carefully to make sure everything is okay.

>
>(I think so since there's transport that might sleep).

I see.

Thanks,
Stefano
© 2016 - 2026 Red Hat, Inc.