Abstract vhost worker operations (create/stop/wakeup) into an ops
structure to prepare for kthread mode support.
Signed-off-by: Cindy Lu <lulu@redhat.com>
---
drivers/vhost/vhost.c | 63 ++++++++++++++++++++++++++++++-------------
drivers/vhost/vhost.h | 11 ++++++++
2 files changed, 56 insertions(+), 18 deletions(-)
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 20571bd6f7bd..c162ad772f8f 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -243,7 +243,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
* test_and_set_bit() implies a memory barrier.
*/
llist_add(&work->node, &worker->work_list);
- vhost_task_wake(worker->vtsk);
+ worker->ops->wakeup(worker);
}
}
@@ -706,7 +706,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
WARN_ON(!llist_empty(&worker->work_list));
xa_erase(&dev->worker_xa, worker->id);
- vhost_task_stop(worker->vtsk);
+ worker->ops->stop(worker);
kfree(worker);
}
@@ -729,42 +729,69 @@ static void vhost_workers_free(struct vhost_dev *dev)
xa_destroy(&dev->worker_xa);
}
+static void vhost_task_wakeup(struct vhost_worker *worker)
+{
+ return vhost_task_wake(worker->vtsk);
+}
+
+static void vhost_task_do_stop(struct vhost_worker *worker)
+{
+ return vhost_task_stop(worker->vtsk);
+}
+
+static int vhost_task_worker_create(struct vhost_worker *worker,
+ struct vhost_dev *dev, const char *name)
+{
+ struct vhost_task *vtsk;
+ u32 id;
+ int ret;
+
+ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
+ worker, name);
+ if (IS_ERR(vtsk))
+ return PTR_ERR(vtsk);
+
+ worker->vtsk = vtsk;
+ vhost_task_start(vtsk);
+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ if (ret < 0) {
+ vhost_task_do_stop(worker);
+ return ret;
+ }
+ worker->id = id;
+ return 0;
+}
+
+static const struct vhost_worker_ops vhost_task_ops = {
+ .create = vhost_task_worker_create,
+ .stop = vhost_task_do_stop,
+ .wakeup = vhost_task_wakeup,
+};
+
static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
{
struct vhost_worker *worker;
- struct vhost_task *vtsk;
char name[TASK_COMM_LEN];
int ret;
- u32 id;
+ const struct vhost_worker_ops *ops = &vhost_task_ops;
worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
if (!worker)
return NULL;
worker->dev = dev;
+ worker->ops = ops;
snprintf(name, sizeof(name), "vhost-%d", current->pid);
- vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
- worker, name);
- if (IS_ERR(vtsk))
- goto free_worker;
-
mutex_init(&worker->mutex);
init_llist_head(&worker->work_list);
worker->kcov_handle = kcov_common_handle();
- worker->vtsk = vtsk;
-
- vhost_task_start(vtsk);
-
- ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
+ ret = ops->create(worker, dev, name);
if (ret < 0)
- goto stop_worker;
- worker->id = id;
+ goto free_worker;
return worker;
-stop_worker:
- vhost_task_stop(vtsk);
free_worker:
kfree(worker);
return NULL;
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 19bb94922a0e..98895e299efa 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -26,6 +26,16 @@ struct vhost_work {
unsigned long flags;
};
+struct vhost_worker;
+struct vhost_dev;
+
+struct vhost_worker_ops {
+ int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
+ const char *name);
+ void (*stop)(struct vhost_worker *worker);
+ void (*wakeup)(struct vhost_worker *worker);
+};
+
struct vhost_worker {
struct vhost_task *vtsk;
struct vhost_dev *dev;
@@ -36,6 +46,7 @@ struct vhost_worker {
u32 id;
int attachment_cnt;
bool killed;
+ const struct vhost_worker_ops *ops;
};
/* Poll a file (eventfd or socket) */
--
2.45.0
On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote:
> Abstract vhost worker operations (create/stop/wakeup) into an ops
> structure to prepare for kthread mode support.
>
> Signed-off-by: Cindy Lu <lulu@redhat.com>
I worry about the overhead of indirect calls here.
We have the wrappers, and only two options,
why did you decide to add it like this,
with ops?
> ---
> drivers/vhost/vhost.c | 63 ++++++++++++++++++++++++++++++-------------
> drivers/vhost/vhost.h | 11 ++++++++
> 2 files changed, 56 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> index 20571bd6f7bd..c162ad772f8f 100644
> --- a/drivers/vhost/vhost.c
> +++ b/drivers/vhost/vhost.c
> @@ -243,7 +243,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
> * test_and_set_bit() implies a memory barrier.
> */
> llist_add(&work->node, &worker->work_list);
> - vhost_task_wake(worker->vtsk);
> + worker->ops->wakeup(worker);
> }
> }
>
> @@ -706,7 +706,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
>
> WARN_ON(!llist_empty(&worker->work_list));
> xa_erase(&dev->worker_xa, worker->id);
> - vhost_task_stop(worker->vtsk);
> + worker->ops->stop(worker);
> kfree(worker);
> }
>
> @@ -729,42 +729,69 @@ static void vhost_workers_free(struct vhost_dev *dev)
> xa_destroy(&dev->worker_xa);
> }
>
> +static void vhost_task_wakeup(struct vhost_worker *worker)
> +{
> + return vhost_task_wake(worker->vtsk);
> +}
> +
> +static void vhost_task_do_stop(struct vhost_worker *worker)
> +{
> + return vhost_task_stop(worker->vtsk);
> +}
> +
> +static int vhost_task_worker_create(struct vhost_worker *worker,
> + struct vhost_dev *dev, const char *name)
> +{
> + struct vhost_task *vtsk;
> + u32 id;
> + int ret;
> +
> + vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
> + worker, name);
> + if (IS_ERR(vtsk))
> + return PTR_ERR(vtsk);
> +
> + worker->vtsk = vtsk;
> + vhost_task_start(vtsk);
> + ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
> + if (ret < 0) {
> + vhost_task_do_stop(worker);
> + return ret;
> + }
> + worker->id = id;
> + return 0;
> +}
> +
> +static const struct vhost_worker_ops vhost_task_ops = {
> + .create = vhost_task_worker_create,
> + .stop = vhost_task_do_stop,
> + .wakeup = vhost_task_wakeup,
> +};
> +
> static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
> {
> struct vhost_worker *worker;
> - struct vhost_task *vtsk;
> char name[TASK_COMM_LEN];
> int ret;
> - u32 id;
> + const struct vhost_worker_ops *ops = &vhost_task_ops;
>
> worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
> if (!worker)
> return NULL;
>
> worker->dev = dev;
> + worker->ops = ops;
> snprintf(name, sizeof(name), "vhost-%d", current->pid);
>
> - vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
> - worker, name);
> - if (IS_ERR(vtsk))
> - goto free_worker;
> -
> mutex_init(&worker->mutex);
> init_llist_head(&worker->work_list);
> worker->kcov_handle = kcov_common_handle();
> - worker->vtsk = vtsk;
> -
> - vhost_task_start(vtsk);
> -
> - ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
> + ret = ops->create(worker, dev, name);
> if (ret < 0)
> - goto stop_worker;
> - worker->id = id;
> + goto free_worker;
>
> return worker;
>
> -stop_worker:
> - vhost_task_stop(vtsk);
> free_worker:
> kfree(worker);
> return NULL;
> diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> index 19bb94922a0e..98895e299efa 100644
> --- a/drivers/vhost/vhost.h
> +++ b/drivers/vhost/vhost.h
> @@ -26,6 +26,16 @@ struct vhost_work {
> unsigned long flags;
> };
>
> +struct vhost_worker;
> +struct vhost_dev;
> +
> +struct vhost_worker_ops {
> + int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
> + const char *name);
> + void (*stop)(struct vhost_worker *worker);
> + void (*wakeup)(struct vhost_worker *worker);
> +};
> +
> struct vhost_worker {
> struct vhost_task *vtsk;
> struct vhost_dev *dev;
> @@ -36,6 +46,7 @@ struct vhost_worker {
> u32 id;
> int attachment_cnt;
> bool killed;
> + const struct vhost_worker_ops *ops;
> };
>
> /* Poll a file (eventfd or socket) */
> --
> 2.45.0
On 4/7/25 3:17 AM, Michael S. Tsirkin wrote: > On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote: >> Abstract vhost worker operations (create/stop/wakeup) into an ops >> structure to prepare for kthread mode support. >> >> Signed-off-by: Cindy Lu <lulu@redhat.com> > > I worry about the overhead of indirect calls here. > > We have the wrappers, and only two options, > why did you decide to add it like this, > with ops? > That was from my review comment. Originally, I thought we could share more code. For example I thought vhost_run_work_kthread_list from patch 2 in this thread and kernel/vhost_task.c:vhost_task_fn could be merged.
On Tue, Apr 8, 2025 at 12:06 AM Mike Christie <michael.christie@oracle.com> wrote: > > On 4/7/25 3:17 AM, Michael S. Tsirkin wrote: > > On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote: > >> Abstract vhost worker operations (create/stop/wakeup) into an ops > >> structure to prepare for kthread mode support. > >> > >> Signed-off-by: Cindy Lu <lulu@redhat.com> > > > > I worry about the overhead of indirect calls here. > > > > We have the wrappers, and only two options, > > why did you decide to add it like this, > > with ops? > > > That was from my review comment. Originally, I thought we > could share more code. For example I thought > vhost_run_work_kthread_list from patch 2 in this thread and > kernel/vhost_task.c:vhost_task_fn could be merged. > Hi Mike I guess you mean function vhost_run_work_list and vhost_run_work_kthread_list? sure, I will try to merge these two functions in next version Thanks Cindy
On 4/8/25 4:45 AM, Cindy Lu wrote: > On Tue, Apr 8, 2025 at 12:06 AM Mike Christie > <michael.christie@oracle.com> wrote: >> >> On 4/7/25 3:17 AM, Michael S. Tsirkin wrote: >>> On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote: >>>> Abstract vhost worker operations (create/stop/wakeup) into an ops >>>> structure to prepare for kthread mode support. >>>> >>>> Signed-off-by: Cindy Lu <lulu@redhat.com> >>> >>> I worry about the overhead of indirect calls here. >>> >>> We have the wrappers, and only two options, >>> why did you decide to add it like this, >>> with ops? >>> >> That was from my review comment. Originally, I thought we >> could share more code. For example I thought >> vhost_run_work_kthread_list from patch 2 in this thread and >> kernel/vhost_task.c:vhost_task_fn could be merged. >> > Hi Mike > I guess you mean function vhost_run_work_list and vhost_run_work_kthread_list? > sure, I will try to merge these two functions in next version Oh no, I meant the opposite. I don't think it will work out like how I thought it would originally. I think Michael's concern about the extra indirect pointer access in the IO path may cause issues with net. For scsi I didn't see any issue but that's probably because we have other perf issues. So if Michael is saying to not do the ops then that's fine with me.
On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote:
>Abstract vhost worker operations (create/stop/wakeup) into an ops
>structure to prepare for kthread mode support.
>
>Signed-off-by: Cindy Lu <lulu@redhat.com>
>---
> drivers/vhost/vhost.c | 63 ++++++++++++++++++++++++++++++-------------
> drivers/vhost/vhost.h | 11 ++++++++
> 2 files changed, 56 insertions(+), 18 deletions(-)
>
>diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
>index 20571bd6f7bd..c162ad772f8f 100644
>--- a/drivers/vhost/vhost.c
>+++ b/drivers/vhost/vhost.c
>@@ -243,7 +243,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
> * test_and_set_bit() implies a memory barrier.
> */
> llist_add(&work->node, &worker->work_list);
>- vhost_task_wake(worker->vtsk);
>+ worker->ops->wakeup(worker);
> }
> }
>
>@@ -706,7 +706,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
>
> WARN_ON(!llist_empty(&worker->work_list));
> xa_erase(&dev->worker_xa, worker->id);
>- vhost_task_stop(worker->vtsk);
>+ worker->ops->stop(worker);
> kfree(worker);
> }
>
>@@ -729,42 +729,69 @@ static void vhost_workers_free(struct vhost_dev *dev)
> xa_destroy(&dev->worker_xa);
> }
>
>+static void vhost_task_wakeup(struct vhost_worker *worker)
>+{
>+ return vhost_task_wake(worker->vtsk);
>+}
>+
>+static void vhost_task_do_stop(struct vhost_worker *worker)
>+{
>+ return vhost_task_stop(worker->vtsk);
>+}
>+
>+static int vhost_task_worker_create(struct vhost_worker *worker,
>+ struct vhost_dev *dev, const char *name)
>+{
>+ struct vhost_task *vtsk;
>+ u32 id;
>+ int ret;
>+
>+ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
>+ worker, name);
>+ if (IS_ERR(vtsk))
>+ return PTR_ERR(vtsk);
>+
>+ worker->vtsk = vtsk;
>+ vhost_task_start(vtsk);
>+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
>+ if (ret < 0) {
>+ vhost_task_do_stop(worker);
>+ return ret;
>+ }
In the final code, xa_alloc() is duplicated among the functions that
create a kthread or task; might it make sense to factor it out and do it in
vhost_worker_create() ?
Thanks,
Stefano
>+ worker->id = id;
>+ return 0;
>+}
>+
>+static const struct vhost_worker_ops vhost_task_ops = {
>+ .create = vhost_task_worker_create,
>+ .stop = vhost_task_do_stop,
>+ .wakeup = vhost_task_wakeup,
>+};
>+
> static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
> {
> struct vhost_worker *worker;
>- struct vhost_task *vtsk;
> char name[TASK_COMM_LEN];
> int ret;
>- u32 id;
>+ const struct vhost_worker_ops *ops = &vhost_task_ops;
>
> worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
> if (!worker)
> return NULL;
>
> worker->dev = dev;
>+ worker->ops = ops;
> snprintf(name, sizeof(name), "vhost-%d", current->pid);
>
>- vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
>- worker, name);
>- if (IS_ERR(vtsk))
>- goto free_worker;
>-
> mutex_init(&worker->mutex);
> init_llist_head(&worker->work_list);
> worker->kcov_handle = kcov_common_handle();
>- worker->vtsk = vtsk;
>-
>- vhost_task_start(vtsk);
>-
>- ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
>+ ret = ops->create(worker, dev, name);
> if (ret < 0)
>- goto stop_worker;
>- worker->id = id;
>+ goto free_worker;
>
> return worker;
>
>-stop_worker:
>- vhost_task_stop(vtsk);
> free_worker:
> kfree(worker);
> return NULL;
>diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
>index 19bb94922a0e..98895e299efa 100644
>--- a/drivers/vhost/vhost.h
>+++ b/drivers/vhost/vhost.h
>@@ -26,6 +26,16 @@ struct vhost_work {
> unsigned long flags;
> };
>
>+struct vhost_worker;
>+struct vhost_dev;
>+
>+struct vhost_worker_ops {
>+ int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
>+ const char *name);
>+ void (*stop)(struct vhost_worker *worker);
>+ void (*wakeup)(struct vhost_worker *worker);
>+};
>+
> struct vhost_worker {
> struct vhost_task *vtsk;
> struct vhost_dev *dev;
>@@ -36,6 +46,7 @@ struct vhost_worker {
> u32 id;
> int attachment_cnt;
> bool killed;
>+ const struct vhost_worker_ops *ops;
> };
>
> /* Poll a file (eventfd or socket) */
>--
>2.45.0
>
On Tue, Apr 1, 2025 at 9:48 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
>
> On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote:
> >Abstract vhost worker operations (create/stop/wakeup) into an ops
> >structure to prepare for kthread mode support.
> >
> >Signed-off-by: Cindy Lu <lulu@redhat.com>
> >---
> > drivers/vhost/vhost.c | 63 ++++++++++++++++++++++++++++++-------------
> > drivers/vhost/vhost.h | 11 ++++++++
> > 2 files changed, 56 insertions(+), 18 deletions(-)
> >
> >diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> >index 20571bd6f7bd..c162ad772f8f 100644
> >--- a/drivers/vhost/vhost.c
> >+++ b/drivers/vhost/vhost.c
> >@@ -243,7 +243,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
> > * test_and_set_bit() implies a memory barrier.
> > */
> > llist_add(&work->node, &worker->work_list);
> >- vhost_task_wake(worker->vtsk);
> >+ worker->ops->wakeup(worker);
> > }
> > }
> >
> >@@ -706,7 +706,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
> >
> > WARN_ON(!llist_empty(&worker->work_list));
> > xa_erase(&dev->worker_xa, worker->id);
> >- vhost_task_stop(worker->vtsk);
> >+ worker->ops->stop(worker);
> > kfree(worker);
> > }
> >
> >@@ -729,42 +729,69 @@ static void vhost_workers_free(struct vhost_dev *dev)
> > xa_destroy(&dev->worker_xa);
> > }
> >
> >+static void vhost_task_wakeup(struct vhost_worker *worker)
> >+{
> >+ return vhost_task_wake(worker->vtsk);
> >+}
> >+
> >+static void vhost_task_do_stop(struct vhost_worker *worker)
> >+{
> >+ return vhost_task_stop(worker->vtsk);
> >+}
> >+
> >+static int vhost_task_worker_create(struct vhost_worker *worker,
> >+ struct vhost_dev *dev, const char *name)
> >+{
> >+ struct vhost_task *vtsk;
> >+ u32 id;
> >+ int ret;
> >+
> >+ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
> >+ worker, name);
> >+ if (IS_ERR(vtsk))
> >+ return PTR_ERR(vtsk);
> >+
> >+ worker->vtsk = vtsk;
> >+ vhost_task_start(vtsk);
> >+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
> >+ if (ret < 0) {
> >+ vhost_task_do_stop(worker);
> >+ return ret;
> >+ }
>
> In the final code, xa_alloc() is duplicated among the functions that
> create a kthread or task; might it make sense to factor it out and do it in
> vhost_worker_create() ?
>
> Thanks,
> Stefano
>
Thanks a lot Stefano. I previously tried moving xa_alloc() out, but
that made the code strange.
I think keeping xa_alloc() in the create_ops function completes the
job in a single function, and maybe it could be used in some other
functions in the future
thanks
cindy
> >+ worker->id = id;
> >+ return 0;
> >+}
> >+
> >+static const struct vhost_worker_ops vhost_task_ops = {
> >+ .create = vhost_task_worker_create,
> >+ .stop = vhost_task_do_stop,
> >+ .wakeup = vhost_task_wakeup,
> >+};
> >+
> > static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
> > {
> > struct vhost_worker *worker;
> >- struct vhost_task *vtsk;
> > char name[TASK_COMM_LEN];
> > int ret;
> >- u32 id;
> >+ const struct vhost_worker_ops *ops = &vhost_task_ops;
> >
> > worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
> > if (!worker)
> > return NULL;
> >
> > worker->dev = dev;
> >+ worker->ops = ops;
> > snprintf(name, sizeof(name), "vhost-%d", current->pid);
> >
> >- vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
> >- worker, name);
> >- if (IS_ERR(vtsk))
> >- goto free_worker;
> >-
> > mutex_init(&worker->mutex);
> > init_llist_head(&worker->work_list);
> > worker->kcov_handle = kcov_common_handle();
> >- worker->vtsk = vtsk;
> >-
> >- vhost_task_start(vtsk);
> >-
> >- ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
> >+ ret = ops->create(worker, dev, name);
> > if (ret < 0)
> >- goto stop_worker;
> >- worker->id = id;
> >+ goto free_worker;
> >
> > return worker;
> >
> >-stop_worker:
> >- vhost_task_stop(vtsk);
> > free_worker:
> > kfree(worker);
> > return NULL;
> >diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> >index 19bb94922a0e..98895e299efa 100644
> >--- a/drivers/vhost/vhost.h
> >+++ b/drivers/vhost/vhost.h
> >@@ -26,6 +26,16 @@ struct vhost_work {
> > unsigned long flags;
> > };
> >
> >+struct vhost_worker;
> >+struct vhost_dev;
> >+
> >+struct vhost_worker_ops {
> >+ int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
> >+ const char *name);
> >+ void (*stop)(struct vhost_worker *worker);
> >+ void (*wakeup)(struct vhost_worker *worker);
> >+};
> >+
> > struct vhost_worker {
> > struct vhost_task *vtsk;
> > struct vhost_dev *dev;
> >@@ -36,6 +46,7 @@ struct vhost_worker {
> > u32 id;
> > int attachment_cnt;
> > bool killed;
> >+ const struct vhost_worker_ops *ops;
> > };
> >
> > /* Poll a file (eventfd or socket) */
> >--
> >2.45.0
> >
>
On Mon, 7 Apr 2025 at 05:14, Cindy Lu <lulu@redhat.com> wrote:
>
> On Tue, Apr 1, 2025 at 9:48 PM Stefano Garzarella <sgarzare@redhat.com> wrote:
> >
> > On Fri, Mar 28, 2025 at 06:02:48PM +0800, Cindy Lu wrote:
> > >Abstract vhost worker operations (create/stop/wakeup) into an ops
> > >structure to prepare for kthread mode support.
> > >
> > >Signed-off-by: Cindy Lu <lulu@redhat.com>
> > >---
> > > drivers/vhost/vhost.c | 63 ++++++++++++++++++++++++++++++-------------
> > > drivers/vhost/vhost.h | 11 ++++++++
> > > 2 files changed, 56 insertions(+), 18 deletions(-)
> > >
> > >diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
> > >index 20571bd6f7bd..c162ad772f8f 100644
> > >--- a/drivers/vhost/vhost.c
> > >+++ b/drivers/vhost/vhost.c
> > >@@ -243,7 +243,7 @@ static void vhost_worker_queue(struct vhost_worker *worker,
> > > * test_and_set_bit() implies a memory barrier.
> > > */
> > > llist_add(&work->node, &worker->work_list);
> > >- vhost_task_wake(worker->vtsk);
> > >+ worker->ops->wakeup(worker);
> > > }
> > > }
> > >
> > >@@ -706,7 +706,7 @@ static void vhost_worker_destroy(struct vhost_dev *dev,
> > >
> > > WARN_ON(!llist_empty(&worker->work_list));
> > > xa_erase(&dev->worker_xa, worker->id);
> > >- vhost_task_stop(worker->vtsk);
> > >+ worker->ops->stop(worker);
> > > kfree(worker);
> > > }
> > >
> > >@@ -729,42 +729,69 @@ static void vhost_workers_free(struct vhost_dev *dev)
> > > xa_destroy(&dev->worker_xa);
> > > }
> > >
> > >+static void vhost_task_wakeup(struct vhost_worker *worker)
> > >+{
> > >+ return vhost_task_wake(worker->vtsk);
> > >+}
> > >+
> > >+static void vhost_task_do_stop(struct vhost_worker *worker)
> > >+{
> > >+ return vhost_task_stop(worker->vtsk);
> > >+}
> > >+
> > >+static int vhost_task_worker_create(struct vhost_worker *worker,
> > >+ struct vhost_dev *dev, const char *name)
> > >+{
> > >+ struct vhost_task *vtsk;
> > >+ u32 id;
> > >+ int ret;
> > >+
> > >+ vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
> > >+ worker, name);
> > >+ if (IS_ERR(vtsk))
> > >+ return PTR_ERR(vtsk);
> > >+
> > >+ worker->vtsk = vtsk;
> > >+ vhost_task_start(vtsk);
> > >+ ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
> > >+ if (ret < 0) {
> > >+ vhost_task_do_stop(worker);
> > >+ return ret;
> > >+ }
> >
> > In the final code, xa_alloc() is duplicated among the functions that
> > create a kthread or task; might it make sense to factor it out and do it in
> > vhost_worker_create() ?
> >
> > Thanks,
> > Stefano
> >
> Thanks a lot Stefano. I previously tried moving xa_alloc() out, but
> that made the code strange.
> I think keeping xa_alloc() in the create_ops function completes the
> job in a single function, and maybe it could be used in some other
> functions in the future
Sure, if you tried, and it doesn't add benefits, that's perfectly fine
to ignore this suggestion! ;-)
Thanks,
Stefano
> thanks
> cindy
>
> > >+ worker->id = id;
> > >+ return 0;
> > >+}
> > >+
> > >+static const struct vhost_worker_ops vhost_task_ops = {
> > >+ .create = vhost_task_worker_create,
> > >+ .stop = vhost_task_do_stop,
> > >+ .wakeup = vhost_task_wakeup,
> > >+};
> > >+
> > > static struct vhost_worker *vhost_worker_create(struct vhost_dev *dev)
> > > {
> > > struct vhost_worker *worker;
> > >- struct vhost_task *vtsk;
> > > char name[TASK_COMM_LEN];
> > > int ret;
> > >- u32 id;
> > >+ const struct vhost_worker_ops *ops = &vhost_task_ops;
> > >
> > > worker = kzalloc(sizeof(*worker), GFP_KERNEL_ACCOUNT);
> > > if (!worker)
> > > return NULL;
> > >
> > > worker->dev = dev;
> > >+ worker->ops = ops;
> > > snprintf(name, sizeof(name), "vhost-%d", current->pid);
> > >
> > >- vtsk = vhost_task_create(vhost_run_work_list, vhost_worker_killed,
> > >- worker, name);
> > >- if (IS_ERR(vtsk))
> > >- goto free_worker;
> > >-
> > > mutex_init(&worker->mutex);
> > > init_llist_head(&worker->work_list);
> > > worker->kcov_handle = kcov_common_handle();
> > >- worker->vtsk = vtsk;
> > >-
> > >- vhost_task_start(vtsk);
> > >-
> > >- ret = xa_alloc(&dev->worker_xa, &id, worker, xa_limit_32b, GFP_KERNEL);
> > >+ ret = ops->create(worker, dev, name);
> > > if (ret < 0)
> > >- goto stop_worker;
> > >- worker->id = id;
> > >+ goto free_worker;
> > >
> > > return worker;
> > >
> > >-stop_worker:
> > >- vhost_task_stop(vtsk);
> > > free_worker:
> > > kfree(worker);
> > > return NULL;
> > >diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
> > >index 19bb94922a0e..98895e299efa 100644
> > >--- a/drivers/vhost/vhost.h
> > >+++ b/drivers/vhost/vhost.h
> > >@@ -26,6 +26,16 @@ struct vhost_work {
> > > unsigned long flags;
> > > };
> > >
> > >+struct vhost_worker;
> > >+struct vhost_dev;
> > >+
> > >+struct vhost_worker_ops {
> > >+ int (*create)(struct vhost_worker *worker, struct vhost_dev *dev,
> > >+ const char *name);
> > >+ void (*stop)(struct vhost_worker *worker);
> > >+ void (*wakeup)(struct vhost_worker *worker);
> > >+};
> > >+
> > > struct vhost_worker {
> > > struct vhost_task *vtsk;
> > > struct vhost_dev *dev;
> > >@@ -36,6 +46,7 @@ struct vhost_worker {
> > > u32 id;
> > > int attachment_cnt;
> > > bool killed;
> > >+ const struct vhost_worker_ops *ops;
> > > };
> > >
> > > /* Poll a file (eventfd or socket) */
> > >--
> > >2.45.0
> > >
> >
>
© 2016 - 2025 Red Hat, Inc.