From: Hao Xiang <hao.xiang@linux.dev>
* Use a thread-safe queue for DSA task enqueue/dequeue.
* Implement DSA task submission.
* Implement DSA batch task submission.
Signed-off-by: Hao Xiang <hao.xiang@linux.dev>
Signed-off-by: Yichen Wang <yichen.wang@bytedance.com>
---
include/qemu/dsa.h |  46 ++++++++++
util/dsa.c         | 222 ++++++++++++++++++++++++++++++++++++++++-----
2 files changed, 246 insertions(+), 22 deletions(-)
diff --git a/include/qemu/dsa.h b/include/qemu/dsa.h
index 29b60654e9..9cc836b64c 100644
--- a/include/qemu/dsa.h
+++ b/include/qemu/dsa.h
@@ -27,6 +27,52 @@
#include <linux/idxd.h>
#include "x86intrin.h"
+typedef enum QemuDsaTaskType {
+ QEMU_DSA_TASK = 0,
+ QEMU_DSA_BATCH_TASK
+} QemuDsaTaskType;
+
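+/* A task moves READY -> PROCESSING -> COMPLETION over its lifetime. */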
+typedef enum QemuDsaTaskStatus {
+ QEMU_DSA_TASK_READY = 0,
+ QEMU_DSA_TASK_PROCESSING,
+ QEMU_DSA_TASK_COMPLETION
+} QemuDsaTaskStatus;
+
+typedef struct {
+ void *work_queue;
+} QemuDsaDevice;
+
+typedef QSIMPLEQ_HEAD(QemuDsaTaskQueue, QemuDsaBatchTask) QemuDsaTaskQueue;
+
+typedef struct {
+ QemuDsaDevice *dsa_devices;
+ int num_dsa_devices;
+ /* The index of the next DSA device to be used. */
+ uint32_t device_allocator_index;
+ bool running;
+ QemuMutex task_queue_lock;
+ QemuCond task_queue_cond;
+ QemuDsaTaskQueue task_queue;
+} QemuDsaDeviceGroup;
+
+typedef void (*qemu_dsa_completion_fn)(void *);
+
+typedef struct QemuDsaBatchTask {
+ struct dsa_hw_desc batch_descriptor;
+ struct dsa_hw_desc *descriptors;
+ struct dsa_completion_record batch_completion __attribute__((aligned(32)));
+ struct dsa_completion_record *completions;
+ QemuDsaDeviceGroup *group;
+ QemuDsaDevice *device;
+ qemu_dsa_completion_fn completion_callback;
+ QemuSemaphore sem_task_complete;
+ QemuDsaTaskType task_type;
+ QemuDsaTaskStatus status;
+ int batch_size;
+ QSIMPLEQ_ENTRY(QemuDsaBatchTask) entry;
+} QemuDsaBatchTask;
+
+
/**
* @brief Initializes DSA devices.
*
diff --git a/util/dsa.c b/util/dsa.c
index cdb0b9dda2..43280b31cd 100644
--- a/util/dsa.c
+++ b/util/dsa.c
@@ -30,27 +30,11 @@
#include <linux/idxd.h>
#include "x86intrin.h"
-#define DSA_WQ_SIZE 4096
+#define DSA_WQ_PORTAL_SIZE 4096
+#define DSA_WQ_DEPTH 128
#define MAX_DSA_DEVICES 16
-typedef QSIMPLEQ_HEAD(dsa_task_queue, dsa_batch_task) dsa_task_queue;
-
-typedef struct {
- void *work_queue;
-} QemuDsaDevice;
-
-typedef struct {
- QemuDsaDevice *dsa_devices;
- int num_dsa_devices;
- /* The index of the next DSA device to be used. */
- uint32_t device_allocator_index;
- bool running;
- QemuMutex task_queue_lock;
- QemuCond task_queue_cond;
- dsa_task_queue task_queue;
-} QemuDsaDeviceGroup;
-
-uint64_t max_retry_count;
+uint32_t max_retry_count;
static QemuDsaDeviceGroup dsa_group;
@@ -73,7 +57,7 @@ map_dsa_device(const char *dsa_wq_path)
dsa_wq_path, errno);
return MAP_FAILED;
}
- dsa_device = mmap(NULL, DSA_WQ_SIZE, PROT_WRITE,
+ dsa_device = mmap(NULL, DSA_WQ_PORTAL_SIZE, PROT_WRITE,
MAP_SHARED | MAP_POPULATE, fd, 0);
close(fd);
if (dsa_device == MAP_FAILED) {
@@ -105,7 +89,7 @@ static void
dsa_device_cleanup(QemuDsaDevice *instance)
{
if (instance->work_queue != MAP_FAILED) {
- munmap(instance->work_queue, DSA_WQ_SIZE);
+ munmap(instance->work_queue, DSA_WQ_PORTAL_SIZE);
}
}
@@ -233,6 +217,198 @@ dsa_device_group_get_next_device(QemuDsaDeviceGroup *group)
return &group->dsa_devices[current];
}
+/**
+ * @brief Empties out the DSA task queue.
+ *
+ * @param group A pointer to the DSA device group.
+ */
+static void
+dsa_empty_task_queue(QemuDsaDeviceGroup *group)
+{
+ qemu_mutex_lock(&group->task_queue_lock);
+ QemuDsaTaskQueue *task_queue = &group->task_queue;
+ while (!QSIMPLEQ_EMPTY(task_queue)) {
+ QSIMPLEQ_REMOVE_HEAD(task_queue, entry);
+ }
+ qemu_mutex_unlock(&group->task_queue_lock);
+}
+
+/**
+ * @brief Adds a task to the DSA task queue.
+ *
+ * @param group A pointer to the DSA device group.
+ * @param task A pointer to the DSA task to enqueue.
+ *
+ * @return int Zero if successful, otherwise a negative error code.
+ */
+static int
+dsa_task_enqueue(QemuDsaDeviceGroup *group,
+ QemuDsaBatchTask *task)
+{
+ bool notify = false;
+
+ qemu_mutex_lock(&group->task_queue_lock);
+
+ if (!group->running) {
+ error_report("DSA: Tried to queue task to stopped device queue.");
+ qemu_mutex_unlock(&group->task_queue_lock);
+ return -1;
+ }
+
+ /* The queue is empty. This enqueue operation is a 0->1 transition. */
+ if (QSIMPLEQ_EMPTY(&group->task_queue)) {
+ notify = true;
+ }
+
+ QSIMPLEQ_INSERT_TAIL(&group->task_queue, task, entry);
+
+ /* We need to notify the waiter for 0->1 transitions. */
+ if (notify) {
+ qemu_cond_signal(&group->task_queue_cond);
+ }
+
+ qemu_mutex_unlock(&group->task_queue_lock);
+
+ return 0;
+}
+
+/**
+ * @brief Takes a DSA task out of the task queue.
+ *
+ * @param group A pointer to the DSA device group.
+ * @return QemuDsaBatchTask* The DSA task being dequeued.
+ */
+__attribute__((unused))
+static QemuDsaBatchTask *
+dsa_task_dequeue(QemuDsaDeviceGroup *group)
+{
+ QemuDsaBatchTask *task = NULL;
+
+ qemu_mutex_lock(&group->task_queue_lock);
+
+ while (true) {
+ if (!group->running) {
+ goto exit;
+ }
+ task = QSIMPLEQ_FIRST(&group->task_queue);
+ if (task != NULL) {
+ break;
+ }
+ qemu_cond_wait(&group->task_queue_cond, &group->task_queue_lock);
+ }
+
+ QSIMPLEQ_REMOVE_HEAD(&group->task_queue, entry);
+
+exit:
+ qemu_mutex_unlock(&group->task_queue_lock);
+ return task;
+}
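
For illustration (not part of this patch): the pair above is the usual condition-variable handshake; the producer signals only on the empty-to-non-empty transition, and the consumer re-checks its predicate after every wakeup, so spurious wakeups are harmless. A hypothetical completion worker built on dsa_task_dequeue() could look like this:

static void *example_completion_worker(void *opaque)
{
    QemuDsaDeviceGroup *group = opaque;
    QemuDsaBatchTask *task;

    /* dsa_task_dequeue() blocks on task_queue_cond and returns NULL
     * once the group has been stopped. */
    while ((task = dsa_task_dequeue(group)) != NULL) {
        /* Poll the hardware completion record(s) here, then wake the
         * submitter. */
        task->status = QEMU_DSA_TASK_COMPLETION;
        qemu_sem_post(&task->sem_task_complete);
    }
    return NULL;
}
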
+
+/**
+ * @brief Submits a DSA work item to the device work queue.
+ *
+ * @param wq A pointer to the DSA work queue's device memory.
+ * @param descriptor A pointer to the DSA work item descriptor.
+ *
+ * @return Zero if successful, non-zero otherwise.
+ */
+static int
+submit_wi_int(void *wq, struct dsa_hw_desc *descriptor)
+{
+ uint32_t retry = 0;
+
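+ /* Make sure the descriptor is fully written before handing it to the device. */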
+ _mm_sfence();
+
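+ /* _enqcmd() returns non-zero while the device does not accept the descriptor, e.g. because the shared work queue is full; keep retrying. */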
+ while (true) {
+ if (_enqcmd(wq, descriptor) == 0) {
+ break;
+ }
+ retry++;
+ if (retry > max_retry_count) {
+ error_report("Submit work retry %u times.", retry);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Synchronously submits a DSA work item to the
+ * device work queue.
+ *
+ * @param wq A pointer to the DSA work queue's device memory.
+ * @param descriptor A pointer to the DSA work item descriptor.
+ *
+ * @return int Zero if successful, non-zero otherwise.
+ */
+__attribute__((unused))
+static int
+submit_wi(void *wq, struct dsa_hw_desc *descriptor)
+{
+ return submit_wi_int(wq, descriptor);
+}
+
+/**
+ * @brief Asynchronously submits a DSA work item to the
+ * device work queue.
+ *
+ * @param task A pointer to the task.
+ *
+ * @return int Zero if successful, non-zero otherwise.
+ */
+__attribute__((unused))
+static int
+submit_wi_async(QemuDsaBatchTask *task)
+{
+ QemuDsaDeviceGroup *device_group = task->group;
+ QemuDsaDevice *device_instance = task->device;
+ int ret;
+
+ assert(task->task_type == QEMU_DSA_TASK);
+
+ task->status = QEMU_DSA_TASK_PROCESSING;
+
+ ret = submit_wi_int(device_instance->work_queue,
+ &task->descriptors[0]);
+ if (ret != 0) {
+ return ret;
+ }
+
+ return dsa_task_enqueue(device_group, task);
+}
+
+/**
+ * @brief Asynchronously submits a DSA batch work item to the
+ * device work queue.
+ *
+ * @param batch_task A pointer to the batch task.
+ *
+ * @return int Zero if successful, non-zero otherwise.
+ */
+__attribute__((unused))
+static int
+submit_batch_wi_async(QemuDsaBatchTask *batch_task)
+{
+ QemuDsaDeviceGroup *device_group = batch_task->group;
+ QemuDsaDevice *device_instance = batch_task->device;
+ int ret;
+
+ assert(batch_task->task_type == QEMU_DSA_BATCH_TASK);
+ assert(batch_task->batch_descriptor.desc_count <= batch_task->batch_size);
+ assert(batch_task->status == QEMU_DSA_TASK_READY);
+
+ batch_task->status = QEMU_DSA_TASK_PROCESSING;
+
+ ret = submit_wi_int(device_instance->work_queue,
+ &batch_task->batch_descriptor);
+ if (ret != 0) {
+ return ret;
+ }
+
+ return dsa_task_enqueue(device_group, batch_task);
+}
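
For illustration (not part of this patch): a submitter picks a device, fires the batch asynchronously, and then sleeps on the task's semaphore until a completion path (such as the worker sketched earlier) posts it. The wrapper below is a hypothetical usage sketch:

static int example_submit_batch_and_wait(QemuDsaBatchTask *task)
{
    task->group = &dsa_group;
    task->device = dsa_device_group_get_next_device(&dsa_group);

    if (submit_batch_wi_async(task) != 0) {
        return -1;
    }
    /* Posted by whichever thread observes the batch completion. */
    qemu_sem_wait(&task->sem_task_complete);
    return task->status == QEMU_DSA_TASK_COMPLETION ? 0 : -1;
}
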
+
/**
* @brief Check if DSA is running.
*
@@ -246,7 +422,7 @@ bool qemu_dsa_is_running(void)
static void
dsa_globals_init(void)
{
- max_retry_count = UINT64_MAX;
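+ /* One retry per work queue slot bounds the ENQCMD busy-wait. */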
+ max_retry_count = DSA_WQ_DEPTH;
}
/**
@@ -289,6 +465,8 @@ void qemu_dsa_stop(void)
if (!group->running) {
return;
}
+
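+ /* Drop any tasks still waiting in the queue; nothing will consume them now. */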
+ dsa_empty_task_queue(group);
}
/**
--
Yichen Wang