From: Nicolin Chen <nicolinc@nvidia.com>
Introduce write handling for VINTF and VCMDQ MMIO regions, including
status/config updates, queue index tracking, and BASE_L/BASE_H
processing. Writes to VCMDQ BASE_L/BASE_H trigger allocation or
teardown of an IOMMUFD HW queue.
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Signed-off-by: Shameer Kolothum <skolothumtho@nvidia.com>
---
hw/arm/tegra241-cmdqv.c | 213 ++++++++++++++++++++++++++++++++++++++++
1 file changed, 213 insertions(+)
diff --git a/hw/arm/tegra241-cmdqv.c b/hw/arm/tegra241-cmdqv.c
index 185ef957bc..5e9a980d27 100644
--- a/hw/arm/tegra241-cmdqv.c
+++ b/hw/arm/tegra241-cmdqv.c
@@ -210,11 +210,158 @@ static uint64_t tegra241_cmdqv_read(void *opaque, hwaddr offset, unsigned size)
}
}
+/*
+ * Handle a guest write to the VINTF0 register page.
+ *
+ * Note that the caller aligns @offset down to 0x1000, so @offset here is
+ * relative to the start of the VINTF region.
+ */
+static void tegra241_cmdqv_write_vintf(Tegra241CMDQV *cmdqv, hwaddr offset,
+                                       uint64_t value, unsigned size)
+{
+    switch (offset) {
+    case A_VINTF0_CONFIG:
+        /* Strip off HYP_OWN setting from guest kernel */
+        value &= ~R_VINTF0_CONFIG_HYP_OWN_MASK;
+
+        cmdqv->vintf_config = value;
+        /* Reflect the requested ENABLE state straight into ENABLE_OK status */
+        if (value & R_VINTF0_CONFIG_ENABLE_MASK) {
+            cmdqv->vintf_status |= R_VINTF0_STATUS_ENABLE_OK_MASK;
+        } else {
+            cmdqv->vintf_status &= ~R_VINTF0_STATUS_ENABLE_OK_MASK;
+        }
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return;
+    }
+}
+
+/*
+ * (Re)allocate or tear down the IOMMUFD HW queue backing VCMDQ @index,
+ * based on the guest-programmed vcmdq_base[index].
+ *
+ * Returns true on success -- including a plain teardown when the programmed
+ * address is not guest RAM (e.g. a zero base written during reset) -- and
+ * false with @errp set when the HW queue allocation fails.
+ */
+static bool tegra241_cmdqv_setup_vcmdq(Tegra241CMDQV *cmdqv, int index,
+                                       Error **errp)
+{
+    SMMUv3State *smmu = cmdqv->smmu;
+    SMMUv3AccelState *s_accel = smmu->s_accel;
+    uint64_t base_mask = (uint64_t)R_VCMDQ0_BASE_L_ADDR_MASK |
+                         (uint64_t)R_VCMDQ0_BASE_H_ADDR_MASK << 32;
+    uint64_t addr = cmdqv->vcmdq_base[index] & base_mask;
+    /* LOG2SIZE occupies the low bits of BASE_L; entries are 16 bytes each */
+    uint64_t log2 = cmdqv->vcmdq_base[index] & R_VCMDQ0_BASE_L_LOG2SIZE_MASK;
+    uint64_t size = 1ULL << (log2 + 4);
+    IOMMUFDHWqueue *vcmdq = cmdqv->vcmdq[index];
+    IOMMUFDViommu *viommu = &s_accel->viommu;
+    IOMMUFDHWqueue *hw_queue;
+    uint32_t hw_queue_id;
+
+    /*
+     * Always tear down any previously allocated HW queue first, so that a
+     * BASE rewrite -- or an invalid/zero base written as part of reset --
+     * does not leak the old iommufd object or leave a stale queue bound.
+     */
+    if (vcmdq) {
+        iommufd_backend_free_id(viommu->iommufd, vcmdq->hw_queue_id);
+        cmdqv->vcmdq[index] = NULL;
+        g_free(vcmdq);
+    }
+
+    /* Ignore any invalid address. This may come as part of reset etc */
+    if (!cpu_physical_memory_is_ram(addr)) {
+        return true;
+    }
+
+    if (!iommufd_backend_alloc_hw_queue(viommu->iommufd, viommu->viommu_id,
+                                        IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV,
+                                        index, addr, size, &hw_queue_id,
+                                        errp)) {
+        return false;
+    }
+    hw_queue = g_new(IOMMUFDHWqueue, 1);
+    hw_queue->hw_queue_id = hw_queue_id;
+    hw_queue->viommu = viommu;
+
+    cmdqv->vcmdq[index] = hw_queue;
+    return true;
+}
+
+/*
+ * Handle a guest write to one of the per-VCMDQ registers.
+ *
+ * Note that the caller aligns @offset down to 0x10000: @offset here is the
+ * VCMDQ0-relative register offset, while @index selects the actual queue.
+ *
+ * When the shared vcmdq_page0 mapping is available, index/config/gerrorn
+ * updates are mirrored into it in addition to the internal register state.
+ */
+static void
+tegra241_cmdqv_write_vcmdq(Tegra241CMDQV *cmdqv, hwaddr offset, int index,
+                           uint64_t value, unsigned size, Error **errp)
+{
+    uint32_t *ptr = NULL;
+    uint64_t off;
+
+    if (cmdqv->vcmdq_page0) {
+        /*
+         * NOTE(review): this rebases @offset against 0x10000, but the
+         * BASE_L/BASE_H/CONS_INDX_BASE_DRAM_* cases below arrive with a
+         * 0x20000-based offset, which would put @off past the CONS/PROD
+         * page. @ptr is never dereferenced for those cases, so this looks
+         * harmless today -- confirm that is intentional.
+         */
+        off = (0x80 * index) + (offset - 0x10000);
+        ptr = (uint32_t *)(cmdqv->vcmdq_page0 + off);
+    }
+
+    switch (offset) {
+    case A_VCMDQ0_CONS_INDX:
+        if (ptr) {
+            *ptr = value;
+        }
+        cmdqv->vcmdq_cons_indx[index] = value;
+        return;
+    case A_VCMDQ0_PROD_INDX:
+        if (ptr) {
+            *ptr = value;
+        }
+        cmdqv->vcmdq_prod_indx[index] = (uint32_t)value;
+        return;
+    case A_VCMDQ0_CONFIG:
+        if (ptr) {
+            *ptr = (uint32_t)value;
+        } else {
+            /*
+             * No shared page: emulate the enable handshake by reflecting
+             * CMDQ_EN directly into the CMDQ_EN_OK status bit.
+             */
+            if (value & R_VCMDQ0_CONFIG_CMDQ_EN_MASK) {
+                cmdqv->vcmdq_status[index] |= R_VCMDQ0_STATUS_CMDQ_EN_OK_MASK;
+            } else {
+                cmdqv->vcmdq_status[index] &= ~R_VCMDQ0_STATUS_CMDQ_EN_OK_MASK;
+            }
+        }
+        cmdqv->vcmdq_config[index] = (uint32_t)value;
+        return;
+    case A_VCMDQ0_GERRORN:
+        if (ptr) {
+            *ptr = (uint32_t)value;
+        }
+        cmdqv->vcmdq_gerrorn[index] = (uint32_t)value;
+        return;
+    case A_VCMDQ0_BASE_L:
+        /* Accept a 64-bit write of the full base or a 32-bit low-half write */
+        if (size == 8) {
+            cmdqv->vcmdq_base[index] = value;
+        } else if (size == 4) {
+            cmdqv->vcmdq_base[index] =
+                (cmdqv->vcmdq_base[index] & 0xffffffff00000000ULL) |
+                (value & 0xffffffffULL);
+        }
+        /* Base updates (re)allocate or tear down the IOMMUFD HW queue */
+        tegra241_cmdqv_setup_vcmdq(cmdqv, index, errp);
+        return;
+    case A_VCMDQ0_BASE_H:
+        cmdqv->vcmdq_base[index] =
+            (cmdqv->vcmdq_base[index] & 0xffffffffULL) |
+            ((uint64_t)value << 32);
+        tegra241_cmdqv_setup_vcmdq(cmdqv, index, errp);
+        return;
+    case A_VCMDQ0_CONS_INDX_BASE_DRAM_L:
+        if (size == 8) {
+            cmdqv->vcmdq_cons_indx_base[index] = value;
+        } else if (size == 4) {
+            cmdqv->vcmdq_cons_indx_base[index] =
+                (cmdqv->vcmdq_cons_indx_base[index] & 0xffffffff00000000ULL) |
+                (value & 0xffffffffULL);
+        }
+        return;
+    case A_VCMDQ0_CONS_INDX_BASE_DRAM_H:
+        cmdqv->vcmdq_cons_indx_base[index] =
+            (cmdqv->vcmdq_cons_indx_base[index] & 0xffffffffULL) |
+            ((uint64_t)value << 32);
+        return;
+    default:
+        qemu_log_mask(LOG_UNIMP,
+                      "%s unhandled write access at 0x%" PRIx64 "\n",
+                      __func__, offset);
+        return;
+    }
+}
+
static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value,
unsigned size)
{
Tegra241CMDQV *cmdqv = (Tegra241CMDQV *)opaque;
Error *local_err = NULL;
+ int index;
if (!cmdqv->vcmdq_page0) {
tegra241_cmdqv_init_vcmdq_page0(cmdqv, &local_err);
@@ -223,6 +370,72 @@ static void tegra241_cmdqv_write(void *opaque, hwaddr offset, uint64_t value,
local_err = NULL;
}
}
+
+ if (offset > TEGRA241_CMDQV_IO_LEN) {
+ qemu_log_mask(LOG_UNIMP,
+ "%s offset 0x%" PRIx64 " off limit (0x50000)\n", __func__,
+ offset);
+ return;
+ }
+
+ switch (offset) {
+ case A_CONFIG:
+ cmdqv->config = value;
+ if (value & R_CONFIG_CMDQV_EN_MASK) {
+ cmdqv->status |= R_STATUS_CMDQV_ENABLED_MASK;
+ } else {
+ cmdqv->status &= ~R_STATUS_CMDQV_ENABLED_MASK;
+ }
+ break;
+ case A_VI_INT_MASK ... A_VI_INT_MASK_1:
+ cmdqv->vi_int_mask[(offset - A_VI_INT_MASK) / 4] = value;
+ break;
+ case A_CMDQ_ALLOC_MAP_0 ... A_CMDQ_ALLOC_MAP_127:
+ cmdqv->cmdq_alloc_map[(offset - A_CMDQ_ALLOC_MAP_0) / 4] = value;
+ break;
+ case A_VINTF0_CONFIG ... A_VINTF0_LVCMDQ_ERR_MAP_3:
+ tegra241_cmdqv_write_vintf(cmdqv, offset, value, size);
+ break;
+ case A_VI_VCMDQ0_CONS_INDX ... A_VI_VCMDQ127_GERRORN:
+ offset -= 0x20000;
+ QEMU_FALLTHROUGH;
+ case A_VCMDQ0_CONS_INDX ... A_VCMDQ127_GERRORN:
+ /*
+ * Align offset down to 0x10000 while extracting the index:
+ * VCMDQ0_CONS_INDX (0x10000) => 0x10000, 0
+ * VCMDQ1_CONS_INDX (0x10080) => 0x10000, 1
+ * VCMDQ2_CONS_INDX (0x10100) => 0x10000, 2
+ * ...
+ * VCMDQ127_CONS_INDX (0x13f80) => 0x10000, 127
+ */
+ index = (offset - 0x10000) / 0x80;
+ tegra241_cmdqv_write_vcmdq(cmdqv, offset - 0x80 * index, index, value,
+ size, &local_err);
+ break;
+ case A_VI_VCMDQ0_BASE_L ... A_VI_VCMDQ127_CONS_INDX_BASE_DRAM_H:
+ offset -= 0x20000;
+ QEMU_FALLTHROUGH;
+ case A_VCMDQ0_BASE_L ... A_VCMDQ127_CONS_INDX_BASE_DRAM_H:
+ /*
+ * Align offset down to 0x20000 while extracting the index:
+ * VCMDQ0_BASE_L (0x20000) => 0x20000, 0
+ * VCMDQ1_BASE_L (0x20080) => 0x20000, 1
+ * VCMDQ2_BASE_L (0x20100) => 0x20000, 2
+ * ...
+ * VCMDQ127_BASE_L (0x23f80) => 0x20000, 127
+ */
+ index = (offset - 0x20000) / 0x80;
+ tegra241_cmdqv_write_vcmdq(cmdqv, offset - 0x80 * index, index, value,
+ size, &local_err);
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "%s unhandled write access at 0x%" PRIx64 "\n",
+ __func__, offset);
+ }
+
+ if (local_err) {
+ error_report_err(local_err);
+ }
}
static const MemoryRegionOps mmio_cmdqv_ops = {
--
2.43.0