This patch implements the PROBE request. At the moment,
no reserved regions are returned.
Reserved regions are stored per device.
Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
Waiting for clarifications on v0.4 spec
---
hw/virtio/trace-events | 2 +
hw/virtio/virtio-iommu.c | 173 ++++++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 173 insertions(+), 2 deletions(-)
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 9010fbd..9ccfad1 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -46,3 +46,5 @@ virtio_iommu_unmap_left_interval(uint64_t low, uint64_t high, uint64_t next_low,
virtio_iommu_unmap_right_interval(uint64_t low, uint64_t high, uint64_t next_low, uint64_t next_high) "Unmap right [0x%"PRIx64",0x%"PRIx64"], new interval=[0x%"PRIx64",0x%"PRIx64"]"
virtio_iommu_unmap_inc_interval(uint64_t low, uint64_t high) "Unmap inc [0x%"PRIx64",0x%"PRIx64"]"
virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
+virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t addr, uint64_t size, uint32_t flags, size_t filled) "dev= %d, subtype=%d addr=0x%"PRIx64" size=0x%"PRIx64" flags=%d filled=0x%lx"
+virtio_iommu_fill_none_property(uint32_t devid) "devid=%d"
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index db46a91..281b0f8 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -37,6 +37,11 @@
/* Max size */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256
+#define VIOMMU_PROBE_SIZE 512
+
+#define SUPPORTED_PROBE_PROPERTIES (\
+ VIRTIO_IOMMU_PROBE_T_NONE | \
+ VIRTIO_IOMMU_PROBE_T_RESV_MEM)
typedef struct viommu_as {
uint32_t id;
@@ -49,6 +54,7 @@ typedef struct viommu_dev {
viommu_as *as;
QLIST_ENTRY(viommu_dev) next;
VirtIOIOMMU *viommu;
+ GTree *reserved_regions;
} viommu_dev;
typedef struct viommu_interval {
@@ -63,6 +69,13 @@ typedef struct viommu_mapping {
uint32_t flags;
} viommu_mapping;
+typedef struct viommu_property_buffer {
+ viommu_dev *dev;
+ size_t filled;
+ uint8_t *start;
+ bool error;
+} viommu_property_buffer;
+
static inline uint16_t virtio_iommu_get_sid(IOMMUDevice *dev)
{
return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
@@ -101,6 +114,9 @@ static viommu_dev *virtio_iommu_get_dev(VirtIOIOMMU *s, uint32_t devid)
dev->viommu = s;
trace_virtio_iommu_get_dev(devid);
g_tree_insert(s->devices, GUINT_TO_POINTER(devid), dev);
+ dev->reserved_regions = g_tree_new_full((GCompareDataFunc)interval_cmp,
+ NULL, (GDestroyNotify)g_free,
+ (GDestroyNotify)g_free);
return dev;
}
@@ -114,6 +130,7 @@ static void virtio_iommu_put_dev(gpointer data)
}
trace_virtio_iommu_put_dev(dev->id);
+ g_tree_destroy(dev->reserved_regions);
g_free(dev);
}
@@ -369,6 +386,123 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s,
return VIRTIO_IOMMU_S_INVAL;
}
+static gboolean virtio_iommu_fill_resv_mem_prop(gpointer key,
+ gpointer value,
+ gpointer data)
+{
+ struct virtio_iommu_probe_resv_mem *resv =
+ (struct virtio_iommu_probe_resv_mem *)value;
+ struct virtio_iommu_probe_property *prop;
+ struct virtio_iommu_probe_resv_mem *current;
+ viommu_property_buffer *bufstate = (viommu_property_buffer *)data;
+ size_t size = sizeof(*resv), total_size;
+
+ total_size = size + 4;
+
+ if (bufstate->filled >= VIOMMU_PROBE_SIZE) {
+ bufstate->error = true;
+ return true;
+ }
+ prop = (struct virtio_iommu_probe_property *)
+ (bufstate->start + bufstate->filled);
+ prop->type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM) &
+ VIRTIO_IOMMU_PROBE_T_MASK;
+ prop->length = size;
+
+ current = (struct virtio_iommu_probe_resv_mem *)prop->value;
+ *current = *resv;
+ bufstate->filled += total_size;
+ trace_virtio_iommu_fill_resv_property(bufstate->dev->id,
+ resv->subtype, resv->addr,
+ resv->size, resv->flags,
+ bufstate->filled);
+ return false;
+}
+
+static int virtio_iommu_fill_none_prop(viommu_property_buffer *bufstate)
+{
+ struct virtio_iommu_probe_property *prop;
+
+ prop = (struct virtio_iommu_probe_property *)
+ (bufstate->start + bufstate->filled);
+ prop->type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_NONE)
+ & VIRTIO_IOMMU_PROBE_T_MASK;
+ prop->length = 0;
+ bufstate->filled += 4;
+ trace_virtio_iommu_fill_none_property(bufstate->dev->id);
+ return 0;
+}
+
+static int virtio_iommu_fill_property(int devid, int type,
+ viommu_property_buffer *bufstate)
+{
+ int ret = -ENOSPC;
+
+ if (bufstate->filled + 4 >= VIOMMU_PROBE_SIZE) {
+ bufstate->error = true;
+ goto out;
+ }
+
+ switch (type) {
+ case VIRTIO_IOMMU_PROBE_T_NONE:
+ ret = virtio_iommu_fill_none_prop(bufstate);
+ break;
+ case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
+ {
+ viommu_dev *dev = bufstate->dev;
+
+ g_tree_foreach(dev->reserved_regions,
+ virtio_iommu_fill_resv_mem_prop,
+ bufstate);
+ if (!bufstate->error) {
+ ret = 0;
+ }
+ break;
+ }
+ default:
+ ret = -ENOENT;
+ break;
+ }
+out:
+ if (ret) {
+ error_report("%s property of type=%d could not be filled (%d),"
+ " remaining size = 0x%lx",
+ __func__, type, ret, bufstate->filled);
+ }
+ return ret;
+}
+
+static int virtio_iommu_probe(VirtIOIOMMU *s,
+ struct virtio_iommu_req_probe *req,
+ uint8_t *buf)
+{
+ uint32_t devid = le32_to_cpu(req->device);
+ int16_t prop_types = SUPPORTED_PROBE_PROPERTIES, type;
+ viommu_property_buffer bufstate;
+ viommu_dev *dev;
+ int ret;
+
+ dev = g_tree_lookup(s->devices, GUINT_TO_POINTER(devid));
+ if (!dev) {
+ return -EINVAL;
+ }
+
+ bufstate.start = buf;
+ bufstate.filled = 0;
+ bufstate.dev = dev;
+
+ while ((type = ctz32(prop_types)) != 32) {
+ ret = virtio_iommu_fill_property(devid, 1 << type, &bufstate);
+ if (ret) {
+ break;
+ }
+ prop_types &= ~(1 << type);
+ }
+ virtio_iommu_fill_property(devid, VIRTIO_IOMMU_PROBE_T_NONE, &bufstate);
+
+ return VIRTIO_IOMMU_S_OK;
+}
+
#define get_payload_size(req) (\
sizeof((req)) - sizeof(struct virtio_iommu_req_tail))
@@ -433,6 +567,24 @@ static int virtio_iommu_handle_unmap(VirtIOIOMMU *s,
return virtio_iommu_unmap(s, &req);
}
+static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
+ struct iovec *iov,
+ unsigned int iov_cnt,
+ uint8_t *buf)
+{
+ struct virtio_iommu_req_probe req;
+ size_t sz, payload_sz;
+
+ payload_sz = get_payload_size(req);
+
+ sz = iov_to_buf(iov, iov_cnt, 0, &req, payload_sz);
+ if (sz != payload_sz) {
+ return VIRTIO_IOMMU_S_INVAL;
+ }
+
+ return virtio_iommu_probe(s, &req, buf);
+}
+
static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
@@ -477,16 +629,31 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
case VIRTIO_IOMMU_T_UNMAP:
tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
break;
+ case VIRTIO_IOMMU_T_PROBE:
+ {
+ struct virtio_iommu_req_tail *ptail;
+ uint8_t *buf = g_malloc0(s->config.probe_size + sizeof(tail));
+
+ ptail = (struct virtio_iommu_req_tail *)buf + s->config.probe_size;
+ ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
+
+ sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
+ buf, s->config.probe_size + sizeof(tail));
+ g_free(buf);
+ assert(sz == s->config.probe_size + sizeof(tail));
+ goto push;
+ }
default:
tail.status = VIRTIO_IOMMU_S_UNSUPP;
}
- qemu_mutex_unlock(&s->mutex);
sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
&tail, sizeof(tail));
assert(sz == sizeof(tail));
- virtqueue_push(vq, elem, sizeof(tail));
+push:
+ qemu_mutex_unlock(&s->mutex);
+ virtqueue_push(vq, elem, sz);
virtio_notify(vdev, vq);
g_free(elem);
}
@@ -574,6 +741,7 @@ static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
virtio_add_feature(&f, VIRTIO_RING_F_INDIRECT_DESC);
virtio_add_feature(&f, VIRTIO_IOMMU_F_INPUT_RANGE);
virtio_add_feature(&f, VIRTIO_IOMMU_F_MAP_UNMAP);
+ virtio_add_feature(&f, VIRTIO_IOMMU_F_PROBE);
return f;
}
@@ -631,6 +799,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
s->config.page_size_mask = TARGET_PAGE_MASK;
s->config.input_range.end = -1UL;
+ s->config.probe_size = VIOMMU_PROBE_SIZE;
qemu_mutex_init(&s->mutex);
--
2.5.5
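
For readers following the buffer arithmetic above: each probe property is
encoded as a 4-byte header (16-bit type, 16-bit length) followed by its
value, which is where the "size + 4" in virtio_iommu_fill_resv_mem_prop()
comes from. Below is a standalone sketch of that encoding; the constants
and the resv-mem field layout are illustrative (taken from the fields the
patch traces), not copied from the spec header:

/* Illustrative sketch, not part of the patch: encode one probe property
 * into a VIOMMU_PROBE_SIZE-sized output buffer. Endianness conversion is
 * omitted for brevity; the real code uses cpu_to_le16(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PROBE_BUF_SIZE   512   /* mirrors VIOMMU_PROBE_SIZE */
#define PROBE_T_RESV_MEM 2     /* illustrative value; use the spec constant */

struct probe_prop_head {       /* 4-byte property header */
    uint16_t type;
    uint16_t length;           /* length of the value, header excluded */
};

struct probe_resv_mem {        /* assumed value layout for RESV_MEM */
    uint8_t  subtype;
    uint8_t  reserved[3];
    uint32_t flags;
    uint64_t addr;
    uint64_t size;
};

/* Return the new fill level, or 0 if the property does not fit. */
static size_t encode_resv_prop(uint8_t *buf, size_t filled,
                               const struct probe_resv_mem *resv)
{
    struct probe_prop_head head = {
        .type   = PROBE_T_RESV_MEM,
        .length = sizeof(*resv),
    };
    size_t total = sizeof(head) + sizeof(*resv);   /* i.e. "size + 4" */

    if (filled + total > PROBE_BUF_SIZE) {
        return 0;
    }
    memcpy(buf + filled, &head, sizeof(head));
    memcpy(buf + filled + sizeof(head), resv, sizeof(*resv));
    return filled + total;
}

int main(void)
{
    uint8_t buf[PROBE_BUF_SIZE] = { 0 };
    struct probe_resv_mem msi = {
        .subtype = 1,                  /* e.g. an MSI doorbell window */
        .addr = 0x8000000, .size = 0x100000,
    };
    size_t filled = encode_resv_prop(buf, 0, &msi);

    printf("filled = %zu bytes\n", filled);
    return 0;
}
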
Hi Eric,
On 19.09.2017 09:46, Eric Auger wrote:
> This patch implements the PROBE request. At the moment,
> no reserved regions are returned.
>
> At the moment reserved regions are stored per device.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>
> ---
>
[...]
> +
> +static int virtio_iommu_fill_property(int devid, int type,
> + viommu_property_buffer *bufstate)
> +{
> + int ret = -ENOSPC;
> +
> + if (bufstate->filled + 4 >= VIOMMU_PROBE_SIZE) {
> + bufstate->error = true;
> + goto out;
> + }
> +
> + switch (type) {
> + case VIRTIO_IOMMU_PROBE_T_NONE:
> + ret = virtio_iommu_fill_none_prop(bufstate);
> + break;
> + case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
> + {
> + viommu_dev *dev = bufstate->dev;
> +
> + g_tree_foreach(dev->reserved_regions,
> + virtio_iommu_fill_resv_mem_prop,
> + bufstate);
> + if (!bufstate->error) {
> + ret = 0;
> + }
> + break;
> + }
> + default:
> + ret = -ENOENT;
> + break;
> + }
> +out:
> + if (ret) {
> + error_report("%s property of type=%d could not be filled (%d),"
> + " remaining size = 0x%lx",
> + __func__, type, ret, bufstate->filled);
> + }
> + return ret;
> +}
> +
> +static int virtio_iommu_probe(VirtIOIOMMU *s,
> + struct virtio_iommu_req_probe *req,
> + uint8_t *buf)
> +{
> + uint32_t devid = le32_to_cpu(req->device);
> + int16_t prop_types = SUPPORTED_PROBE_PROPERTIES, type;
> + viommu_property_buffer bufstate;
> + viommu_dev *dev;
> + int ret;
> +
> + dev = g_tree_lookup(s->devices, GUINT_TO_POINTER(devid));
> + if (!dev) {
> + return -EINVAL;
> + }
> +
> + bufstate.start = buf;
> + bufstate.filled = 0;
> + bufstate.dev = dev;
bufstate.error is not initialized, which may cause a false alarm in
virtio_iommu_fill_property().
> +
> + while ((type = ctz32(prop_types)) != 32) {
> + ret = virtio_iommu_fill_property(devid, 1 << type, &bufstate);
> + if (ret) {
> + break;
> + }
> + prop_types &= ~(1 << type);
> + }
> + virtio_iommu_fill_property(devid, VIRTIO_IOMMU_PROBE_T_NONE, &bufstate);
> +
> + return VIRTIO_IOMMU_S_OK;
> +}
> +
> #define get_payload_size(req) (\
> sizeof((req)) - sizeof(struct virtio_iommu_req_tail))
>
> @@ -433,6 +567,24 @@ static int virtio_iommu_handle_unmap(VirtIOIOMMU *s,
> return virtio_iommu_unmap(s, &req);
> }
Thanks,
Tomasz
> -----Original Message-----
> From: Tomasz Nowicki [mailto:tnowicki@caviumnetworks.com]
> Sent: Wednesday, September 27, 2017 4:23 PM
> To: Eric Auger <eric.auger@redhat.com>; eric.auger.pro@gmail.com;
> peter.maydell@linaro.org; alex.williamson@redhat.com; mst@redhat.com;
> qemu-arm@nongnu.org; qemu-devel@nongnu.org; jean-
> philippe.brucker@arm.com
> Cc: will.deacon@arm.com; kevin.tian@intel.com; marc.zyngier@arm.com;
> christoffer.dall@linaro.org; drjones@redhat.com; wei@redhat.com; Bharat
> Bhushan <bharat.bhushan@nxp.com>; peterx@redhat.com;
> linuc.decode@gmail.com
> Subject: Re: [RFC v4 10/16] virtio-iommu: Implement probe request
>
> Hi Eric,
>
> On 19.09.2017 09:46, Eric Auger wrote:
> > This patch implements the PROBE request. At the moment, no reserved
> > regions are returned.
> >
> > At the moment reserved regions are stored per device.
> >
> > Signed-off-by: Eric Auger <eric.auger@redhat.com>
> >
> > ---
> >
>
> [...]
>
> > +
> > +static int virtio_iommu_fill_property(int devid, int type,
> > + viommu_property_buffer
> > +*bufstate) {
> > + int ret = -ENOSPC;
> > +
> > + if (bufstate->filled + 4 >= VIOMMU_PROBE_SIZE) {
> > + bufstate->error = true;
> > + goto out;
> > + }
> > +
> > + switch (type) {
> > + case VIRTIO_IOMMU_PROBE_T_NONE:
> > + ret = virtio_iommu_fill_none_prop(bufstate);
> > + break;
> > + case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
> > + {
> > + viommu_dev *dev = bufstate->dev;
> > +
> > + g_tree_foreach(dev->reserved_regions,
> > + virtio_iommu_fill_resv_mem_prop,
> > + bufstate);
> > + if (!bufstate->error) {
> > + ret = 0;
> > + }
> > + break;
> > + }
> > + default:
> > + ret = -ENOENT;
> > + break;
> > + }
> > +out:
> > + if (ret) {
> > + error_report("%s property of type=%d could not be filled (%d),"
> > + " remaining size = 0x%lx",
> > + __func__, type, ret, bufstate->filled);
> > + }
> > + return ret;
> > +}
> > +
> > +static int virtio_iommu_probe(VirtIOIOMMU *s,
> > + struct virtio_iommu_req_probe *req,
> > + uint8_t *buf) {
> > + uint32_t devid = le32_to_cpu(req->device);
> > + int16_t prop_types = SUPPORTED_PROBE_PROPERTIES, type;
> > + viommu_property_buffer bufstate;
> > + viommu_dev *dev;
> > + int ret;
> > +
> > + dev = g_tree_lookup(s->devices, GUINT_TO_POINTER(devid));
> > + if (!dev) {
> > + return -EINVAL;
> > + }
> > +
> > + bufstate.start = buf;
> > + bufstate.filled = 0;
> > + bufstate.dev = dev;
>
> bufstate.error is not initialized which may cause false alarm in
> virtio_iommu_fill_property()
I observed the following print:
"qemu-system-aarch64: virtio_iommu_fill_property property of type=2 could not be filled (-28), remaining size = 0x0 "
When I initialize bufstate.error to 0, the message goes away.
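For reference, the change I tested amounts to something like this
(untested sketch; placement next to the other bufstate assignments in
virtio_iommu_probe() is only a suggestion):

@@ static int virtio_iommu_probe(VirtIOIOMMU *s,
     bufstate.start = buf;
     bufstate.filled = 0;
+    bufstate.error = false; /* initialize before any property is filled */
     bufstate.dev = dev;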
Thanks
-Bharat
>
> > +
> > + while ((type = ctz32(prop_types)) != 32) {
> > + ret = virtio_iommu_fill_property(devid, 1 << type, &bufstate);
> > + if (ret) {
> > + break;
> > + }
> > + prop_types &= ~(1 << type);
> > + }
> > + virtio_iommu_fill_property(devid, VIRTIO_IOMMU_PROBE_T_NONE,
> > + &bufstate);
> > +
> > + return VIRTIO_IOMMU_S_OK;
> > +}
> > +
> > #define get_payload_size(req) (\
> > sizeof((req)) - sizeof(struct virtio_iommu_req_tail))
> >
> > @@ -433,6 +567,24 @@ static int
> virtio_iommu_handle_unmap(VirtIOIOMMU *s,
> > return virtio_iommu_unmap(s, &req);
> > }
>
> Thanks,
> Tomasz
Hi Bharat,
On 27/09/2017 13:00, Bharat Bhushan wrote:
>
>
>> -----Original Message-----
>> From: Tomasz Nowicki [mailto:tnowicki@caviumnetworks.com]
>> Sent: Wednesday, September 27, 2017 4:23 PM
>> To: Eric Auger <eric.auger@redhat.com>; eric.auger.pro@gmail.com;
>> peter.maydell@linaro.org; alex.williamson@redhat.com; mst@redhat.com;
>> qemu-arm@nongnu.org; qemu-devel@nongnu.org; jean-
>> philippe.brucker@arm.com
>> Cc: will.deacon@arm.com; kevin.tian@intel.com; marc.zyngier@arm.com;
>> christoffer.dall@linaro.org; drjones@redhat.com; wei@redhat.com; Bharat
>> Bhushan <bharat.bhushan@nxp.com>; peterx@redhat.com;
>> linuc.decode@gmail.com
>> Subject: Re: [RFC v4 10/16] virtio-iommu: Implement probe request
>>
>> Hi Eric,
>>
>> On 19.09.2017 09:46, Eric Auger wrote:
>>> This patch implements the PROBE request. At the moment, no reserved
>>> regions are returned.
>>>
>>> At the moment reserved regions are stored per device.
>>>
>>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>>
>>> ---
>>>
>>
>> [...]
>>
>>> +
>>> +static int virtio_iommu_fill_property(int devid, int type,
>>> + viommu_property_buffer
>>> +*bufstate) {
>>> + int ret = -ENOSPC;
>>> +
>>> + if (bufstate->filled + 4 >= VIOMMU_PROBE_SIZE) {
>>> + bufstate->error = true;
>>> + goto out;
>>> + }
>>> +
>>> + switch (type) {
>>> + case VIRTIO_IOMMU_PROBE_T_NONE:
>>> + ret = virtio_iommu_fill_none_prop(bufstate);
>>> + break;
>>> + case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
>>> + {
>>> + viommu_dev *dev = bufstate->dev;
>>> +
>>> + g_tree_foreach(dev->reserved_regions,
>>> + virtio_iommu_fill_resv_mem_prop,
>>> + bufstate);
>>> + if (!bufstate->error) {
>>> + ret = 0;
>>> + }
>>> + break;
>>> + }
>>> + default:
>>> + ret = -ENOENT;
>>> + break;
>>> + }
>>> +out:
>>> + if (ret) {
>>> + error_report("%s property of type=%d could not be filled (%d),"
>>> + " remaining size = 0x%lx",
>>> + __func__, type, ret, bufstate->filled);
>>> + }
>>> + return ret;
>>> +}
>>> +
>>> +static int virtio_iommu_probe(VirtIOIOMMU *s,
>>> + struct virtio_iommu_req_probe *req,
>>> + uint8_t *buf) {
>>> + uint32_t devid = le32_to_cpu(req->device);
>>> + int16_t prop_types = SUPPORTED_PROBE_PROPERTIES, type;
>>> + viommu_property_buffer bufstate;
>>> + viommu_dev *dev;
>>> + int ret;
>>> +
>>> + dev = g_tree_lookup(s->devices, GUINT_TO_POINTER(devid));
>>> + if (!dev) {
>>> + return -EINVAL;
>>> + }
>>> +
>>> + bufstate.start = buf;
>>> + bufstate.filled = 0;
>>> + bufstate.dev = dev;
>>
>> bufstate.error is not initialized which may cause false alarm in
>> virtio_iommu_fill_property()
>
> I observed below prints
> "qemu-system-aarch64: virtio_iommu_fill_property property of type=2 could not be filled (-28), remaining size = 0x0 "
>
> When I initialized the bufstate.error = 0,it goes.
Sure, I will fix that soon.
Best Regards
Eric
>
> Thanks
> -Bharat
>
>>
>>> +
>>> + while ((type = ctz32(prop_types)) != 32) {
>>> + ret = virtio_iommu_fill_property(devid, 1 << type, &bufstate);
>>> + if (ret) {
>>> + break;
>>> + }
>>> + prop_types &= ~(1 << type);
>>> + }
>>> + virtio_iommu_fill_property(devid, VIRTIO_IOMMU_PROBE_T_NONE,
>>> + &bufstate);
>>> +
>>> + return VIRTIO_IOMMU_S_OK;
>>> +}
>>> +
>>> #define get_payload_size(req) (\
>>> sizeof((req)) - sizeof(struct virtio_iommu_req_tail))
>>>
>>> @@ -433,6 +567,24 @@ static int
>> virtio_iommu_handle_unmap(VirtIOIOMMU *s,
>>> return virtio_iommu_unmap(s, &req);
>>> }
>>
>> Thanks,
>> Tomasz
Hi Tomasz,
On 27/09/2017 12:53, Tomasz Nowicki wrote:
> Hi Eric,
>
> On 19.09.2017 09:46, Eric Auger wrote:
>> This patch implements the PROBE request. At the moment,
>> no reserved regions are returned.
>>
>> At the moment reserved regions are stored per device.
>>
>> Signed-off-by: Eric Auger <eric.auger@redhat.com>
>>
>> ---
>>
>
> [...]
>
>> +
>> +static int virtio_iommu_fill_property(int devid, int type,
>> + viommu_property_buffer *bufstate)
>> +{
>> + int ret = -ENOSPC;
>> +
>> + if (bufstate->filled + 4 >= VIOMMU_PROBE_SIZE) {
>> + bufstate->error = true;
>> + goto out;
>> + }
>> +
>> + switch (type) {
>> + case VIRTIO_IOMMU_PROBE_T_NONE:
>> + ret = virtio_iommu_fill_none_prop(bufstate);
>> + break;
>> + case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
>> + {
>> + viommu_dev *dev = bufstate->dev;
>> +
>> + g_tree_foreach(dev->reserved_regions,
>> + virtio_iommu_fill_resv_mem_prop,
>> + bufstate);
>> + if (!bufstate->error) {
>> + ret = 0;
>> + }
>> + break;
>> + }
>> + default:
>> + ret = -ENOENT;
>> + break;
>> + }
>> +out:
>> + if (ret) {
>> + error_report("%s property of type=%d could not be filled (%d),"
>> + " remaining size = 0x%lx",
>> + __func__, type, ret, bufstate->filled);
>> + }
>> + return ret;
>> +}
>> +
>> +static int virtio_iommu_probe(VirtIOIOMMU *s,
>> + struct virtio_iommu_req_probe *req,
>> + uint8_t *buf)
>> +{
>> + uint32_t devid = le32_to_cpu(req->device);
>> + int16_t prop_types = SUPPORTED_PROBE_PROPERTIES, type;
>> + viommu_property_buffer bufstate;
>> + viommu_dev *dev;
>> + int ret;
>> +
>> + dev = g_tree_lookup(s->devices, GUINT_TO_POINTER(devid));
>> + if (!dev) {
>> + return -EINVAL;
>> + }
>> +
>> + bufstate.start = buf;
>> + bufstate.filled = 0;
>> + bufstate.dev = dev;
>
> bufstate.error is not initialized which may cause false alarm in
> virtio_iommu_fill_property()
Thanks for spotting that. I owe you several fixes in both vsmmuv3 and
virtio-iommu. Thank you for testing, again!
Best Regards
Eric
>
>> +
>> + while ((type = ctz32(prop_types)) != 32) {
>> + ret = virtio_iommu_fill_property(devid, 1 << type, &bufstate);
>> + if (ret) {
>> + break;
>> + }
>> + prop_types &= ~(1 << type);
>> + }
>> + virtio_iommu_fill_property(devid, VIRTIO_IOMMU_PROBE_T_NONE,
>> &bufstate);
>> +
>> + return VIRTIO_IOMMU_S_OK;
>> +}
>> +
>> #define get_payload_size(req) (\
>> sizeof((req)) - sizeof(struct virtio_iommu_req_tail))
>> @@ -433,6 +567,24 @@ static int
>> virtio_iommu_handle_unmap(VirtIOIOMMU *s,
>> return virtio_iommu_unmap(s, &req);
>> }
>
> Thanks,
> Tomasz
>