Also, added logic for retrieving eBPF objects from QEMU.
eBPF objects are stored in a hash table during runtime.
eBPF objects are cached in the .xml cache file, encoded in base64.
Signed-off-by: Andrew Melnychenko <andrew@daynix.com>
---
src/qemu/qemu_capabilities.c | 181 +++++++++++++++++++++++++++++++++++
src/qemu/qemu_capabilities.h | 4 +
2 files changed, 185 insertions(+)
diff --git a/src/qemu/qemu_capabilities.c b/src/qemu/qemu_capabilities.c
index 3a1bfbf74d..d9b5d6fb22 100644
--- a/src/qemu/qemu_capabilities.c
+++ b/src/qemu/qemu_capabilities.c
@@ -698,6 +698,7 @@ VIR_ENUM_IMPL(virQEMUCaps,
/* 450 */
"run-with.async-teardown", /* QEMU_CAPS_RUN_WITH_ASYNC_TEARDOWN */
"virtio-blk-vhost-vdpa", /* QEMU_CAPS_DEVICE_VIRTIO_BLK_VHOST_VDPA */
+ "virtio-net.ebpf_rss_fds", /* QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS */
);
@@ -789,6 +790,9 @@ struct _virQEMUCaps {
virQEMUCapsAccel kvm;
virQEMUCapsAccel hvf;
virQEMUCapsAccel tcg;
+
+ /* Hash of ebpf objects virQEMUEbpfOBjectData */
+ GHashTable *ebpfObjects;
};
struct virQEMUCapsSearchData {
@@ -796,6 +800,18 @@ struct virQEMUCapsSearchData {
const char *binaryFilter;
};
+struct virQEMUEbpfOBjectData {
+ void *ebpfData;
+ size_t ebpfSize;
+};
+
+void virQEMUEbpfOBjectDataDestroy(gpointer data) {
+ if (!data)
+ return;
+ struct virQEMUEbpfOBjectData *object = data;
+ g_free(object->ebpfData);
+ g_free(data);
+}
static virClass *virQEMUCapsClass;
static void virQEMUCapsDispose(void *obj);
@@ -836,6 +852,19 @@ const char *virQEMUCapsArchToString(virArch arch)
}
+const void *
+virQEMUCapsGetEbpf(virQEMUCaps *qemuCaps, const char *id, size_t *size)
+{
+ struct virQEMUEbpfOBjectData *object = virHashLookup(qemuCaps->ebpfObjects, id);
+
+ if (!object)
+ return NULL;
+
+ *size = object->ebpfSize;
+ return object->ebpfData;
+}
+
+
/* Checks whether a domain with @guest arch can run natively on @host.
*/
bool
@@ -1426,6 +1455,7 @@ static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsVirtioBlk[] = {
static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsVirtioNet[] = {
{ "acpi-index", QEMU_CAPS_ACPI_INDEX, NULL },
{ "rss", QEMU_CAPS_VIRTIO_NET_RSS, NULL },
+ { "ebpf_rss_fds", QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS, NULL },
};
static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsPCIeRootPort[] = {
@@ -1804,6 +1834,8 @@ virQEMUCapsNew(void)
qemuCaps->invalidation = true;
qemuCaps->flags = virBitmapNew(QEMU_CAPS_LAST);
+ qemuCaps->ebpfObjects = virHashNew(virQEMUEbpfOBjectDataDestroy);
+
return qemuCaps;
}
@@ -1984,6 +2016,8 @@ virQEMUCaps *virQEMUCapsNewCopy(virQEMUCaps *qemuCaps)
ret->hypervCapabilities = g_memdup(qemuCaps->hypervCapabilities,
sizeof(virDomainCapsFeatureHyperv));
+ ret->ebpfObjects = g_hash_table_ref(qemuCaps->ebpfObjects);
+
return g_steal_pointer(&ret);
}
@@ -2026,6 +2060,8 @@ void virQEMUCapsDispose(void *obj)
g_free(qemuCaps->hypervCapabilities);
+ g_hash_table_unref(qemuCaps->ebpfObjects);
+
virQEMUCapsAccelClear(&qemuCaps->kvm);
virQEMUCapsAccelClear(&qemuCaps->hvf);
virQEMUCapsAccelClear(&qemuCaps->tcg);
@@ -4541,6 +4577,46 @@ virQEMUCapsValidateArch(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
}
+static int
+virQEMUCapsParseEbpfObjects(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
+{
+ g_autofree xmlNodePtr *nodes = NULL;
+ size_t i;
+ int n;
+
+ if ((n = virXPathNodeSet("./ebpf", ctxt, &nodes)) < 0) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("failed to parse qemu cached eBPF object"));
+ return -1;
+ }
+
+ for (i = 0; i < n; i++) {
+ g_autofree char *id = NULL;
+ g_autofree char *ebpf = NULL;
+ struct virQEMUEbpfOBjectData *ebpfObject = NULL;
+
+ if (!(id = virXMLPropString(nodes[i], "id"))) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("missing eBPF object ID in QEMU capabilities cache"));
+ return -1;
+ }
+
+ if (!(ebpf = virXMLNodeContentString(nodes[i]))) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s %s",
+ _("can't get eBPF base64 data from cache for ID: "), id);
+ return -1;
+ }
+
+ ebpfObject = g_malloc(sizeof(*ebpfObject));
+ ebpfObject->ebpfData = g_base64_decode(ebpf, &ebpfObject->ebpfSize);
+
+ virHashAddEntry(qemuCaps->ebpfObjects, id, ebpfObject);
+ }
+
+ return 0;
+}
+
+
/*
* Parsing a doc that looks like
*
@@ -4688,6 +4764,8 @@ virQEMUCapsLoadCache(virArch hostArch,
if (skipInvalidation)
qemuCaps->invalidation = false;
+ virQEMUCapsParseEbpfObjects(qemuCaps, ctxt);
+
return 0;
}
@@ -4925,6 +5003,32 @@ virQEMUCapsFormatHypervCapabilities(virQEMUCaps *qemuCaps,
}
+static int
+virQEMUCapsFormatEbpfObjectsIterator(void *payload, const char *name, void *opaque)
+{
+ virBuffer *buf = opaque;
+ struct virQEMUEbpfOBjectData *object = payload;
+ g_autofree char *objectBase64 = NULL;
+
+ if (!buf || !object)
+ return 0;
+
+ objectBase64 = g_base64_encode(object->ebpfData, object->ebpfSize);
+ if (!objectBase64)
+ return 0;
+
+ virBufferAsprintf(buf, "<ebpf id='%s'>\n", name);
+ virBufferAdjustIndent(buf, 2);
+
+ virBufferAddStr(buf, objectBase64);
+ virBufferAddLit(buf, "\n");
+
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "</ebpf>\n");
+
+ return 0;
+}
+
char *
virQEMUCapsFormatCache(virQEMUCaps *qemuCaps)
{
@@ -5015,6 +5119,8 @@ virQEMUCapsFormatCache(virQEMUCaps *qemuCaps)
if (qemuCaps->kvmSupportsSecureGuest)
virBufferAddLit(&buf, "<kvmSupportsSecureGuest/>\n");
+ virHashForEach(qemuCaps->ebpfObjects, virQEMUCapsFormatEbpfObjectsIterator, &buf);
+
virBufferAdjustIndent(&buf, -2);
virBufferAddLit(&buf, "</qemuCaps>\n");
@@ -5437,6 +5543,79 @@ virQEMUCapsInitProcessCaps(virQEMUCaps *qemuCaps)
}
+static int
+virQEMUCapsProbeQMPEbpfObject(virQEMUCaps *qemuCaps, const char *id,
+ qemuMonitor *mon)
+{
+ void *ebpfObject = NULL;
+ size_t size = 0;
+
+ if (!id)
+ return -1;
+
+ ebpfObject = qemuMonitorGetEbpf(mon, id, &size);
+ if(ebpfObject == NULL)
+ return -1;
+
+ struct virQEMUEbpfOBjectData *object = g_malloc(sizeof(*object));
+ object->ebpfData = ebpfObject;
+ object->ebpfSize = size;
+
+ virHashAddEntry(qemuCaps->ebpfObjects, id, object);
+
+ return 0;
+}
+
+static void virQEMUCapsProbeQMPSchemaEbpf(virQEMUCaps *qemuCaps, GHashTable* schema, qemuMonitor *mon) {
+ if (schema == NULL)
+ return;
+
+ virJSONValue *requestSchema = virHashLookup(schema, "request-ebpf");
+ if (!requestSchema)
+ return;
+
+ const char *argType = virJSONValueObjectGetString(requestSchema, "arg-type");
+ if (!argType)
+ return;
+
+ virJSONValue *argSchema = virHashLookup(schema, argType);
+ if (!argSchema)
+ return;
+
+ /* Get member id type*/
+ virJSONValue *members = virJSONValueObjectGetArray(argSchema, "members");
+ if (!members)
+ return;
+
+ /* Find "id" type */
+ virJSONValue *idSchema = NULL;
+ for (size_t i = 0; (idSchema = virJSONValueArrayGet(members, i)) != NULL; ++i) {
+ const char *name = virJSONValueObjectGetString(idSchema, "name");
+ if (strncmp(name, "id", 3) == 0)
+ break;
+ }
+
+ if (!idSchema)
+ return;
+
+ const char *ebpfIds = virJSONValueObjectGetString(idSchema, "type");
+ virJSONValue *ebpfIdsSchema = virHashLookup(schema, ebpfIds);
+ if (!ebpfIdsSchema)
+ return;
+
+ virJSONValue *ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values");
+ if (!ebpfIdsArray)
+ return;
+
+ /* Try to request every eBPF */
+ virJSONValue *id = NULL;
+ for (size_t i = 0; (id = virJSONValueArrayGet(ebpfIdsArray, i)) != NULL; ++i) {
+ const char *name = virJSONValueGetString(id);
+ virQEMUCapsProbeQMPEbpfObject(qemuCaps, name, mon);
+ }
+}
+
+
static int
virQEMUCapsProbeQMPSchemaCapabilities(virQEMUCaps *qemuCaps,
qemuMonitor *mon)
@@ -5466,6 +5645,8 @@ virQEMUCapsProbeQMPSchemaCapabilities(virQEMUCaps *qemuCaps,
virQEMUCapsSet(qemuCaps, cmd->flag);
}
+ virQEMUCapsProbeQMPSchemaEbpf(qemuCaps, schema, mon);
+
return 0;
}
diff --git a/src/qemu/qemu_capabilities.h b/src/qemu/qemu_capabilities.h
index 3c4f7f625b..164dc003d0 100644
--- a/src/qemu/qemu_capabilities.h
+++ b/src/qemu/qemu_capabilities.h
@@ -677,6 +677,7 @@ typedef enum { /* virQEMUCapsFlags grouping marker for syntax-check */
/* 450 */
QEMU_CAPS_RUN_WITH_ASYNC_TEARDOWN, /* asynchronous teardown -run-with async-teardown=on|off */
QEMU_CAPS_DEVICE_VIRTIO_BLK_VHOST_VDPA, /* virtio-blk-vhost-vdpa block driver */
+ QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS, /* virtio-net ebpf_rss_fds feature */
QEMU_CAPS_LAST /* this must always be the last item */
} virQEMUCapsFlags;
@@ -895,3 +896,6 @@ int
virQEMUCapsProbeQMPMachineTypes(virQEMUCaps *qemuCaps,
virDomainVirtType virtType,
qemuMonitor *mon);
+
+const void *
+virQEMUCapsGetEbpf(virQEMUCaps *qemuCaps, const char *id, size_t *size);
--
2.42.0
On Mon, Oct 09, 2023 at 09:16:12 +0300, Andrew Melnychenko wrote:
> Also, added logic for retrieving eBPF objects from QEMU.
> eBPF objects are stored in a hash table during runtime.
> eBPF objects are cached in the .xml cache file, encoded in base64.
>
> Signed-off-by: Andrew Melnychenko <andrew@daynix.com>
> ---
> src/qemu/qemu_capabilities.c | 181 +++++++++++++++++++++++++++++++++++
> src/qemu/qemu_capabilities.h | 4 +
> 2 files changed, 185 insertions(+)
Once again, make sure to check that each patch builds and passes tests.
This one fails to compile:
u/libvirt_driver_qemu_impl.a.p/qemu_capabilities.c.o -c ../../../libvirt/src/qemu/qemu_capabilities.c
../../../libvirt/src/qemu/qemu_capabilities.c:808:6: error: no previous prototype for ‘virQEMUEbpfOBjectDataDestroy’ [-Werror=missing-prototypes]
808 | void virQEMUEbpfOBjectDataDestroy(gpointer data) {
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c: In function ‘virQEMUEbpfOBjectDataDestroy’:
../../../libvirt/src/qemu/qemu_capabilities.c:811:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
811 | struct virQEMUEbpfOBjectData *object = data;
| ^~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c: In function ‘virQEMUCapsProbeQMPEbpfObject’:
../../../libvirt/src/qemu/qemu_capabilities.c:5560:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5560 | struct virQEMUEbpfOBjectData *object = g_malloc(sizeof(*object));
| ^~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c: In function ‘virQEMUCapsProbeQMPSchemaEbpf’:
../../../libvirt/src/qemu/qemu_capabilities.c:5573:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5573 | virJSONValue *requestSchema = virHashLookup(schema, "request-ebpf");
| ^~~~~~~~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5577:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5577 | const char *argType = virJSONValueObjectGetString(requestSchema, "arg-type");
| ^~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5581:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5581 | virJSONValue *argSchema = virHashLookup(schema, argType);
| ^~~~~~~~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5586:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5586 | virJSONValue *members = virJSONValueObjectGetArray(argSchema, "members");
| ^~~~~~~~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5591:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5591 | virJSONValue *idSchema = NULL;
| ^~~~~~~~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5601:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5601 | const char *ebpfIds = virJSONValueObjectGetString(idSchema, "type");
| ^~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5606:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5606 | virJSONValue *ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values");
| ^~~~~~~~~~~~
../../../libvirt/src/qemu/qemu_capabilities.c:5611:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
5611 | virJSONValue *id = NULL;
| ^~~~~~~~~~~~
cc1: all warnings being treated as errors
After the compile error is fixed there are further errors caught by
syntax-check.
> @@ -789,6 +790,9 @@ struct _virQEMUCaps {
> virQEMUCapsAccel kvm;
> virQEMUCapsAccel hvf;
> virQEMUCapsAccel tcg;
> +
> + /* Hash of ebpf objects virQEMUEbpfOBjectData */
> + GHashTable *ebpfObjects;
> };
>
> struct virQEMUCapsSearchData {
> @@ -796,6 +800,18 @@ struct virQEMUCapsSearchData {
> const char *binaryFilter;
> };
>
> +struct virQEMUEbpfOBjectData {
> + void *ebpfData;
> + size_t ebpfSize;
> +};
> +
> +void virQEMUEbpfOBjectDataDestroy(gpointer data) {
> + if (!data)
> + return;
> + struct virQEMUEbpfOBjectData *object = data;
> + g_free(object->ebpfData);
> + g_free(data);
> +}
As noted, store the program as a base64 string throughout libvirt;
none of the above will be needed.
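
For illustration only, a minimal sketch (not part of the posted patch) of what storing
the base64 string directly could look like: the hash then owns a plain string, g_free()
suffices as the value free function, and the getter just hands back the cached string.

```c
/* Sketch: keep the base64-encoded program as the hash value itself. */
qemuCaps->ebpfObjects = virHashNew(g_free);

/* The lookup simply returns the cached base64 string (or NULL if unknown). */
const char *
virQEMUCapsGetEbpf(virQEMUCaps *qemuCaps, const char *id)
{
    return virHashLookup(qemuCaps->ebpfObjects, id);
}
```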
>
> static virClass *virQEMUCapsClass;
> static void virQEMUCapsDispose(void *obj);
> @@ -836,6 +852,19 @@ const char *virQEMUCapsArchToString(virArch arch)
> }
>
>
> +const void *
> +virQEMUCapsGetEbpf(virQEMUCaps *qemuCaps, const char *id, size_t *size)
> +{
> + struct virQEMUEbpfOBjectData *object = virHashLookup(qemuCaps->ebpfObjects, id);
> +
> + if (!object)
> + return NULL;
> +
> + *size = object->ebpfSize;
> + return object->ebpfData;
> +}
> +
> +
> /* Checks whether a domain with @guest arch can run natively on @host.
> */
> bool
> @@ -1426,6 +1455,7 @@ static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsVirtioBlk[] = {
> static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsVirtioNet[] = {
> { "acpi-index", QEMU_CAPS_ACPI_INDEX, NULL },
> { "rss", QEMU_CAPS_VIRTIO_NET_RSS, NULL },
> + { "ebpf_rss_fds", QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS, NULL },
The QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS capability is not actually
connected to the 'request-ebpf' output caching.
The ebpf programs are cached if the 'request-ebpf' command is present
and in such case all programs are cached.
Please split the patch such that one adds the 'request-ebpf' related
changes and the second one adds the virtio-net capability.
> };
>
> static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsPCIeRootPort[] = {
> @@ -1804,6 +1834,8 @@ virQEMUCapsNew(void)
> qemuCaps->invalidation = true;
> qemuCaps->flags = virBitmapNew(QEMU_CAPS_LAST);
>
> + qemuCaps->ebpfObjects = virHashNew(virQEMUEbpfOBjectDataDestroy);
> +
> return qemuCaps;
> }
>
> @@ -1984,6 +2016,8 @@ virQEMUCaps *virQEMUCapsNewCopy(virQEMUCaps *qemuCaps)
> ret->hypervCapabilities = g_memdup(qemuCaps->hypervCapabilities,
> sizeof(virDomainCapsFeatureHyperv));
>
> + ret->ebpfObjects = g_hash_table_ref(qemuCaps->ebpfObjects);
virQEMUCapsNewCopy is a deep copy function, thus you must deep copy the
objects.
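
A sketch of how that deep copy could look, assuming the cached values end up being plain
base64 strings as suggested above (the iterator name is made up for this sketch):

```c
/* Sketch: duplicate every cached base64 string into the copy's table. */
static int
virQEMUCapsEbpfObjectsCopyIter(void *payload, const char *name, void *opaque)
{
    GHashTable *dst = opaque;

    /* virHashAddEntry() can only fail here on a duplicate key, which cannot
     * happen when copying an existing table. */
    return virHashAddEntry(dst, name, g_strdup(payload));
}

/* ... in virQEMUCapsNewCopy() ... */
ret->ebpfObjects = virHashNew(g_free);
virHashForEach(qemuCaps->ebpfObjects, virQEMUCapsEbpfObjectsCopyIter, ret->ebpfObjects);
```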
> +
> return g_steal_pointer(&ret);
[...]
> @@ -4541,6 +4577,46 @@ virQEMUCapsValidateArch(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
> }
>
>
> +static int
> +virQEMUCapsParseEbpfObjects(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
> +{
> + g_autofree xmlNodePtr *nodes = NULL;
> + size_t i;
> + int n;
> +
> + if ((n = virXPathNodeSet("./ebpf", ctxt, &nodes)) < 0) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("failed to parse qemu cached eBPF object"));
> + return -1;
> + }
> +
> + for (i = 0; i < n; i++) {
> + g_autofree char *id = NULL;
> + g_autofree char *ebpf = NULL;
> + struct virQEMUEbpfOBjectData *ebpfObject = NULL;
> +
> + if (!(id = virXMLPropString(nodes[i], "id"))) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> + _("missing eBPF object ID in QEMU capabilities cache"));
Use virXMLPropStringRequired instead of reporting a custom error.
> + return -1;
> + }
> +
> + if (!(ebpf = virXMLNodeContentString(nodes[i]))) {
> + virReportError(VIR_ERR_INTERNAL_ERROR, "%s %s",
broken format string in the error
> + _("can't get eBPF base64 data from cache for ID: "), id);
Store the program as an attribute
> + return -1;
> + }
> +
> + ebpfObject = g_malloc(sizeof(*ebpfObject));
we use exclusively g_new0 for allocation of structs.
> + ebpfObject->ebpfData = g_base64_decode(ebpf, &ebpfObject->ebpfSize);
As noted, store this as a string and you'll save this step here.
> +
> + virHashAddEntry(qemuCaps->ebpfObjects, id, ebpfObject);
> + }
> +
> + return 0;
> +}
> +
> +
> /*
> * Parsing a doc that looks like
> *
> @@ -4688,6 +4764,8 @@ virQEMUCapsLoadCache(virArch hostArch,
> if (skipInvalidation)
> qemuCaps->invalidation = false;
>
> + virQEMUCapsParseEbpfObjects(qemuCaps, ctxt);
So the above function reports many errors, yet here you don't bother checking
for them.
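
Putting the parse-side comments together (required attributes, the program kept as a
base64 string, failures propagated to the caller), a sketch could look like the
following. The 'ebpfObjects'/'ebpf' element names and the 'data' attribute are just
placeholders of this sketch, and it assumes virXMLPropStringRequired reports its own
error and returns NULL on failure:

```c
/* Sketch: parser that stores the base64 string and propagates failures. */
static int
virQEMUCapsParseEbpfObjects(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
{
    g_autofree xmlNodePtr *nodes = NULL;
    int n;
    size_t i;

    if ((n = virXPathNodeSet("./ebpfObjects/ebpf", ctxt, &nodes)) < 0)
        return -1;

    for (i = 0; i < n; i++) {
        g_autofree char *id = NULL;
        g_autofree char *data = NULL;

        if (!(id = virXMLPropStringRequired(nodes[i], "id")) ||
            !(data = virXMLPropStringRequired(nodes[i], "data")))
            return -1;

        if (virHashAddEntry(qemuCaps->ebpfObjects, id, g_steal_pointer(&data)) < 0)
            return -1;
    }

    return 0;
}

/* ... and the caller in virQEMUCapsLoadCache() checks the result: */
if (virQEMUCapsParseEbpfObjects(qemuCaps, ctxt) < 0)
    return -1;
```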
> +
> return 0;
> }
>
> @@ -4925,6 +5003,32 @@ virQEMUCapsFormatHypervCapabilities(virQEMUCaps *qemuCaps,
> }
>
>
> +static int
> +virQEMUCapsFormatEbpfObjectsIterator(void *payload, const char *name, void *opaque)
> +{
> + virBuffer *buf = opaque;
> + struct virQEMUEbpfOBjectData *object = payload;
> + g_autofree char *objectBase64 = NULL;
> +
> + if (!buf || !object)
'buf' can't be NULL here, and neither can 'object'.
> + return 0;
> +
> + objectBase64 = g_base64_encode(object->ebpfData, object->ebpfSize);
> + if (!objectBase64)
g_base64_encode can't return NULL thus this check is pointless.
> + return 0;
> +
> + virBufferAsprintf(buf, "<ebpf id='%s'>\n", name);
> + virBufferAdjustIndent(buf, 2);
> +
> + virBufferAddStr(buf, objectBase64);
> + virBufferAddLit(buf, "\n");
> +
> + virBufferAdjustIndent(buf, -2);
> + virBufferAddLit(buf, "</ebpf>\n");
This format is not acceptable. As noted, store it in an attribute
instead of the element contents so that we can reasonably add subelements
should they become required.
> +
> + return 0;
> +}
> +
> char *
> virQEMUCapsFormatCache(virQEMUCaps *qemuCaps)
> {
> @@ -5015,6 +5119,8 @@ virQEMUCapsFormatCache(virQEMUCaps *qemuCaps)
> if (qemuCaps->kvmSupportsSecureGuest)
> virBufferAddLit(&buf, "<kvmSupportsSecureGuest/>\n");
>
> + virHashForEach(qemuCaps->ebpfObjects, virQEMUCapsFormatEbpfObjectsIterator, &buf);
Missing container around the '<ebpf>' elements. I know that XML allows
multiple, but we prefer stashing them in a container.
Also use virHashForEachSorted here. It's obviously less efficient, but
the capability cache is also handled in tests thus we must use a variant
which has stable output.
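
For illustration, a sketch of the formatting side under that layout, assuming the hash
values are plain base64 strings and using placeholder element/attribute names
('ebpfObjects', 'ebpf', 'data'):

```c
/* Sketch: one container element, one self-closing <ebpf/> per cached program.
 * Base64 needs no XML escaping, so the value can go straight into the attribute. */
static int
virQEMUCapsFormatEbpfObjectsIterator(void *payload, const char *name, void *opaque)
{
    virBuffer *buf = opaque;

    virBufferAsprintf(buf, "<ebpf id='%s' data='%s'/>\n", name, (const char *) payload);
    return 0;
}

/* ... in virQEMUCapsFormatCache() ... */
virBufferAddLit(&buf, "<ebpfObjects>\n");
virBufferAdjustIndent(&buf, 2);
virHashForEachSorted(qemuCaps->ebpfObjects, virQEMUCapsFormatEbpfObjectsIterator, &buf);
virBufferAdjustIndent(&buf, -2);
virBufferAddLit(&buf, "</ebpfObjects>\n");
```

Whether the container is emitted at all when the hash is empty is a detail the real
patch would still have to settle.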
> +
> virBufferAdjustIndent(&buf, -2);
> virBufferAddLit(&buf, "</qemuCaps>\n");
>
> @@ -5437,6 +5543,79 @@ virQEMUCapsInitProcessCaps(virQEMUCaps *qemuCaps)
> }
>
>
> +static int
> +virQEMUCapsProbeQMPEbpfObject(virQEMUCaps *qemuCaps, const char *id,
> + qemuMonitor *mon)
> +{
> + void *ebpfObject = NULL;
> + size_t size = 0;
> +
> + if (!id)
> + return -1;
This can't happen. If it did you'd not report an error.
> +
> + ebpfObject = qemuMonitorGetEbpf(mon, id, &size);
> + if(ebpfObject == NULL)
As noted in the previous patch, you can't determine here that NULL is a
failure. And you'd propagate the non-failure (without an error set) to the
upper layers.
> + return -1;
> +
> + struct virQEMUEbpfOBjectData *object = g_malloc(sizeof(*object));
> + object->ebpfData = ebpfObject;
> + object->ebpfSize = size;
> +
> + virHashAddEntry(qemuCaps->ebpfObjects, id, object);
This can return error and is not checked.
> +
> + return 0;
> +}
> +
> +static void virQEMUCapsProbeQMPSchemaEbpf(virQEMUCaps *qemuCaps, GHashTable* schema, qemuMonitor *mon) {
> + if (schema == NULL)
> + return;
Can't happen.
> +
> + virJSONValue *requestSchema = virHashLookup(schema, "request-ebpf");
> + if (!requestSchema)
> + return;
> +
> + const char *argType = virJSONValueObjectGetString(requestSchema, "arg-type");
> + if (!argType)
> + return;
> +
> + virJSONValue *argSchema = virHashLookup(schema, argType);
> + if (!argSchema)
> + return;
> +
> + /* Get member id type*/
> + virJSONValue *members = virJSONValueObjectGetArray(argSchema, "members");
> + if (!members)
> + return;
> +
> + /* Find "id" type */
> + virJSONValue *idSchema = NULL;
> + for (size_t i = 0; (idSchema = virJSONValueArrayGet(members, i)) != NULL; ++i) {
> + const char *name = virJSONValueObjectGetString(idSchema, "name");
> + if (strncmp(name, "id", 3) == 0)
> + break;
> + }
> +
> + if (!idSchema)
> + return;
> +
> + const char *ebpfIds = virJSONValueObjectGetString(idSchema, "type");
> + virJSONValue *ebpfIdsSchema = virHashLookup(schema, ebpfIds);
> + if (!ebpfIdsSchema)
> + return;
> +
> + virJSONValue *ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values");
> + if (!ebpfIdsArray)
> + return;
Instead of all of the above please use our existing schema query
infrastructure:
virQEMUQAPISchemaPathGet("request-ebpf/arg-type/id", schema, &ebpfIdsArray)
This should fill ebpfIdsArray with what the whole code above does.
Since the above code fails to compile I'll replace it with my variant:
static int
virQEMUCapsProbeQMPSchemaEbpf(virQEMUCaps *qemuCaps,
                              GHashTable* schema,
                              qemuMonitor *mon)
{
    virJSONValue *ebpfIdsArray;
    virJSONValue *ebpfIdsSchema;
    size_t i;

    if (virQEMUQAPISchemaPathGet("request-ebpf/arg-type/id", schema, &ebpfIdsSchema) != 1)
        return 0;

    if (!(ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values"))) {
        virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
                       _("malformed QMP schema of 'request-ebpf'"));
        return -1;
    }

    /* Try to request every eBPF */
    for (i = 0; i < virJSONValueArraySize(ebpfIdsArray); i++) {
        virJSONValue *id = virJSONValueArrayGet(ebpfIdsArray, i);

        if (virQEMUCapsProbeQMPEbpfObject(qemuCaps,
                                          virJSONValueGetString(id),
                                          mon) < 0)
            return -1;
    }

    return 0;
}
> +
> + /* Try to request every eBPF */
> + virJSONValue *id = NULL;
> + for (size_t i = 0; (id = virJSONValueArrayGet(ebpfIdsArray, i)) != NULL; ++i) {
> + const char *name = virJSONValueGetString(id);
> + virQEMUCapsProbeQMPEbpfObject(qemuCaps, name, mon);
Ah, so you in the end ignore all the errors regardless.
> + }
> +}
> +
> +
> static int
> virQEMUCapsProbeQMPSchemaCapabilities(virQEMUCaps *qemuCaps,
> qemuMonitor *mon)
> @@ -5466,6 +5645,8 @@ virQEMUCapsProbeQMPSchemaCapabilities(virQEMUCaps *qemuCaps,
> virQEMUCapsSet(qemuCaps, cmd->flag);
> }
>
> + virQEMUCapsProbeQMPSchemaEbpf(qemuCaps, schema, mon);
> +
> return 0;
> }
>
Hi all, thank you for your comments.
On Mon, Oct 9, 2023 at 12:48 PM Peter Krempa <pkrempa@redhat.com> wrote:
>
> On Mon, Oct 09, 2023 at 09:16:12 +0300, Andrew Melnychenko wrote:
> > Also, added logic for retrieving eBPF objects from QEMU.
> > eBPF objects are stored in a hash table during runtime.
> > eBPF objects are cached in the .xml cache file, encoded in base64.
> >
> > Signed-off-by: Andrew Melnychenko <andrew@daynix.com>
> > ---
> > src/qemu/qemu_capabilities.c | 181 +++++++++++++++++++++++++++++++++++
> > src/qemu/qemu_capabilities.h | 4 +
> > 2 files changed, 185 insertions(+)
>
> Once again, make sure to check that each patch builds and passes tests.
> This one fails to compile:
>
> u/libvirt_driver_qemu_impl.a.p/qemu_capabilities.c.o -c ../../../libvirt/src/qemu/qemu_capabilities.c
> ../../../libvirt/src/qemu/qemu_capabilities.c:808:6: error: no previous prototype for ‘virQEMUEbpfOBjectDataDestroy’ [-Werror=missing-prototypes]
> 808 | void virQEMUEbpfOBjectDataDestroy(gpointer data) {
> | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c: In function ‘virQEMUEbpfOBjectDataDestroy’:
> ../../../libvirt/src/qemu/qemu_capabilities.c:811:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 811 | struct virQEMUEbpfOBjectData *object = data;
> | ^~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c: In function ‘virQEMUCapsProbeQMPEbpfObject’:
> ../../../libvirt/src/qemu/qemu_capabilities.c:5560:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5560 | struct virQEMUEbpfOBjectData *object = g_malloc(sizeof(*object));
> | ^~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c: In function ‘virQEMUCapsProbeQMPSchemaEbpf’:
> ../../../libvirt/src/qemu/qemu_capabilities.c:5573:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5573 | virJSONValue *requestSchema = virHashLookup(schema, "request-ebpf");
> | ^~~~~~~~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5577:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5577 | const char *argType = virJSONValueObjectGetString(requestSchema, "arg-type");
> | ^~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5581:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5581 | virJSONValue *argSchema = virHashLookup(schema, argType);
> | ^~~~~~~~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5586:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5586 | virJSONValue *members = virJSONValueObjectGetArray(argSchema, "members");
> | ^~~~~~~~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5591:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5591 | virJSONValue *idSchema = NULL;
> | ^~~~~~~~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5601:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5601 | const char *ebpfIds = virJSONValueObjectGetString(idSchema, "type");
> | ^~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5606:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5606 | virJSONValue *ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values");
> | ^~~~~~~~~~~~
> ../../../libvirt/src/qemu/qemu_capabilities.c:5611:5: error: ISO C90 forbids mixed declarations and code [-Werror=declaration-after-statement]
> 5611 | virJSONValue *id = NULL;
> | ^~~~~~~~~~~~
> cc1: all warnings being treated as errors
>
>
> After the compile error is fixed there are further errors caught by
> syntax-check.
>
>
> > @@ -789,6 +790,9 @@ struct _virQEMUCaps {
> > virQEMUCapsAccel kvm;
> > virQEMUCapsAccel hvf;
> > virQEMUCapsAccel tcg;
> > +
> > + /* Hash of ebpf objects virQEMUEbpfOBjectData */
> > + GHashTable *ebpfObjects;
> > };
> >
> > struct virQEMUCapsSearchData {
> > @@ -796,6 +800,18 @@ struct virQEMUCapsSearchData {
> > const char *binaryFilter;
> > };
> >
> > +struct virQEMUEbpfOBjectData {
> > + void *ebpfData;
> > + size_t ebpfSize;
> > +};
> > +
> > +void virQEMUEbpfOBjectDataDestroy(gpointer data) {
> > + if (!data)
> > + return;
> > + struct virQEMUEbpfOBjectData *object = data;
> > + g_free(object->ebpfData);
> > + g_free(data);
> > +}
>
>
> As noted, store the program as a base64 string throughout libvirt;
> none of the above will be needed.
Ok.
>
>
> >
> > static virClass *virQEMUCapsClass;
> > static void virQEMUCapsDispose(void *obj);
> > @@ -836,6 +852,19 @@ const char *virQEMUCapsArchToString(virArch arch)
> > }
> >
> >
> > +const void *
> > +virQEMUCapsGetEbpf(virQEMUCaps *qemuCaps, const char *id, size_t *size)
> > +{
> > + struct virQEMUEbpfOBjectData *object = virHashLookup(qemuCaps->ebpfObjects, id);
> > +
> > + if (!object)
> > + return NULL;
> > +
> > + *size = object->ebpfSize;
> > + return object->ebpfData;
> > +}
> > +
> > +
> > /* Checks whether a domain with @guest arch can run natively on @host.
> > */
> > bool
> > @@ -1426,6 +1455,7 @@ static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsVirtioBlk[] = {
> > static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsVirtioNet[] = {
> > { "acpi-index", QEMU_CAPS_ACPI_INDEX, NULL },
> > { "rss", QEMU_CAPS_VIRTIO_NET_RSS, NULL },
> > + { "ebpf_rss_fds", QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS, NULL },
>
> The QEMU_CAPS_VIRTIO_NET_EBPF_RSS_FDS capability is not actually
> connected to the 'request-ebpf' output caching.
>
> The ebpf programs are cached if the 'request-ebpf' command is present
> and in such case all programs are cached.
>
> Please split the patch such that one adds the 'request-ebpf' related
> changes and the second one adds the virtio-net capability.
I'll prepare new hunks in the next version.
>
> > };
> >
> > static struct virQEMUCapsDevicePropsFlags virQEMUCapsDevicePropsPCIeRootPort[] = {
> > @@ -1804,6 +1834,8 @@ virQEMUCapsNew(void)
> > qemuCaps->invalidation = true;
> > qemuCaps->flags = virBitmapNew(QEMU_CAPS_LAST);
> >
> > + qemuCaps->ebpfObjects = virHashNew(virQEMUEbpfOBjectDataDestroy);
> > +
> > return qemuCaps;
> > }
> >
> > @@ -1984,6 +2016,8 @@ virQEMUCaps *virQEMUCapsNewCopy(virQEMUCaps *qemuCaps)
> > ret->hypervCapabilities = g_memdup(qemuCaps->hypervCapabilities,
> > sizeof(virDomainCapsFeatureHyperv));
> >
> > + ret->ebpfObjects = g_hash_table_ref(qemuCaps->ebpfObjects);
>
> virQEMUCapsNewCopy is a deep copy function, thus you must deep copy the
> objects.
>
> > +
> > return g_steal_pointer(&ret);
>
> [...]
>
> > @@ -4541,6 +4577,46 @@ virQEMUCapsValidateArch(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
> > }
> >
> >
> > +static int
> > +virQEMUCapsParseEbpfObjects(virQEMUCaps *qemuCaps, xmlXPathContextPtr ctxt)
> > +{
> > + g_autofree xmlNodePtr *nodes = NULL;
> > + size_t i;
> > + int n;
> > +
> > + if ((n = virXPathNodeSet("./ebpf", ctxt, &nodes)) < 0) {
> > + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> > + _("failed to parse qemu cached eBPF object"));
> > + return -1;
> > + }
> > +
> > + for (i = 0; i < n; i++) {
> > + g_autofree char *id = NULL;
> > + g_autofree char *ebpf = NULL;
> > + struct virQEMUEbpfOBjectData *ebpfObject = NULL;
> > +
> > + if (!(id = virXMLPropString(nodes[i], "id"))) {
> > + virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
> > + _("missing eBPF object ID in QEMU capabilities cache"));
>
> Use virXMLPropStringRequired instead of reporting a custom error.
>
>
> > + return -1;
> > + }
> > +
> > + if (!(ebpf = virXMLNodeContentString(nodes[i]))) {
> > + virReportError(VIR_ERR_INTERNAL_ERROR, "%s %s",
>
> broken format string in the error
>
> > + _("can't get eBPF base64 data from cache for ID: "), id);
>
> Store the program as an attribute
>
>
> > + return -1;
> > + }
> > +
> > + ebpfObject = g_malloc(sizeof(*ebpfObject));
>
> we use exclusively g_new0 for allocation of structs.
>
> > + ebpfObject->ebpfData = g_base64_decode(ebpf, &ebpfObject->ebpfSize);
>
> As noted, store this as a string and you'll save this step here.
>
> > +
> > + virHashAddEntry(qemuCaps->ebpfObjects, id, ebpfObject);
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +
> > /*
> > * Parsing a doc that looks like
> > *
> > @@ -4688,6 +4764,8 @@ virQEMUCapsLoadCache(virArch hostArch,
> > if (skipInvalidation)
> > qemuCaps->invalidation = false;
> >
> > + virQEMUCapsParseEbpfObjects(qemuCaps, ctxt);
>
> So the above function reports many errors, yet here you don't bother checking
> for them.
>
> > +
> > return 0;
> > }
> >
> > @@ -4925,6 +5003,32 @@ virQEMUCapsFormatHypervCapabilities(virQEMUCaps *qemuCaps,
> > }
> >
> >
> > +static int
> > +virQEMUCapsFormatEbpfObjectsIterator(void *payload, const char *name, void *opaque)
> > +{
> > + virBuffer *buf = opaque;
> > + struct virQEMUEbpfOBjectData *object = payload;
> > + g_autofree char *objectBase64 = NULL;
> > +
> > + if (!buf || !object)
>
> 'buf' can't be NULL here, and neither can 'object'.
>
> > + return 0;
> > +
> > + objectBase64 = g_base64_encode(object->ebpfData, object->ebpfSize);
> > + if (!objectBase64)
>
> g_base64_encode can't return NULL thus this check is pointless.
>
> > + return 0;
> > +
> > + virBufferAsprintf(buf, "<ebpf id='%s'>\n", name);
> > + virBufferAdjustIndent(buf, 2);
> > +
> > + virBufferAddStr(buf, objectBase64);
> > + virBufferAddLit(buf, "\n");
> > +
> > + virBufferAdjustIndent(buf, -2);
> > + virBufferAddLit(buf, "</ebpf>\n");
>
> This format is not acceptable. As noted, store it in an attribute
> instead of the element contents so that we can reasonably add subelements
> should they become required.
>
> > +
> > + return 0;
> > +}
> > +
> > char *
> > virQEMUCapsFormatCache(virQEMUCaps *qemuCaps)
> > {
> > @@ -5015,6 +5119,8 @@ virQEMUCapsFormatCache(virQEMUCaps *qemuCaps)
> > if (qemuCaps->kvmSupportsSecureGuest)
> > virBufferAddLit(&buf, "<kvmSupportsSecureGuest/>\n");
> >
> > + virHashForEach(qemuCaps->ebpfObjects, virQEMUCapsFormatEbpfObjectsIterator, &buf);
>
> Missing container around the '<ebpf>' elements. I know that XML allows
> multiple, but we prefer stashing them in a container.
>
> Also use virHashForEachSorted here. It's obviously less efficient, but
> the capability cache is also handled in tests thus we must use a variant
> which has stable output.
Ok. Should it be something like this?
```
<ebpfs>
  <ebpf id="rss" data="..."/>
  <ebpf id="filter" data="..."/>
  ...
</ebpfs>
```
>
> > +
> > virBufferAdjustIndent(&buf, -2);
> > virBufferAddLit(&buf, "</qemuCaps>\n");
> >
> > @@ -5437,6 +5543,79 @@ virQEMUCapsInitProcessCaps(virQEMUCaps *qemuCaps)
> > }
> >
> >
> > +static int
> > +virQEMUCapsProbeQMPEbpfObject(virQEMUCaps *qemuCaps, const char *id,
> > + qemuMonitor *mon)
> > +{
> > + void *ebpfObject = NULL;
> > + size_t size = 0;
> > +
> > + if (!id)
> > + return -1;
>
> This can't happen. If it did you'd not report an error.
>
> > +
> > + ebpfObject = qemuMonitorGetEbpf(mon, id, &size);
> > + if(ebpfObject == NULL)
>
> As noted in the previous patch, you can't determine here that NULL is a
> failure. And you'd propagate the non-failure (without an error set) to the
> upper layers.
>
> > + return -1;
> > +
> > + struct virQEMUEbpfOBjectData *object = g_malloc(sizeof(*object));
> > + object->ebpfData = ebpfObject;
> > + object->ebpfSize = size;
> > +
> > + virHashAddEntry(qemuCaps->ebpfObjects, id, object);
>
> This can return error and is not checked.
>
> > +
> > + return 0;
> > +}
> > +
> > +static void virQEMUCapsProbeQMPSchemaEbpf(virQEMUCaps *qemuCaps, GHashTable* schema, qemuMonitor *mon) {
> > + if (schema == NULL)
> > + return;
>
> Can't happen.
>
> > +
> > + virJSONValue *requestSchema = virHashLookup(schema, "request-ebpf");
> > + if (!requestSchema)
> > + return;
> > +
> > + const char *argType = virJSONValueObjectGetString(requestSchema, "arg-type");
> > + if (!argType)
> > + return;
> > +
> > + virJSONValue *argSchema = virHashLookup(schema, argType);
> > + if (!argSchema)
> > + return;
> > +
> > + /* Get member id type*/
> > + virJSONValue *members = virJSONValueObjectGetArray(argSchema, "members");
> > + if (!members)
> > + return;
> > +
> > + /* Find "id" type */
> > + virJSONValue *idSchema = NULL;
> > + for (size_t i = 0; (idSchema = virJSONValueArrayGet(members, i)) != NULL; ++i) {
> > + const char *name = virJSONValueObjectGetString(idSchema, "name");
> > + if (strncmp(name, "id", 3) == 0)
> > + break;
> > + }
> > +
> > + if (!idSchema)
> > + return;
> > +
> > + const char *ebpfIds = virJSONValueObjectGetString(idSchema, "type");
> > + virJSONValue *ebpfIdsSchema = virHashLookup(schema, ebpfIds);
> > + if (!ebpfIdsSchema)
> > + return;
> > +
> > + virJSONValue *ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values");
> > + if (!ebpfIdsArray)
> > + return;
>
> Instead of all of the above please use our existing schema query
> infrastructure:
>
> virQEMUQAPISchemaPathGet("request-ebpf/arg-type/id", schema, &ebpfIdsArray)
>
> This should fill ebpfIdsArray with what the whole code above does.
>
>
> Since the above code fails to compile I'll replace it with my variant:
>
> static int
> virQEMUCapsProbeQMPSchemaEbpf(virQEMUCaps *qemuCaps,
>                               GHashTable* schema,
>                               qemuMonitor *mon)
> {
>     virJSONValue *ebpfIdsArray;
>     virJSONValue *ebpfIdsSchema;
>     size_t i;
>
>     if (virQEMUQAPISchemaPathGet("request-ebpf/arg-type/id", schema, &ebpfIdsSchema) != 1)
>         return 0;
>
>     if (!(ebpfIdsArray = virJSONValueObjectGetArray(ebpfIdsSchema, "values"))) {
>         virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
>                        _("malformed QMP schema of 'request-ebpf'"));
>         return -1;
>     }
>
>     /* Try to request every eBPF */
>     for (i = 0; i < virJSONValueArraySize(ebpfIdsArray); i++) {
>         virJSONValue *id = virJSONValueArrayGet(ebpfIdsArray, i);
>
>         if (virQEMUCapsProbeQMPEbpfObject(qemuCaps,
>                                           virJSONValueGetString(id),
>                                           mon) < 0)
>             return -1;
>     }
>
>     return 0;
> }
>
Thank you - I missed the existing schema query infrastructure and did it the hard way.
Your code will be used in the next patches.
>
> > +
> > + /* Try to request every eBPF */
> > + virJSONValue *id = NULL;
> > + for (size_t i = 0; (id = virJSONValueArrayGet(ebpfIdsArray, i)) != NULL; ++i) {
> > + const char *name = virJSONValueGetString(id);
> > + virQEMUCapsProbeQMPEbpfObject(qemuCaps, name, mon);
>
> Ah, so you in the end ignore all the errors regardless.
Well, the idea was that if we can't retrieve an eBPF object for some reason, it
shouldn't stop the workflow - work continues without eBPF if necessary.
>
>
> > + }
> > +}
> > +
> > +
> > static int
> > virQEMUCapsProbeQMPSchemaCapabilities(virQEMUCaps *qemuCaps,
> > qemuMonitor *mon)
> > @@ -5466,6 +5645,8 @@ virQEMUCapsProbeQMPSchemaCapabilities(virQEMUCaps *qemuCaps,
> > virQEMUCapsSet(qemuCaps, cmd->flag);
> > }
> >
> > + virQEMUCapsProbeQMPSchemaEbpf(qemuCaps, schema, mon);
> > +
> > return 0;
> > }
> >
>