Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@yandex-team.ru>
---
hw/net/virtio-net.c | 118 +++++++++++++++++++++-----------------------
1 file changed, 55 insertions(+), 63 deletions(-)
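Context for the conversion below: this patch relies on the Error-propagating vmstate hooks introduced earlier in the series (pre_save_errp / pre_load_errp / post_load_errp in VMStateDescription, and load / save in VMStateInfo). Their shapes, as inferred from the callbacks converted in this patch, are roughly the following sketch; the typedef names are illustrative only, and the authoritative declarations live in include/migration/vmstate.h:

/* Each hook returns true on success, or sets *errp and returns false
 * (inferred from this patch; rough sketch, not the real declarations). */
typedef bool VMStatePreSaveErrp(void *opaque, Error **errp);
typedef bool VMStatePreLoadErrp(void *opaque, Error **errp);
typedef bool VMStatePostLoadErrp(void *opaque, int version_id, Error **errp);

/* VMStateInfo .load/.save take over from the int-returning .get/.put. */
typedef bool VMStateLoadErrp(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, Error **errp);
typedef bool VMStateSaveErrp(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc,
                             Error **errp);
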
diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c
index 33116712eb..23be0e3047 100644
--- a/hw/net/virtio-net.c
+++ b/hw/net/virtio-net.c
@@ -3180,7 +3180,8 @@ static void virtio_net_get_features(VirtIODevice *vdev, uint64_t *features,
}
}
-static int virtio_net_post_load_device(void *opaque, int version_id)
+static bool virtio_net_post_load_device(void *opaque, int version_id,
+ Error **errp)
{
VirtIONet *n = opaque;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
@@ -3243,7 +3244,7 @@ static int virtio_net_post_load_device(void *opaque, int version_id)
}
virtio_net_commit_rss_config(n);
- return 0;
+ return true;
}
static int virtio_net_post_load_virtio(VirtIODevice *vdev)
@@ -3309,7 +3310,7 @@ struct VirtIONetMigTmp {
* pointer and count and also validate the count.
*/
-static int virtio_net_tx_waiting_pre_save(void *opaque)
+static bool virtio_net_tx_waiting_pre_save(void *opaque, Error **errp)
{
struct VirtIONetMigTmp *tmp = opaque;
@@ -3319,30 +3320,30 @@ static int virtio_net_tx_waiting_pre_save(void *opaque)
tmp->curr_queue_pairs_1 = 0;
}
- return 0;
+ return true;
}
-static int virtio_net_tx_waiting_pre_load(void *opaque)
+static bool virtio_net_tx_waiting_pre_load(void *opaque, Error **errp)
{
struct VirtIONetMigTmp *tmp = opaque;
/* Reuse the pointer setup from save */
- virtio_net_tx_waiting_pre_save(opaque);
+ virtio_net_tx_waiting_pre_save(opaque, &error_abort);
if (tmp->parent->curr_queue_pairs > tmp->parent->max_queue_pairs) {
- error_report("virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
+ error_setg(errp, "virtio-net: curr_queue_pairs %x > max_queue_pairs %x",
tmp->parent->curr_queue_pairs, tmp->parent->max_queue_pairs);
- return -EINVAL;
+ return false;
}
- return 0; /* all good */
+ return true;
}
static const VMStateDescription vmstate_virtio_net_tx_waiting = {
.name = "virtio-net-tx_waiting",
- .pre_load = virtio_net_tx_waiting_pre_load,
- .pre_save = virtio_net_tx_waiting_pre_save,
+ .pre_load_errp = virtio_net_tx_waiting_pre_load,
+ .pre_save_errp = virtio_net_tx_waiting_pre_save,
.fields = (const VMStateField[]) {
VMSTATE_STRUCT_VARRAY_POINTER_UINT16(vqs_1, struct VirtIONetMigTmp,
curr_queue_pairs_1,
@@ -3355,31 +3356,31 @@ static const VMStateDescription vmstate_virtio_net_tx_waiting = {
/* the 'has_ufo' flag is just tested; if the incoming stream has the
* flag set we need to check that we have it
*/
-static int virtio_net_ufo_post_load(void *opaque, int version_id)
+static bool virtio_net_ufo_post_load(void *opaque, int version_id, Error **errp)
{
struct VirtIONetMigTmp *tmp = opaque;
if (tmp->has_ufo && !peer_has_ufo(tmp->parent)) {
- error_report("virtio-net: saved image requires TUN_F_UFO support");
- return -EINVAL;
+ error_setg(errp, "virtio-net: saved image requires TUN_F_UFO support");
+ return false;
}
- return 0;
+ return true;
}
-static int virtio_net_ufo_pre_save(void *opaque)
+static bool virtio_net_ufo_pre_save(void *opaque, Error **errp)
{
struct VirtIONetMigTmp *tmp = opaque;
tmp->has_ufo = tmp->parent->has_ufo;
- return 0;
+ return true;
}
static const VMStateDescription vmstate_virtio_net_has_ufo = {
.name = "virtio-net-ufo",
- .post_load = virtio_net_ufo_post_load,
- .pre_save = virtio_net_ufo_pre_save,
+ .post_load_errp = virtio_net_ufo_post_load,
+ .pre_save_errp = virtio_net_ufo_pre_save,
.fields = (const VMStateField[]) {
VMSTATE_UINT8(has_ufo, struct VirtIONetMigTmp),
VMSTATE_END_OF_LIST()
@@ -3389,38 +3390,39 @@ static const VMStateDescription vmstate_virtio_net_has_ufo = {
/* the 'has_vnet_hdr' flag is just tested; if the incoming stream has the
* flag set we need to check that we have it
*/
-static int virtio_net_vnet_post_load(void *opaque, int version_id)
+static bool virtio_net_vnet_post_load(void *opaque, int version_id,
+ Error **errp)
{
struct VirtIONetMigTmp *tmp = opaque;
if (tmp->has_vnet_hdr && !peer_has_vnet_hdr(tmp->parent)) {
- error_report("virtio-net: saved image requires vnet_hdr=on");
- return -EINVAL;
+ error_setg(errp, "virtio-net: saved image requires vnet_hdr=on");
+ return false;
}
- return 0;
+ return true;
}
-static int virtio_net_vnet_pre_save(void *opaque)
+static bool virtio_net_vnet_pre_save(void *opaque, Error **errp)
{
struct VirtIONetMigTmp *tmp = opaque;
tmp->has_vnet_hdr = tmp->parent->has_vnet_hdr;
- return 0;
+ return true;
}
static const VMStateDescription vmstate_virtio_net_has_vnet = {
.name = "virtio-net-vnet",
- .post_load = virtio_net_vnet_post_load,
- .pre_save = virtio_net_vnet_pre_save,
+ .post_load_errp = virtio_net_vnet_post_load,
+ .pre_save_errp = virtio_net_vnet_pre_save,
.fields = (const VMStateField[]) {
VMSTATE_UINT32(has_vnet_hdr, struct VirtIONetMigTmp),
VMSTATE_END_OF_LIST()
},
};
-static int virtio_net_rss_post_load(void *opaque, int version_id)
+static bool virtio_net_rss_post_load(void *opaque, int version_id, Error **errp)
{
VirtIONet *n = VIRTIO_NET(opaque);
@@ -3428,7 +3430,7 @@ static int virtio_net_rss_post_load(void *opaque, int version_id)
n->rss_data.supported_hash_types = VIRTIO_NET_RSS_SUPPORTED_HASHES;
}
- return 0;
+ return true;
}
static bool virtio_net_rss_needed(void *opaque)
@@ -3440,7 +3442,7 @@ static const VMStateDescription vmstate_virtio_net_rss = {
.name = "virtio-net-device/rss",
.version_id = 2,
.minimum_version_id = 1,
- .post_load = virtio_net_rss_post_load,
+ .post_load_errp = virtio_net_rss_post_load,
.needed = virtio_net_rss_needed,
.fields = (const VMStateField[]) {
VMSTATE_BOOL(rss_data.enabled, VirtIONet),
@@ -3482,61 +3484,51 @@ static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev)
return &net->dev;
}
-static int vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field,
- JSONWriter *vmdesc)
+static bool vhost_user_net_save_state(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field,
+ JSONWriter *vmdesc, Error **errp)
{
VirtIONet *n = pv;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct vhost_dev *vhdev;
- Error *local_error = NULL;
int ret;
vhdev = virtio_net_get_vhost(vdev);
if (vhdev == NULL) {
- error_reportf_err(local_error,
- "Error getting vhost back-end of %s device %s: ",
- vdev->name, vdev->parent_obj.canonical_path);
- return -1;
+ error_setg(errp, "Error getting vhost back-end of %s device %s",
+ vdev->name, vdev->parent_obj.canonical_path);
+ return false;
}
- ret = vhost_save_backend_state(vhdev, f, &local_error);
+ ret = vhost_save_backend_state(vhdev, f, errp);
if (ret < 0) {
- error_reportf_err(local_error,
- "Error saving back-end state of %s device %s: ",
- vdev->name, vdev->parent_obj.canonical_path);
- return ret;
+ return false;
}
- return 0;
+ return true;
}
-static int vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
- const VMStateField *field)
+static bool vhost_user_net_load_state(QEMUFile *f, void *pv, size_t size,
+ const VMStateField *field, Error **errp)
{
VirtIONet *n = pv;
VirtIODevice *vdev = VIRTIO_DEVICE(n);
struct vhost_dev *vhdev;
- Error *local_error = NULL;
int ret;
vhdev = virtio_net_get_vhost(vdev);
if (vhdev == NULL) {
- error_reportf_err(local_error,
- "Error getting vhost back-end of %s device %s: ",
- vdev->name, vdev->parent_obj.canonical_path);
- return -1;
+ error_setg(errp, "Error getting vhost back-end of %s device %s",
+ vdev->name, vdev->parent_obj.canonical_path);
+ return false;
}
- ret = vhost_load_backend_state(vhdev, f, &local_error);
+ ret = vhost_load_backend_state(vhdev, f, errp);
if (ret < 0) {
- error_reportf_err(local_error,
- "Error loading back-end state of %s device %s: ",
- vdev->name, vdev->parent_obj.canonical_path);
- return ret;
+ return false;
}
- return 0;
+ return true;
}
static bool vhost_user_net_is_internal_migration(void *opaque)
@@ -3562,8 +3554,8 @@ static const VMStateDescription vhost_user_net_backend_state = {
.name = "backend",
.info = &(const VMStateInfo) {
.name = "virtio-net vhost-user backend state",
- .get = vhost_user_net_load_state,
- .put = vhost_user_net_save_state,
+ .load = vhost_user_net_load_state,
+ .save = vhost_user_net_save_state,
},
},
VMSTATE_END_OF_LIST()
@@ -3574,7 +3566,7 @@ static const VMStateDescription vmstate_virtio_net_device = {
.name = "virtio-net-device",
.version_id = VIRTIO_NET_VM_VERSION,
.minimum_version_id = VIRTIO_NET_VM_VERSION,
- .post_load = virtio_net_post_load_device,
+ .post_load_errp = virtio_net_post_load_device,
.fields = (const VMStateField[]) {
VMSTATE_UINT8_ARRAY(mac, VirtIONet, ETH_ALEN),
VMSTATE_STRUCT_POINTER(vqs, VirtIONet,
@@ -4144,7 +4136,7 @@ static void virtio_net_instance_init(Object *obj)
ebpf_rss_init(&n->ebpf_rss);
}
-static int virtio_net_pre_save(void *opaque)
+static bool virtio_net_pre_save(void *opaque, Error **errp)
{
VirtIONet *n = opaque;
@@ -4152,7 +4144,7 @@ static int virtio_net_pre_save(void *opaque)
* it might keep writing to memory. */
assert(!n->vhost_started);
- return 0;
+ return true;
}
static bool primary_unplug_pending(void *opaque)
@@ -4185,7 +4177,7 @@ static const VMStateDescription vmstate_virtio_net = {
VMSTATE_VIRTIO_DEVICE,
VMSTATE_END_OF_LIST()
},
- .pre_save = virtio_net_pre_save,
+ .pre_save_errp = virtio_net_pre_save,
.dev_unplug_pending = dev_unplug_pending,
};
--
2.48.1