We only create/destroy the page list here. We will use it later.
Signed-off-by: Juan Quintela <quintela@redhat.com>
---
migration/ram.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 57 insertions(+)
diff --git a/migration/ram.c b/migration/ram.c
index 5bcbf7a9f9..23cc5625eb 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -510,6 +510,20 @@ typedef struct {
uint8_t id;
} __attribute__((packed)) MultiFDInit_t;
+typedef struct {
+ /* number of used pages */
+ uint32_t used;
+ /* number of allocated pages */
+ uint32_t allocated;
+ /* global number of generated multifd packets */
+ uint32_t seq;
+ /* offset of each page */
+ ram_addr_t *offset;
+ /* pointer to each page */
+ struct iovec *iov;
+ RAMBlock *block;
+} MultiFDPages_t;
+
typedef struct {
/* this fields are not changed once the thread is created */
/* channel number */
@@ -528,6 +542,8 @@ typedef struct {
bool running;
/* should this thread finish */
bool quit;
+ /* array of pages to send */
+ MultiFDPages_t *pages;
} MultiFDSendParams;
typedef struct {
@@ -548,6 +564,8 @@ typedef struct {
bool running;
/* should this thread finish */
bool quit;
+ /* array of pages to receive */
+ MultiFDPages_t *pages;
} MultiFDRecvParams;
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
@@ -612,10 +630,36 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
return msg.id;
}
+static MultiFDPages_t *multifd_pages_init(size_t size)
+{
+ MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
+
+ pages->allocated = size;
+ pages->iov = g_new0(struct iovec, size);
+ pages->offset = g_new0(ram_addr_t, size);
+
+ return pages;
+}
+
+static void multifd_pages_clear(MultiFDPages_t *pages)
+{
+ pages->used = 0;
+ pages->allocated = 0;
+ pages->seq = 0;
+ pages->block = NULL;
+ g_free(pages->iov);
+ pages->iov = NULL;
+ g_free(pages->offset);
+ pages->offset = NULL;
+ g_free(pages);
+}
+
struct {
MultiFDSendParams *params;
/* number of created threads */
int count;
+ /* array of pages to send */
+ MultiFDPages_t *pages;
} *multifd_send_state;
static void multifd_send_terminate_threads(Error *err)
@@ -665,9 +709,13 @@ int multifd_save_cleanup(Error **errp)
qemu_sem_destroy(&p->sem);
g_free(p->name);
p->name = NULL;
+ multifd_pages_clear(p->pages);
+ p->pages = NULL;
}
g_free(multifd_send_state->params);
multifd_send_state->params = NULL;
+ multifd_pages_clear(multifd_send_state->pages);
+ multifd_send_state->pages = NULL;
g_free(multifd_send_state);
multifd_send_state = NULL;
return ret;
@@ -728,6 +776,7 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
int multifd_save_setup(void)
{
int thread_count;
+ uint32_t page_count = migrate_multifd_page_count();
uint8_t i;
if (!migrate_use_multifd()) {
@@ -737,6 +786,8 @@ int multifd_save_setup(void)
multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
atomic_set(&multifd_send_state->count, 0);
+ multifd_send_state->pages = multifd_pages_init(page_count);
+
for (i = 0; i < thread_count; i++) {
MultiFDSendParams *p = &multifd_send_state->params[i];
@@ -744,6 +795,7 @@ int multifd_save_setup(void)
qemu_sem_init(&p->sem, 0);
p->quit = false;
p->id = i;
+ p->pages = multifd_pages_init(page_count);
p->name = g_strdup_printf("multifdsend_%d", i);
socket_send_channel_create(multifd_new_send_channel_async, p);
}
@@ -801,6 +853,8 @@ int multifd_load_cleanup(Error **errp)
qemu_sem_destroy(&p->sem);
g_free(p->name);
p->name = NULL;
+ multifd_pages_clear(p->pages);
+ p->pages = NULL;
}
g_free(multifd_recv_state->params);
multifd_recv_state->params = NULL;
@@ -834,6 +888,7 @@ static void *multifd_recv_thread(void *opaque)
int multifd_load_setup(void)
{
int thread_count;
+ uint32_t page_count = migrate_multifd_page_count();
uint8_t i;
if (!migrate_use_multifd()) {
@@ -843,6 +898,7 @@ int multifd_load_setup(void)
multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
atomic_set(&multifd_recv_state->count, 0);
+
for (i = 0; i < thread_count; i++) {
MultiFDRecvParams *p = &multifd_recv_state->params[i];
@@ -850,6 +906,7 @@ int multifd_load_setup(void)
qemu_sem_init(&p->sem, 0);
p->quit = false;
p->id = i;
+ p->pages = multifd_pages_init(page_count);
p->name = g_strdup_printf("multifdrecv_%d", i);
}
return 0;
--
2.17.0
* Juan Quintela (quintela@redhat.com) wrote:
> We only create/destroy the page list here. We will use it later.
>
> Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
> migration/ram.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 57 insertions(+)
>
> diff --git a/migration/ram.c b/migration/ram.c
> index 5bcbf7a9f9..23cc5625eb 100644
> --- a/migration/ram.c
> +++ b/migration/ram.c
> @@ -510,6 +510,20 @@ typedef struct {
> uint8_t id;
> } __attribute__((packed)) MultiFDInit_t;
>
> +typedef struct {
> + /* number of used pages */
> + uint32_t used;
> + /* number of allocated pages */
> + uint32_t allocated;
> + /* global number of generated multifd packets */
> + uint32_t seq;
> + /* offset of each page */
> + ram_addr_t *offset;
> + /* pointer to each page */
> + struct iovec *iov;
> + RAMBlock *block;
> +} MultiFDPages_t;
> +
> typedef struct {
> /* this fields are not changed once the thread is created */
> /* channel number */
> @@ -528,6 +542,8 @@ typedef struct {
> bool running;
> /* should this thread finish */
> bool quit;
> + /* array of pages to send */
> + MultiFDPages_t *pages;
> } MultiFDSendParams;
>
> typedef struct {
> @@ -548,6 +564,8 @@ typedef struct {
> bool running;
> /* should this thread finish */
> bool quit;
> + /* array of pages to receive */
> + MultiFDPages_t *pages;
> } MultiFDRecvParams;
>
> static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
> @@ -612,10 +630,36 @@ static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
> return msg.id;
> }
>
> +static MultiFDPages_t *multifd_pages_init(size_t size)
> +{
> + MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
> +
> + pages->allocated = size;
> + pages->iov = g_new0(struct iovec, size);
> + pages->offset = g_new0(ram_addr_t, size);
> +
> + return pages;
> +}
> +
> +static void multifd_pages_clear(MultiFDPages_t *pages)
> +{
> + pages->used = 0;
> + pages->allocated = 0;
> + pages->seq = 0;
> + pages->block = NULL;
> + g_free(pages->iov);
> + pages->iov = NULL;
> + g_free(pages->offset);
> + pages->offset = NULL;
> + g_free(pages);
> +}
> +
> struct {
> MultiFDSendParams *params;
> /* number of created threads */
> int count;
> + /* array of pages to send */
> + MultiFDPages_t *pages;
> } *multifd_send_state;
>
> static void multifd_send_terminate_threads(Error *err)
> @@ -665,9 +709,13 @@ int multifd_save_cleanup(Error **errp)
> qemu_sem_destroy(&p->sem);
> g_free(p->name);
> p->name = NULL;
> + multifd_pages_clear(p->pages);
> + p->pages = NULL;
> }
> g_free(multifd_send_state->params);
> multifd_send_state->params = NULL;
> + multifd_pages_clear(multifd_send_state->pages);
> + multifd_send_state->pages = NULL;
> g_free(multifd_send_state);
> multifd_send_state = NULL;
> return ret;
> @@ -728,6 +776,7 @@ static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
> int multifd_save_setup(void)
> {
> int thread_count;
> + uint32_t page_count = migrate_multifd_page_count();
> uint8_t i;
>
> if (!migrate_use_multifd()) {
> @@ -737,6 +786,8 @@ int multifd_save_setup(void)
> multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
> multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
> atomic_set(&multifd_send_state->count, 0);
> + multifd_send_state->pages = multifd_pages_init(page_count);
> +
> for (i = 0; i < thread_count; i++) {
> MultiFDSendParams *p = &multifd_send_state->params[i];
>
> @@ -744,6 +795,7 @@ int multifd_save_setup(void)
> qemu_sem_init(&p->sem, 0);
> p->quit = false;
> p->id = i;
> + p->pages = multifd_pages_init(page_count);
> p->name = g_strdup_printf("multifdsend_%d", i);
> socket_send_channel_create(multifd_new_send_channel_async, p);
> }
> @@ -801,6 +853,8 @@ int multifd_load_cleanup(Error **errp)
> qemu_sem_destroy(&p->sem);
> g_free(p->name);
> p->name = NULL;
> + multifd_pages_clear(p->pages);
> + p->pages = NULL;
> }
> g_free(multifd_recv_state->params);
> multifd_recv_state->params = NULL;
> @@ -834,6 +888,7 @@ static void *multifd_recv_thread(void *opaque)
> int multifd_load_setup(void)
> {
> int thread_count;
> + uint32_t page_count = migrate_multifd_page_count();
> uint8_t i;
>
> if (!migrate_use_multifd()) {
> @@ -843,6 +898,7 @@ int multifd_load_setup(void)
> multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
> multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
> atomic_set(&multifd_recv_state->count, 0);
> +
> for (i = 0; i < thread_count; i++) {
> MultiFDRecvParams *p = &multifd_recv_state->params[i];
>
> @@ -850,6 +906,7 @@ int multifd_load_setup(void)
> qemu_sem_init(&p->sem, 0);
> p->quit = false;
> p->id = i;
> + p->pages = multifd_pages_init(page_count);
> p->name = g_strdup_printf("multifdrecv_%d", i);
> }
> return 0;
> --
> 2.17.0
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
© 2016 - 2025 Red Hat, Inc.