Record hash results for each sampled page; crc32 is used to calculate
the hash result for each sampled 4K page.
Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
---
migration/dirtyrate.c | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++
migration/dirtyrate.h | 15 ++++++
2 files changed, 151 insertions(+)
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index f6a94d8..66de426 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -10,6 +10,7 @@
* See the COPYING file in the top-level directory.
*/
+#include <zlib.h>
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "crypto/hash.h"
@@ -66,6 +67,141 @@ static void update_dirtyrate(uint64_t msec)
DirtyStat.dirty_rate = dirtyrate;
}
+/*
+ * get hash result for the sampled memory with length of 4K byte in ramblock,
+ * which starts from ramblock base address.
+ */
+static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
+ uint64_t vfn)
+{
+ struct iovec iov_array;
+ uint32_t crc;
+
+ iov_array.iov_base = info->ramblock_addr +
+ vfn * DIRTYRATE_SAMPLE_PAGE_SIZE;
+ iov_array.iov_len = DIRTYRATE_SAMPLE_PAGE_SIZE;
+
+ crc = crc32(0, iov_array.iov_base, iov_array.iov_len);
+
+ return crc;
+}
+
+static int save_ramblock_hash(struct RamblockDirtyInfo *info)
+{
+ unsigned int sample_pages_count;
+ int i;
+ int ret = -1;
+ GRand *rand = g_rand_new();
+
+ sample_pages_count = info->sample_pages_count;
+
+ /* ramblock size less than one page, return success to skip this ramblock */
+ if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
+ ret = 0;
+ goto out;
+ }
+
+ info->hash_result = g_try_malloc0_n(sample_pages_count,
+ sizeof(uint32_t));
+ if (!info->hash_result) {
+ ret = -1;
+ goto out;
+ }
+
+ info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
+ sizeof(uint64_t));
+ if (!info->sample_page_vfn) {
+ g_free(info->hash_result);
+ ret = -1;
+ goto out;
+ }
+
+ for (i = 0; i < sample_pages_count; i++) {
+ info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
+ info->ramblock_pages - 1);
+ info->hash_result[i] = get_ramblock_vfn_hash(info,
+ info->sample_page_vfn[i]);
+ }
+ ret = 0;
+
+out:
+ g_rand_free(rand);
+ return ret;
+}
+
+static void get_ramblock_dirty_info(RAMBlock *block,
+ struct RamblockDirtyInfo *info,
+ struct DirtyRateConfig *config)
+{
+ uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
+
+ /* Right shift 30 bits to calc block size in GB */
+ info->sample_pages_count = (qemu_ram_get_used_length(block) *
+ sample_pages_per_gigabytes) >>
+ DIRTYRATE_PAGE_SHIFT_GB;
+
+ /* Right shift 12 bits to calc page count in 4KB */
+ info->ramblock_pages = qemu_ram_get_used_length(block) >>
+ DIRTYRATE_PAGE_SHIFT_KB;
+ info->ramblock_addr = qemu_ram_get_host_addr(block);
+ strcpy(info->idstr, qemu_ram_get_idstr(block));
+}
+
+static struct RamblockDirtyInfo *
+alloc_ramblock_dirty_info(int *block_index,
+ struct RamblockDirtyInfo *block_dinfo)
+{
+ struct RamblockDirtyInfo *info = NULL;
+ int index = *block_index;
+
+ if (!block_dinfo) {
+ index = 0;
+ block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
+ } else {
+ index++;
+ block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
+ sizeof(struct RamblockDirtyInfo));
+ }
+ if (!block_dinfo) {
+ return NULL;
+ }
+
+ info = &block_dinfo[index];
+ *block_index = index;
+ memset(info, 0, sizeof(struct RamblockDirtyInfo));
+
+ return block_dinfo;
+}
+
+static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
+ struct DirtyRateConfig config,
+ int *block_index)
+{
+ struct RamblockDirtyInfo *info = NULL;
+ struct RamblockDirtyInfo *dinfo = NULL;
+ RAMBlock *block = NULL;
+ int index = 0;
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ dinfo = alloc_ramblock_dirty_info(&index, dinfo);
+ if (dinfo == NULL) {
+ return -1;
+ }
+ info = &dinfo[index];
+ get_ramblock_dirty_info(block, info, &config);
+ if (save_ramblock_hash(info) < 0) {
+ *block_dinfo = dinfo;
+ *block_index = index;
+ return -1;
+ }
+ }
+
+ *block_dinfo = dinfo;
+ *block_index = index;
+
+ return 0;
+}
+
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
/* todo */
diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
index 8e25d93..e3adead 100644
--- a/migration/dirtyrate.h
+++ b/migration/dirtyrate.h
@@ -24,6 +24,21 @@
*/
#define RAMBLOCK_INFO_MAX_LEN 256
+/*
+ * Sample page size 4K as default.
+ */
+#define DIRTYRATE_SAMPLE_PAGE_SIZE 4096
+
+/*
+ * Sample page size 4K shift
+ */
+#define DIRTYRATE_PAGE_SHIFT_KB 12
+
+/*
+ * Sample page size 1G shift
+ */
+#define DIRTYRATE_PAGE_SHIFT_GB 30
+
/* Take 1s as default for calculation duration */
#define DEFAULT_FETCH_DIRTYRATE_TIME_SEC 1
--
1.8.3.1
* Chuan Zheng (zhengchuan@huawei.com) wrote:
> Record hash results for each sampled page, crc32 is taken to calculate
> hash results for each sampled 4K-page.
>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> ---
> migration/dirtyrate.c | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++
> migration/dirtyrate.h | 15 ++++++
> 2 files changed, 151 insertions(+)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index f6a94d8..66de426 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -10,6 +10,7 @@
> * See the COPYING file in the top-level directory.
> */
>
> +#include <zlib.h>
> #include "qemu/osdep.h"
> #include "qapi/error.h"
> #include "crypto/hash.h"
> @@ -66,6 +67,141 @@ static void update_dirtyrate(uint64_t msec)
> DirtyStat.dirty_rate = dirtyrate;
> }
>
> +/*
> + * get hash result for the sampled memory with length of 4K byte in ramblock,
> + * which starts from ramblock base address.
> + */
> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> + uint64_t vfn)
> +{
> + struct iovec iov_array;
> + uint32_t crc;
> +
> + iov_array.iov_base = info->ramblock_addr +
> + vfn * DIRTYRATE_SAMPLE_PAGE_SIZE;
> + iov_array.iov_len = DIRTYRATE_SAMPLE_PAGE_SIZE;
> +
> + crc = crc32(0, iov_array.iov_base, iov_array.iov_len);
> +
> + return crc;
> +}
> +
> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
> +{
> + unsigned int sample_pages_count;
> + int i;
> + int ret = -1;
> + GRand *rand = g_rand_new();
> +
> + sample_pages_count = info->sample_pages_count;
> +
> + /* ramblock size less than one page, return success to skip this ramblock */
> + if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> + ret = 0;
> + goto out;
> + }
> +
> + info->hash_result = g_try_malloc0_n(sample_pages_count,
> + sizeof(uint32_t));
> + if (!info->hash_result) {
> + ret = -1;
> + goto out;
> + }
> +
> + info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> + sizeof(uint64_t));
> + if (!info->sample_page_vfn) {
> + g_free(info->hash_result);
> + ret = -1;
> + goto out;
> + }
> +
> + for (i = 0; i < sample_pages_count; i++) {
> + info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> + info->ramblock_pages - 1);
> + info->hash_result[i] = get_ramblock_vfn_hash(info,
> + info->sample_page_vfn[i]);
> + }
> + ret = 0;
> +
> +out:
> + g_rand_free(rand);
> + return ret;
> +}
> +
> +static void get_ramblock_dirty_info(RAMBlock *block,
> + struct RamblockDirtyInfo *info,
> + struct DirtyRateConfig *config)
> +{
> + uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> +
> + /* Right shift 30 bits to calc block size in GB */
> + info->sample_pages_count = (qemu_ram_get_used_length(block) *
> + sample_pages_per_gigabytes) >>
> + DIRTYRATE_PAGE_SHIFT_GB;
> +
> + /* Right shift 12 bits to calc page count in 4KB */
> + info->ramblock_pages = qemu_ram_get_used_length(block) >>
> + DIRTYRATE_PAGE_SHIFT_KB;
> + info->ramblock_addr = qemu_ram_get_host_addr(block);
> + strcpy(info->idstr, qemu_ram_get_idstr(block));
> +}
> +
> +static struct RamblockDirtyInfo *
> +alloc_ramblock_dirty_info(int *block_index,
> + struct RamblockDirtyInfo *block_dinfo)
> +{
> + struct RamblockDirtyInfo *info = NULL;
> + int index = *block_index;
> +
> + if (!block_dinfo) {
> + index = 0;
> + block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
> + } else {
> + index++;
> + block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
> + sizeof(struct RamblockDirtyInfo));
> + }
> + if (!block_dinfo) {
> + return NULL;
> + }
> +
> + info = &block_dinfo[index];
> + *block_index = index;
> + memset(info, 0, sizeof(struct RamblockDirtyInfo));
> +
> + return block_dinfo;
> +}
> +
> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> + struct DirtyRateConfig config,
> + int *block_index)
> +{
> + struct RamblockDirtyInfo *info = NULL;
> + struct RamblockDirtyInfo *dinfo = NULL;
> + RAMBlock *block = NULL;
> + int index = 0;
> +
> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
> + dinfo = alloc_ramblock_dirty_info(&index, dinfo);
> + if (dinfo == NULL) {
> + return -1;
> + }
> + info = &dinfo[index];
> + get_ramblock_dirty_info(block, info, &config);
> + if (save_ramblock_hash(info) < 0) {
> + *block_dinfo = dinfo;
> + *block_index = index;
> + return -1;
> + }
> + }
> +
> + *block_dinfo = dinfo;
> + *block_index = index;
> +
> + return 0;
> +}
> +
> static void calculate_dirtyrate(struct DirtyRateConfig config)
> {
> /* todo */
> diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
> index 8e25d93..e3adead 100644
> --- a/migration/dirtyrate.h
> +++ b/migration/dirtyrate.h
> @@ -24,6 +24,21 @@
> */
> #define RAMBLOCK_INFO_MAX_LEN 256
>
> +/*
> + * Sample page size 4K as default.
> + */
> +#define DIRTYRATE_SAMPLE_PAGE_SIZE 4096
> +
> +/*
> + * Sample page size 4K shift
> + */
> +#define DIRTYRATE_PAGE_SHIFT_KB 12
> +
> +/*
> + * Sample page size 1G shift
> + */
> +#define DIRTYRATE_PAGE_SHIFT_GB 30
Your naming is really odd here; 'PAGE_SHIFT_KB' divides
by 4KB, whereas 'PAGE_SHIFT_GB' divides by 1GB.
Simplify this; you can just do >>30 for GB because it's well known;
you don't need a #define constant for simple KB,MB,GB since
we all know them.
Also, I've asked before - do you really want 4KB explicitly - or
should you just use TARGET_PAGE_SIZE and TARGET_PAGE_BITS ?
Dave
> +
> /* Take 1s as default for calculation duration */
> #define DEFAULT_FETCH_DIRTYRATE_TIME_SEC 1
>
> --
> 1.8.3.1
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
On 2020/8/26 20:35, Dr. David Alan Gilbert wrote:
> * Chuan Zheng (zhengchuan@huawei.com) wrote:
>> Record hash results for each sampled page, crc32 is taken to calculate
>> hash results for each sampled 4K-page.
>>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>> ---
>> migration/dirtyrate.c | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++
>> migration/dirtyrate.h | 15 ++++++
>> 2 files changed, 151 insertions(+)
>>
>> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
>> index f6a94d8..66de426 100644
>> --- a/migration/dirtyrate.c
>> +++ b/migration/dirtyrate.c
>> @@ -10,6 +10,7 @@
>> * See the COPYING file in the top-level directory.
>> */
>>
>> +#include <zlib.h>
>> #include "qemu/osdep.h"
>> #include "qapi/error.h"
>> #include "crypto/hash.h"
>> @@ -66,6 +67,141 @@ static void update_dirtyrate(uint64_t msec)
>> DirtyStat.dirty_rate = dirtyrate;
>> }
>>
>> +/*
>> + * get hash result for the sampled memory with length of 4K byte in ramblock,
>> + * which starts from ramblock base address.
>> + */
>> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
>> + uint64_t vfn)
>> +{
>> + struct iovec iov_array;
>> + uint32_t crc;
>> +
>> + iov_array.iov_base = info->ramblock_addr +
>> + vfn * DIRTYRATE_SAMPLE_PAGE_SIZE;
>> + iov_array.iov_len = DIRTYRATE_SAMPLE_PAGE_SIZE;
>> +
>> + crc = crc32(0, iov_array.iov_base, iov_array.iov_len);
>> +
>> + return crc;
>> +}
>> +
>> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
>> +{
>> + unsigned int sample_pages_count;
>> + int i;
>> + int ret = -1;
>> + GRand *rand = g_rand_new();
>> +
>> + sample_pages_count = info->sample_pages_count;
>> +
>> + /* ramblock size less than one page, return success to skip this ramblock */
>> + if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
>> + ret = 0;
>> + goto out;
>> + }
>> +
>> + info->hash_result = g_try_malloc0_n(sample_pages_count,
>> + sizeof(uint32_t));
>> + if (!info->hash_result) {
>> + ret = -1;
>> + goto out;
>> + }
>> +
>> + info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
>> + sizeof(uint64_t));
>> + if (!info->sample_page_vfn) {
>> + g_free(info->hash_result);
>> + ret = -1;
>> + goto out;
>> + }
>> +
>> + for (i = 0; i < sample_pages_count; i++) {
>> + info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
>> + info->ramblock_pages - 1);
>> + info->hash_result[i] = get_ramblock_vfn_hash(info,
>> + info->sample_page_vfn[i]);
>> + }
>> + ret = 0;
>> +
>> +out:
>> + g_rand_free(rand);
>> + return ret;
>> +}
>> +
>> +static void get_ramblock_dirty_info(RAMBlock *block,
>> + struct RamblockDirtyInfo *info,
>> + struct DirtyRateConfig *config)
>> +{
>> + uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
>> +
>> + /* Right shift 30 bits to calc block size in GB */
>> + info->sample_pages_count = (qemu_ram_get_used_length(block) *
>> + sample_pages_per_gigabytes) >>
>> + DIRTYRATE_PAGE_SHIFT_GB;
>> +
>> + /* Right shift 12 bits to calc page count in 4KB */
>> + info->ramblock_pages = qemu_ram_get_used_length(block) >>
>> + DIRTYRATE_PAGE_SHIFT_KB;
>> + info->ramblock_addr = qemu_ram_get_host_addr(block);
>> + strcpy(info->idstr, qemu_ram_get_idstr(block));
>> +}
>> +
>> +static struct RamblockDirtyInfo *
>> +alloc_ramblock_dirty_info(int *block_index,
>> + struct RamblockDirtyInfo *block_dinfo)
>> +{
>> + struct RamblockDirtyInfo *info = NULL;
>> + int index = *block_index;
>> +
>> + if (!block_dinfo) {
>> + index = 0;
>> + block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
>> + } else {
>> + index++;
>> + block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
>> + sizeof(struct RamblockDirtyInfo));
>> + }
>> + if (!block_dinfo) {
>> + return NULL;
>> + }
>> +
>> + info = &block_dinfo[index];
>> + *block_index = index;
>> + memset(info, 0, sizeof(struct RamblockDirtyInfo));
>> +
>> + return block_dinfo;
>> +}
>> +
>> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>> + struct DirtyRateConfig config,
>> + int *block_index)
>> +{
>> + struct RamblockDirtyInfo *info = NULL;
>> + struct RamblockDirtyInfo *dinfo = NULL;
>> + RAMBlock *block = NULL;
>> + int index = 0;
>> +
>> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
>> + dinfo = alloc_ramblock_dirty_info(&index, dinfo);
>> + if (dinfo == NULL) {
>> + return -1;
>> + }
>> + info = &dinfo[index];
>> + get_ramblock_dirty_info(block, info, &config);
>> + if (save_ramblock_hash(info) < 0) {
>> + *block_dinfo = dinfo;
>> + *block_index = index;
>> + return -1;
>> + }
>> + }
>> +
>> + *block_dinfo = dinfo;
>> + *block_index = index;
>> +
>> + return 0;
>> +}
>> +
>> static void calculate_dirtyrate(struct DirtyRateConfig config)
>> {
>> /* todo */
>> diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
>> index 8e25d93..e3adead 100644
>> --- a/migration/dirtyrate.h
>> +++ b/migration/dirtyrate.h
>> @@ -24,6 +24,21 @@
>> */
>> #define RAMBLOCK_INFO_MAX_LEN 256
>>
>> +/*
>> + * Sample page size 4K as default.
>> + */
>> +#define DIRTYRATE_SAMPLE_PAGE_SIZE 4096
>> +
>> +/*
>> + * Sample page size 4K shift
>> + */
>> +#define DIRTYRATE_PAGE_SHIFT_KB 12
>> +
>> +/*
>> + * Sample page size 1G shift
>> + */
>> +#define DIRTYRATE_PAGE_SHIFT_GB 30
>
> Your naming is really odd here; 'PAGE_SHIFT_KB' divides
> by 4KB, whereas 'PAGE_SHIFT_GB' divides by 1GB.
>
> Simplify this; you can just do >>30 for GB because it's well known;
> you don't need a #define constant for simple KB,MB,GB since
> we all know them.
>
Hi, Dave.
Thank you for review.
OK, I will fix that in V6 :)
> Also, I've asked before - do you really want 4KB explicitly - or
> should you just use TARGET_PAGE_SIZE and TARGET_PAGE_BITS ?
>
> Dave
>
TARGET_PAGE_SIZE will be 2M or 1G for HugePage.
As you see, what we get is hash result of every 'virtual' 4K-page.
We care about if it is dirty within 4K length in ramblock, which would be more
accurate than TARGET_PAGE_SIZE which could be 2M or 1G.
On the other hand, the hugepage will be broken up into 4K during migration.
I think it is better we do hash at 'virtual' 4K-page granularity.
>> +
>> /* Take 1s as default for calculation duration */
>> #define DEFAULT_FETCH_DIRTYRATE_TIME_SEC 1
>>
>> --
>> 1.8.3.1
>>
* Zheng Chuan (zhengchuan@huawei.com) wrote:
>
>
> On 2020/8/26 20:35, Dr. David Alan Gilbert wrote:
> > * Chuan Zheng (zhengchuan@huawei.com) wrote:
> >> Record hash results for each sampled page, crc32 is taken to calculate
> >> hash results for each sampled 4K-page.
> >>
> >> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> >> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> >> ---
> >> migration/dirtyrate.c | 136 ++++++++++++++++++++++++++++++++++++++++++++++++++
> >> migration/dirtyrate.h | 15 ++++++
> >> 2 files changed, 151 insertions(+)
> >>
> >> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> >> index f6a94d8..66de426 100644
> >> --- a/migration/dirtyrate.c
> >> +++ b/migration/dirtyrate.c
> >> @@ -10,6 +10,7 @@
> >> * See the COPYING file in the top-level directory.
> >> */
> >>
> >> +#include <zlib.h>
> >> #include "qemu/osdep.h"
> >> #include "qapi/error.h"
> >> #include "crypto/hash.h"
> >> @@ -66,6 +67,141 @@ static void update_dirtyrate(uint64_t msec)
> >> DirtyStat.dirty_rate = dirtyrate;
> >> }
> >>
> >> +/*
> >> + * get hash result for the sampled memory with length of 4K byte in ramblock,
> >> + * which starts from ramblock base address.
> >> + */
> >> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> >> + uint64_t vfn)
> >> +{
> >> + struct iovec iov_array;
> >> + uint32_t crc;
> >> +
> >> + iov_array.iov_base = info->ramblock_addr +
> >> + vfn * DIRTYRATE_SAMPLE_PAGE_SIZE;
> >> + iov_array.iov_len = DIRTYRATE_SAMPLE_PAGE_SIZE;
> >> +
> >> + crc = crc32(0, iov_array.iov_base, iov_array.iov_len);
> >> +
> >> + return crc;
> >> +}
> >> +
> >> +static int save_ramblock_hash(struct RamblockDirtyInfo *info)
> >> +{
> >> + unsigned int sample_pages_count;
> >> + int i;
> >> + int ret = -1;
> >> + GRand *rand = g_rand_new();
> >> +
> >> + sample_pages_count = info->sample_pages_count;
> >> +
> >> + /* ramblock size less than one page, return success to skip this ramblock */
> >> + if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> >> + ret = 0;
> >> + goto out;
> >> + }
> >> +
> >> + info->hash_result = g_try_malloc0_n(sample_pages_count,
> >> + sizeof(uint32_t));
> >> + if (!info->hash_result) {
> >> + ret = -1;
> >> + goto out;
> >> + }
> >> +
> >> + info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> >> + sizeof(uint64_t));
> >> + if (!info->sample_page_vfn) {
> >> + g_free(info->hash_result);
> >> + ret = -1;
> >> + goto out;
> >> + }
> >> +
> >> + for (i = 0; i < sample_pages_count; i++) {
> >> + info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> >> + info->ramblock_pages - 1);
> >> + info->hash_result[i] = get_ramblock_vfn_hash(info,
> >> + info->sample_page_vfn[i]);
> >> + }
> >> + ret = 0;
> >> +
> >> +out:
> >> + g_rand_free(rand);
> >> + return ret;
> >> +}
> >> +
> >> +static void get_ramblock_dirty_info(RAMBlock *block,
> >> + struct RamblockDirtyInfo *info,
> >> + struct DirtyRateConfig *config)
> >> +{
> >> + uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> >> +
> >> + /* Right shift 30 bits to calc block size in GB */
> >> + info->sample_pages_count = (qemu_ram_get_used_length(block) *
> >> + sample_pages_per_gigabytes) >>
> >> + DIRTYRATE_PAGE_SHIFT_GB;
> >> +
> >> + /* Right shift 12 bits to calc page count in 4KB */
> >> + info->ramblock_pages = qemu_ram_get_used_length(block) >>
> >> + DIRTYRATE_PAGE_SHIFT_KB;
> >> + info->ramblock_addr = qemu_ram_get_host_addr(block);
> >> + strcpy(info->idstr, qemu_ram_get_idstr(block));
> >> +}
> >> +
> >> +static struct RamblockDirtyInfo *
> >> +alloc_ramblock_dirty_info(int *block_index,
> >> + struct RamblockDirtyInfo *block_dinfo)
> >> +{
> >> + struct RamblockDirtyInfo *info = NULL;
> >> + int index = *block_index;
> >> +
> >> + if (!block_dinfo) {
> >> + index = 0;
> >> + block_dinfo = g_try_new(struct RamblockDirtyInfo, 1);
> >> + } else {
> >> + index++;
> >> + block_dinfo = g_try_realloc(block_dinfo, (index + 1) *
> >> + sizeof(struct RamblockDirtyInfo));
> >> + }
> >> + if (!block_dinfo) {
> >> + return NULL;
> >> + }
> >> +
> >> + info = &block_dinfo[index];
> >> + *block_index = index;
> >> + memset(info, 0, sizeof(struct RamblockDirtyInfo));
> >> +
> >> + return block_dinfo;
> >> +}
> >> +
> >> +static int record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> >> + struct DirtyRateConfig config,
> >> + int *block_index)
> >> +{
> >> + struct RamblockDirtyInfo *info = NULL;
> >> + struct RamblockDirtyInfo *dinfo = NULL;
> >> + RAMBlock *block = NULL;
> >> + int index = 0;
> >> +
> >> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
> >> + dinfo = alloc_ramblock_dirty_info(&index, dinfo);
> >> + if (dinfo == NULL) {
> >> + return -1;
> >> + }
> >> + info = &dinfo[index];
> >> + get_ramblock_dirty_info(block, info, &config);
> >> + if (save_ramblock_hash(info) < 0) {
> >> + *block_dinfo = dinfo;
> >> + *block_index = index;
> >> + return -1;
> >> + }
> >> + }
> >> +
> >> + *block_dinfo = dinfo;
> >> + *block_index = index;
> >> +
> >> + return 0;
> >> +}
> >> +
> >> static void calculate_dirtyrate(struct DirtyRateConfig config)
> >> {
> >> /* todo */
> >> diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h
> >> index 8e25d93..e3adead 100644
> >> --- a/migration/dirtyrate.h
> >> +++ b/migration/dirtyrate.h
> >> @@ -24,6 +24,21 @@
> >> */
> >> #define RAMBLOCK_INFO_MAX_LEN 256
> >>
> >> +/*
> >> + * Sample page size 4K as default.
> >> + */
> >> +#define DIRTYRATE_SAMPLE_PAGE_SIZE 4096
> >> +
> >> +/*
> >> + * Sample page size 4K shift
> >> + */
> >> +#define DIRTYRATE_PAGE_SHIFT_KB 12
> >> +
> >> +/*
> >> + * Sample page size 1G shift
> >> + */
> >> +#define DIRTYRATE_PAGE_SHIFT_GB 30
> >
> > Your naming is really odd here; 'PAGE_SHIFT_KB' divides
> > by 4KB, whereas 'PAGE_SHIFT_GB' divides by 1GB.
> >
> > Simplify this; you can just do >>30 for GB because it's well known;
> > you don't need a #define constant for simple KB,MB,GB since
> > we all know them.
> >
> Hi, Dave.
> Thank you for review.
> OK, I will fix that in V6 :)
> > Also, I've asked before - do you really want 4KB explicitly - or
> > should you just use TARGET_PAGE_SIZE and TARGET_PAGE_BITS ?
> >
> > Dave
> >
> TARGET_PAGE_SIZE will be 2M or 1G for HugePage.
> As you see, what we get is hash result of every 'virtual' 4K-page.
> We care about if it is dirty within 4K length in ramblock, which would be more
> accurate than TARGET_PAGE_SIZE which could be 2M or 1G.
> On the other hand, the hugepage will be broken up into 4K during migration.
>
> I think it is better we do hash at 'virtual' 4K-page granularity.
TARGET_PAGE_SIZE is never 2M or 1G; it's always based on the smallest
MMU page on the platform; on x86 it's always 4kB; it's the unit that the
migration code works in when dealing with pages.
(use TARGET_PAGE_BITS to shift by).
Dave
> >> +
> >> /* Take 1s as default for calculation duration */
> >> #define DEFAULT_FETCH_DIRTYRATE_TIME_SEC 1
> >>
> >> --
> >> 1.8.3.1
> >>
>
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
© 2016 - 2026 Red Hat, Inc.