Record the hash result for each sampled page; crc32 is used to calculate
the hash over each sampled region of TARGET_PAGE_SIZE bytes.
Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
Reviewed-by: David Edmondson <david.edmondson@oracle.com>
---
migration/dirtyrate.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 111 insertions(+)
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index cf2d560..beb18cb 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -10,6 +10,7 @@
* See the COPYING file in the top-level directory.
*/
+#include <zlib.h>
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "cpu.h"
@@ -68,6 +69,116 @@ static void update_dirtyrate(uint64_t msec)
DirtyStat.dirty_rate = dirtyrate;
}
+/*
+ * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
+ * in ramblock, which starts from ramblock base address.
+ */
+static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
+ uint64_t vfn)
+{
+ uint32_t crc;
+
+ crc = crc32(0, (info->ramblock_addr +
+ vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
+
+ return crc;
+}
+
+static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
+{
+ unsigned int sample_pages_count;
+ int i;
+ GRand *rand;
+
+ sample_pages_count = info->sample_pages_count;
+
+ /* ramblock size less than one page, return success to skip this ramblock */
+ if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
+ return true;
+ }
+
+ info->hash_result = g_try_malloc0_n(sample_pages_count,
+ sizeof(uint32_t));
+ if (!info->hash_result) {
+ return false;
+ }
+
+ info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
+ sizeof(uint64_t));
+ if (!info->sample_page_vfn) {
+ g_free(info->hash_result);
+ return false;
+ }
+
+ rand = g_rand_new();
+ for (i = 0; i < sample_pages_count; i++) {
+ info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
+ info->ramblock_pages - 1);
+ info->hash_result[i] = get_ramblock_vfn_hash(info,
+ info->sample_page_vfn[i]);
+ }
+ g_rand_free(rand);
+
+ return true;
+}
+
+static void get_ramblock_dirty_info(RAMBlock *block,
+ struct RamblockDirtyInfo *info,
+ struct DirtyRateConfig *config)
+{
+ uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
+
+ /* Right shift 30 bits to calc ramblock size in GB */
+ info->sample_pages_count = (qemu_ram_get_used_length(block) *
+ sample_pages_per_gigabytes) >> 30;
+ /* Right shift TARGET_PAGE_BITS to calc page count */
+ info->ramblock_pages = qemu_ram_get_used_length(block) >>
+ TARGET_PAGE_BITS;
+ info->ramblock_addr = qemu_ram_get_host_addr(block);
+ strcpy(info->idstr, qemu_ram_get_idstr(block));
+}
+
+static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
+ struct DirtyRateConfig config,
+ int *block_index)
+{
+ struct RamblockDirtyInfo *info = NULL;
+ struct RamblockDirtyInfo *dinfo = NULL;
+ RAMBlock *block = NULL;
+ int total_index = 0;
+ int index = 0;
+ bool ret = true;
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ total_index++;
+ }
+
+ dinfo = g_try_malloc0_n(total_index, sizeof(struct RamblockDirtyInfo));
+ if (dinfo == NULL) {
+ total_index = 0;
+ ret = false;
+ goto out;
+ }
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ if (index >= total_index) {
+ break;
+ }
+ info = &dinfo[index];
+ get_ramblock_dirty_info(block, info, &config);
+ if (!save_ramblock_hash(info)) {
+ ret = false;
+ goto out;
+ }
+ index++;
+ }
+
+out:
+ *block_index = total_index;
+ *block_dinfo = dinfo;
+ return ret;
+}
+
static void calculate_dirtyrate(struct DirtyRateConfig config)
{
/* todo */
--
1.8.3.1
On Mon, Sep 14, 2020 at 4:52 PM Chuan Zheng <zhengchuan@huawei.com> wrote:
>
> Record the hash result for each sampled page; crc32 is used to calculate
> the hash over each sampled region of TARGET_PAGE_SIZE bytes.
>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> Reviewed-by: David Edmondson <david.edmondson@oracle.com>
> ---
> migration/dirtyrate.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++
> 1 file changed, 111 insertions(+)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index cf2d560..beb18cb 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -10,6 +10,7 @@
> * See the COPYING file in the top-level directory.
> */
>
> +#include <zlib.h>
> #include "qemu/osdep.h"
> #include "qapi/error.h"
> #include "cpu.h"
> @@ -68,6 +69,116 @@ static void update_dirtyrate(uint64_t msec)
> DirtyStat.dirty_rate = dirtyrate;
> }
>
> +/*
> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
> + * in ramblock, which starts from ramblock base address.
> + */
> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
> + uint64_t vfn)
> +{
> + uint32_t crc;
> +
> + crc = crc32(0, (info->ramblock_addr +
> + vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
> +
> + return crc;
> +}
> +
> +static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
> +{
> + unsigned int sample_pages_count;
> + int i;
> + GRand *rand;
> +
> + sample_pages_count = info->sample_pages_count;
> +
> + /* ramblock size less than one page, return success to skip this ramblock */
> + if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
> + return true;
> + }
> +
> + info->hash_result = g_try_malloc0_n(sample_pages_count,
> + sizeof(uint32_t));
> + if (!info->hash_result) {
> + return false;
> + }
> +
> + info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
> + sizeof(uint64_t));
> + if (!info->sample_page_vfn) {
> + g_free(info->hash_result);
> + return false;
> + }
> +
> + rand = g_rand_new();
> + for (i = 0; i < sample_pages_count; i++) {
> + info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
> + info->ramblock_pages - 1);
> + info->hash_result[i] = get_ramblock_vfn_hash(info,
> + info->sample_page_vfn[i]);
> + }
> + g_rand_free(rand);
> +
> + return true;
> +}
> +
> +static void get_ramblock_dirty_info(RAMBlock *block,
> + struct RamblockDirtyInfo *info,
> + struct DirtyRateConfig *config)
> +{
> + uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
> +
> + /* Right shift 30 bits to calc ramblock size in GB */
> + info->sample_pages_count = (qemu_ram_get_used_length(block) *
> + sample_pages_per_gigabytes) >> 30;
> + /* Right shift TARGET_PAGE_BITS to calc page count */
> + info->ramblock_pages = qemu_ram_get_used_length(block) >>
> + TARGET_PAGE_BITS;
> + info->ramblock_addr = qemu_ram_get_host_addr(block);
> + strcpy(info->idstr, qemu_ram_get_idstr(block));
> +}
> +
> +static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
> + struct DirtyRateConfig config,
> + int *block_index)
> +{
> + struct RamblockDirtyInfo *info = NULL;
> + struct RamblockDirtyInfo *dinfo = NULL;
> + RAMBlock *block = NULL;
> + int total_index = 0;
Maybe 'total_count' would be a better name?
> + int index = 0;
> + bool ret = true;
> +
> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
> + total_index++;
> + }
> +
> + dinfo = g_try_malloc0_n(total_index, sizeof(struct RamblockDirtyInfo));
> + if (dinfo == NULL) {
> + total_index = 0;
No need to set 'total_index' here.
Just use 'index' at the end.
> + ret = false;
> + goto out;
> + }
> +
> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
> + if (index >= total_index) {
> + break;
> + }
> + info = &dinfo[index];
> + get_ramblock_dirty_info(block, info, &config);
> + if (!save_ramblock_hash(info)) {
> + ret = false;
> + goto out;
> + }
> + index++;
> + }
> +
> +out:
> + *block_index = total_index;
Here 'total_index' should be 'index'?
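In other words, roughly (untested sketch, keeping the current variable
names), the tail of record_ramblock_hash_info() could be:

    dinfo = g_try_malloc0_n(total_index, sizeof(struct RamblockDirtyInfo));
    if (dinfo == NULL) {
        /* no need to reset 'total_index' on this path */
        ret = false;
        goto out;
    }

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        if (index >= total_index) {
            break;
        }
        info = &dinfo[index];
        get_ramblock_dirty_info(block, info, &config);
        if (!save_ramblock_hash(info)) {
            ret = false;
            goto out;
        }
        index++;
    }

out:
    /* 'index' is the number of entries actually filled in */
    *block_index = index;
    *block_dinfo = dinfo;
    return ret;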
In general I think this two-iteration version is more understandable
than the last one.
Thanks,
Li Qiang
> + *block_dinfo = dinfo;
> + return ret;
> +}
> +
> static void calculate_dirtyrate(struct DirtyRateConfig config)
> {
> /* todo */
> --
> 1.8.3.1
>
On 2020/9/14 22:59, Li Qiang wrote:
> On Mon, Sep 14, 2020 at 4:52 PM Chuan Zheng <zhengchuan@huawei.com> wrote:
>>
>> Record the hash result for each sampled page; crc32 is used to calculate
>> the hash over each sampled region of TARGET_PAGE_SIZE bytes.
>>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>> Reviewed-by: David Edmondson <david.edmondson@oracle.com>
>> ---
>> migration/dirtyrate.c | 111 ++++++++++++++++++++++++++++++++++++++++++++++++++
>> 1 file changed, 111 insertions(+)
>>
>> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
>> index cf2d560..beb18cb 100644
>> --- a/migration/dirtyrate.c
>> +++ b/migration/dirtyrate.c
>> @@ -10,6 +10,7 @@
>> * See the COPYING file in the top-level directory.
>> */
>>
>> +#include <zlib.h>
>> #include "qemu/osdep.h"
>> #include "qapi/error.h"
>> #include "cpu.h"
>> @@ -68,6 +69,116 @@ static void update_dirtyrate(uint64_t msec)
>> DirtyStat.dirty_rate = dirtyrate;
>> }
>>
>> +/*
>> + * get hash result for the sampled memory with length of TARGET_PAGE_SIZE
>> + * in ramblock, which starts from ramblock base address.
>> + */
>> +static uint32_t get_ramblock_vfn_hash(struct RamblockDirtyInfo *info,
>> + uint64_t vfn)
>> +{
>> + uint32_t crc;
>> +
>> + crc = crc32(0, (info->ramblock_addr +
>> + vfn * TARGET_PAGE_SIZE), TARGET_PAGE_SIZE);
>> +
>> + return crc;
>> +}
>> +
>> +static bool save_ramblock_hash(struct RamblockDirtyInfo *info)
>> +{
>> + unsigned int sample_pages_count;
>> + int i;
>> + GRand *rand;
>> +
>> + sample_pages_count = info->sample_pages_count;
>> +
>> + /* ramblock size less than one page, return success to skip this ramblock */
>> + if (unlikely(info->ramblock_pages == 0 || sample_pages_count == 0)) {
>> + return true;
>> + }
>> +
>> + info->hash_result = g_try_malloc0_n(sample_pages_count,
>> + sizeof(uint32_t));
>> + if (!info->hash_result) {
>> + return false;
>> + }
>> +
>> + info->sample_page_vfn = g_try_malloc0_n(sample_pages_count,
>> + sizeof(uint64_t));
>> + if (!info->sample_page_vfn) {
>> + g_free(info->hash_result);
>> + return false;
>> + }
>> +
>> + rand = g_rand_new();
>> + for (i = 0; i < sample_pages_count; i++) {
>> + info->sample_page_vfn[i] = g_rand_int_range(rand, 0,
>> + info->ramblock_pages - 1);
>> + info->hash_result[i] = get_ramblock_vfn_hash(info,
>> + info->sample_page_vfn[i]);
>> + }
>> + g_rand_free(rand);
>> +
>> + return true;
>> +}
>> +
>> +static void get_ramblock_dirty_info(RAMBlock *block,
>> + struct RamblockDirtyInfo *info,
>> + struct DirtyRateConfig *config)
>> +{
>> + uint64_t sample_pages_per_gigabytes = config->sample_pages_per_gigabytes;
>> +
>> + /* Right shift 30 bits to calc ramblock size in GB */
>> + info->sample_pages_count = (qemu_ram_get_used_length(block) *
>> + sample_pages_per_gigabytes) >> 30;
>> + /* Right shift TARGET_PAGE_BITS to calc page count */
>> + info->ramblock_pages = qemu_ram_get_used_length(block) >>
>> + TARGET_PAGE_BITS;
>> + info->ramblock_addr = qemu_ram_get_host_addr(block);
>> + strcpy(info->idstr, qemu_ram_get_idstr(block));
>> +}
>> +
>> +static bool record_ramblock_hash_info(struct RamblockDirtyInfo **block_dinfo,
>> + struct DirtyRateConfig config,
>> + int *block_index)
>> +{
>> + struct RamblockDirtyInfo *info = NULL;
>> + struct RamblockDirtyInfo *dinfo = NULL;
>> + RAMBlock *block = NULL;
>> + int total_index = 0;
>
> Maybe 'total_count' would be a better name?
>
>> + int index = 0;
>> + bool ret = true;
>> +
>> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
>> + total_index++;
>> + }
>> +
>> + dinfo = g_try_malloc0_n(total_index, sizeof(struct RamblockDirtyInfo));
>> + if (dinfo == NULL) {
>> + total_index = 0;
>
> No need to set 'total_index' here.
> Just use 'index' at the end.
>
>> + ret = false;
>> + goto out;
>> + }
>> +
>> + RAMBLOCK_FOREACH_MIGRATABLE(block) {
>> + if (index >= total_index) {
>> + break;
>> + }
>> + info = &dinfo[index];
>> + get_ramblock_dirty_info(block, info, &config);
>> + if (!save_ramblock_hash(info)) {
>> + ret = false;
>> + goto out;
>> + }
>> + index++;
>> + }
>> +
>> +out:
>> + *block_index = total_index;
>
> Here 'total_index' should be 'index'?
>
Hi, Qiang.
Thanks for your review.
Yes, it should be 'index' :), will fix it in V9.
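The fix will be just the one-liner at the 'out:' label, roughly:

 out:
-    *block_index = total_index;
+    *block_index = index;
     *block_dinfo = dinfo;
     return ret;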
> In general I think this two-iteration version is more understandable
> than the last one.
>
> Thanks,
> Li Qiang
>
>> + *block_dinfo = dinfo;
>> + return ret;
>> +}
>> +
>> static void calculate_dirtyrate(struct DirtyRateConfig config)
>> {
>> /* todo */
>> --
>> 1.8.3.1
>>
> .
>