Implement calculate_dirtyrate() function.

Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
---
migration/dirtyrate.c | 45 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 43 insertions(+), 2 deletions(-)

diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 850126d..95ee23e 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -162,6 +162,21 @@ static void get_ramblock_dirty_info(RAMBlock *block,
    strcpy(info->idstr, qemu_ram_get_idstr(block));
}

+static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
+{
+    int i;
+
+    if (!infos) {
+        return;
+    }
+
+    for (i = 0; i < count; i++) {
+        g_free(infos[i].sample_page_vfn);
+        g_free(infos[i].hash_result);
+    }
+    g_free(infos);
+}
+
static struct RamblockDirtyInfo *
alloc_ramblock_dirty_info(int *block_index,
                          struct RamblockDirtyInfo *block_dinfo)
@@ -301,8 +316,34 @@ static int compare_page_hash_info(struct RamblockDirtyInfo *info,

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
-    /* todo */
-    return;
+    struct RamblockDirtyInfo *block_dinfo = NULL;
+    int block_index = 0;
+    int64_t msec = 0;
+    int64_t initial_time;
+
+    rcu_register_thread();
+    reset_dirtyrate_stat();
+    rcu_read_lock();
+    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
+    if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
+        goto out;
+    }
+    rcu_read_unlock();
+
+    msec = config.sample_period_seconds * 1000;
+    msec = set_sample_page_period(msec, initial_time);
+
+    rcu_read_lock();
+    if (compare_page_hash_info(block_dinfo, block_index) < 0) {
+        goto out;
+    }
+
+    update_dirtyrate(msec);
+
+out:
+    rcu_read_unlock();
+    free_ramblock_dirty_info(block_dinfo, block_index + 1);
+    rcu_unregister_thread();
}

void *get_dirtyrate_thread(void *arg)
--
1.8.3.1

On Saturday, 2020-08-29 at 10:52:55 +08, Chuan Zheng wrote:
> Implement calculate_dirtyrate() function.
>
> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
> ---
> migration/dirtyrate.c | 45 +++++++++++++++++++++++++++++++++++++++++++--
> 1 file changed, 43 insertions(+), 2 deletions(-)
>
> diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
> index 850126d..95ee23e 100644
> --- a/migration/dirtyrate.c
> +++ b/migration/dirtyrate.c
> @@ -162,6 +162,21 @@ static void get_ramblock_dirty_info(RAMBlock *block,
>     strcpy(info->idstr, qemu_ram_get_idstr(block));
> }
>
> +static void free_ramblock_dirty_info(struct RamblockDirtyInfo *infos, int count)
> +{
> +    int i;
> +
> +    if (!infos) {
> +        return;
> +    }
> +
> +    for (i = 0; i < count; i++) {
> +        g_free(infos[i].sample_page_vfn);
> +        g_free(infos[i].hash_result);
> +    }
> +    g_free(infos);
> +}
> +
> static struct RamblockDirtyInfo *
> alloc_ramblock_dirty_info(int *block_index,
>                           struct RamblockDirtyInfo *block_dinfo)
> @@ -301,8 +316,34 @@ static int compare_page_hash_info(struct RamblockDirtyInfo *info,
>
> static void calculate_dirtyrate(struct DirtyRateConfig config)
> {
> -    /* todo */
> -    return;
> +    struct RamblockDirtyInfo *block_dinfo = NULL;
> +    int block_index = 0;
> +    int64_t msec = 0;
> +    int64_t initial_time;
> +
> +    rcu_register_thread();
> +    reset_dirtyrate_stat();
> +    rcu_read_lock();
> +    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
> +    if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
> +        goto out;
> +    }
> +    rcu_read_unlock();
> +
> +    msec = config.sample_period_seconds * 1000;
> +    msec = set_sample_page_period(msec, initial_time);
> +
> +    rcu_read_lock();
> +    if (compare_page_hash_info(block_dinfo, block_index) < 0) {
> +        goto out;
> +    }
> +
> +    update_dirtyrate(msec);
> +
> +out:
> +    rcu_read_unlock();
>
This still holds the RCU lock across update_dirtyrate(), which I
understood to be unnecessary.
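Roughly what I had in mind (untested, and only showing the tail of the
function; the earlier goto out after record_ramblock_hash_info() would
need its own rcu_read_unlock() before the goto, since out: no longer
unlocks):

    rcu_read_lock();
    if (compare_page_hash_info(block_dinfo, block_index) < 0) {
        /* Error path: drop the read lock here and skip the update. */
        rcu_read_unlock();
        goto out;
    }
    rcu_read_unlock();

    /* Nothing past this point touches RCU-protected data. */
    update_dirtyrate(msec);

out:
    free_ramblock_dirty_info(block_dinfo, block_index + 1);
    rcu_unregister_thread();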
> +    free_ramblock_dirty_info(block_dinfo, block_index + 1);
> +    rcu_unregister_thread();
> }
>
> void *get_dirtyrate_thread(void *arg)
> --
> 1.8.3.1
dme.
--
I'd come on over but I haven't got a raincoat.

On 2020/8/31 17:13, David Edmondson wrote:
> On Saturday, 2020-08-29 at 10:52:55 +08, Chuan Zheng wrote:
>
>> Implement calculate_dirtyrate() function.
>>
>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>> [...]
>> @@ -301,8 +316,34 @@ static int compare_page_hash_info(struct RamblockDirtyInfo *info,
>>
>> static void calculate_dirtyrate(struct DirtyRateConfig config)
>> {
>> -    /* todo */
>> -    return;
>> +    struct RamblockDirtyInfo *block_dinfo = NULL;
>> +    int block_index = 0;
>> +    int64_t msec = 0;
>> +    int64_t initial_time;
>> +
>> +    rcu_register_thread();
>> +    reset_dirtyrate_stat();
>> +    rcu_read_lock();
>> +    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
>> +    if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
>> +        goto out;
>> +    }
>> +    rcu_read_unlock();
>> +
>> +    msec = config.sample_period_seconds * 1000;
>> +    msec = set_sample_page_period(msec, initial_time);
>> +
>> +    rcu_read_lock();
>> +    if (compare_page_hash_info(block_dinfo, block_index) < 0) {
>> +        goto out;
>> +    }
>> +
>> +    update_dirtyrate(msec);
>> +
>> +out:
>> +    rcu_read_unlock();
>
> This still holds the RCU lock across update_dirtyrate(), which I
> understood to be unnecessary.
We should not call update_dirtyrate() if we goto out because an error
happened.
In order to move update_dirtyrate() out of the RCU read-side critical
section, we would need to add a flag such as is_need_update, like:

if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
    is_need_update = false;
    goto out;
}

if (is_need_update) {
    update_dirtyrate(msec);
}

I doubt it is worth doing that, though; does it do any harm if we hold
the RCU lock across update_dirtyrate()?
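For reference, with such a flag the whole function would look roughly
like below (untested, just to illustrate the idea; is_need_update is the
new local flag mentioned above):

static void calculate_dirtyrate(struct DirtyRateConfig config)
{
    struct RamblockDirtyInfo *block_dinfo = NULL;
    /* Cleared on the error paths so that the update is skipped. */
    bool is_need_update = true;
    int block_index = 0;
    int64_t msec = 0;
    int64_t initial_time;

    rcu_register_thread();
    reset_dirtyrate_stat();
    rcu_read_lock();
    initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
        is_need_update = false;
        goto out;
    }
    rcu_read_unlock();

    msec = config.sample_period_seconds * 1000;
    msec = set_sample_page_period(msec, initial_time);

    rcu_read_lock();
    if (compare_page_hash_info(block_dinfo, block_index) < 0) {
        is_need_update = false;
    }

out:
    rcu_read_unlock();

    /* update_dirtyrate() no longer runs under the RCU read lock. */
    if (is_need_update) {
        update_dirtyrate(msec);
    }

    free_ramblock_dirty_info(block_dinfo, block_index + 1);
    rcu_unregister_thread();
}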
>> +    free_ramblock_dirty_info(block_dinfo, block_index + 1);
>> +    rcu_unregister_thread();
>> }
>>
>> void *get_dirtyrate_thread(void *arg)
>> --
>> 1.8.3.1
>
> dme.
>

On Monday, 2020-08-31 at 19:24:04 +08, Zheng Chuan wrote:
> On 2020/8/31 17:13, David Edmondson wrote:
>> On Saturday, 2020-08-29 at 10:52:55 +08, Chuan Zheng wrote:
>>
>>> Implement calculate_dirtyrate() function.
>>>
>>> Signed-off-by: Chuan Zheng <zhengchuan@huawei.com>
>>> Signed-off-by: YanYing Zhuang <ann.zhuangyanying@huawei.com>
>>> [...]
>>> +    rcu_read_lock();
>>> +    if (compare_page_hash_info(block_dinfo, block_index) < 0) {
>>> +        goto out;
>>> +    }
>>> +
>>> +    update_dirtyrate(msec);
>>> +
>>> +out:
>>> +    rcu_read_unlock();
>>
>> This still holds the RCU lock across update_dirtyrate(), which I
>> understood to be unnecessary.
> We should not call update_dirtyrate() if we goto out because an error
> happened.
> In order to move update_dirtyrate() out of the RCU read-side critical
> section, we would need to add a flag such as is_need_update, like:
>
> if (record_ramblock_hash_info(&block_dinfo, config, &block_index) < 0) {
>     is_need_update = false;
>     goto out;
> }
>
> if (is_need_update) {
>     update_dirtyrate(msec);
> }
>
> I doubt it is worth doing that, though; does it do any harm if we hold
> the RCU lock across update_dirtyrate()?
Honestly, I'm not sure whether holding the RCU lock a little longer will
have a measurable effect; perhaps someone with more experience will have
a better idea.
>
>>> +    free_ramblock_dirty_info(block_dinfo, block_index + 1);
>>> +    rcu_unregister_thread();
>>> }
>>>
>>> void *get_dirtyrate_thread(void *arg)
>>> --
>>> 1.8.3.1
>>
>> dme.
>>
dme.
--
We wanna wait, but here we go again.