From: Yu Kuai <yukuai3@huawei.com>
Now that the previous patch delays freeing policy data until after an
RCU grace period, it is safe to protect prfill() with RCU directly:
policy data is guaranteed not to be freed by a concurrent policy
deactivation.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
block/blk-cgroup-rwstat.c | 4 +---
block/blk-cgroup.c | 2 --
2 files changed, 1 insertion(+), 5 deletions(-)
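
For reviewers, a minimal sketch of the lifetime rule this change relies
on. The wrapper struct and helper below (my_pd, my_pd_free) are
illustrative only, assuming the earlier patch defers the free via
kfree_rcu()/call_rcu() or an equivalent synchronize_rcu():

	#include <linux/rcupdate.h>
	#include "blk-cgroup.h"	/* struct blkg_policy_data */

	struct my_pd {
		struct blkg_policy_data pd;
		struct rcu_head rcu;
	};

	/* Updater side: policy deactivation no longer frees pd
	 * directly; the free is deferred past an RCU grace period. */
	static void my_pd_free(struct blkg_policy_data *pd)
	{
		kfree_rcu(container_of(pd, struct my_pd, pd), rcu);
	}

	/* Reader side: a prfill() callback running inside
	 * rcu_read_lock()/rcu_read_unlock() therefore sees either a
	 * live pd or no pd at all, never a freed one. */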
diff --git a/block/blk-cgroup-rwstat.c b/block/blk-cgroup-rwstat.c
index a55fb0c53558..b8ab8c0063a3 100644
--- a/block/blk-cgroup-rwstat.c
+++ b/block/blk-cgroup-rwstat.c
@@ -101,10 +101,9 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
struct cgroup_subsys_state *pos_css;
unsigned int i;
- lockdep_assert_held(&blkg->q->queue_lock);
+ WARN_ON_ONCE(!rcu_read_lock_held());
memset(sum, 0, sizeof(*sum));
- rcu_read_lock();
blkg_for_each_descendant_pre(pos_blkg, pos_css, blkg) {
struct blkg_rwstat *rwstat;
@@ -119,6 +118,5 @@ void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
for (i = 0; i < BLKG_RWSTAT_NR; i++)
sum->cnt[i] += blkg_rwstat_read_counter(rwstat, i);
}
- rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blkg_rwstat_recursive_sum);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 0f6039d468a6..fb40262971c9 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -713,10 +713,8 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
- spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
- spin_unlock_irq(&blkg->q->queue_lock);
}
rcu_read_unlock();
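
Usage note: after this change, callers of blkg_rwstat_recursive_sum()
must hold the RCU read lock themselves. A hypothetical prfill callback
(my_prfill below is not from this patch) would now look like:

	static u64 my_prfill(struct seq_file *sf,
			     struct blkg_policy_data *pd, int off)
	{
		struct blkg_rwstat_sample sum;

		/* No queue_lock needed: blkcg_print_blkgs() already
		 * holds rcu_read_lock() around this callback. */
		blkg_rwstat_recursive_sum(pd->blkg, NULL, off, &sum);
		return __blkg_prfill_rwstat(sf, pd, &sum);
	}

That is, the queue_lock/lockdep pairing is gone; the RCU read-side
section established by the caller is the only protection required.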
--
2.51.0