From: Yu Kuai <yukuai3@huawei.com>
Currently, blkcg_print_blkgs() must hold the rcu read lock to iterate
the blkgs of a blkcg, and prfill() must in addition be protected by
queue_lock to prevent policy_data from being freed by policy
deactivation. As a consequence, queue_lock has to be nested inside rcu
in blkcg_print_blkgs().

Delay freeing policy_data until after an rcu grace period, so that
prfill() can be protected with just the rcu read lock held.
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
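Note for reviewers: each policy is converted with the same pattern,
sketched below with hypothetical foo_* names. Memory that prfill() may
still dereference under rcu is released from the rcu callback, while
cleanup that is not allowed there (timer_delete_sync() must not be
called from softirq context, where rcu callbacks can run) stays in the
synchronous ->pd_free() path:

	static void foo_release(struct rcu_head *rcu)
	{
		struct blkg_policy_data *pd =
			container_of(rcu, struct blkg_policy_data, rcu_head);

		/* free pd-embedded memory here, after the grace period */
	}

	static void foo_pd_free(struct blkg_policy_data *pd)
	{
		/* blocking cleanup must stay synchronous */
		call_rcu(&pd->rcu_head, foo_release);
	}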
block/bfq-cgroup.c | 9 ++++++++-
block/blk-cgroup.h | 2 ++
block/blk-iocost.c | 14 ++++++++++++--
block/blk-iolatency.c | 10 +++++++++-
block/blk-throttle.c | 13 +++++++++++--
5 files changed, 42 insertions(+), 6 deletions(-)
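For reference, this is the simplification the change enables in the
blkcg_print_blkgs() iteration (a sketch only, the caller-side change is
not part of this patch; field names follow the existing blk-cgroup
structures):

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
		/*
		 * policy_data can no longer be freed under us: ->pd_free()
		 * now defers the actual release with call_rcu(), so the
		 * queue_lock nesting can be dropped here.
		 */
		if (blkcg_policy_enabled(blkg->q, pol))
			total += prfill(sf, blkg->pd[pol->plid], data);
	}
	rcu_read_unlock();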
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 9fb9f3533150..a7e705d98751 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -548,14 +548,21 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
bfqg->rq_pos_tree = RB_ROOT;
}
-static void bfq_pd_free(struct blkg_policy_data *pd)
+static void bfqg_release(struct rcu_head *rcu)
{
+ struct blkg_policy_data *pd =
+ container_of(rcu, struct blkg_policy_data, rcu_head);
struct bfq_group *bfqg = pd_to_bfqg(pd);
bfqg_stats_exit(&bfqg->stats);
bfqg_put(bfqg);
}
+static void bfq_pd_free(struct blkg_policy_data *pd)
+{
+ call_rcu(&pd->rcu_head, bfqg_release);
+}
+
static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
{
struct bfq_group *bfqg = pd_to_bfqg(pd);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 1cce3294634d..fd206d1fa3c9 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -140,6 +140,8 @@ struct blkg_policy_data {
struct blkcg_gq *blkg;
int plid;
bool online;
+
+ struct rcu_head rcu_head;
};
/*
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 5bfd70311359..3593547930cc 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -3017,6 +3017,16 @@ static void ioc_pd_init(struct blkg_policy_data *pd)
spin_unlock_irqrestore(&ioc->lock, flags);
}
+static void iocg_release(struct rcu_head *rcu)
+{
+ struct blkg_policy_data *pd =
+ container_of(rcu, struct blkg_policy_data, rcu_head);
+ struct ioc_gq *iocg = pd_to_iocg(pd);
+
+ free_percpu(iocg->pcpu_stat);
+ kfree(iocg);
+}
+
static void ioc_pd_free(struct blkg_policy_data *pd)
{
struct ioc_gq *iocg = pd_to_iocg(pd);
@@ -3041,8 +3051,8 @@ static void ioc_pd_free(struct blkg_policy_data *pd)
hrtimer_cancel(&iocg->waitq_timer);
}
- free_percpu(iocg->pcpu_stat);
- kfree(iocg);
+
+ call_rcu(&pd->rcu_head, iocg_release);
}
static void ioc_pd_stat(struct blkg_policy_data *pd, struct seq_file *s)
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 45bd18f68541..ce25fbb8aaf6 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -1031,13 +1031,21 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
iolatency_clear_scaling(blkg);
}
-static void iolatency_pd_free(struct blkg_policy_data *pd)
+static void iolat_release(struct rcu_head *rcu)
{
+ struct blkg_policy_data *pd =
+ container_of(rcu, struct blkg_policy_data, rcu_head);
struct iolatency_grp *iolat = pd_to_lat(pd);
+
free_percpu(iolat->stats);
kfree(iolat);
}
+static void iolatency_pd_free(struct blkg_policy_data *pd)
+{
+ call_rcu(&pd->rcu_head, iolat_release);
+}
+
static struct cftype iolatency_files[] = {
{
.name = "latency",
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 2c5b64b1a724..cb3bfdb4684a 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -360,16 +360,25 @@ static void throtl_pd_online(struct blkg_policy_data *pd)
tg_update_has_rules(tg);
}
-static void throtl_pd_free(struct blkg_policy_data *pd)
+static void tg_release(struct rcu_head *rcu)
{
+ struct blkg_policy_data *pd =
+ container_of(rcu, struct blkg_policy_data, rcu_head);
struct throtl_grp *tg = pd_to_tg(pd);
- timer_delete_sync(&tg->service_queue.pending_timer);
blkg_rwstat_exit(&tg->stat_bytes);
blkg_rwstat_exit(&tg->stat_ios);
kfree(tg);
}
+static void throtl_pd_free(struct blkg_policy_data *pd)
+{
+ struct throtl_grp *tg = pd_to_tg(pd);
+
+ timer_delete_sync(&tg->service_queue.pending_timer);
+ call_rcu(&pd->rcu_head, tg_release);
+}
+
static struct throtl_grp *
throtl_rb_first(struct throtl_service_queue *parent_sq)
{
--
2.51.0