From: Yu Kuai <yukuai3@huawei.com>
Now that blkcg_mutex is used to protect blkgs, the blkg memory
allocation no longer needs to be non-blocking, so the radix tree
preloading is no longer needed. Remove it.
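
For background, radix_tree_preload() exists to pin tree nodes before
entering an atomic section, because node allocation inside that section
cannot sleep. A rough sketch of the pattern being removed (simplified,
with placeholder names "lock", "root", "index" and "item"; not the
exact blk-cgroup code):

	/*
	 * Allocate per-CPU radix tree nodes up front; may sleep here.
	 * Returns 0 with preemption disabled on success.
	 */
	if (radix_tree_preload(GFP_KERNEL))
		return -ENOMEM;

	spin_lock_irq(&lock);
	/* Cannot sleep here; the insert consumes the preloaded nodes. */
	ret = radix_tree_insert(&root, index, item);
	spin_unlock_irq(&lock);

	radix_tree_preload_end();	/* re-enable preemption */

Since blkg creation is now serialized by the sleepable q->blkcg_mutex
rather than a spinlock, radix_tree_insert() can allocate its nodes
directly (per the tree root's gfp mask), and the preload/preload_end
pair is pure overhead.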
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
block/blk-cgroup.c | 20 ++------------------
1 file changed, 2 insertions(+), 18 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b8bb2f3506aa..030499d70543 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -894,16 +894,10 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			goto fail_unlock;
 		}
 
-		if (radix_tree_preload(GFP_KERNEL)) {
-			blkg_free(new_blkg);
-			ret = -ENOMEM;
-			goto fail_unlock;
-		}
-
 		if (!blkcg_policy_enabled(q, pol)) {
 			blkg_free(new_blkg);
 			ret = -EOPNOTSUPP;
-			goto fail_preloaded;
+			goto fail_unlock;
 		}
 
 		blkg = blkg_lookup(pos, q);
@@ -913,12 +907,10 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 			blkg = blkg_create(pos, disk, new_blkg);
 			if (IS_ERR(blkg)) {
 				ret = PTR_ERR(blkg);
-				goto fail_preloaded;
+				goto fail_unlock;
 			}
 		}
 
-		radix_tree_preload_end();
-
 		if (pos == blkcg)
 			goto success;
 	}
@@ -926,8 +918,6 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 	ctx->blkg = blkg;
 	return 0;
 
-fail_preloaded:
-	radix_tree_preload_end();
 fail_unlock:
 	mutex_unlock(&q->blkcg_mutex);
 	/*
@@ -1480,14 +1470,12 @@ int blkcg_init_disk(struct gendisk *disk)
 {
 	struct request_queue *q = disk->queue;
 	struct blkcg_gq *new_blkg, *blkg;
-	bool preloaded;
 
 	new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
 	if (!new_blkg)
 		return -ENOMEM;
 
 	mutex_lock(&q->blkcg_mutex);
-	preloaded = !radix_tree_preload(GFP_NOIO);
 
 	/* Make sure the root blkg exists. */
 	blkg = blkg_create(&blkcg_root, disk, new_blkg);
@@ -1495,16 +1483,12 @@ int blkcg_init_disk(struct gendisk *disk)
 		goto err_unlock;
 	q->root_blkg = blkg;
 
-	if (preloaded)
-		radix_tree_preload_end();
 	mutex_unlock(&q->blkcg_mutex);
 
 	return 0;
 
 err_unlock:
 	mutex_unlock(&q->blkcg_mutex);
-	if (preloaded)
-		radix_tree_preload_end();
 	return PTR_ERR(blkg);
 }
 
--
2.51.0