From: Yu Kuai <yukuai3@huawei.com>
Remove the following helpers, which are now unused:
- blkg_conf_open_bdev()
- blkg_conf_open_bdev_frozen()
- blkg_conf_prep()
- blkg_conf_exit()
- blkg_conf_exit_frozen()
Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
block/blk-cgroup.c | 224 +--------------------------------------------
block/blk-cgroup.h | 6 --
2 files changed, 1 insertion(+), 229 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 4b7324c1d0d5..d93654334854 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -729,8 +729,7 @@ EXPORT_SYMBOL_GPL(__blkg_prfill_u64);
* @input: input string
*
* Initialize @ctx which can be used to parse blkg config input string @input.
- * Once initialized, @ctx can be used with blkg_conf_open_bdev() and
- * blkg_conf_prep(), and must be cleaned up with blkg_conf_exit().
+ * Once initialized, @ctx can be used with blkg_conf_start().
*/
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
{
@@ -738,92 +737,6 @@ void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input)
}
EXPORT_SYMBOL_GPL(blkg_conf_init);
-/**
- * blkg_conf_open_bdev - parse and open bdev for per-blkg config update
- * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
- *
- * Parse the device node prefix part, MAJ:MIN, of per-blkg config update from
- * @ctx->input and get and store the matching bdev in @ctx->bdev. @ctx->body is
- * set to point past the device node prefix.
- *
- * This function may be called multiple times on @ctx and the extra calls become
- * NOOPs. blkg_conf_prep() implicitly calls this function. Use this function
- * explicitly if bdev access is needed without resolving the blkcg / policy part
- * of @ctx->input. Returns -errno on error.
- */
-int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx)
-{
- char *input = ctx->input;
- unsigned int major, minor;
- struct block_device *bdev;
- int key_len;
-
- if (ctx->bdev)
- return 0;
-
- if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
- return -EINVAL;
-
- input += key_len;
- if (!isspace(*input))
- return -EINVAL;
- input = skip_spaces(input);
-
- bdev = blkdev_get_no_open(MKDEV(major, minor), false);
- if (!bdev)
- return -ENODEV;
- if (bdev_is_partition(bdev)) {
- blkdev_put_no_open(bdev);
- return -ENODEV;
- }
-
- mutex_lock(&bdev->bd_queue->rq_qos_mutex);
- if (!disk_live(bdev->bd_disk)) {
- blkdev_put_no_open(bdev);
- mutex_unlock(&bdev->bd_queue->rq_qos_mutex);
- return -ENODEV;
- }
-
- ctx->body = input;
- ctx->bdev = bdev;
- return 0;
-}
-/*
- * Similar to blkg_conf_open_bdev, but additionally freezes the queue,
- * acquires q->elevator_lock, and ensures the correct locking order
- * between q->elevator_lock and q->rq_qos_mutex.
- *
- * This function returns negative error on failure. On success it returns
- * memflags which must be saved and later passed to blkg_conf_exit_frozen
- * for restoring the memalloc scope.
- */
-unsigned long __must_check blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx)
-{
- int ret;
- unsigned long memflags;
-
- if (ctx->bdev)
- return -EINVAL;
-
- ret = blkg_conf_open_bdev(ctx);
- if (ret < 0)
- return ret;
- /*
- * At this point, we haven’t started protecting anything related to QoS,
- * so we release q->rq_qos_mutex here, which was first acquired in blkg_
- * conf_open_bdev. Later, we re-acquire q->rq_qos_mutex after freezing
- * the queue and acquiring q->elevator_lock to maintain the correct
- * locking order.
- */
- mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
-
- memflags = blk_mq_freeze_queue(ctx->bdev->bd_queue);
- mutex_lock(&ctx->bdev->bd_queue->elevator_lock);
- mutex_lock(&ctx->bdev->bd_queue->rq_qos_mutex);
-
- return memflags;
-}
-
void blkg_conf_end(struct blkg_conf_ctx *ctx)
{
struct request_queue *q = bdev_get_queue(ctx->bdev);
@@ -885,141 +798,6 @@ int blkg_conf_start(struct blkcg *blkcg, struct blkg_conf_ctx *ctx)
}
EXPORT_SYMBOL_GPL(blkg_conf_start);
-/**
- * blkg_conf_prep - parse and prepare for per-blkg config update
- * @blkcg: target block cgroup
- * @pol: target policy
- * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
- *
- * Parse per-blkg config update from @ctx->input and initialize @ctx
- * accordingly. On success, @ctx->body points to the part of @ctx->input
- * following MAJ:MIN, @ctx->bdev points to the target block device and
- * @ctx->blkg to the blkg being configured.
- *
- * blkg_conf_open_bdev() may be called on @ctx beforehand. On success, this
- * function returns with queue lock held and must be followed by
- * blkg_conf_exit().
- */
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- struct blkg_conf_ctx *ctx)
- __acquires(&bdev->bd_queue->blkcg_mutex)
-{
- struct gendisk *disk;
- struct request_queue *q;
- struct blkcg_gq *blkg;
- int ret;
-
- ret = blkg_conf_open_bdev(ctx);
- if (ret)
- return ret;
-
- disk = ctx->bdev->bd_disk;
- q = disk->queue;
-
- /* Prevent concurrent with blkcg_deactivate_policy() */
- mutex_lock(&q->blkcg_mutex);
-
- if (!blkcg_policy_enabled(q, pol)) {
- ret = -EOPNOTSUPP;
- goto fail_unlock;
- }
-
- blkg = blkg_lookup(blkcg, q);
- if (blkg)
- goto success;
-
- /*
- * Create blkgs walking down from blkcg_root to @blkcg, so that all
- * non-root blkgs have access to their parents.
- */
- while (true) {
- struct blkcg *pos = blkcg;
- struct blkcg *parent;
-
- parent = blkcg_parent(blkcg);
- while (parent && !blkg_lookup(parent, q)) {
- pos = parent;
- parent = blkcg_parent(parent);
- }
-
- if (!blkcg_policy_enabled(q, pol)) {
- ret = -EOPNOTSUPP;
- goto fail_unlock;
- }
-
- blkg = blkg_lookup(pos, q);
- if (!blkg) {
- blkg = blkg_create(pos, disk);
- if (IS_ERR(blkg)) {
- ret = PTR_ERR(blkg);
- goto fail_unlock;
- }
- }
-
- if (pos == blkcg)
- goto success;
- }
-success:
- ctx->blkg = blkg;
- return 0;
-
-fail_unlock:
- mutex_unlock(&q->blkcg_mutex);
- /*
- * If queue was bypassing, we should retry. Do so after a
- * short msleep(). It isn't strictly necessary but queue
- * can be bypassing for some time and it's always nice to
- * avoid busy looping.
- */
- if (ret == -EBUSY) {
- msleep(10);
- ret = restart_syscall();
- }
- return ret;
-}
-EXPORT_SYMBOL_GPL(blkg_conf_prep);
-
-/**
- * blkg_conf_exit - clean up per-blkg config update
- * @ctx: blkg_conf_ctx initialized with blkg_conf_init()
- *
- * Clean up after per-blkg config update. This function must be called on all
- * blkg_conf_ctx's initialized with blkg_conf_init().
- */
-void blkg_conf_exit(struct blkg_conf_ctx *ctx)
- __releases(&ctx->bdev->bd_queue->blkcg_mutex)
- __releases(&ctx->bdev->bd_queue->rq_qos_mutex)
-{
- if (ctx->blkg) {
- mutex_unlock(&bdev_get_queue(ctx->bdev)->blkcg_mutex);
- ctx->blkg = NULL;
- }
-
- if (ctx->bdev) {
- mutex_unlock(&ctx->bdev->bd_queue->rq_qos_mutex);
- blkdev_put_no_open(ctx->bdev);
- ctx->body = NULL;
- ctx->bdev = NULL;
- }
-}
-EXPORT_SYMBOL_GPL(blkg_conf_exit);
-
-/*
- * Similar to blkg_conf_exit, but also unfreezes the queue and releases
- * q->elevator_lock. Should be used when blkg_conf_open_bdev_frozen
- * is used to open the bdev.
- */
-void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags)
-{
- if (ctx->bdev) {
- struct request_queue *q = ctx->bdev->bd_queue;
-
- blkg_conf_exit(ctx);
- mutex_unlock(&q->elevator_lock);
- blk_mq_unfreeze_queue(q, memflags);
- }
-}
-
static void blkg_iostat_add(struct blkg_iostat *dst, struct blkg_iostat *src)
{
int i;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index c3d16d52c275..aec801255821 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -221,12 +221,6 @@ struct blkg_conf_ctx {
};
void blkg_conf_init(struct blkg_conf_ctx *ctx, char *input);
-int blkg_conf_open_bdev(struct blkg_conf_ctx *ctx);
-unsigned long blkg_conf_open_bdev_frozen(struct blkg_conf_ctx *ctx);
-int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
- struct blkg_conf_ctx *ctx);
-void blkg_conf_exit(struct blkg_conf_ctx *ctx);
-void blkg_conf_exit_frozen(struct blkg_conf_ctx *ctx, unsigned long memflags);
void blkg_conf_end(struct blkg_conf_ctx *ctx);
int blkg_conf_start(struct blkcg *blkcg, struct blkg_conf_ctx *ctx);
--
2.51.0