From: "Fabio M. De Francesco"
To: Benjamin LaHaise, Alexander Viro, Christian Brauner, linux-aio@kvack.org, linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org
Cc: "Fabio M. De Francesco", Ira Weiny, Matthew Wilcox
Subject: [PATCH] fs/aio: Stop allocating aio rings from HIGHMEM
Date: Fri, 9 Jun 2023 16:59:37 +0200
Message-Id: <20230609145937.17610-1-fmdefrancesco@gmail.com>
X-Mailer: git-send-email 2.40.1

There is no need to allocate the aio ring pages from HIGHMEM, because only a small amount of memory is needed here. Therefore, pass GFP_USER instead of GFP_HIGHUSER to find_or_create_page(), so that the ring pages are always allocated from lowmem, and get rid of the kmap*() mappings: lowmem pages are permanently mapped into the kernel's address space, so their virtual address can be obtained directly with page_address().

Cc: Al Viro
Cc: Ira Weiny
Suggested-by: Matthew Wilcox
Signed-off-by: Fabio M. De Francesco
Reviewed-by: Ira Weiny
---
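Note for reviewers (below the "---", so not intended for the commit log):
the rule this patch relies on is that pages allocated without
__GFP_HIGHMEM always come from lowmem and therefore have a permanent
kernel mapping, so page_address() can replace the kmap*()/kunmap*()
pairs. A minimal, self-contained sketch of the pattern follows; struct
ring_hdr and write_ring_hdr() are hypothetical names used only for
illustration, not code from fs/aio.c:

	#include <linux/gfp.h>
	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/pagemap.h>

	struct ring_hdr {
		unsigned int id;
	};

	static int write_ring_hdr(struct address_space *mapping,
				  pgoff_t index, unsigned int id)
	{
		struct ring_hdr *hdr;
		struct page *page;

		/* GFP_USER does not include __GFP_HIGHMEM, so the
		 * page is guaranteed to come from lowmem. */
		page = find_or_create_page(mapping, index,
					   GFP_USER | __GFP_ZERO);
		if (!page)
			return -ENOMEM;

		/* Lowmem is permanently mapped: no kmap_atomic() /
		 * kunmap_atomic() pair is needed around the access. */
		hdr = page_address(page);
		hdr->id = id;
		flush_dcache_page(page);

		/* find_or_create_page() returns the page locked and
		 * with an elevated refcount. */
		unlock_page(page);
		put_page(page);
		return 0;
	}

Had the page been allocated with GFP_HIGHUSER instead, the write would
have needed the temporary mapping that this patch removes:

	hdr = kmap_atomic(page);
	hdr->id = id;
	kunmap_atomic(hdr);
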
De Francesco" , Ira Weiny , Matthew Wilcox Subject: [PATCH] fs/aio: Stop allocating aio rings from HIGHMEM Date: Fri, 9 Jun 2023 16:59:37 +0200 Message-Id: <20230609145937.17610-1-fmdefrancesco@gmail.com> X-Mailer: git-send-email 2.40.1 MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Type: text/plain; charset="utf-8" There is no need to allocate aio rings from HIGHMEM because of very little memory needed here. Therefore, use GFP_USER flag in find_or_create_page() and get rid of kmap*() mappings. Cc: Al Viro Cc: Ira Weiny Suggested-by: Matthew Wilcox Signed-off-by: Fabio M. De Francesco Reviewed-by: Ira Weiny --- fs/aio.c | 26 +++++++++----------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/fs/aio.c b/fs/aio.c index b0b17bd098bb..77e33619de40 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -530,7 +530,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned = int nr_events) for (i =3D 0; i < nr_pages; i++) { struct page *page; page =3D find_or_create_page(file->f_mapping, - i, GFP_HIGHUSER | __GFP_ZERO); + i, GFP_USER | __GFP_ZERO); if (!page) break; pr_debug("pid(%d) page[%d]->count=3D%d\n", @@ -571,7 +571,7 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned = int nr_events) ctx->user_id =3D ctx->mmap_base; ctx->nr_events =3D nr_events; /* trusted copy */ =20 - ring =3D kmap_atomic(ctx->ring_pages[0]); + ring =3D page_address(ctx->ring_pages[0]); ring->nr =3D nr_events; /* user copy */ ring->id =3D ~0U; ring->head =3D ring->tail =3D 0; @@ -579,7 +579,6 @@ static int aio_setup_ring(struct kioctx *ctx, unsigned = int nr_events) ring->compat_features =3D AIO_RING_COMPAT_FEATURES; ring->incompat_features =3D AIO_RING_INCOMPAT_FEATURES; ring->header_length =3D sizeof(struct aio_ring); - kunmap_atomic(ring); flush_dcache_page(ctx->ring_pages[0]); =20 return 0; @@ -682,9 +681,8 @@ static int ioctx_add_table(struct kioctx *ctx, struct m= m_struct *mm) * we are protected from page migration * changes ring_pages by ->ring_lock. */ - ring =3D kmap_atomic(ctx->ring_pages[0]); + ring =3D page_address(ctx->ring_pages[0]); ring->id =3D ctx->id; - kunmap_atomic(ring); return 0; } =20 @@ -1025,9 +1023,8 @@ static void user_refill_reqs_available(struct kioctx = *ctx) * against ctx->completed_events below will make sure we do the * safe/right thing. 
 	 */
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	head = ring->head;
-	kunmap_atomic(ring);
 
 	refill_reqs_available(ctx, head, ctx->tail);
 }
@@ -1133,12 +1130,11 @@ static void aio_complete(struct aio_kiocb *iocb)
 	if (++tail >= ctx->nr_events)
 		tail = 0;
 
-	ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
+	ev_page = page_address(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 	event = ev_page + pos % AIO_EVENTS_PER_PAGE;
 
 	*event = iocb->ki_res;
 
-	kunmap_atomic(ev_page);
 	flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]);
 
 	pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb,
@@ -1152,10 +1148,9 @@ static void aio_complete(struct aio_kiocb *iocb)
 
 	ctx->tail = tail;
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	head = ring->head;
 	ring->tail = tail;
-	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	ctx->completed_events++;
@@ -1215,10 +1210,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	mutex_lock(&ctx->ring_lock);
 
 	/* Access to ->ring_pages here is protected by ctx->ring_lock. */
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	head = ring->head;
 	tail = ring->tail;
-	kunmap_atomic(ring);
 
 	/*
 	 * Ensure that once we've read the current tail pointer, that
@@ -1250,10 +1244,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		avail = min(avail, nr - ret);
 		avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos);
 
-		ev = kmap(page);
+		ev = page_address(page);
 		copy_ret = copy_to_user(event + ret, ev + pos,
 					sizeof(*ev) * avail);
-		kunmap(page);
 
 		if (unlikely(copy_ret)) {
 			ret = -EFAULT;
@@ -1265,9 +1258,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
 		head %= ctx->nr_events;
 	}
 
-	ring = kmap_atomic(ctx->ring_pages[0]);
+	ring = page_address(ctx->ring_pages[0]);
 	ring->head = head;
-	kunmap_atomic(ring);
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, tail);
-- 
2.40.1