From: Lorenzo Stoakes
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org, linux-fsdevel@vger.kernel.org, Andrew Morton
Cc: Baoquan He, Uladzislau Rezki, Matthew Wilcox, David Hildenbrand, Liu Shixin, Jiri Olsa, Lorenzo Stoakes
Subject: [PATCH 4/4] mm: vmalloc: convert vread() to vread_iter()
Date: Sun, 19 Mar 2023 00:20:12 +0000
Message-Id: <119871ea9507eac7be5d91db38acdb03981e049e.1679183626.git.lstoakes@gmail.com>
X-Mailer: git-send-email 2.39.2
In-Reply-To:
References:

Having previously laid the foundation for converting vread() to an iterator
function, pull the trigger and do so.

This patch attempts to provide minimal refactoring and to reflect the
existing logic as closely as possible, with the exception of
aligned_vread_iter(), which drops the use of the deprecated kmap_atomic()
in favour of kmap_local_page().

All existing logic to zero portions of memory not read remains, and there
should be no functional difference other than a performance improvement in
/proc/kcore access to vmalloc regions.

Now that we have dispensed with the need for a bounce buffer in
read_kcore_iter() altogether, we remove the one allocated there.

Signed-off-by: Lorenzo Stoakes
---
 fs/proc/kcore.c         |  21 +--------
 include/linux/vmalloc.h |   3 +-
 mm/vmalloc.c            | 101 +++++++++++++++++++++-------------
 3 files changed, 57 insertions(+), 68 deletions(-)

diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 25e0eeb8d498..8a07f04c9203 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -307,13 +307,9 @@ static void append_kcore_note(char *notes, size_t *i, const char *name,
         *i = ALIGN(*i + descsz, 4);
 }
 
-static ssize_t
-read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
+static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 {
-        struct file *file = iocb->ki_filp;
-        char *buf = file->private_data;
         loff_t *ppos = &iocb->ki_pos;
-
         size_t phdrs_offset, notes_offset, data_offset;
         size_t page_offline_frozen = 1;
         size_t phdrs_len, notes_len;
@@ -507,9 +503,7 @@ read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
 
                 switch (m->type) {
                 case KCORE_VMALLOC:
-                        vread(buf, (char *)start, tsz);
-                        /* we have to zero-fill user buffer even if no read */
-                        if (copy_to_iter(buf, tsz, iter) != tsz) {
+                        if (vread_iter((char *)start, tsz, iter) != tsz) {
                                 ret = -EFAULT;
                                 goto out;
                         }
@@ -582,10 +576,6 @@ static int open_kcore(struct inode *inode, struct file *filp)
         if (ret)
                 return ret;
 
-        filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
-        if (!filp->private_data)
-                return -ENOMEM;
-
         if (kcore_need_update)
                 kcore_update_ram();
         if (i_size_read(inode) != proc_root_kcore->size) {
@@ -596,16 +586,9 @@ static int open_kcore(struct inode *inode, struct file *filp)
         return 0;
 }
 
-static int release_kcore(struct inode *inode, struct file *file)
-{
-        kfree(file->private_data);
-        return 0;
-}
-
 static const struct proc_ops kcore_proc_ops = {
         .proc_read_iter = read_kcore_iter,
         .proc_open      = open_kcore,
-        .proc_release   = release_kcore,
         .proc_lseek     = default_llseek,
 };
 
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 69250efa03d1..f70ebdf21f22 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -9,6 +9,7 @@
 #include 	/* pgprot_t */
 #include 
 #include 
+#include 
 
 #include 
 
@@ -251,7 +252,7 @@ static inline void set_vm_flush_reset_perms(void *addr)
 #endif
 
 /* for /proc/kcore */
-extern long vread(char *buf, char *addr, unsigned long count);
+extern long vread_iter(char *addr, size_t count, struct iov_iter *iter);
 
 /*
  * Internals. Don't use..
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c24b27664a97..3a32754266dc 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -37,7 +37,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -3446,20 +3445,20 @@ EXPORT_SYMBOL(vmalloc_32_user);
  * small helper routine , copy contents to buf from addr.
  * If the page is not present, fill zero.
  */
-
-static int aligned_vread(char *buf, char *addr, unsigned long count)
+static void aligned_vread_iter(char *addr, size_t count,
+                               struct iov_iter *iter)
 {
-        struct page *p;
-        int copied = 0;
+        struct page *page;
 
-        while (count) {
+        while (count > 0) {
                 unsigned long offset, length;
+                size_t copied = 0;
 
                 offset = offset_in_page(addr);
                 length = PAGE_SIZE - offset;
                 if (length > count)
                         length = count;
-                p = vmalloc_to_page(addr);
+                page = vmalloc_to_page(addr);
                 /*
                  * To do safe access to this _mapped_ area, we need
                  * lock. But adding lock here means that we need to add
@@ -3467,23 +3466,24 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
                  * interface, rarely used. Instead of that, we'll use
                  * kmap() and get small overhead in this access function.
                  */
-                if (p) {
+                if (page) {
                         /* We can expect USER0 is not used -- see vread() */
-                        void *map = kmap_atomic(p);
-                        memcpy(buf, map + offset, length);
-                        kunmap_atomic(map);
-                } else
-                        memset(buf, 0, length);
+                        void *map = kmap_local_page(page);
+
+                        copied = copy_to_iter(map + offset, length, iter);
+                        kunmap_local(map);
+                }
+
+                if (copied < length)
+                        iov_iter_zero(length - copied, iter);
 
                 addr += length;
-                buf += length;
-                copied += length;
                 count -= length;
         }
-        return copied;
 }
 
-static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags)
+static void vmap_ram_vread_iter(char *addr, int count, unsigned long flags,
+                                struct iov_iter *iter)
 {
         char *start;
         struct vmap_block *vb;
@@ -3496,7 +3496,7 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
          * handle it here.
          */
         if (!(flags & VMAP_BLOCK)) {
-                aligned_vread(buf, addr, count);
+                aligned_vread_iter(addr, count, iter);
                 return;
         }
 
@@ -3517,22 +3517,24 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
                 if (!count)
                         break;
                 start = vmap_block_vaddr(vb->va->va_start, rs);
-                while (addr < start) {
+
+                if (addr < start) {
+                        size_t to_zero = min_t(size_t, start - addr, count);
+
+                        iov_iter_zero(to_zero, iter);
+                        addr += to_zero;
+                        count -= (int)to_zero;
                         if (count == 0)
                                 goto unlock;
-                        *buf = '\0';
-                        buf++;
-                        addr++;
-                        count--;
                 }
+
                 /*it could start reading from the middle of used region*/
                 offset = offset_in_page(addr);
                 n = ((re - rs + 1) << PAGE_SHIFT) - offset;
                 if (n > count)
                         n = count;
-                aligned_vread(buf, start+offset, n);
+                aligned_vread_iter(start + offset, n, iter);
 
-                buf += n;
                 addr += n;
                 count -= n;
         }
@@ -3541,15 +3543,15 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
 
 finished:
         /* zero-fill the left dirty or free regions */
-        if (count)
-                memset(buf, 0, count);
+        if (count > 0)
+                iov_iter_zero(count, iter);
 }
 
 /**
- * vread() - read vmalloc area in a safe way.
- * @buf: buffer for reading data
- * @addr: vm address.
- * @count: number of bytes to be read.
+ * vread_iter() - read vmalloc area in a safe way to an iterator.
+ * @addr: vm address.
+ * @count: number of bytes to be read.
+ * @iter: the iterator to which data should be written.
  *
  * This function checks that addr is a valid vmalloc'ed area, and
  * copy data from that area to a given buffer. If the given memory range
@@ -3569,13 +3571,13 @@ static void vmap_ram_vread(char *buf, char *addr, int count, unsigned long flags
  * (same number as @count) or %0 if [addr...addr+count) doesn't
 * include any intersection with valid vmalloc area
  */
-long vread(char *buf, char *addr, unsigned long count)
+long vread_iter(char *addr, size_t count, struct iov_iter *iter)
 {
         struct vmap_area *va;
         struct vm_struct *vm;
-        char *vaddr, *buf_start = buf;
-        unsigned long buflen = count;
-        unsigned long n, size, flags;
+        char *vaddr;
+        size_t buflen = count;
+        size_t n, size, flags;
 
         might_sleep();
 
@@ -3595,7 +3597,7 @@ long vread(char *buf, char *addr, unsigned long count)
                 goto finished;
 
         list_for_each_entry_from(va, &vmap_area_list, list) {
-                if (!count)
+                if (count == 0)
                         break;
 
                 vm = va->vm;
@@ -3619,36 +3621,39 @@ long vread(char *buf, char *addr, unsigned long count)
 
                 if (addr >= vaddr + size)
                         continue;
-                while (addr < vaddr) {
+
+                if (addr < vaddr) {
+                        size_t to_zero = min_t(size_t, vaddr - addr, count);
+
+                        iov_iter_zero(to_zero, iter);
+                        addr += to_zero;
+                        count -= to_zero;
                         if (count == 0)
                                 goto finished;
-                        *buf = '\0';
-                        buf++;
-                        addr++;
-                        count--;
                 }
+
                 n = vaddr + size - addr;
                 if (n > count)
                         n = count;
 
                 if (flags & VMAP_RAM)
-                        vmap_ram_vread(buf, addr, n, flags);
+                        vmap_ram_vread_iter(addr, n, flags, iter);
                 else if (!(vm->flags & VM_IOREMAP))
-                        aligned_vread(buf, addr, n);
+                        aligned_vread_iter(addr, n, iter);
                 else /* IOREMAP area is treated as memory hole */
-                        memset(buf, 0, n);
-                buf += n;
+                        iov_iter_zero(n, iter);
+
                 addr += n;
                 count -= n;
         }
 finished:
         up_read(&vmap_area_lock);
 
-        if (buf == buf_start)
+        if (count == buflen)
                 return 0;
         /* zero-fill memory holes */
-        if (buf != buf_start + buflen)
-                memset(buf, 0, buflen - (buf - buf_start));
+        if (count > 0)
+                iov_iter_zero(count, iter);
 
         return buflen;
 }
-- 
2.39.2
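
[Editor's note] A brief illustration of the new calling convention may help readers unfamiliar
with iov_iter: instead of handing vread() a kernel buffer, a caller now describes the
destination with an iterator, and vread_iter() copies whatever is mapped and zero-fills the
remainder via iov_iter_zero(). The sketch below is not part of the patch; the helper
read_vmalloc_region() and its kvec setup are illustrative assumptions only.

        /*
         * Usage sketch, not from the patch: drive the new vread_iter()
         * interface from a kernel-internal caller via a kvec-backed
         * iterator. The helper name is hypothetical.
         */
        #include <linux/uio.h>
        #include <linux/vmalloc.h>

        static long read_vmalloc_region(void *dst, char *vaddr, size_t len)
        {
                struct kvec kv = { .iov_base = dst, .iov_len = len };
                struct iov_iter iter;

                /* ITER_DEST: the iterator describes the destination of the copy. */
                iov_iter_kvec(&iter, ITER_DEST, &kv, 1, len);

                /*
                 * Copies what is mapped and zero-fills holes, as vread() did,
                 * but writes straight to the iterator with no bounce buffer.
                 */
                return vread_iter(vaddr, len, &iter);
        }

In /proc/kcore the iterator arrives directly from the ->read_iter() path, which is why the
per-open bounce buffer previously allocated in open_kcore() can be dropped.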