From: Quanmin Yan
Subject: [RFC PATCH mm-next v2 04/12] mm/damon/paddr: support addr_unit for DAMOS_LRU_[DE]PRIO
Date: Wed, 20 Aug 2025 16:06:14 +0800
Message-ID: <20250820080623.3799131-5-yanquanmin1@huawei.com>
In-Reply-To: <20250820080623.3799131-1-yanquanmin1@huawei.com>
References: <20250820080623.3799131-1-yanquanmin1@huawei.com>

From: SeongJae Park

Add addr_unit support for DAMOS_LRU_PRIO and DAMOS_LRU_DEPRIO action
handling in the DAMOS operation implementation for the physical address
space.
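The conversion helper used below, damon_pa_phys_addr(), is introduced by
an earlier patch in this series; as a rough sketch (an assumption based
on how it is used here, not the definitive implementation), it scales a
core-layer address by addr_unit into a physical address:

/*
 * Hypothetical sketch: DAMON core addresses are expressed in addr_unit
 * granules, so the corresponding physical address is the core-space
 * address multiplied by addr_unit.  The real helper is defined by an
 * earlier patch of this series.
 */
static phys_addr_t damon_pa_phys_addr(unsigned long addr,
		unsigned long addr_unit)
{
	return (phys_addr_t)addr * addr_unit;
}

Conversely, byte counts reported back to the core layer
(sz_filter_passed and the applied size return value) are divided by
addr_unit so that they stay in core address-space units.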
Signed-off-by: SeongJae Park
Signed-off-by: Quanmin Yan
---
 mm/damon/paddr.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
index 826c2064dbfd..ed71dd0bf80e 100644
--- a/mm/damon/paddr.c
+++ b/mm/damon/paddr.c
@@ -194,14 +194,15 @@ static unsigned long damon_pa_pageout(struct damon_region *r,
 }
 
 static inline unsigned long damon_pa_mark_accessed_or_deactivate(
-		struct damon_region *r, struct damos *s, bool mark_accessed,
+		struct damon_region *r, unsigned long addr_unit,
+		struct damos *s, bool mark_accessed,
 		unsigned long *sz_filter_passed)
 {
-	unsigned long addr, applied = 0;
+	phys_addr_t addr, applied = 0;
 	struct folio *folio;
 
-	addr = r->ar.start;
-	while (addr < r->ar.end) {
+	addr = damon_pa_phys_addr(r->ar.start, addr_unit);
+	while (addr < damon_pa_phys_addr(r->ar.end, addr_unit)) {
 		folio = damon_get_folio(PHYS_PFN(addr));
 		if (damon_pa_invalid_damos_folio(folio, s)) {
 			addr += PAGE_SIZE;
@@ -211,7 +212,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 		if (damos_pa_filter_out(s, folio))
 			goto put_folio;
 		else
-			*sz_filter_passed += folio_size(folio);
+			*sz_filter_passed += folio_size(folio) / addr_unit;
 
 		if (mark_accessed)
 			folio_mark_accessed(folio);
@@ -223,20 +224,22 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
 		folio_put(folio);
 	}
 	s->last_applied = folio;
-	return applied * PAGE_SIZE;
+	return applied * PAGE_SIZE / addr_unit;
 }
 
 static unsigned long damon_pa_mark_accessed(struct damon_region *r,
-		struct damos *s, unsigned long *sz_filter_passed)
+		unsigned long addr_unit, struct damos *s,
+		unsigned long *sz_filter_passed)
 {
-	return damon_pa_mark_accessed_or_deactivate(r, s, true,
+	return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, true,
 			sz_filter_passed);
 }
 
 static unsigned long damon_pa_deactivate_pages(struct damon_region *r,
-		struct damos *s, unsigned long *sz_filter_passed)
+		unsigned long addr_unit, struct damos *s,
+		unsigned long *sz_filter_passed)
 {
-	return damon_pa_mark_accessed_or_deactivate(r, s, false,
+	return damon_pa_mark_accessed_or_deactivate(r, addr_unit, s, false,
 			sz_filter_passed);
 }
 
@@ -309,9 +312,11 @@ static unsigned long damon_pa_apply_scheme(struct damon_ctx *ctx,
 	case DAMOS_PAGEOUT:
 		return damon_pa_pageout(r, aunit, scheme, sz_filter_passed);
 	case DAMOS_LRU_PRIO:
-		return damon_pa_mark_accessed(r, scheme, sz_filter_passed);
+		return damon_pa_mark_accessed(r, aunit, scheme,
+				sz_filter_passed);
 	case DAMOS_LRU_DEPRIO:
-		return damon_pa_deactivate_pages(r, scheme, sz_filter_passed);
+		return damon_pa_deactivate_pages(r, aunit, scheme,
+				sz_filter_passed);
 	case DAMOS_MIGRATE_HOT:
 	case DAMOS_MIGRATE_COLD:
 		return damon_pa_migrate(r, scheme, sz_filter_passed);
-- 
2.43.0