From: Stefano Stabellini
To: xen-devel@lists.xenproject.org
Subject: [PATCH 04/12] xen: split alloc_heap_pages in two halves for reusability
Date: Tue, 14 Apr 2020 18:02:47 -0700
Message-Id: <20200415010255.10081-4-sstabellini@kernel.org>
Cc: sstabellini@kernel.org,
    julien@xen.org, Wei Liu, andrew.cooper3@citrix.com, Ian Jackson,
    George Dunlap, jbeulich@suse.com, Stefano Stabellini, Volodymyr_Babchuk@epam.com

This patch splits the implementation of alloc_heap_pages into two halves
so that the second half can be reused by the next patch.

Signed-off-by: Stefano Stabellini
CC: andrew.cooper3@citrix.com
CC: jbeulich@suse.com
CC: George Dunlap
CC: Ian Jackson
CC: Wei Liu
---
Comments are welcome. I am not convinced that this is the right way to
split it. Please let me know if you have any suggestions.
---
 xen/common/page_alloc.c | 94 +++++++++++++++++++++++------------------
 1 file changed, 53 insertions(+), 41 deletions(-)

diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 10b7aeca48..79ae64d4b8 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -911,54 +911,18 @@ static struct page_info *get_free_buddy(unsigned int zone_lo,
     }
 }
 
-/* Allocate 2^@order contiguous pages. */
-static struct page_info *alloc_heap_pages(
-    unsigned int zone_lo, unsigned int zone_hi,
-    unsigned int order, unsigned int memflags,
-    struct domain *d)
+static void __alloc_heap_pages(struct page_info **pgo,
+                               unsigned int order,
+                               unsigned int memflags,
+                               struct domain *d)
 {
     nodeid_t node;
     unsigned int i, buddy_order, zone, first_dirty;
     unsigned long request = 1UL << order;
-    struct page_info *pg;
     bool need_tlbflush = false;
     uint32_t tlbflush_timestamp = 0;
     unsigned int dirty_cnt = 0;
-
-    /* Make sure there are enough bits in memflags for nodeID. */
-    BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
-
-    ASSERT(zone_lo <= zone_hi);
-    ASSERT(zone_hi < NR_ZONES);
-
-    if ( unlikely(order > MAX_ORDER) )
-        return NULL;
-
-    spin_lock(&heap_lock);
-
-    /*
-     * Claimed memory is considered unavailable unless the request
-     * is made by a domain with sufficient unclaimed pages.
-     */
-    if ( (outstanding_claims + request > total_avail_pages) &&
-          ((memflags & MEMF_no_refcount) ||
-           !d || d->outstanding_pages < request) )
-    {
-        spin_unlock(&heap_lock);
-        return NULL;
-    }
-
-    pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d);
-    /* Try getting a dirty buddy if we couldn't get a clean one. */
-    if ( !pg && !(memflags & MEMF_no_scrub) )
-        pg = get_free_buddy(zone_lo, zone_hi, order,
-                            memflags | MEMF_no_scrub, d);
-    if ( !pg )
-    {
-        /* No suitable memory blocks. Fail the request. */
-        spin_unlock(&heap_lock);
-        return NULL;
-    }
+    struct page_info *pg = *pgo;
 
     node = phys_to_nid(page_to_maddr(pg));
     zone = page_to_zone(pg);
@@ -984,6 +948,7 @@ static struct page_info *alloc_heap_pages(
             first_dirty = 0; /* We've moved past original first_dirty */
         }
     }
+    *pgo = pg;
 
     ASSERT(avail[node][zone] >= request);
     avail[node][zone] -= request;
@@ -1062,6 +1027,53 @@ static struct page_info *alloc_heap_pages(
     if ( need_tlbflush )
         filtered_flush_tlb_mask(tlbflush_timestamp);
 
+}
+
+/* Allocate 2^@order contiguous pages. */
+static struct page_info *alloc_heap_pages(
+    unsigned int zone_lo, unsigned int zone_hi,
+    unsigned int order, unsigned int memflags,
+    struct domain *d)
+{
+    unsigned long request = 1UL << order;
+    struct page_info *pg;
+
+    /* Make sure there are enough bits in memflags for nodeID. */
+    BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
+
+    ASSERT(zone_lo <= zone_hi);
+    ASSERT(zone_hi < NR_ZONES);
+
+    if ( unlikely(order > MAX_ORDER) )
+        return NULL;
+
+    spin_lock(&heap_lock);
+
+    /*
+     * Claimed memory is considered unavailable unless the request
+     * is made by a domain with sufficient unclaimed pages.
+     */
+    if ( (outstanding_claims + request > total_avail_pages) &&
+          ((memflags & MEMF_no_refcount) ||
+           !d || d->outstanding_pages < request) )
+    {
+        spin_unlock(&heap_lock);
+        return NULL;
+    }
+
+    pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d);
+    /* Try getting a dirty buddy if we couldn't get a clean one. */
+    if ( !pg && !(memflags & MEMF_no_scrub) )
+        pg = get_free_buddy(zone_lo, zone_hi, order,
+                            memflags | MEMF_no_scrub, d);
+    if ( !pg )
+    {
+        /* No suitable memory blocks. Fail the request. */
+        spin_unlock(&heap_lock);
+        return NULL;
+    }
+
+    __alloc_heap_pages(&pg, order, memflags, d);
     return pg;
 }
 
-- 
2.17.1
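
For reviewers skimming the result: the net effect of the split is that
alloc_heap_pages() keeps the parameter checks, heap locking, claim
accounting and buddy lookup, while the new __alloc_heap_pages() takes
over splitting the buddy, updating the counters and scrubbing/flushing.
The page is passed by reference because the helper advances pg while
breaking the buddy down to the requested order and hands the final page
back through *pgo. A condensed sketch of that call structure
(illustration only, with locking, claim handling and scrub/flush
details elided; it is not the literal patched code):

    /* Sketch: the caller half keeps the checks, lock and buddy lookup. */
    static struct page_info *alloc_heap_pages(unsigned int zone_lo,
                                              unsigned int zone_hi,
                                              unsigned int order,
                                              unsigned int memflags,
                                              struct domain *d)
    {
        struct page_info *pg;

        if ( order > MAX_ORDER )
            return NULL;

        /* ... spin_lock(&heap_lock), outstanding-claims check ... */

        pg = get_free_buddy(zone_lo, zone_hi, order, memflags, d);
        if ( !pg )
            return NULL;                 /* after releasing the lock */

        /* Callee half: splits the buddy, updates avail[], scrubs and
         * flushes; it may move pg forward while splitting, so the page
         * is passed by reference and comes back through *pgo. */
        __alloc_heap_pages(&pg, order, memflags, d);
        return pg;
    }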