Date: Fri, 08 Dec 2023 17:17:22 -0000
From: "tip-bot2 for Kai Huang"
Sender: tip-bot2@linutronix.de
Reply-to: linux-kernel@vger.kernel.org
To: linux-tip-commits@vger.kernel.org
Subject: [tip: x86/tdx] x86/virt/tdx: Designate reserved areas for all TDMRs
Cc: Kai Huang, Dave Hansen, Isaku Yamahata, "Kirill A. Shutemov", Yuan Yao,
    x86@kernel.org, linux-kernel@vger.kernel.org
Message-ID: <170205584265.398.18075210489719580719.tip-bot2@tip-bot2>

The following commit has been merged into the x86/tdx branch of tip:

Commit-ID:     dde3b60d572c941e2c72d22c18a08586fb67bb50
Gitweb:        https://git.kernel.org/tip/dde3b60d572c941e2c72d22c18a08586fb67bb50
Author:        Kai Huang
AuthorDate:    Fri, 08 Dec 2023 09:07:32 -08:00
Committer:     Dave Hansen
CommitterDate: Fri, 08 Dec 2023 09:12:39 -08:00

x86/virt/tdx: Designate reserved areas for all TDMRs

As the last step of constructing TDMRs, populate reserved areas for all
TDMRs.  Cover all memory holes and PAMTs with a TDMR reserved area.

[ dhansen: trim down changelog ]

Signed-off-by: Kai Huang
Signed-off-by: Dave Hansen
Reviewed-by: Isaku Yamahata
Reviewed-by: Kirill A. Shutemov
Reviewed-by: Yuan Yao
Reviewed-by: Dave Hansen
Link: https://lore.kernel.org/all/20231208170740.53979-12-dave.hansen%40intel.com
---
 arch/x86/virt/vmx/tdx/tdx.c | 217 +++++++++++++++++++++++++++++++++--
 1 file changed, 209 insertions(+), 8 deletions(-)

diff --git a/arch/x86/virt/vmx/tdx/tdx.c b/arch/x86/virt/vmx/tdx/tdx.c
index c2bff81..98283f7 100644
--- a/arch/x86/virt/vmx/tdx/tdx.c
+++ b/arch/x86/virt/vmx/tdx/tdx.c
@@ -23,6 +23,7 @@
 #include
 #include
 #include
+#include <linux/sort.h>
 #include
 #include
 #include
@@ -662,6 +663,207 @@ static unsigned long tdmrs_count_pamt_kb(struct tdmr_info_list *tdmr_list)
 	return pamt_size / 1024;
 }
 
+static int tdmr_add_rsvd_area(struct tdmr_info *tdmr, int *p_idx, u64 addr,
+			      u64 size, u16 max_reserved_per_tdmr)
+{
+	struct tdmr_reserved_area *rsvd_areas = tdmr->reserved_areas;
+	int idx = *p_idx;
+
+	/* Reserved area must be 4K aligned in offset and size */
+	if (WARN_ON(addr & ~PAGE_MASK || size & ~PAGE_MASK))
+		return -EINVAL;
+
+	if (idx >= max_reserved_per_tdmr) {
+		pr_warn("initialization failed: TDMR [0x%llx, 0x%llx): reserved areas exhausted.\n",
+			tdmr->base, tdmr_end(tdmr));
+		return -ENOSPC;
+	}
+
+	/*
+	 * Consume one reserved area per call.  Make no effort to
+	 * optimize or reduce the number of reserved areas which are
+	 * consumed by contiguous reserved areas, for instance.
+	 */
+	rsvd_areas[idx].offset = addr - tdmr->base;
+	rsvd_areas[idx].size = size;
+
+	*p_idx = idx + 1;
+
+	return 0;
+}
+
+/*
+ * Go through @tmb_list to find holes between memory areas.  If any of
+ * those holes fall within @tdmr, set up a TDMR reserved area to cover
+ * the hole.
+ */
+static int tdmr_populate_rsvd_holes(struct list_head *tmb_list,
+				    struct tdmr_info *tdmr,
+				    int *rsvd_idx,
+				    u16 max_reserved_per_tdmr)
+{
+	struct tdx_memblock *tmb;
+	u64 prev_end;
+	int ret;
+
+	/*
+	 * Start looking for reserved blocks at the
+	 * beginning of the TDMR.
+	 */
+	prev_end = tdmr->base;
+	list_for_each_entry(tmb, tmb_list, list) {
+		u64 start, end;
+
+		start = PFN_PHYS(tmb->start_pfn);
+		end = PFN_PHYS(tmb->end_pfn);
+
+		/* Break if this region is after the TDMR */
+		if (start >= tdmr_end(tdmr))
+			break;
+
+		/* Exclude regions before this TDMR */
+		if (end < tdmr->base)
+			continue;
+
+		/*
+		 * Skip over memory areas that
+		 * have already been dealt with.
+		 */
+		if (start <= prev_end) {
+			prev_end = end;
+			continue;
+		}
+
+		/* Add the hole before this region */
+		ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end,
+					 start - prev_end,
+					 max_reserved_per_tdmr);
+		if (ret)
+			return ret;
+
+		prev_end = end;
+	}
+
+	/* Add the hole after the last region if it exists. */
+	if (prev_end < tdmr_end(tdmr)) {
+		ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, prev_end,
+					 tdmr_end(tdmr) - prev_end,
+					 max_reserved_per_tdmr);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Go through @tdmr_list to find all PAMTs.  If any of those PAMTs
+ * overlaps with @tdmr, set up a TDMR reserved area to cover the
+ * overlapping part.
+ */
+static int tdmr_populate_rsvd_pamts(struct tdmr_info_list *tdmr_list,
+				    struct tdmr_info *tdmr,
+				    int *rsvd_idx,
+				    u16 max_reserved_per_tdmr)
+{
+	int i, ret;
+
+	for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+		struct tdmr_info *tmp = tdmr_entry(tdmr_list, i);
+		unsigned long pamt_base, pamt_size, pamt_end;
+
+		tdmr_get_pamt(tmp, &pamt_base, &pamt_size);
+		/* Each TDMR must already have PAMT allocated */
+		WARN_ON_ONCE(!pamt_size || !pamt_base);
+
+		pamt_end = pamt_base + pamt_size;
+		/* Skip PAMTs outside of the given TDMR */
+		if ((pamt_end <= tdmr->base) ||
+		    (pamt_base >= tdmr_end(tdmr)))
+			continue;
+
+		/* Only mark the part within the TDMR as reserved */
+		if (pamt_base < tdmr->base)
+			pamt_base = tdmr->base;
+		if (pamt_end > tdmr_end(tdmr))
+			pamt_end = tdmr_end(tdmr);
+
+		ret = tdmr_add_rsvd_area(tdmr, rsvd_idx, pamt_base,
+					 pamt_end - pamt_base,
+					 max_reserved_per_tdmr);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/* Compare function called by sort() for TDMR reserved areas */
+static int rsvd_area_cmp_func(const void *a, const void *b)
+{
+	struct tdmr_reserved_area *r1 = (struct tdmr_reserved_area *)a;
+	struct tdmr_reserved_area *r2 = (struct tdmr_reserved_area *)b;
+
+	if (r1->offset + r1->size <= r2->offset)
+		return -1;
+	if (r1->offset >= r2->offset + r2->size)
+		return 1;
+
+	/* Reserved areas cannot overlap.  The caller must guarantee. */
+	WARN_ON_ONCE(1);
+	return -1;
+}
+
+/*
+ * Populate reserved areas for the given @tdmr, including memory holes
+ * (via @tmb_list) and PAMTs (via @tdmr_list).
+ */
+static int tdmr_populate_rsvd_areas(struct tdmr_info *tdmr,
+				    struct list_head *tmb_list,
+				    struct tdmr_info_list *tdmr_list,
+				    u16 max_reserved_per_tdmr)
+{
+	int ret, rsvd_idx = 0;
+
+	ret = tdmr_populate_rsvd_holes(tmb_list, tdmr, &rsvd_idx,
+				       max_reserved_per_tdmr);
+	if (ret)
+		return ret;
+
+	ret = tdmr_populate_rsvd_pamts(tdmr_list, tdmr, &rsvd_idx,
+				       max_reserved_per_tdmr);
+	if (ret)
+		return ret;
+
+	/* TDX requires reserved areas listed in address ascending order */
+	sort(tdmr->reserved_areas, rsvd_idx, sizeof(struct tdmr_reserved_area),
+	     rsvd_area_cmp_func, NULL);
+
+	return 0;
+}
+
+/*
+ * Populate reserved areas for all TDMRs in @tdmr_list, including memory
+ * holes (via @tmb_list) and PAMTs.
+ */
+static int tdmrs_populate_rsvd_areas_all(struct tdmr_info_list *tdmr_list,
+					 struct list_head *tmb_list,
+					 u16 max_reserved_per_tdmr)
+{
+	int i;
+
+	for (i = 0; i < tdmr_list->nr_consumed_tdmrs; i++) {
+		int ret;
+
+		ret = tdmr_populate_rsvd_areas(tdmr_entry(tdmr_list, i),
+				tmb_list, tdmr_list, max_reserved_per_tdmr);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /*
  * Construct a list of TDMRs on the preallocated space in @tdmr_list
  * to cover all TDX memory regions in @tmb_list based on the TDX module
@@ -681,14 +883,13 @@ static int construct_tdmrs(struct list_head *tmb_list,
 			   tdmr_sysinfo->pamt_entry_size);
 	if (ret)
 		return ret;
-	/*
-	 * TODO:
-	 *
-	 *  - Designate reserved areas for each TDMR.
-	 *
-	 * Return -EINVAL until constructing TDMRs is done
-	 */
-	return -EINVAL;
+
+	ret = tdmrs_populate_rsvd_areas_all(tdmr_list, tmb_list,
+			tdmr_sysinfo->max_reserved_per_tdmr);
+	if (ret)
+		tdmrs_free_pamt_all(tdmr_list);
+
+	return ret;
 }
 
 static int init_tdx_module(void)
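
For experimenting with the hole-population logic above outside the kernel
tree, the following minimal user-space C sketch mirrors the walk that
tdmr_populate_rsvd_holes() performs over the address-sorted, non-overlapping
memory blocks.  It is an illustration only, not the kernel code: struct
memblock, MAX_RESERVED, populate_holes() and the main() driver are
hypothetical stand-ins for the kernel's tdx_memblock list, the TDX module's
max_reserved_per_tdmr limit, and the helpers added by this patch.

	#include <stdio.h>
	#include <stdint.h>

	/* Hypothetical stand-ins for the kernel structures used above. */
	struct memblock  { uint64_t start, end; };   /* phys range [start, end) */
	struct rsvd_area { uint64_t offset, size; }; /* relative to the TDMR base */

	#define MAX_RESERVED 16                      /* illustrative per-TDMR limit */

	/*
	 * Emit one reserved area per hole between the sorted memory blocks
	 * that falls inside the TDMR [tdmr_base, tdmr_end), mirroring the
	 * shape of tdmr_populate_rsvd_holes() in the patch.
	 */
	static int populate_holes(const struct memblock *blocks, int nr_blocks,
				  uint64_t tdmr_base, uint64_t tdmr_end,
				  struct rsvd_area *out, int *idx)
	{
		uint64_t prev_end = tdmr_base;
		int i;

		for (i = 0; i < nr_blocks; i++) {
			uint64_t start = blocks[i].start, end = blocks[i].end;

			if (start >= tdmr_end)		/* block entirely after the TDMR */
				break;
			if (end < tdmr_base)		/* block entirely before the TDMR */
				continue;
			if (start <= prev_end) {	/* no hole before this block */
				prev_end = end;
				continue;
			}
			if (*idx >= MAX_RESERVED)	/* reserved areas exhausted */
				return -1;
			out[*idx].offset = prev_end - tdmr_base;
			out[*idx].size   = start - prev_end;
			(*idx)++;
			prev_end = end;
		}

		if (prev_end < tdmr_end) {		/* trailing hole up to the TDMR end */
			if (*idx >= MAX_RESERVED)
				return -1;
			out[*idx].offset = prev_end - tdmr_base;
			out[*idx].size   = tdmr_end - prev_end;
			(*idx)++;
		}
		return 0;
	}

	int main(void)
	{
		/* Two memory blocks with a hole between them, inside one 1G TDMR. */
		struct memblock blocks[] = {
			{ 0x00000000, 0x10000000 },
			{ 0x20000000, 0x40000000 },
		};
		struct rsvd_area out[MAX_RESERVED];
		int idx = 0, i;

		populate_holes(blocks, 2, 0x0, 0x40000000, out, &idx);
		for (i = 0; i < idx; i++)
			printf("reserved: offset 0x%llx size 0x%llx\n",
			       (unsigned long long)out[i].offset,
			       (unsigned long long)out[i].size);
		return 0;
	}

Run as written, this prints a single reserved area covering the
[0x10000000, 0x20000000) gap.  The kernel version additionally clamps each
PAMT range into the TDMR and sorts the resulting areas into ascending
address order, as the patch above does.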