From nobody Sat Feb 7 10:02:38 2026
From: "Alexey Gladkov (Intel)" <legion@kernel.org>
To: linux-kernel@vger.kernel.org, linux-coco@lists.linux.dev
Cc: Thomas Gleixner, Ingo Molnar, Borislav Petkov, Dave Hansen,
 "H. Peter Anvin", "Kirill A. Shutemov", Andrew Morton, Yuan Yao,
 Geert Uytterhoeven, Yuntao Wang, Kai Huang, Baoquan He, Oleg Nesterov,
 cho@microsoft.com, decui@microsoft.com, John.Starks@microsoft.com
Subject: [PATCH v2 2/5] x86/tdx: Add validation of userspace MMIO instructions
Date: Mon, 5 Aug 2024 15:29:43 +0200
Message-ID: <6f989aea155817ef2f8a5fd2240ccff3f74d4edd.1722862355.git.legion@kernel.org>
X-Mailer: git-send-email 2.45.2

Instructions from kernel space are considered trusted. If the MMIO
instruction is from userspace it must be checked.

For userspace instructions, it is necessary to check that the instruction
has not changed between the time the #VE was delivered and the time the
instruction is decoded and emulated.

After parsing the userspace instruction, it is necessary to ensure that:

1. the operation direction (read/write) corresponds to the #VE info;
2. the address still points to mapped memory of the current process;
3. the address does not point to private memory.
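As an illustration only, the three checks can be condensed into the sketch
below. This is not code from the patch: mmio_is_write() and mmio_is_read()
are hypothetical helpers that stand in for the switch over enum
insn_mmio_type in valid_vaddr() in the diff, and the caller is assumed to
already hold mmap_read_lock(current->mm).

/*
 * Illustrative sketch only; see valid_vaddr() in the diff for the real
 * code. mmio_is_write()/mmio_is_read() are hypothetical helpers.
 */
static int check_user_mmio(struct ve_info *ve, enum insn_mmio_type mmio,
			   unsigned long vaddr)
{
	phys_addr_t phys_addr;
	bool writable = false;

	/*
	 * Checks 2 and 3: the address must still be mapped in the current
	 * process and must resolve to the same shared (non-private) GPA
	 * that triggered the #VE. -EAGAIN lets the instruction be retried,
	 * since the page may simply have been swapped out or migrated.
	 */
	if (get_phys_addr(vaddr, &phys_addr, &writable) ||
	    ve->gpa != cc_mkdec(phys_addr))
		return -EAGAIN;

	/* Check 1: the direction must match the #VE exit qualification. */
	if (mmio_is_write(mmio) &&
	    (!writable || !(ve->exit_qual & EPT_VIOLATION_ACC_WRITE)))
		return -EFAULT;

	if (mmio_is_read(mmio) && !(ve->exit_qual & EPT_VIOLATION_ACC_READ))
		return -EFAULT;

	return 0;
}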
Signed-off-by: Alexey Gladkov (Intel) <legion@kernel.org>
---
 arch/x86/coco/tdx/tdx.c | 128 ++++++++++++++++++++++++++++++++++++----
 1 file changed, 115 insertions(+), 13 deletions(-)

diff --git a/arch/x86/coco/tdx/tdx.c b/arch/x86/coco/tdx/tdx.c
index af0b6c1cacf7..95f2ff49728c 100644
--- a/arch/x86/coco/tdx/tdx.c
+++ b/arch/x86/coco/tdx/tdx.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -405,6 +406,84 @@ static bool mmio_write(int size, unsigned long addr, unsigned long val)
 			       EPT_WRITE, addr, val);
 }
 
+static inline bool is_private_gpa(u64 gpa)
+{
+	return gpa == cc_mkenc(gpa);
+}
+
+static int get_phys_addr(unsigned long addr, phys_addr_t *phys_addr, bool *writable)
+{
+	unsigned int level;
+	pgd_t *pgdp;
+	pte_t *ptep;
+
+	/*
+	 * Address validation only makes sense for a user process. The lock must
+	 * be obtained before validation can begin.
+	 */
+	mmap_assert_locked(current->mm);
+
+	pgdp = pgd_offset(current->mm, addr);
+
+	if (!pgd_none(*pgdp)) {
+		ptep = lookup_address_in_pgd(pgdp, addr, &level);
+		if (ptep) {
+			unsigned long offset;
+
+			offset = addr & ~page_level_mask(level);
+			*phys_addr = PFN_PHYS(pte_pfn(*ptep));
+			*phys_addr |= offset;
+
+			*writable = pte_write(*ptep);
+
+			return 0;
+		}
+	}
+
+	return -EFAULT;
+}
+
+static int valid_vaddr(struct ve_info *ve, enum insn_mmio_type mmio, int size,
+		       unsigned long vaddr)
+{
+	phys_addr_t phys_addr;
+	bool writable = false;
+
+	/* It's not fatal. This can happen due to swap out or page migration. */
+	if (get_phys_addr(vaddr, &phys_addr, &writable) || (ve->gpa != cc_mkdec(phys_addr)))
+		return -EAGAIN;
+
+	/*
+	 * Re-check whether #VE info matches the instruction that was decoded.
+	 *
+	 * The ve->gpa was valid at the time ve_info was received. But this code
+	 * is executed with interrupts enabled, allowing TLB shootdown and
+	 * therefore munmap() to be executed in a parallel thread.
+	 *
+	 * By the time MMIO emulation is performed, ve->gpa may already be
+	 * unmapped from the process, the device it belongs to removed from the
+	 * system and something else could be plugged in its place.
+	 */
+	switch (mmio) {
+	case INSN_MMIO_WRITE:
+	case INSN_MMIO_WRITE_IMM:
+		if (!writable || !(ve->exit_qual & EPT_VIOLATION_ACC_WRITE))
+			return -EFAULT;
+		break;
+	case INSN_MMIO_READ:
+	case INSN_MMIO_READ_ZERO_EXTEND:
+	case INSN_MMIO_READ_SIGN_EXTEND:
+		if (!(ve->exit_qual & EPT_VIOLATION_ACC_READ))
+			return -EFAULT;
+		break;
+	default:
+		WARN_ONCE(1, "Unsupported mmio instruction: %d", mmio);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 static int handle_mmio_write(struct insn *insn, enum insn_mmio_type mmio, int size,
 			     struct pt_regs *regs, struct ve_info *ve)
 {
@@ -489,7 +568,7 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 	enum insn_mmio_type mmio;
 	struct insn insn = {};
 	unsigned long vaddr;
-	int size;
+	int size, ret;
 
 	/* Only in-kernel MMIO is supported */
 	if (WARN_ON_ONCE(user_mode(regs)))
@@ -505,6 +584,17 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 	if (WARN_ON_ONCE(mmio == INSN_MMIO_DECODE_FAILED))
 		return -EINVAL;
 
+	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
+
+	if (user_mode(regs)) {
+		if (mmap_read_lock_killable(current->mm))
+			return -EINTR;
+
+		ret = valid_vaddr(ve, mmio, size, vaddr);
+		if (ret)
+			goto unlock;
+	}
+
 	/*
 	 * Reject EPT violation #VEs that split pages.
 	 *
@@ -514,30 +604,39 @@ static int handle_mmio(struct pt_regs *regs, struct ve_info *ve)
 	 *
 	 * load_unaligned_zeropad() will recover using exception fixups.
 	 */
-	vaddr = (unsigned long)insn_get_addr_ref(&insn, regs);
-	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE)
-		return -EFAULT;
+	if (vaddr / PAGE_SIZE != (vaddr + size - 1) / PAGE_SIZE) {
+		ret = -EFAULT;
+		goto unlock;
+	}
 
 	switch (mmio) {
 	case INSN_MMIO_WRITE:
 	case INSN_MMIO_WRITE_IMM:
 	case INSN_MMIO_MOVS:
-		return handle_mmio_write(&insn, mmio, size, regs, ve);
+		ret = handle_mmio_write(&insn, mmio, size, regs, ve);
+		break;
 	case INSN_MMIO_READ:
 	case INSN_MMIO_READ_ZERO_EXTEND:
 	case INSN_MMIO_READ_SIGN_EXTEND:
-		return handle_mmio_read(&insn, mmio, size, regs, ve);
+		ret = handle_mmio_read(&insn, mmio, size, regs, ve);
+		break;
 	case INSN_MMIO_DECODE_FAILED:
 		/*
 		 * MMIO was accessed with an instruction that could not be
 		 * decoded or handled properly. It was likely not using io.h
 		 * helpers or accessed MMIO accidentally.
 		 */
-		return -EINVAL;
+		ret = -EINVAL;
+		break;
 	default:
 		WARN_ONCE(1, "Unknown insn_decode_mmio() decode value?");
-		return -EINVAL;
+		ret = -EINVAL;
 	}
+unlock:
+	if (user_mode(regs))
+		mmap_read_unlock(current->mm);
+
+	return ret;
 }
 
 static bool handle_in(struct pt_regs *regs, int size, int port)
@@ -681,11 +780,6 @@ static int virt_exception_user(struct pt_regs *regs, struct ve_info *ve)
 	}
 }
 
-static inline bool is_private_gpa(u64 gpa)
-{
-	return gpa == cc_mkenc(gpa);
-}
-
 /*
  * Handle the kernel #VE.
  *
@@ -723,6 +817,14 @@ bool tdx_handle_virt_exception(struct pt_regs *regs, struct ve_info *ve)
 		insn_len = virt_exception_user(regs, ve);
 	else
 		insn_len = virt_exception_kernel(regs, ve);
+
+	/*
+	 * Special case: return to userspace without advancing regs->ip so
+	 * that the instruction is repeated.
+	 */
+	if (insn_len == -EAGAIN)
+		return true;
+
 	if (insn_len < 0)
 		return false;
 
-- 
2.45.2