From: Bertrand Marquis
To: xen-devel@lists.xenproject.org
Cc: Volodymyr Babchuk, Stefano Stabellini, Julien Grall, Michal Orzel,
 Jens Wiklander, Julien Grall
Subject: [PATCH v8 5/6] xen/arm: ffa: Add indirect message between VM
Date: Mon, 11 Aug 2025 15:35:01 +0200
X-Mailer: git-send-email 2.47.1

Add support for indirect messages between VMs. This is only enabled if
CONFIG_FFA_VM_TO_VM is selected.

Signed-off-by: Bertrand Marquis
Reviewed-by: Jens Wiklander
Acked-by: Julien Grall
---
Change in v8:
- Add Julien A-b
Changes in v7:
- None
Changes in v6:
- fix code alignment (Jens)
- add Jens R-b
Changes in v5:
- Prevent potential overflow in send2 handling (Julien)
- Only use page_count with rx lock acquired
- Fix an issue where send2 between VMs was not doing the copy from the
  tx buffer but from a wrong location in the stack. This bug was
  introduced in v4 when switching to a local copy for the header.
Changes in v4:
- Use a local copy of the message header to prevent a TOC/TOU possible
  issue when using the payload size
Changes in v3:
- Move vm to vm indirect message handling in a sub function to simplify
  lock handling and make implementation easier to read
Changes in v2:
- Switch ifdef to IS_ENABLED
---
 xen/arch/arm/tee/ffa_msg.c | 117 ++++++++++++++++++++++++++++++++-----
 1 file changed, 102 insertions(+), 15 deletions(-)

diff --git a/xen/arch/arm/tee/ffa_msg.c b/xen/arch/arm/tee/ffa_msg.c
index ee594e737fc7..c20c5bec0f76 100644
--- a/xen/arch/arm/tee/ffa_msg.c
+++ b/xen/arch/arm/tee/ffa_msg.c
@@ -88,43 +88,130 @@ out:
                  resp.a7 & mask);
 }
 
+static int32_t ffa_msg_send2_vm(uint16_t dst_id, const void *src_buf,
+                                struct ffa_part_msg_rxtx *src_msg)
+{
+    struct domain *dst_d;
+    struct ffa_ctx *dst_ctx;
+    struct ffa_part_msg_rxtx *dst_msg;
+    int err;
+    int32_t ret;
+
+    if ( dst_id == 0 )
+        /* FF-A ID 0 is the hypervisor, this is not valid */
+        return FFA_RET_INVALID_PARAMETERS;
+
+    /* This is also checking that dest is not src */
+    err = rcu_lock_live_remote_domain_by_id(dst_id - 1, &dst_d);
+    if ( err )
+        return FFA_RET_INVALID_PARAMETERS;
+
+    if ( dst_d->arch.tee == NULL )
+    {
+        ret = FFA_RET_INVALID_PARAMETERS;
+        goto out_unlock;
+    }
+
+    dst_ctx = dst_d->arch.tee;
+    if ( !dst_ctx->guest_vers )
+    {
+        ret = FFA_RET_INVALID_PARAMETERS;
+        goto out_unlock;
+    }
+
+    /* This also checks that destination has set a Rx buffer */
+    ret = ffa_rx_acquire(dst_d);
+    if ( ret )
+        goto out_unlock;
+
+    /* we need to have enough space in the destination buffer */
+    if ( (dst_ctx->page_count * FFA_PAGE_SIZE -
+          sizeof(struct ffa_part_msg_rxtx)) < src_msg->msg_size )
+    {
+        ret = FFA_RET_NO_MEMORY;
+        ffa_rx_release(dst_d);
+        goto out_unlock;
+    }
+
+    dst_msg = dst_ctx->rx;
+
+    /* prepare destination header */
+    dst_msg->flags = 0;
+    dst_msg->reserved = 0;
+    dst_msg->msg_offset = sizeof(struct ffa_part_msg_rxtx);
+    dst_msg->send_recv_id = src_msg->send_recv_id;
+    dst_msg->msg_size = src_msg->msg_size;
+
+    memcpy(dst_ctx->rx + sizeof(struct ffa_part_msg_rxtx),
+           src_buf + src_msg->msg_offset, src_msg->msg_size);
+
+    /* receiver rx buffer will be released by the receiver*/
+
+out_unlock:
+    rcu_unlock_domain(dst_d);
+    if ( !ret )
+        ffa_raise_rx_buffer_full(dst_d);
+
+    return ret;
+}
+
 int32_t ffa_handle_msg_send2(struct cpu_user_regs *regs)
 {
     struct domain *src_d = current->domain;
     struct ffa_ctx *src_ctx = src_d->arch.tee;
-    const struct ffa_part_msg_rxtx *src_msg;
+    struct ffa_part_msg_rxtx src_msg;
     uint16_t dst_id, src_id;
     int32_t ret;
 
-    if ( !ffa_fw_supports_fid(FFA_MSG_SEND2) )
-        return FFA_RET_NOT_SUPPORTED;
+    BUILD_BUG_ON(sizeof(struct ffa_part_msg_rxtx) >= FFA_PAGE_SIZE);
 
     if ( !spin_trylock(&src_ctx->tx_lock) )
         return FFA_RET_BUSY;
 
-    src_msg = src_ctx->tx;
-    src_id = src_msg->send_recv_id >> 16;
-    dst_id = src_msg->send_recv_id & GENMASK(15,0);
+    /* create a copy of the message header */
+    memcpy(&src_msg, src_ctx->tx, sizeof(src_msg));
 
-    if ( src_id != ffa_get_vm_id(src_d) || !FFA_ID_IS_SECURE(dst_id) )
+    src_id = src_msg.send_recv_id >> 16;
+    dst_id = src_msg.send_recv_id & GENMASK(15,0);
+
+    if ( src_id != ffa_get_vm_id(src_d) )
     {
         ret = FFA_RET_INVALID_PARAMETERS;
-        goto out_unlock_tx;
+        goto out;
     }
 
     /* check source message fits in buffer */
-    if ( src_ctx->page_count * FFA_PAGE_SIZE <
-         src_msg->msg_offset + src_msg->msg_size ||
-         src_msg->msg_offset < sizeof(struct ffa_part_msg_rxtx) )
+    if ( src_msg.msg_offset < sizeof(struct ffa_part_msg_rxtx) ||
+         src_msg.msg_size == 0 ||
+         src_msg.msg_offset > src_ctx->page_count * FFA_PAGE_SIZE ||
+         src_msg.msg_size > (src_ctx->page_count * FFA_PAGE_SIZE -
+                             src_msg.msg_offset) )
     {
         ret = FFA_RET_INVALID_PARAMETERS;
-        goto out_unlock_tx;
+        goto out;
     }
 
-    ret = ffa_simple_call(FFA_MSG_SEND2,
-                          ((uint32_t)ffa_get_vm_id(src_d)) << 16, 0, 0, 0);
+    if ( FFA_ID_IS_SECURE(dst_id) )
+    {
+        /* Message for a secure partition */
+        if ( !ffa_fw_supports_fid(FFA_MSG_SEND2) )
+        {
+            ret = FFA_RET_NOT_SUPPORTED;
+            goto out;
+        }
+
+        ret = ffa_simple_call(FFA_MSG_SEND2,
+                              ((uint32_t)ffa_get_vm_id(src_d)) << 16, 0, 0, 0);
+    }
+    else if ( IS_ENABLED(CONFIG_FFA_VM_TO_VM) )
+    {
+        /* Message for a VM */
+        ret = ffa_msg_send2_vm(dst_id, src_ctx->tx, &src_msg);
+    }
+    else
+        ret = FFA_RET_INVALID_PARAMETERS;
 
-out_unlock_tx:
+out:
     spin_unlock(&src_ctx->tx_lock);
     return ret;
 }
-- 
2.47.1
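
As an illustration of the flow this patch handles, below is a minimal,
hypothetical guest-side sketch of how a VM could compose an indirect message
in its TX buffer and invoke FFA_MSG_SEND2 so that the new ffa_msg_send2_vm()
path copies it into the receiver's RX buffer. The field names mirror
struct ffa_part_msg_rxtx as used in the patch, but the exact layout, the
FFA_MSG_SEND2 function ID value, the single-page TX buffer assumption and
the ffa_smc() wrapper are illustrative assumptions only, not part of this
patch or of Xen's headers.

#include <stdint.h>
#include <string.h>

#define FFA_PAGE_SIZE   4096U
#define FFA_MSG_SEND2   0x84000086U   /* assumed FF-A 1.1 function ID */

/* Partition message header at the start of the TX buffer (assumed layout). */
struct ffa_part_msg_rxtx {
    uint32_t flags;
    uint32_t reserved;
    uint32_t msg_offset;
    uint32_t send_recv_id;  /* sender ID in bits [31:16], receiver in [15:0] */
    uint32_t msg_size;
};

/* Hypothetical SMC helper provided by the guest's FF-A driver. */
extern int32_t ffa_smc(uint32_t fid, uint32_t a1, uint32_t a2,
                       uint32_t a3, uint32_t a4);

static int32_t send_indirect_msg(void *tx_buf, uint16_t self_id,
                                 uint16_t dst_id, const void *payload,
                                 uint32_t len)
{
    struct ffa_part_msg_rxtx *hdr = tx_buf;

    /* Assumes a single-page TX buffer; payload must fit after the header. */
    if ( len == 0 || len > FFA_PAGE_SIZE - sizeof(*hdr) )
        return -1;

    hdr->flags = 0;
    hdr->reserved = 0;
    hdr->msg_offset = sizeof(*hdr);
    hdr->send_recv_id = ((uint32_t)self_id << 16) | dst_id;
    hdr->msg_size = len;
    memcpy((uint8_t *)tx_buf + hdr->msg_offset, payload, len);

    /* Per the FF-A ABI, w1 carries the sender ID in its upper 16 bits. */
    return ffa_smc(FFA_MSG_SEND2, (uint32_t)self_id << 16, 0, 0, 0);
}

With such a caller, a destination ID with the secure bit set would still be
forwarded to the SPMC by ffa_handle_msg_send2(), while a non-secure ID would
be routed through ffa_msg_send2_vm() when CONFIG_FFA_VM_TO_VM is enabled.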