Replace read_atomic() with ACCESS_ONCE() to match the intended use, that
is, to prevent the compiler from reading shared memory more than once as
an optimization.

Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
---
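For context, a minimal standalone sketch of the hazard this guards
against (the macro and names below are illustrative GNU C, not Xen's
actual definitions): with a plain load the compiler is free to re-read
a guest-shared field after it has been validated, whereas a volatile
cast in the spirit of ACCESS_ONCE() forces exactly one load.

    #include <stdint.h>

    /* Illustrative only -- not Xen's actual ACCESS_ONCE() definition. */
    #define access_once(x) (*(volatile typeof(x) *)&(x))

    struct mem_range {
        uint64_t address;
        uint32_t page_count;   /* lives in guest-writable shared memory */
    };

    static uint32_t checked_page_count(struct mem_range *range,
                                       uint32_t limit)
    {
        /*
         * Exactly one load: the compiler cannot reload
         * range->page_count after the bounds check below, so a guest
         * racing on the shared buffer cannot bypass the validation.
         */
        uint32_t count = access_once(range->page_count);

        if ( count > limit )
            return 0;

        return count;
    }

read_atomic() also results in a single access; the change is about using
the primitive whose documented purpose matches the concern here.
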
xen/arch/arm/tee/ffa_shm.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/xen/arch/arm/tee/ffa_shm.c b/xen/arch/arm/tee/ffa_shm.c
index eed9ad2d2986..75a5b66aeb4c 100644
--- a/xen/arch/arm/tee/ffa_shm.c
+++ b/xen/arch/arm/tee/ffa_shm.c
@@ -7,6 +7,7 @@
#include <xen/sizes.h>
#include <xen/types.h>
#include <xen/mm.h>
+#include <xen/lib.h>
#include <xen/list.h>
#include <xen/spinlock.h>

@@ -171,8 +172,8 @@ static int get_shm_pages(struct domain *d, struct ffa_shm_mem *shm,

for ( n = 0; n < range_count; n++ )
{
- page_count = read_atomic(&range[n].page_count);
- addr = read_atomic(&range[n].address);
+ page_count = ACCESS_ONCE(range[n].page_count);
+ addr = ACCESS_ONCE(range[n].address);
for ( m = 0; m < page_count; m++ )
{
if ( pg_idx >= shm->page_count )
@@ -527,13 +528,13 @@ void ffa_handle_mem_share(struct cpu_user_regs *regs)
goto out_unlock;

mem_access = ctx->tx + trans.mem_access_offs;
- if ( read_atomic(&mem_access->access_perm.perm) != FFA_MEM_ACC_RW )
+ if ( ACCESS_ONCE(mem_access->access_perm.perm) != FFA_MEM_ACC_RW )
{
ret = FFA_RET_NOT_SUPPORTED;
goto out_unlock;
}

- region_offs = read_atomic(&mem_access->region_offs);
+ region_offs = ACCESS_ONCE(mem_access->region_offs);
if ( sizeof(*region_descr) + region_offs > frag_len )
{
ret = FFA_RET_NOT_SUPPORTED;
@@ -541,8 +542,8 @@ void ffa_handle_mem_share(struct cpu_user_regs *regs)
}

region_descr = ctx->tx + region_offs;
- range_count = read_atomic(&region_descr->address_range_count);
- page_count = read_atomic(&region_descr->total_page_count);
+ range_count = ACCESS_ONCE(region_descr->address_range_count);
+ page_count = ACCESS_ONCE(region_descr->total_page_count);

if ( !page_count )
{
@@ -557,7 +558,7 @@ void ffa_handle_mem_share(struct cpu_user_regs *regs)
goto out_unlock;
}
shm->sender_id = trans.sender_id;
- shm->ep_id = read_atomic(&mem_access->access_perm.endpoint_id);
+ shm->ep_id = ACCESS_ONCE(mem_access->access_perm.endpoint_id);

/*
* Check that the Composite memory region descriptor fits.
--
2.34.1

Hi Jens,
> On 9 Apr 2024, at 17:36, Jens Wiklander <jens.wiklander@linaro.org> wrote:
>
> Replace read_atomic() with ACCESS_ONCE() to match the intended use, that
> is, to prevent the compiler from reading shared memory more than once as
> an optimization.
This definitely makes sense.
>
> Signed-off-by: Jens Wiklander <jens.wiklander@linaro.org>
Reviewed-by: Bertrand Marquis <bertrand.marquis@arm.com>
Cheers
Bertrand