With all paths into hvmemul_linear_mmio_access() coming through
linear_{read,write}(), there's no need anymore to split accesses at
page boundaries there. Leave an assertion, though.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Replace ASSERT() by a safer construct.
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1084,7 +1084,7 @@ static int hvmemul_linear_mmio_access(
{
struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
unsigned long offset = gla & ~PAGE_MASK;
- unsigned int chunk, buffer_offset = gla - start;
+ unsigned int buffer_offset = gla - start;
struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(hvio, start, dir,
buffer_offset);
paddr_t gpa;
@@ -1094,13 +1094,17 @@ static int hvmemul_linear_mmio_access(
if ( cache == NULL )
return X86EMUL_UNHANDLEABLE;
- chunk = min_t(unsigned int, size, PAGE_SIZE - offset);
+ if ( size > PAGE_SIZE - offset )
+ {
+ ASSERT_UNREACHABLE();
+ return X86EMUL_UNHANDLEABLE;
+ }
if ( known_gpfn )
gpa = pfn_to_paddr(hvio->mmio_gpfn) | offset;
else
{
- rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
+ rc = hvmemul_linear_to_phys(gla, &gpa, size, &one_rep, pfec,
hvmemul_ctxt);
if ( rc != X86EMUL_OKAY )
return rc;
@@ -1108,27 +1112,8 @@ static int hvmemul_linear_mmio_access(
latch_linear_to_phys(hvio, gla, gpa, dir == IOREQ_WRITE);
}
- for ( ;; )
- {
- rc = hvmemul_phys_mmio_access(cache, gpa, chunk, dir, buffer, buffer_offset);
- if ( rc != X86EMUL_OKAY )
- break;
-
- gla += chunk;
- buffer_offset += chunk;
- size -= chunk;
-
- if ( size == 0 )
- break;
-
- chunk = min_t(unsigned int, size, PAGE_SIZE);
- rc = hvmemul_linear_to_phys(gla, &gpa, chunk, &one_rep, pfec,
- hvmemul_ctxt);
- if ( rc != X86EMUL_OKAY )
- return rc;
- }
-
- return rc;
+ return hvmemul_phys_mmio_access(cache, gpa, size, dir, buffer,
+ buffer_offset);
}
static inline int hvmemul_linear_mmio_read(
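[For illustration only, not part of the patch: a minimal sketch of the caller-side splitting that makes the page-crossing case above unreachable. The identifiers (split_linear_access, do_single_page_access, PAGE_SIZE_4K) are assumptions made up for the example and are not the actual Xen symbols used by linear_{read,write}().]

/*
 * Illustrative sketch only: shows a caller splitting an access at page
 * boundaries before handing each piece to a helper that must only ever
 * see a within-page access.
 */
#include <stdint.h>

#define PAGE_SIZE_4K 4096u

/* Stand-in for a helper that must only ever see a within-page access. */
static int do_single_page_access(uintptr_t gla, void *buf, unsigned int size)
{
    unsigned int offset = gla & (PAGE_SIZE_4K - 1);

    if ( size > PAGE_SIZE_4K - offset )
        return -1;                 /* mirrors the ASSERT_UNREACHABLE() path */

    /* ... perform the access for [gla, gla + size) ... */
    return 0;
}

/* Caller splits at page boundaries, so the helper never sees a crossing. */
static int split_linear_access(uintptr_t gla, void *buf, unsigned int size)
{
    while ( size )
    {
        unsigned int chunk = PAGE_SIZE_4K - (gla & (PAGE_SIZE_4K - 1));
        int rc;

        if ( chunk > size )
            chunk = size;

        rc = do_single_page_access(gla, buf, chunk);
        if ( rc )
            return rc;

        gla += chunk;
        buf = (char *)buf + chunk;
        size -= chunk;
    }

    return 0;
}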
On Tue, Oct 01, 2024 at 10:50:25AM +0200, Jan Beulich wrote:
> With all paths into hvmemul_linear_mmio_access() coming through
> linear_{read,write}(), there's no need anymore to split accesses at
> page boundaries there. Leave an assertion, though.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Replace ASSERT() by a safer construct.
>
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -1084,7 +1084,7 @@ static int hvmemul_linear_mmio_access(
> {
> struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
> unsigned long offset = gla & ~PAGE_MASK;
> - unsigned int chunk, buffer_offset = gla - start;
> + unsigned int buffer_offset = gla - start;
> struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(hvio, start, dir,
> buffer_offset);
> paddr_t gpa;
> @@ -1094,13 +1094,17 @@ static int hvmemul_linear_mmio_access(
> if ( cache == NULL )
> return X86EMUL_UNHANDLEABLE;
>
> - chunk = min_t(unsigned int, size, PAGE_SIZE - offset);
> + if ( size > PAGE_SIZE - offset )
FWIW, I find this easier to read as `size + offset > PAGE_SIZE` (which
is the same condition as used in linear_{read,write}()).
Preferably with that adjusted:
Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Thanks, Roger.
On 23.01.2025 10:01, Roger Pau Monné wrote:
> On Tue, Oct 01, 2024 at 10:50:25AM +0200, Jan Beulich wrote:
>> --- a/xen/arch/x86/hvm/emulate.c
>> +++ b/xen/arch/x86/hvm/emulate.c
>> @@ -1084,7 +1084,7 @@ static int hvmemul_linear_mmio_access(
>> {
>> struct hvm_vcpu_io *hvio = &current->arch.hvm.hvm_io;
>> unsigned long offset = gla & ~PAGE_MASK;
>> - unsigned int chunk, buffer_offset = gla - start;
>> + unsigned int buffer_offset = gla - start;
>> struct hvm_mmio_cache *cache = hvmemul_find_mmio_cache(hvio, start, dir,
>> buffer_offset);
>> paddr_t gpa;
>> @@ -1094,13 +1094,17 @@ static int hvmemul_linear_mmio_access(
>> if ( cache == NULL )
>> return X86EMUL_UNHANDLEABLE;
>>
>> - chunk = min_t(unsigned int, size, PAGE_SIZE - offset);
>> + if ( size > PAGE_SIZE - offset )
>
> FWIW, I find this easier to read as `size + offset > PAGE_SIZE` (which
> is the same condition as used in linear_{read,write}()).
Hmm, yes, considering that "size" here is exactly what "bytes" is there, making
the change is fine. In general, though, I prefer the form as written above, as it
is more obviously safe against overflow (taking into account how "offset" is
calculated).
> Preferably with that adjusted:
>
> Acked-by: Roger Pau Monné <roger.pau@citrix.com>
Thanks.
Jan