:p
atchew
Login
A guest may ask a memory-mapped device to perform DMA. If the address specified for DMA is the device performing DMA, it will create recursion. It is very unlikely that device implementations are prepared for such an abnormal access, which can result in unpredictable behavior. In particular, such a recursion breaks e1000e, a network device. If the device is configured to write the received packet to the register to trigger receiving, it triggers re-entry to the Rx logic of e1000e. This causes use-after-free since the Rx logic is not re-entrant. As there should be no valid reason to perform recursive memory access, check for recursion before accessing memory-mapped device. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1543 Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com> --- softmmu/memory.c | 79 +++++++++++++++++++++++++++++++++++++----------- 1 file changed, 62 insertions(+), 17 deletions(-) diff --git a/softmmu/memory.c b/softmmu/memory.c index XXXXXXX..XXXXXXX 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -XXX,XX +XXX,XX @@ static QTAILQ_HEAD(, AddressSpace) address_spaces static GHashTable *flat_views; +static const Object **accessed_region_owners; +static size_t accessed_region_owners_capacity; +static size_t accessed_region_owners_num; + typedef struct AddrRange AddrRange; /* @@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr, return false; } + for (size_t i = 0; i < accessed_region_owners_num; i++) { + if (accessed_region_owners[i] == mr->owner) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: recursive access\n", + is_write ? 
"write" : "read", + addr, size, memory_region_name(mr)); + return false; + } + } + /* Treat zero as compatibility all valid */ if (!mr->ops->valid.max_access_size) { return true; @@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr, return true; } +static bool memory_region_access_start(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool is_write, + MemTxAttrs attrs) +{ + if (!memory_region_access_valid(mr, addr, size, is_write, attrs)) { + return false; + } + + accessed_region_owners_num++; + if (accessed_region_owners_num > accessed_region_owners_capacity) { + accessed_region_owners_capacity = accessed_region_owners_num; + accessed_region_owners = g_realloc_n(accessed_region_owners, + accessed_region_owners_capacity, + sizeof(*accessed_region_owners)); + } + + accessed_region_owners[accessed_region_owners_num - 1] = mr->owner; + + return true; +} + +static void memory_region_access_end(void) +{ + accessed_region_owners_num--; +} + static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, hwaddr addr, uint64_t *pval, @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, mr->alias_offset + addr, pval, op, attrs); } - if (!memory_region_access_valid(mr, addr, size, false, attrs)) { + if (!memory_region_access_start(mr, addr, size, false, attrs)) { *pval = unassigned_mem_read(mr, addr, size); return MEMTX_DECODE_ERROR; } r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); + memory_region_access_end(); adjust_endianness(mr, pval, op); return r; } @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, MemTxAttrs attrs) { unsigned size = memop_size(op); + MemTxResult result; if (mr->alias) { return memory_region_dispatch_write(mr->alias, mr->alias_offset + addr, data, op, attrs); } - if (!memory_region_access_valid(mr, addr, size, true, attrs)) { + if (!memory_region_access_start(mr, addr, size, true, attrs)) { unassigned_mem_write(mr, addr, data, size); return 
MEMTX_DECODE_ERROR; } @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, if ((!kvm_eventfds_enabled()) && memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { - return MEMTX_OK; - } - - if (mr->ops->write) { - return access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_accessor, mr, - attrs); + result = MEMTX_OK; + } else if (mr->ops->write) { + result = access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr, + attrs); } else { - return - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_with_attrs_accessor, - mr, attrs); + result = access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_with_attrs_accessor, + mr, attrs); } + + memory_region_access_end(); + + return result; } void memory_region_init_io(MemoryRegion *mr, -- 2.39.2
A guest may ask a memory-mapped device to perform DMA. If the address specified for DMA is the device performing DMA, it will create recursion. It is very unlikely that device implementations are prepared for such an abnormal access, which can result in unpredictable behavior. In particular, such a recursion breaks e1000e, a network device. If the device is configured to write the received packet to the register to trigger receiving, it triggers re-entry to the Rx logic of e1000e. This causes use-after-free since the Rx logic is not re-entrant. As there should be no valid reason to perform recursive memory access, check for recursion before accessing memory-mapped device. Resolves: https://gitlab.com/qemu-project/qemu/-/issues/1543 Signed-off-by: Akihiko Odaki <akihiko.odaki@daynix.com> --- V1 -> V2: Marked the variable thread-local. Introduced linked list. softmmu/memory.c | 81 ++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 64 insertions(+), 17 deletions(-) diff --git a/softmmu/memory.c b/softmmu/memory.c index XXXXXXX..XXXXXXX 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -XXX,XX +XXX,XX @@ struct AddrRange { Int128 size; }; +typedef struct AccessedRegion AccessedRegion; + +struct AccessedRegion { + const Object *owner; + const AccessedRegion *next; +}; + +static __thread const AccessedRegion *accessed_region; + static AddrRange addrrange_make(Int128 start, Int128 size) { return (AddrRange) { start, size }; } @@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr, return false; } + for (const AccessedRegion *ar = accessed_region; ar; ar = ar->next) { + if (ar->owner == mr->owner) { + qemu_log_mask(LOG_GUEST_ERROR, "Invalid %s at addr 0x%" HWADDR_PRIX + ", size %u, region '%s', reason: recursive access\n", + is_write ? 
"write" : "read", + addr, size, memory_region_name(mr)); + return false; + } + } + /* Treat zero as compatibility all valid */ if (!mr->ops->valid.max_access_size) { return true; @@ -XXX,XX +XXX,XX @@ bool memory_region_access_valid(MemoryRegion *mr, return true; } +static bool memory_region_access_start(MemoryRegion *mr, + hwaddr addr, + unsigned size, + bool is_write, + MemTxAttrs attrs, + AccessedRegion *ar) +{ + if (!memory_region_access_valid(mr, addr, size, is_write, attrs)) { + return false; + } + + ar->owner = mr->owner; + ar->next = accessed_region; + accessed_region = ar; + + return true; +} + +static void memory_region_access_end(void) +{ + accessed_region = accessed_region->next; +} + static MemTxResult memory_region_dispatch_read1(MemoryRegion *mr, hwaddr addr, uint64_t *pval, @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, MemTxAttrs attrs) { unsigned size = memop_size(op); + AccessedRegion ar; MemTxResult r; if (mr->alias) { @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_read(MemoryRegion *mr, mr->alias_offset + addr, pval, op, attrs); } - if (!memory_region_access_valid(mr, addr, size, false, attrs)) { + if (!memory_region_access_start(mr, addr, size, false, attrs, &ar)) { *pval = unassigned_mem_read(mr, addr, size); return MEMTX_DECODE_ERROR; } r = memory_region_dispatch_read1(mr, addr, pval, size, attrs); + memory_region_access_end(); adjust_endianness(mr, pval, op); return r; } @@ -XXX,XX +XXX,XX @@ MemTxResult memory_region_dispatch_write(MemoryRegion *mr, MemTxAttrs attrs) { unsigned size = memop_size(op); + AccessedRegion ar; + MemTxResult result; if (mr->alias) { return memory_region_dispatch_write(mr->alias, mr->alias_offset + addr, data, op, attrs); } - if (!memory_region_access_valid(mr, addr, size, true, attrs)) { + if (!memory_region_access_start(mr, addr, size, true, attrs, &ar)) { unassigned_mem_write(mr, addr, data, size); return MEMTX_DECODE_ERROR; } @@ -XXX,XX +XXX,XX @@ MemTxResult 
memory_region_dispatch_write(MemoryRegion *mr, if ((!kvm_eventfds_enabled()) && memory_region_dispatch_write_eventfds(mr, addr, data, size, attrs)) { - return MEMTX_OK; - } - - if (mr->ops->write) { - return access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_accessor, mr, - attrs); + result = MEMTX_OK; + } else if (mr->ops->write) { + result = access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_accessor, mr, + attrs); } else { - return - access_with_adjusted_size(addr, &data, size, - mr->ops->impl.min_access_size, - mr->ops->impl.max_access_size, - memory_region_write_with_attrs_accessor, - mr, attrs); + result = access_with_adjusted_size(addr, &data, size, + mr->ops->impl.min_access_size, + mr->ops->impl.max_access_size, + memory_region_write_with_attrs_accessor, + mr, attrs); } + + memory_region_access_end(); + + return result; } void memory_region_init_io(MemoryRegion *mr, -- 2.39.2