Handle combined host and guest alignment requirements.
Handle host and guest page size differences.
Handle SHM_EXEC.
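
For illustration only (not part of the change itself; the SHMLBA and address
values below are hypothetical), a minimal standalone sketch of the combined
guest/host rounding this patch introduces, i.e. rounding the requested attach
address against the larger of the two alignment requirements:

    /*
     * Illustration only -- the constants are hypothetical; the real code
     * queries the guest SHMLBA from the CPU and the page size from the host.
     */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    int main(void)
    {
        uint64_t t_shmlba = 0x4000;      /* guest SHMLBA (hypothetical) */
        uint64_t h_shmlba = 0x1000;      /* host page size (hypothetical) */
        uint64_t m_shmlba = MAX(t_shmlba, h_shmlba);
        uint64_t shmaddr  = 0x80001234;  /* requested attach address */

        /* SHM_RND-style rounding against the combined requirement. */
        uint64_t rounded = shmaddr & ~(m_shmlba - 1);

        assert((rounded & (m_shmlba - 1)) == 0);
        printf("requested 0x%" PRIx64 " -> rounded 0x%" PRIx64 "\n",
               shmaddr, rounded);
        return 0;
    }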
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/115
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
linux-user/mmap.c | 146 ++++++++++++++++++++++++++++++++++------------
1 file changed, 110 insertions(+), 36 deletions(-)
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 82f4026283..29421cfab0 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -1236,69 +1236,143 @@ static inline abi_ulong target_shmlba(CPUArchState *cpu_env)
}
#endif
+#if defined(__arm__) || defined(__mips__) || defined(__sparc__)
+#define HOST_FORCE_SHMLBA 1
+#else
+#define HOST_FORCE_SHMLBA 0
+#endif
+
abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
abi_ulong shmaddr, int shmflg)
{
CPUState *cpu = env_cpu(cpu_env);
- abi_ulong raddr;
struct shmid_ds shm_info;
int ret;
- abi_ulong shmlba;
+ int h_pagesize;
+ int t_shmlba, h_shmlba, m_shmlba;
+ size_t t_len, h_len, m_len;
/* shmat pointers are always untagged */
- /* find out the length of the shared memory segment */
+ /*
+ * Because we can't use host shmat() unless the address is sufficiently
+ * aligned for the host, we'll need to check both.
+ * TODO: Could be fixed with softmmu.
+ */
+ t_shmlba = target_shmlba(cpu_env);
+ h_pagesize = qemu_real_host_page_size();
+ h_shmlba = (HOST_FORCE_SHMLBA ? SHMLBA : h_pagesize);
+ m_shmlba = MAX(t_shmlba, h_shmlba);
+
+ if (shmaddr) {
+ if (shmaddr & (m_shmlba - 1)) {
+ if (shmflg & SHM_RND) {
+ /*
+ * The guest is allowing the kernel to round the address.
+ * Assume that the guest is ok with us rounding to the
+ * host required alignment too. Anyway if we don't, we'll
+ * get an error from the kernel.
+ */
+ shmaddr &= ~(m_shmlba - 1);
+ if (shmaddr == 0 && (shmflg & SHM_REMAP)) {
+ return -TARGET_EINVAL;
+ }
+ } else {
+ int require = TARGET_PAGE_SIZE;
+#ifdef TARGET_FORCE_SHMLBA
+ require = t_shmlba;
+#endif
+ /*
+ * Include host required alignment, as otherwise we cannot
+ * use host shmat at all.
+ */
+ require = MAX(require, h_shmlba);
+ if (shmaddr & (require - 1)) {
+ return -TARGET_EINVAL;
+ }
+ }
+ }
+ } else {
+ if (shmflg & SHM_REMAP) {
+ return -TARGET_EINVAL;
+ }
+ }
+ /* All rounding now manually concluded. */
+ shmflg &= ~SHM_RND;
+
+ /* Find out the length of the shared memory segment. */
ret = get_errno(shmctl(shmid, IPC_STAT, &shm_info));
if (is_error(ret)) {
/* can't get length, bail out */
return ret;
}
+ t_len = TARGET_PAGE_ALIGN(shm_info.shm_segsz);
+ h_len = ROUND_UP(shm_info.shm_segsz, h_pagesize);
+ m_len = MAX(t_len, h_len);
- shmlba = target_shmlba(cpu_env);
-
- if (shmaddr & (shmlba - 1)) {
- if (shmflg & SHM_RND) {
- shmaddr &= ~(shmlba - 1);
- } else {
- return -TARGET_EINVAL;
- }
- }
- if (!guest_range_valid_untagged(shmaddr, shm_info.shm_segsz)) {
+ if (!guest_range_valid_untagged(shmaddr, m_len)) {
return -TARGET_EINVAL;
}
WITH_MMAP_LOCK_GUARD() {
- void *host_raddr;
+ void *host_raddr, *test;
abi_ulong last;
- if (shmaddr) {
- host_raddr = shmat(shmid, (void *)g2h_untagged(shmaddr), shmflg);
- } else {
- abi_ulong mmap_start;
-
- /* In order to use the host shmat, we need to honor host SHMLBA. */
- mmap_start = mmap_find_vma(0, shm_info.shm_segsz,
- MAX(SHMLBA, shmlba));
-
- if (mmap_start == -1) {
+ if (!shmaddr) {
+ shmaddr = mmap_find_vma(0, m_len, m_shmlba);
+ if (shmaddr == -1) {
return -TARGET_ENOMEM;
}
- host_raddr = shmat(shmid, g2h_untagged(mmap_start),
- shmflg | SHM_REMAP);
+ } else if (!(shmflg & SHM_REMAP)) {
+ if (!page_check_range_empty(shmaddr, shmaddr + m_len - 1)) {
+ return -TARGET_EINVAL;
+ }
+ } else if (t_len < h_len) {
+ /*
+ * ??? If the host page size is larger than the target page size,
+ * then we might be mapping more pages than the guest expects.
+ * TODO: Could be fixed with softmmu.
+ */
+ if (!page_check_range_empty(shmaddr + t_len, shmaddr + h_len - 1)) {
+ return -TARGET_EINVAL;
+ }
}
- if (host_raddr == (void *)-1) {
- return get_errno(-1);
- }
- raddr = h2g(host_raddr);
- last = raddr + shm_info.shm_segsz - 1;
+ /* All placement now manually completed. */
+ host_raddr = (void *)g2h_untagged(shmaddr);
+ test = shmat(shmid, host_raddr, shmflg | SHM_REMAP);
- page_set_flags(raddr, last,
+ if (test != MAP_FAILED && t_len > h_len) {
+ /*
+ * The target page size is larger than the host page size.
+ * Fill in the final target page with anonymous memory.
+ */
+ void *t2 = mmap(host_raddr + h_len, t_len - h_len,
+ PROT_READ | (shmflg & SHM_RDONLY ? 0 : PROT_WRITE),
+ MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+ if (t2 == MAP_FAILED) {
+ test = MAP_FAILED;
+ }
+ }
+
+ if (test == MAP_FAILED) {
+ ret = get_errno(-1);
+ if (!(shmflg & SHM_REMAP)) {
+ do_munmap(host_raddr, m_len);
+ }
+ return ret;
+ }
+
+ assert(test == host_raddr);
+ last = shmaddr + m_len - 1;
+
+ page_set_flags(shmaddr, last,
PAGE_VALID | PAGE_RESET | PAGE_READ |
- (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE));
+ (shmflg & SHM_RDONLY ? 0 : PAGE_WRITE) |
+ (shmflg & SHM_EXEC ? PAGE_EXEC : 0));
- shm_region_rm_complete(raddr, last);
- shm_region_add(raddr, last);
+ shm_region_rm_complete(shmaddr, last);
+ shm_region_add(shmaddr, last);
}
/*
@@ -1312,7 +1386,7 @@ abi_ulong target_shmat(CPUArchState *cpu_env, int shmid,
tb_flush(cpu);
}
- return raddr;
+ return shmaddr;
}
abi_long target_shmdt(abi_ulong shmaddr)
--
2.34.1
On Thu, Feb 22, 2024 at 05:03:09PM -1000, Richard Henderson wrote:
> Handle combined host and guest alignment requirements.
> Handle host and guest page size differences.
> Handle SHM_EXEC.
>
> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/115
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  linux-user/mmap.c | 146 ++++++++++++++++++++++++++++++++++------------
>  1 file changed, 110 insertions(+), 36 deletions(-)

[...]

> -    /* find out the length of the shared memory segment */
> +    /*
> +     * Because we can't use host shmat() unless the address is sufficiently
> +     * aligned for the host, we'll need to check both.
> +     * TODO: Could be fixed with softmmu.
> +     */

Are there any plans to introduce softmmu to qemu-user?

[...]

Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>

Please consider adding the reproducer to the series:

From 964164ada4de55ac01a56613f7b759e538803fc9 Mon Sep 17 00:00:00 2001
From: Ilya Leoshkevich <iii@linux.ibm.com>
Date: Fri, 23 Feb 2024 12:31:40 +0100
Subject: [PATCH] tests/tcg: Check that shmat() does not break /proc/self/maps

Add a regression test for a recently fixed issue, where shmat() desynced
the guest and the host view of the address space and caused
open("/proc/self/maps") to SEGV.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
---
 tests/tcg/multiarch/linux/linux-shmat-maps.c | 40 ++++++++++++++++++++
 1 file changed, 40 insertions(+)
 create mode 100644 tests/tcg/multiarch/linux/linux-shmat-maps.c

diff --git a/tests/tcg/multiarch/linux/linux-shmat-maps.c b/tests/tcg/multiarch/linux/linux-shmat-maps.c
new file mode 100644
index 00000000000..4090bc77ba7
--- /dev/null
+++ b/tests/tcg/multiarch/linux/linux-shmat-maps.c
@@ -0,0 +1,40 @@
+/*
+ * Test that shmat() does not break /proc/self/maps.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#include <assert.h>
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include <unistd.h>
+
+int main(void)
+{
+    char buf[128];
+    int err, fd;
+    int shmid;
+    ssize_t n;
+    void *p;
+
+    shmid = shmget(IPC_PRIVATE, 0x400, IPC_CREAT | 0600);
+    assert(shmid != -1);
+    p = shmat(shmid, (void *)0x800000, 0);
+    assert(p != (void *)-1);
+
+    fd = open("/proc/self/maps", O_RDONLY);
+    assert(fd != -1);
+    do {
+        n = read(fd, buf, sizeof(buf));
+        assert(n >= 0);
+    } while (n != 0);
+    close(fd);
+
+    err = shmdt(p);
+    assert(err == 0);
+    err = shmctl(shmid, IPC_RMID, NULL);
+    assert(err == 0);
+
+    return EXIT_SUCCESS;
+}
--
2.34.1
On 2/23/24 01:35, Ilya Leoshkevich wrote:
> On Thu, Feb 22, 2024 at 05:03:09PM -1000, Richard Henderson wrote:
>> Handle combined host and guest alignment requirements.
>> Handle host and guest page size differences.
>> Handle SHM_EXEC.
>>
>> Resolves: https://gitlab.com/qemu-project/qemu/-/issues/115
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>  linux-user/mmap.c | 146 ++++++++++++++++++++++++++++++++++------------
>>  1 file changed, 110 insertions(+), 36 deletions(-)
>
> [...]
>
>> -    /* find out the length of the shared memory segment */
>> +    /*
>> +     * Because we can't use host shmat() unless the address is sufficiently
>> +     * aligned for the host, we'll need to check both.
>> +     * TODO: Could be fixed with softmmu.
>> +     */
>
> Are there any plans to introduce softmmu to qemu-user?

Long-term ones, yes.  There are *so* many places for which emulation doesn't
quite work unless the host and guest page size match.  There are projects like
asan which don't work unless the guest has a *very* specific memory layout,
which often conflicts with the host.


r~