Change the semantics of reserved_va to be the last byte of the guest VA,
rather than the following byte. This avoids some overflow conditions.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
include/exec/cpu-all.h | 11 ++++++++++-
linux-user/arm/target_cpu.h | 2 +-
bsd-user/main.c | 10 +++-------
bsd-user/mmap.c | 4 ++--
linux-user/elfload.c | 14 +++++++-------
linux-user/main.c | 27 +++++++++++++--------------
linux-user/mmap.c | 4 ++--
7 files changed, 38 insertions(+), 34 deletions(-)
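
A minimal, self-contained sketch of the new representation (illustrative only;
the helper names and the 32-bit abi_ulong typedef below are made up for the
example and are not QEMU's): a full 4 GiB guest space expressed as a size
needs 33 bits, while its last byte, UINT32_MAX, fits in 32.

/*
 * Illustrative only, not part of the patch.  With a size ("one past the
 * end") representation, a full 4 GiB guest space does not fit in 32 bits;
 * storing the last byte (size - 1) does.
 */
#include <stdint.h>
#include <stdbool.h>

typedef uint32_t abi_ulong;          /* e.g. a 32-bit guest ABI */

static unsigned long reserved_last;  /* stand-in for reserved_va */

/* -R 4G: the size 0x100000000 needs 33 bits ... */
void set_reserved(uint64_t size)
{
    /* ... but its last byte, size - 1 == 0xffffffff, fits; 0 stays "default". */
    reserved_last = size ? size - 1 : 0;
}

/* Check [addr, addr + len) against the last valid guest address. */
bool in_guest_range(abi_ulong addr, abi_ulong len)
{
    return len != 0 &&
           len - 1 <= reserved_last &&
           addr <= reserved_last - (len - 1);
}
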
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 64cb62dc54..090922e4a8 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -152,6 +152,15 @@ static inline void tswap64s(uint64_t *s)
*/
extern uintptr_t guest_base;
extern bool have_guest_base;
+
+/*
+ * If non-zero, the guest virtual address space is a contiguous subset
+ * of the host virtual address space, i.e. '-R reserved_va' is in effect
+ * either from the command-line or by default. The value is the last
+ * byte of the guest address space e.g. UINT32_MAX.
+ *
+ * If zero, the host and guest virtual address spaces are intermingled.
+ */
extern unsigned long reserved_va;
/*
@@ -171,7 +180,7 @@ extern unsigned long reserved_va;
#define GUEST_ADDR_MAX_ \
((MIN_CONST(TARGET_VIRT_ADDR_SPACE_BITS, TARGET_ABI_BITS) <= 32) ? \
UINT32_MAX : ~0ul)
-#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : GUEST_ADDR_MAX_)
+#define GUEST_ADDR_MAX (reserved_va ? : GUEST_ADDR_MAX_)
#else
diff --git a/linux-user/arm/target_cpu.h b/linux-user/arm/target_cpu.h
index 89ba274cfc..f6383a7cd1 100644
--- a/linux-user/arm/target_cpu.h
+++ b/linux-user/arm/target_cpu.h
@@ -30,7 +30,7 @@ static inline unsigned long arm_max_reserved_va(CPUState *cs)
* the high addresses. Restrict linux-user to the
* cached write-back RAM in the system map.
*/
- return 0x80000000ul;
+ return 0x7ffffffful;
} else {
/*
* We need to be able to map the commpage.
diff --git a/bsd-user/main.c b/bsd-user/main.c
index 89f225dead..babc3b009b 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -68,13 +68,9 @@ bool have_guest_base;
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/*
- * There are a number of places where we assign reserved_va to a variable
- * of type abi_ulong and expect it to fit. Avoid the last page.
- */
-# define MAX_RESERVED_VA (0xfffffffful & TARGET_PAGE_MASK)
+# define MAX_RESERVED_VA 0xfffffffful
# else
-# define MAX_RESERVED_VA (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
# endif
# else
# define MAX_RESERVED_VA 0
@@ -466,7 +462,7 @@ int main(int argc, char **argv)
envlist_free(envlist);
if (reserved_va) {
- mmap_next_start = reserved_va;
+ mmap_next_start = reserved_va + 1;
}
{
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 696057551a..565b9f97ed 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -234,7 +234,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
size = HOST_PAGE_ALIGN(size) + alignment;
end_addr = start + size;
if (end_addr > reserved_va) {
- end_addr = reserved_va;
+ end_addr = reserved_va + 1;
}
addr = end_addr - qemu_host_page_size;
@@ -243,7 +243,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
if (looped) {
return (abi_ulong)-1;
}
- end_addr = reserved_va;
+ end_addr = reserved_va + 1;
addr = end_addr - qemu_host_page_size;
looped = 1;
continue;
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index dfae967908..f1370a7a8b 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -208,7 +208,7 @@ static bool init_guest_commpage(void)
* has specified -R reserved_va, which would trigger an assert().
*/
if (reserved_va != 0 &&
- TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE >= reserved_va) {
+ TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE - 1 > reserved_va) {
error_report("Cannot allocate vsyscall page");
exit(EXIT_FAILURE);
}
@@ -2504,7 +2504,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
if (guest_hiaddr > reserved_va) {
error_report("%s: requires more than reserved virtual "
"address space (0x%" PRIx64 " > 0x%lx)",
- image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
+ image_name, (uint64_t)guest_hiaddr, reserved_va);
exit(EXIT_FAILURE);
}
} else {
@@ -2525,7 +2525,7 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
if (reserved_va) {
guest_loaddr = (guest_base >= mmap_min_addr ? 0
: mmap_min_addr - guest_base);
- guest_hiaddr = reserved_va - 1;
+ guest_hiaddr = reserved_va;
}
/* Reserve the address space for the binary, or reserved_va. */
@@ -2755,7 +2755,7 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
if (guest_hiaddr > reserved_va) {
error_report("%s: requires more than reserved virtual "
"address space (0x%" PRIx64 " > 0x%lx)",
- image_name, (uint64_t)guest_hiaddr + 1, reserved_va);
+ image_name, (uint64_t)guest_hiaddr, reserved_va);
exit(EXIT_FAILURE);
}
@@ -2768,17 +2768,17 @@ static void pgb_reserved_va(const char *image_name, abi_ulong guest_loaddr,
/* Reserve the memory on the host. */
assert(guest_base != 0);
test = g2h_untagged(0);
- addr = mmap(test, reserved_va, PROT_NONE, flags, -1, 0);
+ addr = mmap(test, reserved_va + 1, PROT_NONE, flags, -1, 0);
if (addr == MAP_FAILED || addr != test) {
error_report("Unable to reserve 0x%lx bytes of virtual address "
"space at %p (%s) for use as guest address space (check your "
"virtual memory ulimit setting, min_mmap_addr or reserve less "
- "using -R option)", reserved_va, test, strerror(errno));
+ "using -R option)", reserved_va + 1, test, strerror(errno));
exit(EXIT_FAILURE);
}
qemu_log_mask(CPU_LOG_PAGE, "%s: base @ %p for %lu bytes\n",
- __func__, addr, reserved_va);
+ __func__, addr, reserved_va + 1);
}
void probe_guest_base(const char *image_name, abi_ulong guest_loaddr,
diff --git a/linux-user/main.c b/linux-user/main.c
index 39d9bd4d7a..fe03293516 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -109,11 +109,9 @@ static const char *last_log_filename;
# if HOST_LONG_BITS > TARGET_VIRT_ADDR_SPACE_BITS
# if TARGET_VIRT_ADDR_SPACE_BITS == 32 && \
(TARGET_LONG_BITS == 32 || defined(TARGET_ABI32))
-/* There are a number of places where we assign reserved_va to a variable
- of type abi_ulong and expect it to fit. Avoid the last page. */
-# define MAX_RESERVED_VA(CPU) (0xfffffffful & TARGET_PAGE_MASK)
+# define MAX_RESERVED_VA(CPU) 0xfffffffful
# else
-# define MAX_RESERVED_VA(CPU) (1ul << TARGET_VIRT_ADDR_SPACE_BITS)
+# define MAX_RESERVED_VA(CPU) ((1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
# endif
# else
# define MAX_RESERVED_VA(CPU) 0
@@ -379,7 +377,9 @@ static void handle_arg_reserved_va(const char *arg)
{
char *p;
int shift = 0;
- reserved_va = strtoul(arg, &p, 0);
+ unsigned long val;
+
+ val = strtoul(arg, &p, 0);
switch (*p) {
case 'k':
case 'K':
@@ -393,10 +393,10 @@ static void handle_arg_reserved_va(const char *arg)
break;
}
if (shift) {
- unsigned long unshifted = reserved_va;
+ unsigned long unshifted = val;
p++;
- reserved_va <<= shift;
- if (reserved_va >> shift != unshifted) {
+ val <<= shift;
+ if (val >> shift != unshifted) {
fprintf(stderr, "Reserved virtual address too big\n");
exit(EXIT_FAILURE);
}
@@ -405,6 +405,8 @@ static void handle_arg_reserved_va(const char *arg)
fprintf(stderr, "Unrecognised -R size suffix '%s'\n", p);
exit(EXIT_FAILURE);
}
+ /* The representation is size - 1, with 0 remaining "default". */
+ reserved_va = val ? val - 1 : 0;
}
static void handle_arg_singlestep(const char *arg)
@@ -793,7 +795,7 @@ int main(int argc, char **argv, char **envp)
*/
max_reserved_va = MAX_RESERVED_VA(cpu);
if (reserved_va != 0) {
- if (reserved_va % qemu_host_page_size) {
+ if ((reserved_va + 1) % qemu_host_page_size) {
char *s = size_to_str(qemu_host_page_size);
fprintf(stderr, "Reserved virtual address not aligned mod %s\n", s);
g_free(s);
@@ -804,11 +806,8 @@ int main(int argc, char **argv, char **envp)
exit(EXIT_FAILURE);
}
} else if (HOST_LONG_BITS == 64 && TARGET_VIRT_ADDR_SPACE_BITS <= 32) {
- /*
- * reserved_va must be aligned with the host page size
- * as it is used with mmap()
- */
- reserved_va = max_reserved_va & qemu_host_page_mask;
+ /* MAX_RESERVED_VA + 1 is a large power of 2, so is aligned. */
+ reserved_va = max_reserved_va;
}
{
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 995146f60d..0aa8ae7356 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -283,7 +283,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
end_addr = start + size;
if (start > reserved_va - size) {
/* Start at the top of the address space. */
- end_addr = ((reserved_va - size) & -align) + size;
+ end_addr = ((reserved_va + 1 - size) & -align) + size;
looped = true;
}
@@ -297,7 +297,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
return (abi_ulong)-1;
}
/* Re-start at the top of the address space. */
- addr = end_addr = ((reserved_va - size) & -align) + size;
+ addr = end_addr = ((reserved_va + 1 - size) & -align) + size;
looped = true;
} else {
prot = page_get_flags(addr);
--
2.34.1
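
A rough sketch of how consumers of the new representation recover a byte count
when one is needed (the function names and the mmap flags are illustrative,
not QEMU's code; the second helper mirrors the reserved_va + 1 - size form
used in mmap_find_vma_reserved() above):

/*
 * Sketch only.  reserved_va now holds the last guest byte, so code that
 * needs a length (mapping the whole reservation, or the top of a
 * free-region search) adds 1 back.  On a 64-bit host this addition
 * cannot overflow for a 32-bit guest.
 */
#include <stddef.h>
#include <sys/mman.h>

unsigned long reserved_va = 0xfffffffful;   /* last guest byte */

void *reserve_guest_space(void *base)
{
    size_t len = (size_t)reserved_va + 1;   /* last byte -> byte count */

    return mmap(base, len, PROT_NONE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

/*
 * Highest aligned end address whose [end - size, end) window still lies
 * within [0, reserved_va].
 */
unsigned long top_search_end(unsigned long size, unsigned long align)
{
    return ((reserved_va + 1 - size) & -align) + size;
}
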
On 3/29/23 00:58, Richard Henderson wrote:
> Change the semantics of reserved_va to be the last byte of the guest VA,
> rather than the following byte. This avoids some overflow conditions.
> [...]

This patch breaks something.

In LTP (20230127), fcntl36 fails now (all archs):

sudo unshare --time --ipc --uts --pid --fork --kill-child --mount --mount-proc --root chroot/m68k/sid
# /opt/ltp/testcases/bin/fcntl36

tst_test.c:1558: TINFO: Timeout per run is 0h 00m 30s
fcntl36.c:288: TINFO: OFD read lock vs OFD write lock
tst_kernel.c:87: TINFO: uname.machine=m68k kernel is 32bit
fcntl36.c:366: TPASS: Access between threads synchronized
fcntl36.c:288: TINFO: OFD write lock vs POSIX write lock
fcntl36.c:318: TBROK: pthread_create(0x40800330,(nil),0x80004328,0x40800068) failed: EAGAIN/EWOULDBLOCK

Expected result:

tst_test.c:1250: TINFO: Timeout per run is 0h 05m 00s
fcntl36.c:289: TINFO: OFD read lock vs OFD write lock
fcntl36.c:367: TPASS: Access between threads synchronized
fcntl36.c:289: TINFO: OFD write lock vs POSIX write lock
fcntl36.c:367: TPASS: Access between threads synchronized
fcntl36.c:289: TINFO: OFD read lock vs POSIX write lock
fcntl36.c:367: TPASS: Access between threads synchronized
fcntl36.c:289: TINFO: OFD write lock vs POSIX read lock
fcntl36.c:367: TPASS: Access between threads synchronized
fcntl36.c:289: TINFO: OFD write lock vs OFD write lock
fcntl36.c:367: TPASS: Access between threads synchronized
fcntl36.c:289: TINFO: OFD r/w lock vs POSIX write lock
fcntl36.c:367: TPASS: Access between threads synchronized
fcntl36.c:289: TINFO: OFD r/w lock vs POSIX read lock
fcntl36.c:367: TPASS: Access between threads synchronized

Thanks,
Laurent
On 5/11/23 12:48, Laurent Vivier wrote:
> This patch breaks something.
>
> In LTP (20230127), fcntl36 fails now (all archs):
>
> sudo unshare --time --ipc --uts --pid --fork --kill-child --mount --mount-proc --root
> chroot/m68k/sid
> # /opt/ltp/testcases/bin/fcntl36
>
> tst_test.c:1558: TINFO: Timeout per run is 0h 00m 30s
> fcntl36.c:288: TINFO: OFD read lock vs OFD write lock
> tst_kernel.c:87: TINFO: uname.machine=m68k kernel is 32bit
> fcntl36.c:366: TPASS: Access between threads synchronized
> fcntl36.c:288: TINFO: OFD write lock vs POSIX write lock
> fcntl36.c:318: TBROK: pthread_create(0x40800330,(nil),0x80004328,0x40800068) failed:
> EAGAIN/EWOULDBLOCK

Any idea where the failure is coming from? I don't see a matching syscall in -strace...

r~