If a blob provides a reset PSW then we should use it instead of
branching to the PSW address and using our own mask.
Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
---
pc-bios/s390-ccw/bootmap.c | 1 +
pc-bios/s390-ccw/jump2ipl.c | 48 ++++++++++++++++++++++---------------
pc-bios/s390-ccw/s390-ccw.h | 1 +
3 files changed, 31 insertions(+), 19 deletions(-)
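Note for reviewers (illustration only, not part of the patch): the load PSW
that zipl stores in the component entry is a short-format PSW, i.e. the mask
and control bits sit in the upper part of the doubleword and the 31-bit entry
address in the low bits covered by PSW_MASK_SHORT_ADDR. Roughly:

    /* Illustration only: how the short-format load PSW splits up */
    uint64_t psw  = entry->compdat.load_psw;
    uint64_t addr = psw & PSW_MASK_SHORT_ADDR;    /* 31-bit entry address */
    uint64_t mask = psw & ~PSW_MASK_SHORT_ADDR;   /* mask from the blob   */

So far only addr was used and the bios substituted its own mask; with this
patch the complete PSW is stored at address 0 via write_reset_psw() and
loaded with lpsw, so the mask from the blob is preserved.
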
diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c
index 8747c4ea26..0df9b3781d 100644
--- a/pc-bios/s390-ccw/bootmap.c
+++ b/pc-bios/s390-ccw/bootmap.c
@@ -515,6 +515,7 @@ static void zipl_run(ScsiBlockPtr *pte)
IPL_assert(entry->component_type == ZIPL_COMP_ENTRY_EXEC, "No EXEC entry");
/* should not return */
+ write_reset_psw(entry->compdat.load_psw);
jump_to_IPL_code(entry->compdat.load_psw & PSW_MASK_SHORT_ADDR);
}
diff --git a/pc-bios/s390-ccw/jump2ipl.c b/pc-bios/s390-ccw/jump2ipl.c
index 767012bf0c..143d027bf7 100644
--- a/pc-bios/s390-ccw/jump2ipl.c
+++ b/pc-bios/s390-ccw/jump2ipl.c
@@ -13,20 +13,28 @@
#define KERN_IMAGE_START 0x010000UL
#define RESET_PSW_MASK (PSW_MASK_SHORTPSW | PSW_MASK_64)
-typedef struct ResetInfo {
- uint64_t ipl_psw;
- uint32_t ipl_continue;
-} ResetInfo;
-
-static ResetInfo save;
+uint64_t *reset_psw = 0, save_psw, ipl_continue;
static void jump_to_IPL_2(void)
{
- ResetInfo *current = 0;
+ /* Restore reset PSW and io and external new PSWs */
+ *reset_psw = save_psw;
- void (*ipl)(void) = (void *) (uint64_t) current->ipl_continue;
- *current = save;
- ipl(); /* should not return */
+ /* No reset PSW, let's jump instead. */
+ if (ipl_continue) {
+ void (*ipl)(void) = (void *) (uint64_t) ipl_continue;
+ ipl();
+ }
+
+ /* Reset PSW available, let's load it */
+ asm volatile ("lpsw 0(%0)\n"
+ : : "a" (0):);
+ /* should not return */
+}
+
+void write_reset_psw(uint64_t psw)
+{
+ *reset_psw = psw;
}
void jump_to_IPL_code(uint64_t address)
@@ -46,15 +54,12 @@ void jump_to_IPL_code(uint64_t address)
* content of non-BIOS memory after we loaded the guest, so we
* save the original content and restore it in jump_to_IPL_2.
*/
- ResetInfo *current = 0;
+ save_psw = *reset_psw;
+ *reset_psw = (uint64_t) &jump_to_IPL_2;
+ *reset_psw |= RESET_PSW_MASK;
+ ipl_continue = address;
- save = *current;
-
- current->ipl_psw = (uint64_t) &jump_to_IPL_2;
- current->ipl_psw |= RESET_PSW_MASK;
- current->ipl_continue = address & PSW_MASK_SHORT_ADDR;
-
- debug_print_int("set IPL addr to", current->ipl_continue);
+ debug_print_int("set IPL addr to", ipl_continue);
/* Ensure the guest output starts fresh */
sclp_print("\n");
@@ -84,7 +89,12 @@ void jump_to_low_kernel(void)
/* Trying to get PSW at zero address */
if (*((uint64_t *)0) & RESET_PSW_MASK) {
- jump_to_IPL_code((*((uint64_t *)0)) & PSW_MASK_SHORT_ADDR);
+ /*
+ * Surely nobody will try running directly from lowcore, so
+ * let's use 0 as an indication that we want to load the reset
+ * psw at 0x0 and not jump to the entry.
+ */
+ jump_to_IPL_code(0);
}
/* No other option left, so use the Linux kernel start address */
diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h
index 36b884cced..7090720422 100644
--- a/pc-bios/s390-ccw/s390-ccw.h
+++ b/pc-bios/s390-ccw/s390-ccw.h
@@ -78,6 +78,7 @@ int virtio_read(ulong sector, void *load_addr);
void zipl_load(void);
/* jump2ipl.c */
+void write_reset_psw(uint64_t psw);
void jump_to_IPL_code(uint64_t address);
void jump_to_low_kernel(void);
--
2.25.1
On 27/08/2020 11.31, Janosch Frank wrote:
> If a blob provides a reset PSW then we should use it instead of
> branching to the PSW address and using our own mask.
>
> Signed-off-by: Janosch Frank <frankja@linux.ibm.com>
> ---
> pc-bios/s390-ccw/bootmap.c | 1 +
> pc-bios/s390-ccw/jump2ipl.c | 48 ++++++++++++++++++++++---------------
> pc-bios/s390-ccw/s390-ccw.h | 1 +
> 3 files changed, 31 insertions(+), 19 deletions(-)
>
> diff --git a/pc-bios/s390-ccw/bootmap.c b/pc-bios/s390-ccw/bootmap.c
> index 8747c4ea26..0df9b3781d 100644
> --- a/pc-bios/s390-ccw/bootmap.c
> +++ b/pc-bios/s390-ccw/bootmap.c
> @@ -515,6 +515,7 @@ static void zipl_run(ScsiBlockPtr *pte)
> IPL_assert(entry->component_type == ZIPL_COMP_ENTRY_EXEC, "No EXEC entry");
>
> /* should not return */
> + write_reset_psw(entry->compdat.load_psw);
> jump_to_IPL_code(entry->compdat.load_psw & PSW_MASK_SHORT_ADDR);
Shouldn't that be jump_to_IPL_code(0) now? Wouldn't it be cleaner to
have a jump_to_IPL_PSW() function instead?
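Something like this maybe (just a quick sketch, name and details up for
discussion):

    /* Possible helper in jump2ipl.c: hand the complete PSW from the blob
     * to the bios and let jump_to_IPL_2() load it via lpsw. */
    void jump_to_IPL_PSW(uint64_t psw)
    {
        write_reset_psw(psw);
        jump_to_IPL_code(0);   /* 0 means "load the reset PSW, don't branch" */
    }

Then zipl_run() would not need any PSW_MASK_SHORT_ADDR masking here at all.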
> }
>
> diff --git a/pc-bios/s390-ccw/jump2ipl.c b/pc-bios/s390-ccw/jump2ipl.c
> index 767012bf0c..143d027bf7 100644
> --- a/pc-bios/s390-ccw/jump2ipl.c
> +++ b/pc-bios/s390-ccw/jump2ipl.c
> @@ -13,20 +13,28 @@
> #define KERN_IMAGE_START 0x010000UL
> #define RESET_PSW_MASK (PSW_MASK_SHORTPSW | PSW_MASK_64)
>
> -typedef struct ResetInfo {
> - uint64_t ipl_psw;
> - uint32_t ipl_continue;
> -} ResetInfo;
> -
> -static ResetInfo save;
> +uint64_t *reset_psw = 0, save_psw, ipl_continue;
I think the patch would be more readable if you split it in two:
first the ResetInfo rework, then the use-the-reset-PSW-if-available part?
> static void jump_to_IPL_2(void)
> {
> - ResetInfo *current = 0;
> + /* Restore reset PSW and io and external new PSWs */
> + *reset_psw = save_psw;
The comment talks about the I/O and external new PSWs ... but where is
that in the code?
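If you really want to restore those as well, I'd expect something along
these lines to be needed (untested sketch; 0x1b0 and 0x1f0 are the
z/Architecture lowcore offsets of the external and I/O new PSWs):

    /* Sketch only: save/restore the external and I/O new PSWs in
     * addition to the reset PSW at address 0. */
    static uint8_t save_ext_new_psw[16], save_io_new_psw[16];

    static void save_new_psws(void)
    {
        memcpy(save_ext_new_psw, (void *)0x1b0, sizeof(save_ext_new_psw));
        memcpy(save_io_new_psw,  (void *)0x1f0, sizeof(save_io_new_psw));
    }

    static void restore_new_psws(void)
    {
        memcpy((void *)0x1b0, save_ext_new_psw, sizeof(save_ext_new_psw));
        memcpy((void *)0x1f0, save_io_new_psw,  sizeof(save_io_new_psw));
    }

Otherwise it would be better to just drop them from the comment.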
> - void (*ipl)(void) = (void *) (uint64_t) current->ipl_continue;
> - *current = save;
> - ipl(); /* should not return */
> + /* No reset PSW, let's jump instead. */
> + if (ipl_continue) {
> + void (*ipl)(void) = (void *) (uint64_t) ipl_continue;
Is it possible to mark a function pointer with __attribute__((noreturn))?
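Or, if that does not work, a __builtin_unreachable() right after the call
should give the compiler the same information (untested sketch):

        if (ipl_continue) {
            void (*ipl)(void) = (void *) (uint64_t) ipl_continue;
            ipl();
            __builtin_unreachable();    /* ipl() never comes back */
        }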
> + ipl();
> + }
> +
> + /* Reset PSW available, let's load it */
> + asm volatile ("lpsw 0(%0)\n"
> + : : "a" (0):);
I've never tried it, but maybe you could add __attribute__((noreturn)) to
an inline asm statement, too?
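Or, if attributes cannot be attached to an asm statement, a
__builtin_unreachable() right after it should have the same effect
(untested sketch):

        /* Reset PSW available, let's load it */
        asm volatile ("lpsw 0(%0)\n"
                      : : "a" (0));
        __builtin_unreachable();    /* lpsw never falls through */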
> + /* should not return */
> +}
> +
> +void write_reset_psw(uint64_t psw)
> +{
> + *reset_psw = psw;
> }
>
> void jump_to_IPL_code(uint64_t address)
> @@ -46,15 +54,12 @@ void jump_to_IPL_code(uint64_t address)
> * content of non-BIOS memory after we loaded the guest, so we
> * save the original content and restore it in jump_to_IPL_2.
> */
> - ResetInfo *current = 0;
> + save_psw = *reset_psw;
> + *reset_psw = (uint64_t) &jump_to_IPL_2;
> + *reset_psw |= RESET_PSW_MASK;
> + ipl_continue = address;
>
> - save = *current;
> -
> - current->ipl_psw = (uint64_t) &jump_to_IPL_2;
> - current->ipl_psw |= RESET_PSW_MASK;
> - current->ipl_continue = address & PSW_MASK_SHORT_ADDR;
> -
> - debug_print_int("set IPL addr to", current->ipl_continue);
> + debug_print_int("set IPL addr to", ipl_continue);
>
> /* Ensure the guest output starts fresh */
> sclp_print("\n");
> @@ -84,7 +89,12 @@ void jump_to_low_kernel(void)
>
> /* Trying to get PSW at zero address */
> if (*((uint64_t *)0) & RESET_PSW_MASK) {
> - jump_to_IPL_code((*((uint64_t *)0)) & PSW_MASK_SHORT_ADDR);
> + /*
> + * Surely nobody will try running directly from lowcore, so
> + * let's use 0 as an indication that we want to load the reset
> + * psw at 0x0 and not jump to the entry.
> + */
> + jump_to_IPL_code(0);
> }
>
> /* No other option left, so use the Linux kernel start address */
> diff --git a/pc-bios/s390-ccw/s390-ccw.h b/pc-bios/s390-ccw/s390-ccw.h
> index 36b884cced..7090720422 100644
> --- a/pc-bios/s390-ccw/s390-ccw.h
> +++ b/pc-bios/s390-ccw/s390-ccw.h
> @@ -78,6 +78,7 @@ int virtio_read(ulong sector, void *load_addr);
> void zipl_load(void);
>
> /* jump2ipl.c */
> +void write_reset_psw(uint64_t psw);
> void jump_to_IPL_code(uint64_t address);
> void jump_to_low_kernel(void);
>
>
Thomas