Fixes the associated warning when building on macOS.
Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
---
target/i386/hvf/x86_task.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c
index f09bfbdda5b..cdea2ea69d9 100644
--- a/target/i386/hvf/x86_task.c
+++ b/target/i386/hvf/x86_task.c
@@ -122,7 +122,6 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
     load_regs(cpu);

     struct x86_segment_descriptor curr_tss_desc, next_tss_desc;
-    int ret;
     x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR);
     uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR);
     uint32_t desc_limit;
@@ -138,7 +137,7 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
     if (reason == TSR_IDT_GATE && gate_valid) {
         int dpl;

-        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
+        x86_read_call_gate(cpu, &task_gate_desc, gate);

         dpl = task_gate_desc.dpl;
         x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS);
@@ -167,11 +166,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
         x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel);
     }

-    if (next_tss_desc.type & 8)
-        ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
-    else
+    if (next_tss_desc.type & 8) {
+        task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
+    } else {
         //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc);
         VM_PANIC("task_switch_16");
+    }

     macvm_set_cr0(cpu->accel->fd, rvmcs(cpu->accel->fd, VMCS_GUEST_CR0) |
                   CR0_TS_MASK);
--
2.39.5
Queued, thanks.

Paolo
On 11/19/24 09:34, Paolo Bonzini wrote:
> Queued, thanks.
>
> Paolo

Thanks for pulling it, Paolo.
On 10/23/24 11:29, Pierrick Bouvier wrote:
> Fixes the associated warning when building on macOS.
>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  target/i386/hvf/x86_task.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
> [...]

Gentle ping on this patch, hopefully it could be merged for 9.2.
Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:

> Fixes the associated warning when building on macOS.
>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  target/i386/hvf/x86_task.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)
>
> [...]
>
> @@ -138,7 +137,7 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea
>      if (reason == TSR_IDT_GATE && gate_valid) {
>          int dpl;
>
> -        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
> +        x86_read_call_gate(cpu, &task_gate_desc, gate);

If we don't care about the result and this is the only caller, we could
fix up x86_read_call_gate not to return a value. It looks like it fails
safe with an empty entry (but the printf in x86_read_call_gate also
needs removing).

> [...]

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro
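A hedged sketch of what that suggestion might look like at the declaration;
the parameter types (struct x86_call_gate, int gate) are assumptions inferred
from the call site in the patch, not the actual QEMU declaration:

    /*
     * Hypothetical void variant following the suggestion above: drop the
     * return value, since vmx_handle_task_switch() is the only caller and
     * never checks it. Parameter types are assumed, not copied from the
     * real header.
     */
    void x86_read_call_gate(CPUState *cpu, struct x86_call_gate *gate_desc,
                            int gate);

    /* The call site in vmx_handle_task_switch() then stays as in the patch: */
    x86_read_call_gate(cpu, &task_gate_desc, gate);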
On 10/23/24 12:53, Alex Bennée wrote:
> Pierrick Bouvier <pierrick.bouvier@linaro.org> writes:
>
>> [...]
>>
>> -        ret = x86_read_call_gate(cpu, &task_gate_desc, gate);
>> +        x86_read_call_gate(cpu, &task_gate_desc, gate);
>
> If we don't care about the result and this is the only caller, we could
> fix up x86_read_call_gate not to return a value. It looks like it fails
> safe with an empty entry (but the printf in x86_read_call_gate also
> needs removing).

Yes, or maybe we should check whether the read succeeded and call
VM_PANIC("x86_read_call_gate") if it did not.

Any advice from hvf maintainers?
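A minimal sketch of that alternative, assuming x86_read_call_gate() reports
failure through a false/zero return value (an assumption, not verified
against the current helper):

    /*
     * Alternative floated above: keep using the result and panic if the
     * call gate cannot be read, instead of discarding it.
     * Assumes the helper returns false/0 on failure.
     */
    if (!x86_read_call_gate(cpu, &task_gate_desc, gate)) {
        VM_PANIC("x86_read_call_gate");
    }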
On 10/23/24 13:04, Pierrick Bouvier wrote:
> On 10/23/24 12:53, Alex Bennée wrote:
>> [...]
>> If we don't care about the result and this is the only caller, we could
>> fix up x86_read_call_gate not to return a value. It looks like it fails
>> safe with an empty entry (but the printf in x86_read_call_gate also
>> needs removing).
>
> Yes, or maybe we should check whether the read succeeded and call
> VM_PANIC("x86_read_call_gate") if it did not.
>
> Any advice from hvf maintainers?

We haven't had any feedback yet. Would it be possible for a maintainer
to pull the current series as it is? It is functionally equivalent and
removes this warning, giving a clean 9.2 release on this platform.
On 10/23/24 11:29, Pierrick Bouvier wrote:
> Fixes the associated warning when building on macOS.
>
> Signed-off-by: Pierrick Bouvier <pierrick.bouvier@linaro.org>
> ---
>  target/i386/hvf/x86_task.c | 10 +++++-----
>  1 file changed, 5 insertions(+), 5 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~

> [...]