In preparation for adding more similar checks, move the VEX.L=0 check
and several X86_SPECIAL_* checks to a new field, where each bit
represents a common check on unused bits or a restriction on the
processor mode.  Likewise, many SVM intercepts can be checked during
the decoding phase, the main exceptions being the selective CR0 write,
MSR and IOIO intercepts.
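
As an illustration only (not an entry changed by this patch), a
privileged, intercepted instruction could combine the new chk() and
svm() macros in its table entry roughly like this:

    X86_OP_ENTRY0(HLT, chk(cpl0) svm(HLT))

so that the CPL check and the SVM_EXIT_HLT intercept are both emitted
by the common decode path instead of the gen_* function.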
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
target/i386/tcg/decode-new.c.inc | 79 +++++++++++++++++++++++---------
target/i386/tcg/decode-new.h | 25 +++++++---
target/i386/tcg/emit.c.inc | 8 ----
3 files changed, 76 insertions(+), 36 deletions(-)
diff --git a/target/i386/tcg/decode-new.c.inc b/target/i386/tcg/decode-new.c.inc
index 7d76f152758..790339eaf25 100644
--- a/target/i386/tcg/decode-new.c.inc
+++ b/target/i386/tcg/decode-new.c.inc
@@ -90,8 +90,6 @@
X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
#define cpuid(feat) .cpuid = X86_FEAT_##feat,
-#define i64 .special = X86_SPECIAL_i64,
-#define o64 .special = X86_SPECIAL_o64,
#define xchg .special = X86_SPECIAL_Locked,
#define mmx .special = X86_SPECIAL_MMX,
#define zext0 .special = X86_SPECIAL_ZExtOp0,
@@ -114,6 +112,9 @@
#define vex12 .vex_class = 12,
#define vex13 .vex_class = 13,
+#define chk(a) .check = X86_CHECK_##a,
+#define svm(a) .intercept = SVM_EXIT_##a,
+
#define avx2_256 .vex_special = X86_VEX_AVX2_256,
#define P_00 1
@@ -161,8 +162,8 @@ static void decode_group15(DisasContext *s, CPUX86State *env, X86OpEntry *entry,
};
static const X86OpEntry group15_mem[8] = {
- [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5),
- [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5),
+ [2] = X86_OP_ENTRYr(LDMXCSR, E,d, vex5 chk(VEX128)),
+ [3] = X86_OP_ENTRYw(STMXCSR, E,d, vex5 chk(VEX128)),
};
uint8_t modrm = get_modrm(s, env);
@@ -1579,6 +1580,12 @@ static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
if (s->flags & HF_EM_MASK) {
goto illegal;
}
+
+ if (e->check & X86_CHECK_VEX128) {
+ if (s->vex_l) {
+ goto illegal;
+ }
+ }
return true;
nm_exception:
@@ -1764,6 +1771,25 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
goto illegal_op;
}
+ /* Checks that result in #UD come first. */
+ if (decode.e.check) {
+ if (decode.e.check & X86_CHECK_i64) {
+ if (CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_o64) {
+ if (!CODE64(s)) {
+ goto illegal_op;
+ }
+ }
+ if (decode.e.check & X86_CHECK_prot) {
+ if (!PE(s) || VM86(s)) {
+ goto illegal_op;
+ }
+ }
+ }
+
switch (decode.e.special) {
case X86_SPECIAL_None:
break;
@@ -1774,23 +1800,6 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
}
break;
- case X86_SPECIAL_ProtMode:
- if (!PE(s) || VM86(s)) {
- goto illegal_op;
- }
- break;
-
- case X86_SPECIAL_i64:
- if (CODE64(s)) {
- goto illegal_op;
- }
- break;
- case X86_SPECIAL_o64:
- if (!CODE64(s)) {
- goto illegal_op;
- }
- break;
-
case X86_SPECIAL_ZExtOp0:
assert(decode.op[0].unit == X86_OP_INT);
if (!decode.op[0].has_ea) {
@@ -1820,6 +1829,31 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
if (!validate_vex(s, &decode)) {
return;
}
+
+ /*
+ * Checks that result in #GP or VMEXIT come second. Intercepts are
+ * generally checked after non-memory exceptions (i.e. before all
+ * exceptions if there is no memory operand). Exceptions are
+ * vm86 checks (INTn, IRET, PUSHF/POPF), RSM and XSETBV (!).
+ *
+ * RSM and XSETBV will be handled in the gen_* functions
+ * instead of using chk().
+ */
+ if (decode.e.check & X86_CHECK_cpl0) {
+ if (CPL(s) != 0) {
+ goto gp_fault;
+ }
+ }
+ if (decode.e.intercept && unlikely(GUEST(s))) {
+ gen_helper_svm_check_intercept(tcg_env,
+ tcg_constant_i32(decode.e.intercept));
+ }
+ if (decode.e.check & X86_CHECK_vm86_iopl) {
+ if (VM86(s) && IOPL(s) < 3) {
+ goto gp_fault;
+ }
+ }
+
if (decode.e.special == X86_SPECIAL_MMX &&
!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
gen_helper_enter_mmx(tcg_env);
@@ -1846,6 +1880,9 @@ static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
gen_writeback(s, &decode, 0, s->T0);
}
return;
+ gp_fault:
+ gen_exception_gpf(s);
+ return;
illegal_op:
gen_illegal_opcode(s);
return;
diff --git a/target/i386/tcg/decode-new.h b/target/i386/tcg/decode-new.h
index a542ec16813..631d39220bb 100644
--- a/target/i386/tcg/decode-new.h
+++ b/target/i386/tcg/decode-new.h
@@ -130,15 +130,28 @@ typedef enum X86OpUnit {
X86_OP_MMX, /* address in either s->ptrX or s->A0 depending on has_ea */
} X86OpUnit;
+typedef enum X86InsnCheck {
+ /* Illegal or exclusive to 64-bit mode */
+ X86_CHECK_i64 = 1,
+ X86_CHECK_o64 = 2,
+
+ /* Fault outside protected mode */
+ X86_CHECK_prot = 4,
+
+ /* Privileged instruction checks */
+ X86_CHECK_cpl0 = 8,
+ X86_CHECK_vm86_iopl = 16,
+
+ /* Fault if VEX.L=1 */
+ X86_CHECK_VEX128 = 32,
+} X86InsnCheck;
+
typedef enum X86InsnSpecial {
X86_SPECIAL_None,
/* Always locked if it has a memory operand (XCHG) */
X86_SPECIAL_Locked,
- /* Fault outside protected mode */
- X86_SPECIAL_ProtMode,
-
/*
* Register operand 0/2 is zero extended to 32 bits. Rd/Mb or Rd/Mw
* in the manual.
@@ -157,10 +170,6 @@ typedef enum X86InsnSpecial {
* become P/P/Q/N, and size "x" becomes "q".
*/
X86_SPECIAL_MMX,
-
- /* Illegal or exclusive to 64-bit mode */
- X86_SPECIAL_i64,
- X86_SPECIAL_o64,
} X86InsnSpecial;
/*
@@ -224,6 +233,8 @@ struct X86OpEntry {
unsigned vex_class:8;
X86VEXSpecial vex_special:8;
uint16_t valid_prefix:16;
+ uint8_t check:8;
+ uint8_t intercept:8;
bool is_decode:1;
};
diff --git a/target/i386/tcg/emit.c.inc b/target/i386/tcg/emit.c.inc
index 88793ba988d..7c36cf8a6df 100644
--- a/target/i386/tcg/emit.c.inc
+++ b/target/i386/tcg/emit.c.inc
@@ -1236,10 +1236,6 @@ static void gen_INSERTQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec
static void gen_LDMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- if (s->vex_l) {
- gen_illegal_opcode(s);
- return;
- }
tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T1);
gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
}
@@ -1832,10 +1828,6 @@ static void gen_VAESKEYGEN(DisasContext *s, CPUX86State *env, X86DecodedInsn *de
static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
- if (s->vex_l) {
- gen_illegal_opcode(s);
- return;
- }
gen_helper_update_mxcsr(tcg_env);
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
}
--
2.41.0