...
instruction if FEAT_SVE and !FEAT_SME.

Fixes: 43e3f85523e4 ("kselftest/arm64: Add SME support to syscall ABI test")
Signed-off-by: Weizhao Ouyang <o451686892@gmail.com>
---
 .../selftests/arm64/abi/syscall-abi-asm.S | 32 +++++++++----------
 1 file changed, 15 insertions(+), 17 deletions(-)

diff --git a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
index XXXXXXX..XXXXXXX 100644
--- a/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
+++ b/tools/testing/selftests/arm64/abi/syscall-abi-asm.S
@@ -XXX,XX +XXX,XX @@ do_syscall:
 stp x27, x28, [sp, #96]

 // Set SVCR if we're doing SME
- cbz x1, 1f
+ cbz x1, load_gpr
 adrp x2, svcr_in
 ldr x2, [x2, :lo12:svcr_in]
 msr S3_3_C4_C2_2, x2
-1:

 // Load ZA and ZT0 if enabled - uses x12 as scratch due to SME LDR
- tbz x2, #SVCR_ZA_SHIFT, 1f
+ tbz x2, #SVCR_ZA_SHIFT, load_gpr
 mov w12, #0
 ldr x2, =za_in
-2: _ldr_za 12, 2
+1: _ldr_za 12, 2
 add x2, x2, x1
 add x12, x12, #1
 cmp x1, x12
- bne 2b
+ bne 1b

 // ZT0
 mrs x2, S3_0_C0_C4_5 // ID_AA64SMFR0_EL1
 ubfx x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
      #ID_AA64SMFR0_EL1_SMEver_WIDTH
- cbz x2, 1f
+ cbz x2, load_gpr
 adrp x2, zt_in
 add x2, x2, :lo12:zt_in
 _ldr_zt 2
-1:

+load_gpr:
 // Load GPRs x8-x28, and save our SP/FP for later comparison
 ldr x2, =gpr_in
 add x2, x2, #64
@@ -XXX,XX +XXX,XX @@ do_syscall:
 str x30, [x2], #8 // LR

 // Load FPRs if we're not doing neither SVE nor streaming SVE
- cbnz x0, 1f
+ cbnz x0, check_sve_in
 ldr x2, =svcr_in
- tbnz x2, #SVCR_SM_SHIFT, 1f
+ tbnz x2, #SVCR_SM_SHIFT, check_sve_in

 ldr x2, =fpr_in
 ldp q0, q1, [x2]
@@ -XXX,XX +XXX,XX @@ do_syscall:
 ldp q30, q31, [x2, #16 * 30]

 b 2f
-1:

+check_sve_in:
 // Load the SVE registers if we're doing SVE/SME

 ldr x2, =z_in
@@ -XXX,XX +XXX,XX @@ do_syscall:
 stp q30, q31, [x2, #16 * 30]

 // Save SVCR if we're doing SME
- cbz x1, 1f
+ cbz x1, check_sve_out
 mrs x2, S3_3_C4_C2_2
 adrp x3, svcr_out
 str x2, [x3, :lo12:svcr_out]
-1:

 // Save ZA if it's enabled - uses x12 as scratch due to SME STR
- tbz x2, #SVCR_ZA_SHIFT, 1f
+ tbz x2, #SVCR_ZA_SHIFT, check_sve_out
 mov w12, #0
 ldr x2, =za_out
-2: _str_za 12, 2
+1: _str_za 12, 2
 add x2, x2, x1
 add x12, x12, #1
 cmp x1, x12
- bne 2b
+ bne 1b

 // ZT0
 mrs x2, S3_0_C0_C4_5 // ID_AA64SMFR0_EL1
 ubfx x2, x2, #ID_AA64SMFR0_EL1_SMEver_SHIFT, \
      #ID_AA64SMFR0_EL1_SMEver_WIDTH
- cbz x2, 1f
+ cbz x2, check_sve_out
 adrp x2, zt_out
 add x2, x2, :lo12:zt_out
 _str_zt 2
-1:

+check_sve_out:
 // Save the SVE state if we have some
 cbz x0, 1f

--
2.45.2
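As an aside, here is a minimal, hypothetical GNU assembler sketch (not part of the patch; demo, demo_named and done are made-up symbols) of the numeric-label hazard that the named labels load_gpr, check_sve_in and check_sve_out avoid: a reference like "1f" binds to the nearest following definition of "1:", so introducing another "1:" between a branch and its intended target (exactly what the _ldr_za/_str_za loop labels would do) silently retargets the branch, while a named label cannot be captured.

	.text
	.globl demo
demo:
	// Intended: skip the loop when x1 == 0. But "1f" resolves to the
	// nearest following "1:" (here the loop head), so a zero x1
	// enters the loop (and underflows x1) instead of skipping it.
	cbz x1, 1f
1:	sub x1, x1, #1
	cbnz x1, 1b
1:	// the label the branch was meant to reach
	ret

	// A named label cannot be captured by later numeric labels:
	.globl demo_named
demo_named:
	cbz x1, done
1:	sub x1, x1, #1
	cbnz x1, 1b
done:
	ret

This is why "cbz x1, load_gpr" keeps the skip over the SME-only msr/_ldr_za paths correct however the numeric labels inside them are renumbered.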