... | ... | ||
---|---|---|---|
7 | - NVPG and NVC BAR MMIO operations | 7 | - NVPG and NVC BAR MMIO operations |
8 | - Group/Crowd testing | 8 | - Group/Crowd testing |
9 | - ESB Escalation | 9 | - ESB Escalation |
10 | - Pool interrupt testing | 10 | - Pool interrupt testing |
11 | 11 | ||
12 | version 2: | ||
13 | - Removed printfs from test models and replaced with g_test_message() | ||
14 | - Updated XIVE copyrights to use: | ||
15 | SPDX-License-Identifier: GPL-2.0-or-later | ||
16 | - Set entire NSR to 0, not just fields | ||
17 | - Moved rename of xive_ipb_to_pipr() into its own patch set 0002 | ||
18 | - Renamed xive2_presenter_backlog_check() to | ||
19 | xive2_presenter_backlog_scan() | ||
20 | - Squashed patch set 11 (crowd size restrictions) into | ||
21 | patch set 9 (support crowd-matching) | ||
22 | - Made xive2_notify() a static routine | ||
12 | 23 | ||
13 | Frederic Barrat (10): | 24 | Frederic Barrat (10): |
14 | ppc/xive2: Update NVP save/restore for group attributes | 25 | ppc/xive2: Update NVP save/restore for group attributes |
15 | ppc/xive2: Add grouping level to notification | 26 | ppc/xive2: Add grouping level to notification |
16 | ppc/xive2: Support group-matching when looking for target | 27 | ppc/xive2: Support group-matching when looking for target |
17 | ppc/xive2: Add undelivered group interrupt to backlog | 28 | ppc/xive2: Add undelivered group interrupt to backlog |
18 | ppc/xive2: Process group backlog when pushing an OS context | 29 | ppc/xive2: Process group backlog when pushing an OS context |
19 | ppc/xive2: Process group backlog when updating the CPPR | 30 | ppc/xive2: Process group backlog when updating the CPPR |
20 | qtest/xive: Add group-interrupt test | 31 | qtest/xive: Add group-interrupt test |
21 | Add support for MMIO operations on the NVPG/NVC BAR | 32 | ppc/xive2: Add support for MMIO operations on the NVPG/NVC BAR |
22 | ppc/xive2: Support crowd-matching when looking for target | 33 | ppc/xive2: Support crowd-matching when looking for target |
23 | ppc/xive2: Check crowd backlog when scanning group backlog | 34 | ppc/xive2: Check crowd backlog when scanning group backlog |
24 | 35 | ||
25 | Glenn Miles (4): | 36 | Glenn Miles (3): |
26 | pnv/xive: Only support crowd size of 0, 2, 4 and 16 | ||
27 | pnv/xive: Support ESB Escalation | 37 | pnv/xive: Support ESB Escalation |
28 | pnv/xive: Fix problem with treating NVGC as a NVP | 38 | pnv/xive: Fix problem with treating NVGC as a NVP |
29 | qtest/xive: Add test of pool interrupts | 39 | qtest/xive: Add test of pool interrupts |
30 | 40 | ||
31 | include/hw/ppc/xive.h | 35 +- | 41 | Michael Kowal (1): |
32 | include/hw/ppc/xive2.h | 19 +- | 42 | ppc/xive: Rename ipb_to_pipr() to xive_ipb_to_pipr() |
33 | include/hw/ppc/xive2_regs.h | 25 +- | 43 | |
34 | include/hw/ppc/xive_regs.h | 20 +- | 44 | include/hw/ppc/xive.h | 41 +- |
35 | tests/qtest/pnv-xive2-common.h | 1 + | 45 | include/hw/ppc/xive2.h | 25 +- |
36 | hw/intc/pnv_xive.c | 5 +- | 46 | include/hw/ppc/xive2_regs.h | 30 +- |
37 | hw/intc/pnv_xive2.c | 161 +++++-- | 47 | include/hw/ppc/xive_regs.h | 25 +- |
38 | hw/intc/spapr_xive.c | 3 +- | 48 | tests/qtest/pnv-xive2-common.h | 1 + |
39 | hw/intc/xive.c | 182 +++++--- | 49 | hw/intc/pnv_xive.c | 10 +- |
40 | hw/intc/xive2.c | 741 +++++++++++++++++++++++++++---- | 50 | hw/intc/pnv_xive2.c | 166 +++++-- |
41 | hw/ppc/pnv.c | 31 +- | 51 | hw/intc/spapr_xive.c | 8 +- |
42 | hw/ppc/spapr.c | 4 +- | 52 | hw/intc/xive.c | 200 +++++--- |
43 | tests/qtest/pnv-xive2-nvpg_bar.c | 154 +++++++ | 53 | hw/intc/xive2.c | 750 +++++++++++++++++++++++++---- |
44 | tests/qtest/pnv-xive2-test.c | 240 ++++++++++ | 54 | hw/ppc/pnv.c | 35 +- |
45 | hw/intc/trace-events | 6 +- | 55 | hw/ppc/spapr.c | 7 +- |
46 | tests/qtest/meson.build | 3 +- | 56 | tests/qtest/pnv-xive2-flush-sync.c | 6 +- |
47 | 16 files changed, 1440 insertions(+), 190 deletions(-) | 57 | tests/qtest/pnv-xive2-nvpg_bar.c | 153 ++++++ |
58 | tests/qtest/pnv-xive2-test.c | 249 +++++++++- | ||
59 | hw/intc/trace-events | 6 +- | ||
60 | tests/qtest/meson.build | 3 +- | ||
61 | 17 files changed, 1475 insertions(+), 240 deletions(-) | ||
48 | create mode 100644 tests/qtest/pnv-xive2-nvpg_bar.c | 62 | create mode 100644 tests/qtest/pnv-xive2-nvpg_bar.c |
49 | 63 | ||
50 | -- | 64 | -- |
51 | 2.43.0 | 65 | 2.43.0 |
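A note on the "replaced with g_test_message()" changelog item above: unlike printf(), g_test_message() routes diagnostics through the GLib test harness, so they come out as TAP comments (or with --verbose) instead of polluting the test's stdout. A minimal sketch of the pattern, with a made-up test name and body; g_test_init(), g_test_add_func(), g_test_message() and g_test_run() are the real GLib calls:

    #include <glib.h>

    static void test_example(void)
    {
        /* before: printf("checking pool interrupt delivery\n"); */
        g_test_message("checking pool interrupt delivery");
        g_assert_cmpint(1, ==, 1);   /* stand-in for the real checks */
    }

    int main(int argc, char **argv)
    {
        g_test_init(&argc, &argv, NULL);
        g_test_add_func("/pnv-xive2/example", test_example);
        return g_test_run();
    }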
... | ... | ||
---|---|---|---|
7 | individually control what is saved/restored. | 7 | individually control what is saved/restored. |
8 | 8 | ||
9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
11 | --- | 11 | --- |
12 | include/hw/ppc/xive2_regs.h | 5 +++++ | 12 | include/hw/ppc/xive2_regs.h | 10 +++++++--- |
13 | hw/intc/xive2.c | 18 ++++++++++++++++-- | 13 | hw/intc/xive2.c | 23 ++++++++++++++++++----- |
14 | 2 files changed, 21 insertions(+), 2 deletions(-) | 14 | 2 files changed, 25 insertions(+), 8 deletions(-) |
15 | 15 | ||
16 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h | 16 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h |
17 | index XXXXXXX..XXXXXXX 100644 | 17 | index XXXXXXX..XXXXXXX 100644 |
18 | --- a/include/hw/ppc/xive2_regs.h | 18 | --- a/include/hw/ppc/xive2_regs.h |
19 | +++ b/include/hw/ppc/xive2_regs.h | 19 | +++ b/include/hw/ppc/xive2_regs.h |
20 | @@ -XXX,XX +XXX,XX @@ | ||
21 | /* | ||
22 | * QEMU PowerPC XIVE2 internal structure definitions (POWER10) | ||
23 | * | ||
24 | - * Copyright (c) 2019-2022, IBM Corporation. | ||
25 | + * Copyright (c) 2019-2024, IBM Corporation. | ||
26 | * | ||
27 | - * This code is licensed under the GPL version 2 or later. See the | ||
28 | - * COPYING file in the top-level directory. | ||
29 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
30 | */ | ||
31 | |||
32 | #ifndef PPC_XIVE2_REGS_H | ||
20 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2Nvp { | 33 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2Nvp { |
21 | uint32_t w0; | 34 | uint32_t w0; |
22 | #define NVP2_W0_VALID PPC_BIT32(0) | 35 | #define NVP2_W0_VALID PPC_BIT32(0) |
23 | #define NVP2_W0_HW PPC_BIT32(7) | 36 | #define NVP2_W0_HW PPC_BIT32(7) |
24 | +#define NVP2_W0_L PPC_BIT32(8) | 37 | +#define NVP2_W0_L PPC_BIT32(8) |
... | ... | ||
38 | #define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */ | 51 | #define NVP2_W4_ESC_ESB_BLOCK PPC_BITMASK32(0, 3) /* N:0 */ |
39 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 52 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
40 | index XXXXXXX..XXXXXXX 100644 | 53 | index XXXXXXX..XXXXXXX 100644 |
41 | --- a/hw/intc/xive2.c | 54 | --- a/hw/intc/xive2.c |
42 | +++ b/hw/intc/xive2.c | 55 | +++ b/hw/intc/xive2.c |
56 | @@ -XXX,XX +XXX,XX @@ | ||
57 | /* | ||
58 | * QEMU PowerPC XIVE2 interrupt controller model (POWER10) | ||
59 | * | ||
60 | - * Copyright (c) 2019-2022, IBM Corporation.. | ||
61 | + * Copyright (c) 2019-2024, IBM Corporation.. | ||
62 | * | ||
63 | - * This code is licensed under the GPL version 2 or later. See the | ||
64 | - * COPYING file in the top-level directory. | ||
65 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
66 | */ | ||
67 | |||
68 | #include "qemu/osdep.h" | ||
43 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, | 69 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_save_ctx(Xive2Router *xrtr, XiveTCTX *tctx, |
44 | 70 | ||
45 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); | 71 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]); |
46 | nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); | 72 | nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]); |
47 | - nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); | 73 | - nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]); |
... | ... |
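The hunks above lean on QEMU's IBM bit numbering (PPC_BIT32()/PPC_BITMASK32(), where bit 0 is the most significant bit of the word) and on the xive_set_field32() helper to deposit a value into a masked field. A self-contained sketch of those semantics, not QEMU's exact code, with the IPB field position assumed purely for illustration:

    #include <stdint.h>
    #include <stdio.h>

    /* IBM bit numbering: bit 0 is the MSB of the 32-bit word */
    #define BIT32(bs)         (0x80000000u >> (bs))
    #define BITMASK32(bs, be) ((BIT32(bs) - BIT32(be)) + BIT32(bs))

    #define NVP2_W0_L   BIT32(8)          /* new bit from the patch */
    #define NVP2_W2_IPB BITMASK32(8, 15)  /* assumed field position */

    /* clear the field, then deposit val at the mask's low-order bit */
    static uint32_t set_field32(uint32_t mask, uint32_t word, uint32_t val)
    {
        return (word & ~mask) | ((val << __builtin_ctz(mask)) & mask);
    }

    int main(void)
    {
        uint32_t w2 = 0;
        w2 = set_field32(NVP2_W2_IPB, w2, 0xA5); /* e.g. save TM_IPB */
        printf("w2=0x%08x\n", w2);               /* prints 0x00a50000 */
        return 0;
    }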
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | The NSR has a (so far unused) grouping level field. When an interrupt | 3 | The NSR has a (so far unused) grouping level field. When an interrupt |
4 | is presented, that field tells the hypervisor or OS if the interrupt | 4 | is presented, that field tells the hypervisor or OS if the interrupt |
5 | is for an individual VP or for a VP-group/crowd. This patch reworks | 5 | is for an individual VP or for a VP-group/crowd. This patch reworks |
6 | the presentation API to allow setting/unsetting the level when | 6 | the presentation API to allow setting/unsetting the level when |
7 | raising/accepting an interrupt. | 7 | raising/accepting an interrupt. |
8 | 8 | ||
9 | It also renames xive_tctx_ipb_update() to xive_tctx_pipr_update() as | 9 | It also renames xive_tctx_ipb_update() to xive_tctx_pipr_update() as |
10 | the IPB is only used for VP-specific targets, whereas the PIPR always | 10 | the IPB is only used for VP-specific targets, whereas the PIPR always |
11 | needs to be updated. | 11 | needs to be updated. |
12 | 12 | ||
13 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 13 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
14 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 14 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
15 | --- | 15 | --- |
16 | include/hw/ppc/xive.h | 19 +++++++- | 16 | include/hw/ppc/xive.h | 19 +++++++- |
17 | include/hw/ppc/xive_regs.h | 20 +++++++-- | 17 | include/hw/ppc/xive_regs.h | 20 +++++++-- |
18 | hw/intc/xive.c | 90 +++++++++++++++++++++++--------------- | 18 | hw/intc/xive.c | 90 +++++++++++++++++++++++--------------- |
19 | hw/intc/xive2.c | 18 ++++---- | 19 | hw/intc/xive2.c | 18 ++++---- |
20 | hw/intc/trace-events | 2 +- | 20 | hw/intc/trace-events | 2 +- |
21 | 5 files changed, 100 insertions(+), 49 deletions(-) | 21 | 5 files changed, 100 insertions(+), 49 deletions(-) |
22 | 22 | ||
23 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | 23 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h |
24 | index XXXXXXX..XXXXXXX 100644 | 24 | index XXXXXXX..XXXXXXX 100644 |
25 | --- a/include/hw/ppc/xive.h | 25 | --- a/include/hw/ppc/xive.h |
26 | +++ b/include/hw/ppc/xive.h | 26 | +++ b/include/hw/ppc/xive.h |
27 | @@ -XXX,XX +XXX,XX @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority) | 27 | @@ -XXX,XX +XXX,XX @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority) |
28 | 0 : 1 << (XIVE_PRIORITY_MAX - priority); | 28 | 0 : 1 << (XIVE_PRIORITY_MAX - priority); |
29 | } | 29 | } |
30 | 30 | ||
31 | +static inline uint8_t xive_priority_to_pipr(uint8_t priority) | 31 | +static inline uint8_t xive_priority_to_pipr(uint8_t priority) |
32 | +{ | 32 | +{ |
33 | + return priority > XIVE_PRIORITY_MAX ? 0xFF : priority; | 33 | + return priority > XIVE_PRIORITY_MAX ? 0xFF : priority; |
34 | +} | 34 | +} |
35 | + | 35 | + |
36 | +/* | 36 | +/* |
37 | + * Convert an Interrupt Pending Buffer (IPB) register to a Pending | 37 | + * Convert an Interrupt Pending Buffer (IPB) register to a Pending |
38 | + * Interrupt Priority Register (PIPR), which contains the priority of | 38 | + * Interrupt Priority Register (PIPR), which contains the priority of |
39 | + * the most favored pending notification. | 39 | + * the most favored pending notification. |
40 | + */ | 40 | + */ |
41 | +static inline uint8_t xive_ipb_to_pipr(uint8_t ibp) | 41 | +static inline uint8_t xive_ipb_to_pipr(uint8_t ibp) |
42 | +{ | 42 | +{ |
43 | + return ibp ? clz32((uint32_t)ibp << 24) : 0xff; | 43 | + return ibp ? clz32((uint32_t)ibp << 24) : 0xff; |
44 | +} | 44 | +} |
45 | + | 45 | + |
46 | /* | 46 | /* |
47 | * XIVE Thread Interrupt Management Aera (TIMA) | 47 | * XIVE Thread Interrupt Management Aera (TIMA) |
48 | * | 48 | * |
49 | @@ -XXX,XX +XXX,XX @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf); | 49 | @@ -XXX,XX +XXX,XX @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf); |
50 | Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp); | 50 | Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp); |
51 | void xive_tctx_reset(XiveTCTX *tctx); | 51 | void xive_tctx_reset(XiveTCTX *tctx); |
52 | void xive_tctx_destroy(XiveTCTX *tctx); | 52 | void xive_tctx_destroy(XiveTCTX *tctx); |
53 | -void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb); | 53 | -void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb); |
54 | +void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, | 54 | +void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, |
55 | + uint8_t group_level); | 55 | + uint8_t group_level); |
56 | void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring); | 56 | void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring); |
57 | +void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level); | 57 | +void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level); |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * KVM XIVE device helpers | 60 | * KVM XIVE device helpers |
61 | diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h | 61 | diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h |
62 | index XXXXXXX..XXXXXXX 100644 | 62 | index XXXXXXX..XXXXXXX 100644 |
63 | --- a/include/hw/ppc/xive_regs.h | 63 | --- a/include/hw/ppc/xive_regs.h |
64 | +++ b/include/hw/ppc/xive_regs.h | 64 | +++ b/include/hw/ppc/xive_regs.h |
65 | @@ -XXX,XX +XXX,XX @@ | 65 | @@ -XXX,XX +XXX,XX @@ |
66 | #define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */ | 66 | #define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */ |
67 | /* XXX more... */ | 67 | /* XXX more... */ |
68 | 68 | ||
69 | -/* NSR fields for the various QW ack types */ | 69 | -/* NSR fields for the various QW ack types */ |
70 | +/* | 70 | +/* |
71 | + * NSR fields for the various QW ack types | 71 | + * NSR fields for the various QW ack types |
72 | + * | 72 | + * |
73 | + * P10 has an extra bit in QW3 for the group level instead of the | 73 | + * P10 has an extra bit in QW3 for the group level instead of the |
74 | + * reserved 'i' bit. Since it is not used and we don't support group | 74 | + * reserved 'i' bit. Since it is not used and we don't support group |
75 | + * interrupts on P9, we use the P10 definition for the group level so | 75 | + * interrupts on P9, we use the P10 definition for the group level so |
76 | + * that we can have common macros for the NSR | 76 | + * that we can have common macros for the NSR |
77 | + */ | 77 | + */ |
78 | #define TM_QW0_NSR_EB PPC_BIT8(0) | 78 | #define TM_QW0_NSR_EB PPC_BIT8(0) |
79 | #define TM_QW1_NSR_EO PPC_BIT8(0) | 79 | #define TM_QW1_NSR_EO PPC_BIT8(0) |
80 | #define TM_QW3_NSR_HE PPC_BITMASK8(0, 1) | 80 | #define TM_QW3_NSR_HE PPC_BITMASK8(0, 1) |
81 | @@ -XXX,XX +XXX,XX @@ | 81 | @@ -XXX,XX +XXX,XX @@ |
82 | #define TM_QW3_NSR_HE_POOL 1 | 82 | #define TM_QW3_NSR_HE_POOL 1 |
83 | #define TM_QW3_NSR_HE_PHYS 2 | 83 | #define TM_QW3_NSR_HE_PHYS 2 |
84 | #define TM_QW3_NSR_HE_LSI 3 | 84 | #define TM_QW3_NSR_HE_LSI 3 |
85 | -#define TM_QW3_NSR_I PPC_BIT8(2) | 85 | -#define TM_QW3_NSR_I PPC_BIT8(2) |
86 | -#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3, 7) | 86 | -#define TM_QW3_NSR_GRP_LVL PPC_BIT8(3, 7) |
87 | +#define TM_NSR_GRP_LVL PPC_BITMASK8(2, 7) | 87 | +#define TM_NSR_GRP_LVL PPC_BITMASK8(2, 7) |
88 | +/* | 88 | +/* |
89 | + * On P10, the format of the 6-bit group level is: 2 bits for the | 89 | + * On P10, the format of the 6-bit group level is: 2 bits for the |
90 | + * crowd size and 4 bits for the group size. Since group/crowd size is | 90 | + * crowd size and 4 bits for the group size. Since group/crowd size is |
91 | + * always a power of 2, we encode the log. For example, group_level=4 | 91 | + * always a power of 2, we encode the log. For example, group_level=4 |
92 | + * means crowd size = 0 and group size = 16 (2^4) | 92 | + * means crowd size = 0 and group size = 16 (2^4) |
93 | + * Same encoding is used in the NVP and NVGC structures for | 93 | + * Same encoding is used in the NVP and NVGC structures for |
94 | + * PGoFirst and PGoNext fields | 94 | + * PGoFirst and PGoNext fields |
95 | + */ | 95 | + */ |
96 | 96 | ||
97 | /* | 97 | /* |
98 | * EAS (Event Assignment Structure) | 98 | * EAS (Event Assignment Structure) |
99 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | 99 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c |
100 | index XXXXXXX..XXXXXXX 100644 | 100 | index XXXXXXX..XXXXXXX 100644 |
101 | --- a/hw/intc/xive.c | 101 | --- a/hw/intc/xive.c |
102 | +++ b/hw/intc/xive.c | 102 | +++ b/hw/intc/xive.c |
103 | @@ -XXX,XX +XXX,XX @@ | 103 | @@ -XXX,XX +XXX,XX @@ |
104 | * XIVE Thread Interrupt Management context | 104 | * XIVE Thread Interrupt Management context |
105 | */ | 105 | */ |
106 | 106 | ||
107 | -/* | 107 | -/* |
108 | - * Convert an Interrupt Pending Buffer (IPB) register to a Pending | 108 | - * Convert an Interrupt Pending Buffer (IPB) register to a Pending |
109 | - * Interrupt Priority Register (PIPR), which contains the priority of | 109 | - * Interrupt Priority Register (PIPR), which contains the priority of |
110 | - * the most favored pending notification. | 110 | - * the most favored pending notification. |
111 | - */ | 111 | - */ |
112 | -static uint8_t ipb_to_pipr(uint8_t ibp) | 112 | -static uint8_t ipb_to_pipr(uint8_t ibp) |
113 | -{ | 113 | -{ |
114 | - return ibp ? clz32((uint32_t)ibp << 24) : 0xff; | 114 | - return ibp ? clz32((uint32_t)ibp << 24) : 0xff; |
115 | -} | 115 | -} |
116 | - | 116 | - |
117 | static uint8_t exception_mask(uint8_t ring) | 117 | static uint8_t exception_mask(uint8_t ring) |
118 | { | 118 | { |
119 | switch (ring) { | 119 | switch (ring) { |
120 | @@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) | 120 | @@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) |
121 | 121 | ||
122 | regs[TM_CPPR] = cppr; | 122 | regs[TM_CPPR] = cppr; |
123 | 123 | ||
124 | - /* Reset the pending buffer bit */ | 124 | - /* Reset the pending buffer bit */ |
125 | - alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); | 125 | - alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); |
126 | + /* | 126 | + /* |
127 | + * If the interrupt was for a specific VP, reset the pending | 127 | + * If the interrupt was for a specific VP, reset the pending |
128 | + * buffer bit, otherwise clear the logical server indicator | 128 | + * buffer bit, otherwise clear the logical server indicator |
129 | + */ | 129 | + */ |
130 | + if (regs[TM_NSR] & TM_NSR_GRP_LVL) { | 130 | + if (regs[TM_NSR] & TM_NSR_GRP_LVL) { |
131 | + regs[TM_NSR] &= ~TM_NSR_GRP_LVL; | 131 | + regs[TM_NSR] &= ~TM_NSR_GRP_LVL; |
132 | + } else { | 132 | + } else { |
133 | + alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); | 133 | + alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr); |
134 | + } | 134 | + } |
135 | 135 | ||
136 | - /* Drop Exception bit */ | 136 | - /* Drop Exception bit */ |
137 | + /* Drop the exception bit */ | 137 | + /* Drop the exception bit */ |
138 | regs[TM_NSR] &= ~mask; | 138 | regs[TM_NSR] &= ~mask; |
139 | 139 | ||
140 | trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, | 140 | trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring, |
141 | @@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) | 141 | @@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring) |
142 | return ((uint64_t)nsr << 8) | regs[TM_CPPR]; | 142 | return ((uint64_t)nsr << 8) | regs[TM_CPPR]; |
143 | } | 143 | } |
144 | 144 | ||
145 | -static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) | 145 | -static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) |
146 | +void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level) | 146 | +void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level) |
147 | { | 147 | { |
148 | /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ | 148 | /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ |
149 | uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; | 149 | uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; |
150 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) | 150 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring) |
151 | if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { | 151 | if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) { |
152 | switch (ring) { | 152 | switch (ring) { |
153 | case TM_QW1_OS: | 153 | case TM_QW1_OS: |
154 | - regs[TM_NSR] |= TM_QW1_NSR_EO; | 154 | - regs[TM_NSR] |= TM_QW1_NSR_EO; |
155 | + regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); | 155 | + regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F); |
156 | break; | 156 | break; |
157 | case TM_QW2_HV_POOL: | 157 | case TM_QW2_HV_POOL: |
158 | - alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6); | 158 | - alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6); |
159 | + alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); | 159 | + alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F); |
160 | break; | 160 | break; |
161 | case TM_QW3_HV_PHYS: | 161 | case TM_QW3_HV_PHYS: |
162 | - regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); | 162 | - regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6); |
163 | + regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); | 163 | + regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F); |
164 | break; | 164 | break; |
165 | default: | 165 | default: |
166 | g_assert_not_reached(); | 166 | g_assert_not_reached(); |
167 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | 167 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) |
168 | * Recompute the PIPR based on local pending interrupts. The PHYS | 168 | * Recompute the PIPR based on local pending interrupts. The PHYS |
169 | * ring must take the minimum of both the PHYS and POOL PIPR values. | 169 | * ring must take the minimum of both the PHYS and POOL PIPR values. |
170 | */ | 170 | */ |
171 | - pipr_min = ipb_to_pipr(regs[TM_IPB]); | 171 | - pipr_min = ipb_to_pipr(regs[TM_IPB]); |
172 | + pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); | 172 | + pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); |
173 | ring_min = ring; | 173 | ring_min = ring; |
174 | 174 | ||
175 | /* PHYS updates also depend on POOL values */ | 175 | /* PHYS updates also depend on POOL values */ |
176 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | 176 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) |
177 | /* POOL values only matter if POOL ctx is valid */ | 177 | /* POOL values only matter if POOL ctx is valid */ |
178 | if (pool_regs[TM_WORD2] & 0x80) { | 178 | if (pool_regs[TM_WORD2] & 0x80) { |
179 | 179 | ||
180 | - uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]); | 180 | - uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]); |
181 | + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); | 181 | + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); |
182 | 182 | ||
183 | /* | 183 | /* |
184 | * Determine highest priority interrupt and | 184 | * Determine highest priority interrupt and |
185 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | 185 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) |
186 | regs[TM_PIPR] = pipr_min; | 186 | regs[TM_PIPR] = pipr_min; |
187 | 187 | ||
188 | /* CPPR has changed, check if we need to raise a pending exception */ | 188 | /* CPPR has changed, check if we need to raise a pending exception */ |
189 | - xive_tctx_notify(tctx, ring_min); | 189 | - xive_tctx_notify(tctx, ring_min); |
190 | + xive_tctx_notify(tctx, ring_min, 0); | 190 | + xive_tctx_notify(tctx, ring_min, 0); |
191 | } | 191 | } |
192 | 192 | ||
193 | -void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) | 193 | -void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) |
194 | -{ | 194 | -{ |
195 | +void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, | 195 | +void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority, |
196 | + uint8_t group_level) | 196 | + uint8_t group_level) |
197 | + { | 197 | + { |
198 | + /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ | 198 | + /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ |
199 | + uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; | 199 | + uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; |
200 | + uint8_t *alt_regs = &tctx->regs[alt_ring]; | 200 | + uint8_t *alt_regs = &tctx->regs[alt_ring]; |
201 | uint8_t *regs = &tctx->regs[ring]; | 201 | uint8_t *regs = &tctx->regs[ring]; |
202 | 202 | ||
203 | - regs[TM_IPB] |= ipb; | 203 | - regs[TM_IPB] |= ipb; |
204 | - regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); | 204 | - regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); |
205 | - xive_tctx_notify(tctx, ring); | 205 | - xive_tctx_notify(tctx, ring); |
206 | -} | 206 | -} |
207 | + if (group_level == 0) { | 207 | + if (group_level == 0) { |
208 | + /* VP-specific */ | 208 | + /* VP-specific */ |
209 | + regs[TM_IPB] |= xive_priority_to_ipb(priority); | 209 | + regs[TM_IPB] |= xive_priority_to_ipb(priority); |
210 | + alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); | 210 | + alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); |
211 | + } else { | 211 | + } else { |
212 | + /* VP-group */ | 212 | + /* VP-group */ |
213 | + alt_regs[TM_PIPR] = xive_priority_to_pipr(priority); | 213 | + alt_regs[TM_PIPR] = xive_priority_to_pipr(priority); |
214 | + } | 214 | + } |
215 | + xive_tctx_notify(tctx, ring, group_level); | 215 | + xive_tctx_notify(tctx, ring, group_level); |
216 | + } | 216 | + } |
217 | 217 | ||
218 | /* | 218 | /* |
219 | * XIVE Thread Interrupt Management Area (TIMA) | 219 | * XIVE Thread Interrupt Management Area (TIMA) |
220 | @@ -XXX,XX +XXX,XX @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, | 220 | @@ -XXX,XX +XXX,XX @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx, |
221 | } | 221 | } |
222 | 222 | ||
223 | /* | 223 | /* |
224 | - * Adjust the IPB to allow a CPU to process event queues of other | 224 | - * Adjust the IPB to allow a CPU to process event queues of other |
225 | + * Adjust the PIPR to allow a CPU to process event queues of other | 225 | + * Adjust the PIPR to allow a CPU to process event queues of other |
226 | * priorities during one physical interrupt cycle. | 226 | * priorities during one physical interrupt cycle. |
227 | */ | 227 | */ |
228 | static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, | 228 | static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx, |
229 | hwaddr offset, uint64_t value, unsigned size) | 229 | hwaddr offset, uint64_t value, unsigned size) |
230 | { | 230 | { |
231 | - xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); | 231 | - xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff)); |
232 | + xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0); | 232 | + xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0); |
233 | } | 233 | } |
234 | 234 | ||
235 | static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, | 235 | static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk, |
236 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, | 236 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx, |
237 | /* Reset the NVT value */ | 237 | /* Reset the NVT value */ |
238 | nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); | 238 | nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0); |
239 | xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); | 239 | xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4); |
240 | - } | 240 | - } |
241 | + | 241 | + |
242 | + uint8_t *regs = &tctx->regs[TM_QW1_OS]; | 242 | + uint8_t *regs = &tctx->regs[TM_QW1_OS]; |
243 | + regs[TM_IPB] |= ipb; | 243 | + regs[TM_IPB] |= ipb; |
244 | +} | 244 | +} |
245 | + | 245 | + |
246 | /* | 246 | /* |
247 | - * Always call xive_tctx_ipb_update(). Even if there were no | 247 | - * Always call xive_tctx_ipb_update(). Even if there were no |
248 | + * Always call xive_tctx_pipr_update(). Even if there were no | 248 | + * Always call xive_tctx_pipr_update(). Even if there were no |
249 | * escalation triggered, there could be a pending interrupt which | 249 | * escalation triggered, there could be a pending interrupt which |
250 | * was saved when the context was pulled and that we need to take | 250 | * was saved when the context was pulled and that we need to take |
251 | * into account by recalculating the PIPR (which is not | 251 | * into account by recalculating the PIPR (which is not |
252 | * saved/restored). | 252 | * saved/restored). |
253 | * It will also raise the External interrupt signal if needed. | 253 | * It will also raise the External interrupt signal if needed. |
254 | */ | 254 | */ |
255 | - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); | 255 | - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); |
256 | + xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */ | 256 | + xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */ |
257 | } | 257 | } |
258 | 258 | ||
259 | /* | 259 | /* |
260 | @@ -XXX,XX +XXX,XX @@ void xive_tctx_reset(XiveTCTX *tctx) | 260 | @@ -XXX,XX +XXX,XX @@ void xive_tctx_reset(XiveTCTX *tctx) |
261 | * CPPR is first set. | 261 | * CPPR is first set. |
262 | */ | 262 | */ |
263 | tctx->regs[TM_QW1_OS + TM_PIPR] = | 263 | tctx->regs[TM_QW1_OS + TM_PIPR] = |
264 | - ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); | 264 | - ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); |
265 | + xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); | 265 | + xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); |
266 | tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = | 266 | tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = |
267 | - ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); | 267 | - ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); |
268 | + xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); | 268 | + xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); |
269 | } | 269 | } |
270 | 270 | ||
271 | static void xive_tctx_realize(DeviceState *dev, Error **errp) | 271 | static void xive_tctx_realize(DeviceState *dev, Error **errp) |
272 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) | 272 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) |
273 | return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); | 273 | return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); |
274 | } | 274 | } |
275 | 275 | ||
276 | +static uint8_t xive_get_group_level(uint32_t nvp_index) | 276 | +static uint8_t xive_get_group_level(uint32_t nvp_index) |
277 | +{ | 277 | +{ |
278 | + /* FIXME add crowd encoding */ | 278 | + /* FIXME add crowd encoding */ |
279 | + return ctz32(~nvp_index) + 1; | 279 | + return ctz32(~nvp_index) + 1; |
280 | +} | 280 | +} |
281 | + | 281 | + |
282 | /* | 282 | /* |
283 | * The thread context register words are in big-endian format. | 283 | * The thread context register words are in big-endian format. |
284 | */ | 284 | */ |
285 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 285 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
286 | { | 286 | { |
287 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); | 287 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); |
288 | XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; | 288 | XiveTCTXMatch match = { .tctx = NULL, .ring = 0 }; |
289 | + uint8_t group_level; | 289 | + uint8_t group_level; |
290 | int count; | 290 | int count; |
291 | 291 | ||
292 | /* | 292 | /* |
293 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 293 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
294 | 294 | ||
295 | /* handle CPU exception delivery */ | 295 | /* handle CPU exception delivery */ |
296 | if (count) { | 296 | if (count) { |
297 | - trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); | 297 | - trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring); |
298 | - xive_tctx_ipb_update(match.tctx, match.ring, | 298 | - xive_tctx_ipb_update(match.tctx, match.ring, |
299 | - xive_priority_to_ipb(priority)); | 299 | - xive_priority_to_ipb(priority)); |
300 | + group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0; | 300 | + group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0; |
301 | + trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); | 301 | + trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); |
302 | + xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); | 302 | + xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); |
303 | } | 303 | } |
304 | 304 | ||
305 | return !!count; | 305 | return !!count; |
306 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 306 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
307 | index XXXXXXX..XXXXXXX 100644 | 307 | index XXXXXXX..XXXXXXX 100644 |
308 | --- a/hw/intc/xive2.c | 308 | --- a/hw/intc/xive2.c |
309 | +++ b/hw/intc/xive2.c | 309 | +++ b/hw/intc/xive2.c |
310 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, | 310 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, |
311 | uint8_t nvp_blk, uint32_t nvp_idx, | 311 | uint8_t nvp_blk, uint32_t nvp_idx, |
312 | bool do_restore) | 312 | bool do_restore) |
313 | { | 313 | { |
314 | + uint8_t ipb, backlog_level; | 314 | + uint8_t ipb, backlog_level; |
315 | + uint8_t backlog_prio; | 315 | + uint8_t backlog_prio; |
316 | + uint8_t *regs = &tctx->regs[TM_QW1_OS]; | 316 | + uint8_t *regs = &tctx->regs[TM_QW1_OS]; |
317 | Xive2Nvp nvp; | 317 | Xive2Nvp nvp; |
318 | - uint8_t ipb; | 318 | - uint8_t ipb; |
319 | 319 | ||
320 | /* | 320 | /* |
321 | * Grab the associated thread interrupt context registers in the | 321 | * Grab the associated thread interrupt context registers in the |
322 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, | 322 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, |
323 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); | 323 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0); |
324 | xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | 324 | xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); |
325 | } | 325 | } |
326 | + regs[TM_IPB] = ipb; | 326 | + regs[TM_IPB] = ipb; |
327 | + backlog_prio = xive_ipb_to_pipr(ipb); | 327 | + backlog_prio = xive_ipb_to_pipr(ipb); |
328 | + backlog_level = 0; | 328 | + backlog_level = 0; |
329 | + | 329 | + |
330 | /* | 330 | /* |
331 | - * Always call xive_tctx_ipb_update(). Even if there were no | 331 | - * Always call xive_tctx_ipb_update(). Even if there were no |
332 | - * escalation triggered, there could be a pending interrupt which | 332 | - * escalation triggered, there could be a pending interrupt which |
333 | - * was saved when the context was pulled and that we need to take | 333 | - * was saved when the context was pulled and that we need to take |
334 | - * into account by recalculating the PIPR (which is not | 334 | - * into account by recalculating the PIPR (which is not |
335 | - * saved/restored). | 335 | - * saved/restored). |
336 | - * It will also raise the External interrupt signal if needed. | 336 | - * It will also raise the External interrupt signal if needed. |
337 | + * Compute the PIPR based on the restored state. | 337 | + * Compute the PIPR based on the restored state. |
338 | + * It will raise the External interrupt signal if needed. | 338 | + * It will raise the External interrupt signal if needed. |
339 | */ | 339 | */ |
340 | - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); | 340 | - xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb); |
341 | + xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); | 341 | + xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); |
342 | } | 342 | } |
343 | 343 | ||
344 | /* | 344 | /* |
345 | diff --git a/hw/intc/trace-events b/hw/intc/trace-events | 345 | diff --git a/hw/intc/trace-events b/hw/intc/trace-events |
346 | index XXXXXXX..XXXXXXX 100644 | 346 | index XXXXXXX..XXXXXXX 100644 |
347 | --- a/hw/intc/trace-events | 347 | --- a/hw/intc/trace-events |
348 | +++ b/hw/intc/trace-events | 348 | +++ b/hw/intc/trace-events |
349 | @@ -XXX,XX +XXX,XX @@ xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "EN | 349 | @@ -XXX,XX +XXX,XX @@ xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "EN |
350 | xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" | 350 | xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x" |
351 | xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 | 351 | xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 |
352 | xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 | 352 | xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64 |
353 | -xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x" | 353 | -xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x" |
354 | +xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d" | 354 | +xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d" |
355 | xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 | 355 | xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 |
356 | 356 | ||
357 | # pnv_xive.c | 357 | # pnv_xive.c |
358 | -- | 358 | -- |
359 | 2.43.0 | 359 | 2.43.0 |
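A worked example of the encodings this patch introduces: the group level is the count of trailing 1-bits of the NVP index plus one (crowd encoding is still a FIXME above), the group size is 2^level, and the level is stored in the low 6 bits of the NSR next to the exception bit. The same size also yields the mask used to test group membership in the group-matching patch later in the series. A minimal sketch, using __builtin_ctz in place of QEMU's ctz32() and taking TM_QW1_NSR_EO (PPC_BIT8(0) = 0x80) from the quoted header:

    #include <stdint.h>
    #include <stdio.h>

    /* group level, as in xive_get_group_level() above */
    static uint8_t get_group_level(uint32_t nvp_index)
    {
        return __builtin_ctz(~nvp_index) + 1;
    }

    int main(void)
    {
        uint32_t idx = 0x27;  /* 0b100111: 3 trailing 1s -> level 4 */
        uint8_t level = get_group_level(idx);
        printf("level=%u group size=%u\n", (unsigned)level, 1u << level);

        /* VPs whose index matches under this mask are in the group */
        uint32_t mask = ~((1u << level) - 1);
        printf("match mask=0x%08x\n", mask);   /* 0xfffffff0 */

        /* OS-ring NSR as composed in xive_tctx_notify(): exception
         * bit on top, 6-bit group level below */
        uint8_t nsr = 0x80 | (level & 0x3F);
        printf("NSR=0x%02x\n", (unsigned)nsr); /* 0x84 */
        return 0;
    }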
New patch | |||
---|---|---|---|
1 | Renamed the function to follow the naming convention of the other functions. | ||
1 | 2 | ||
3 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
4 | --- | ||
5 | include/hw/ppc/xive.h | 16 ++++++++++++---- | ||
6 | hw/intc/xive.c | 22 ++++++---------------- | ||
7 | 2 files changed, 18 insertions(+), 20 deletions(-) | ||
8 | |||
9 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | ||
10 | index XXXXXXX..XXXXXXX 100644 | ||
11 | --- a/include/hw/ppc/xive.h | ||
12 | +++ b/include/hw/ppc/xive.h | ||
13 | @@ -XXX,XX +XXX,XX @@ | ||
14 | * TCTX Thread interrupt Context | ||
15 | * | ||
16 | * | ||
17 | - * Copyright (c) 2017-2018, IBM Corporation. | ||
18 | - * | ||
19 | - * This code is licensed under the GPL version 2 or later. See the | ||
20 | - * COPYING file in the top-level directory. | ||
21 | + * Copyright (c) 2017-2024, IBM Corporation. | ||
22 | * | ||
23 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
24 | */ | ||
25 | |||
26 | #ifndef PPC_XIVE_H | ||
27 | @@ -XXX,XX +XXX,XX @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority) | ||
28 | 0 : 1 << (XIVE_PRIORITY_MAX - priority); | ||
29 | } | ||
30 | |||
31 | +/* | ||
32 | + * Convert an Interrupt Pending Buffer (IPB) register to a Pending | ||
33 | + * Interrupt Priority Register (PIPR), which contains the priority of | ||
34 | + * the most favored pending notification. | ||
35 | + */ | ||
36 | +static inline uint8_t xive_ipb_to_pipr(uint8_t ibp) | ||
37 | +{ | ||
38 | + return ibp ? clz32((uint32_t)ibp << 24) : 0xff; | ||
39 | +} | ||
40 | + | ||
41 | /* | ||
42 | * XIVE Thread Interrupt Management Aera (TIMA) | ||
43 | * | ||
44 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/hw/intc/xive.c | ||
47 | +++ b/hw/intc/xive.c | ||
48 | @@ -XXX,XX +XXX,XX @@ | ||
49 | * | ||
50 | * Copyright (c) 2017-2018, IBM Corporation. | ||
51 | * | ||
52 | - * This code is licensed under the GPL version 2 or later. See the | ||
53 | - * COPYING file in the top-level directory. | ||
54 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
55 | */ | ||
56 | |||
57 | #include "qemu/osdep.h" | ||
58 | @@ -XXX,XX +XXX,XX @@ | ||
59 | * XIVE Thread Interrupt Management context | ||
60 | */ | ||
61 | |||
62 | -/* | ||
63 | - * Convert an Interrupt Pending Buffer (IPB) register to a Pending | ||
64 | - * Interrupt Priority Register (PIPR), which contains the priority of | ||
65 | - * the most favored pending notification. | ||
66 | - */ | ||
67 | -static uint8_t ipb_to_pipr(uint8_t ibp) | ||
68 | -{ | ||
69 | - return ibp ? clz32((uint32_t)ibp << 24) : 0xff; | ||
70 | -} | ||
71 | |||
72 | static uint8_t exception_mask(uint8_t ring) | ||
73 | { | ||
74 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | ||
75 | * Recompute the PIPR based on local pending interrupts. The PHYS | ||
76 | * ring must take the minimum of both the PHYS and POOL PIPR values. | ||
77 | */ | ||
78 | - pipr_min = ipb_to_pipr(regs[TM_IPB]); | ||
79 | + pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); | ||
80 | ring_min = ring; | ||
81 | |||
82 | /* PHYS updates also depend on POOL values */ | ||
83 | @@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | ||
84 | /* POOL values only matter if POOL ctx is valid */ | ||
85 | if (pool_regs[TM_WORD2] & 0x80) { | ||
86 | |||
87 | - uint8_t pool_pipr = ipb_to_pipr(pool_regs[TM_IPB]); | ||
88 | + uint8_t pool_pipr = xive_ipb_to_pipr(pool_regs[TM_IPB]); | ||
89 | |||
90 | /* | ||
91 | * Determine highest priority interrupt and | ||
92 | @@ -XXX,XX +XXX,XX @@ void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb) | ||
93 | uint8_t *regs = &tctx->regs[ring]; | ||
94 | |||
95 | regs[TM_IPB] |= ipb; | ||
96 | - regs[TM_PIPR] = ipb_to_pipr(regs[TM_IPB]); | ||
97 | + regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]); | ||
98 | xive_tctx_notify(tctx, ring); | ||
99 | } | ||
100 | |||
101 | @@ -XXX,XX +XXX,XX @@ void xive_tctx_reset(XiveTCTX *tctx) | ||
102 | * CPPR is first set. | ||
103 | */ | ||
104 | tctx->regs[TM_QW1_OS + TM_PIPR] = | ||
105 | - ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); | ||
106 | + xive_ipb_to_pipr(tctx->regs[TM_QW1_OS + TM_IPB]); | ||
107 | tctx->regs[TM_QW3_HV_PHYS + TM_PIPR] = | ||
108 | - ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); | ||
109 | + xive_ipb_to_pipr(tctx->regs[TM_QW3_HV_PHYS + TM_IPB]); | ||
110 | } | ||
111 | |||
112 | static void xive_tctx_realize(DeviceState *dev, Error **errp) | ||
113 | -- | ||
114 | 2.43.0 |
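To make the renamed helper concrete: the IPB is a bitmap of pending priorities (bit 0x80 for priority 0, the most favored) and the PIPR is the most favored priority pending in it, i.e. the number of leading zeros of the IPB. A standalone restatement of the two helpers quoted above, with __builtin_clz standing in for QEMU's clz32() and XIVE_PRIORITY_MAX assumed to be 7:

    #include <stdint.h>
    #include <stdio.h>

    #define XIVE_PRIORITY_MAX 7  /* assumed: 8 priorities, 0 most favored */

    static uint8_t priority_to_ipb(uint8_t priority)
    {
        return priority > XIVE_PRIORITY_MAX ?
            0 : 1 << (XIVE_PRIORITY_MAX - priority);
    }

    static uint8_t ipb_to_pipr(uint8_t ipb)
    {
        /* position of the most significant pending bit */
        return ipb ? __builtin_clz((uint32_t)ipb << 24) : 0xff;
    }

    int main(void)
    {
        uint8_t ipb = 0;
        ipb |= priority_to_ipb(5);   /* pend priority 5 -> bit 0x04 */
        ipb |= priority_to_ipb(3);   /* pend priority 3 -> bit 0x10 */
        /* most favored pending priority wins: PIPR = 3 */
        printf("ipb=0x%02x pipr=%u\n", (unsigned)ipb,
               (unsigned)ipb_to_pipr(ipb));
        return 0;
    }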
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | If an END has the 'i' bit set (ignore), then it targets a group of | 3 | If an END has the 'i' bit set (ignore), then it targets a group of |
4 | VPs. The size of the group depends on the VP index of the target | 4 | VPs. The size of the group depends on the VP index of the target |
5 | (first 0 found when looking at the least significant bits of the | 5 | (first 0 found when looking at the least significant bits of the |
6 | index), so a mask is applied to the VP index of a running thread to | 6 | index), so a mask is applied to the VP index of a running thread to |
7 | know if we have a match. | 7 | know if we have a match. |
8 | 8 | ||
9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
11 | --- | 11 | --- |
12 | include/hw/ppc/xive.h | 5 +++- | 12 | include/hw/ppc/xive.h | 5 +++- |
13 | include/hw/ppc/xive2.h | 1 + | 13 | include/hw/ppc/xive2.h | 1 + |
14 | hw/intc/pnv_xive2.c | 33 ++++++++++++++------- | 14 | hw/intc/pnv_xive2.c | 33 ++++++++++++++------- |
15 | hw/intc/xive.c | 56 +++++++++++++++++++++++++----------- | 15 | hw/intc/xive.c | 56 +++++++++++++++++++++++++----------- |
16 | hw/intc/xive2.c | 65 ++++++++++++++++++++++++++++++------------ | 16 | hw/intc/xive2.c | 65 ++++++++++++++++++++++++++++++------------ |
17 | 5 files changed, 114 insertions(+), 46 deletions(-) | 17 | 5 files changed, 114 insertions(+), 46 deletions(-) |
18 | 18 | ||
19 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | 19 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h |
20 | index XXXXXXX..XXXXXXX 100644 | 20 | index XXXXXXX..XXXXXXX 100644 |
21 | --- a/include/hw/ppc/xive.h | 21 | --- a/include/hw/ppc/xive.h |
22 | +++ b/include/hw/ppc/xive.h | 22 | +++ b/include/hw/ppc/xive.h |
23 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas); | 23 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas); |
24 | typedef struct XiveTCTXMatch { | 24 | typedef struct XiveTCTXMatch { |
25 | XiveTCTX *tctx; | 25 | XiveTCTX *tctx; |
26 | uint8_t ring; | 26 | uint8_t ring; |
27 | + bool precluded; | 27 | + bool precluded; |
28 | } XiveTCTXMatch; | 28 | } XiveTCTXMatch; |
29 | 29 | ||
30 | #define TYPE_XIVE_PRESENTER "xive-presenter" | 30 | #define TYPE_XIVE_PRESENTER "xive-presenter" |
31 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 31 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
32 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 32 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
33 | uint8_t nvt_blk, uint32_t nvt_idx, | 33 | uint8_t nvt_blk, uint32_t nvt_idx, |
34 | bool cam_ignore, uint8_t priority, | 34 | bool cam_ignore, uint8_t priority, |
35 | - uint32_t logic_serv); | 35 | - uint32_t logic_serv); |
36 | + uint32_t logic_serv, bool *precluded); | 36 | + uint32_t logic_serv, bool *precluded); |
37 | + | 37 | + |
38 | +uint32_t xive_get_vpgroup_size(uint32_t nvp_index); | 38 | +uint32_t xive_get_vpgroup_size(uint32_t nvp_index); |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * XIVE Fabric (Interface between Interrupt Controller and Machine) | 41 | * XIVE Fabric (Interface between Interrupt Controller and Machine) |
42 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | 42 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h |
43 | index XXXXXXX..XXXXXXX 100644 | 43 | index XXXXXXX..XXXXXXX 100644 |
44 | --- a/include/hw/ppc/xive2.h | 44 | --- a/include/hw/ppc/xive2.h |
45 | +++ b/include/hw/ppc/xive2.h | 45 | +++ b/include/hw/ppc/xive2.h |
46 | @@ -XXX,XX +XXX,XX @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | 46 | @@ -XXX,XX +XXX,XX @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, |
47 | hwaddr offset, unsigned size); | 47 | hwaddr offset, unsigned size); |
48 | void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, | 48 | void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, |
49 | hwaddr offset, uint64_t value, unsigned size); | 49 | hwaddr offset, uint64_t value, unsigned size); |
50 | +bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); | 50 | +bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); |
51 | void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, | 51 | void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, |
52 | hwaddr offset, uint64_t value, unsigned size); | 52 | hwaddr offset, uint64_t value, unsigned size); |
53 | void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, | 53 | void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, |
54 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | 54 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c |
55 | index XXXXXXX..XXXXXXX 100644 | 55 | index XXXXXXX..XXXXXXX 100644 |
56 | --- a/hw/intc/pnv_xive2.c | 56 | --- a/hw/intc/pnv_xive2.c |
57 | +++ b/hw/intc/pnv_xive2.c | 57 | +++ b/hw/intc/pnv_xive2.c |
58 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, | 58 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, |
59 | logic_serv); | 59 | logic_serv); |
60 | } | 60 | } |
61 | 61 | ||
62 | - /* | 62 | - /* |
63 | - * Save the context and follow on to catch duplicates, | 63 | - * Save the context and follow on to catch duplicates, |
64 | - * that we don't support yet. | 64 | - * that we don't support yet. |
65 | - */ | 65 | - */ |
66 | if (ring != -1) { | 66 | if (ring != -1) { |
67 | - if (match->tctx) { | 67 | - if (match->tctx) { |
68 | + /* | 68 | + /* |
69 | + * For VP-specific match, finding more than one is a | 69 | + * For VP-specific match, finding more than one is a |
70 | + * problem. For group notification, it's possible. | 70 | + * problem. For group notification, it's possible. |
71 | + */ | 71 | + */ |
72 | + if (!cam_ignore && match->tctx) { | 72 | + if (!cam_ignore && match->tctx) { |
73 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " | 73 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a " |
74 | "thread context NVT %x/%x\n", | 74 | "thread context NVT %x/%x\n", |
75 | nvt_blk, nvt_idx); | 75 | nvt_blk, nvt_idx); |
76 | - return false; | 76 | - return false; |
77 | + /* Should set a FIR if we ever model it */ | 77 | + /* Should set a FIR if we ever model it */ |
78 | + return -1; | 78 | + return -1; |
79 | + } | 79 | + } |
80 | + /* | 80 | + /* |
81 | + * For a group notification, we need to know if the | 81 | + * For a group notification, we need to know if the |
82 | + * match is precluded first by checking the current | 82 | + * match is precluded first by checking the current |
83 | + * thread priority. If the interrupt can be delivered, | 83 | + * thread priority. If the interrupt can be delivered, |
84 | + * we always notify the first match (for now). | 84 | + * we always notify the first match (for now). |
85 | + */ | 85 | + */ |
86 | + if (cam_ignore && | 86 | + if (cam_ignore && |
87 | + xive2_tm_irq_precluded(tctx, ring, priority)) { | 87 | + xive2_tm_irq_precluded(tctx, ring, priority)) { |
88 | + match->precluded = true; | 88 | + match->precluded = true; |
89 | + } else { | 89 | + } else { |
90 | + if (!match->tctx) { | 90 | + if (!match->tctx) { |
91 | + match->ring = ring; | 91 | + match->ring = ring; |
92 | + match->tctx = tctx; | 92 | + match->tctx = tctx; |
93 | + } | 93 | + } |
94 | + count++; | 94 | + count++; |
95 | } | 95 | } |
96 | - | 96 | - |
97 | - match->ring = ring; | 97 | - match->ring = ring; |
98 | - match->tctx = tctx; | 98 | - match->tctx = tctx; |
99 | - count++; | 99 | - count++; |
100 | } | 100 | } |
101 | } | 101 | } |
102 | } | 102 | } |
103 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | 103 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c |
104 | index XXXXXXX..XXXXXXX 100644 | 104 | index XXXXXXX..XXXXXXX 100644 |
105 | --- a/hw/intc/xive.c | 105 | --- a/hw/intc/xive.c |
106 | +++ b/hw/intc/xive.c | 106 | +++ b/hw/intc/xive.c |
107 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) | 107 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx) |
108 | return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); | 108 | return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f)); |
109 | } | 109 | } |
110 | 110 | ||
111 | +uint32_t xive_get_vpgroup_size(uint32_t nvp_index) | 111 | +uint32_t xive_get_vpgroup_size(uint32_t nvp_index) |
112 | +{ | 112 | +{ |
113 | + /* | 113 | + /* |
114 | + * Group size is a power of 2. The position of the first 0 | 114 | + * Group size is a power of 2. The position of the first 0 |
115 | + * (starting with the least significant bits) in the NVP index | 115 | + * (starting with the least significant bits) in the NVP index |
116 | + * gives the size of the group. | 116 | + * gives the size of the group. |
117 | + */ | 117 | + */ |
118 | + return 1 << (ctz32(~nvp_index) + 1); | 118 | + return 1 << (ctz32(~nvp_index) + 1); |
119 | +} | 119 | +} |
120 | + | 120 | + |
121 | static uint8_t xive_get_group_level(uint32_t nvp_index) | 121 | static uint8_t xive_get_group_level(uint32_t nvp_index) |
122 | { | 122 | { |
123 | /* FIXME add crowd encoding */ | 123 | /* FIXME add crowd encoding */ |
@@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
 /*
  * This is our simple Xive Presenter Engine model. It is merged in the
  * Router as it does not require an extra object.
- *
- * It receives notification requests sent by the IVRE to find one
- * matching NVT (or more) dispatched on the processor threads. In case
- * of a single NVT notification, the process is abbreviated and the
- * thread is signaled if a match is found. In case of a logical server
- * notification (bits ignored at the end of the NVT identifier), the
- * IVPE and IVRE select a winning thread using different filters. This
- * involves 2 or 3 exchanges on the PowerBus that the model does not
- * support.
- *
- * The parameters represent what is sent on the PowerBus
 */
 bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                            uint8_t nvt_blk, uint32_t nvt_idx,
                            bool cam_ignore, uint8_t priority,
-                           uint32_t logic_serv)
+                           uint32_t logic_serv, bool *precluded)
 {
     XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
-    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
+    XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false };
     uint8_t group_level;
     int count;

     /*
-     * Ask the machine to scan the interrupt controllers for a match
+     * Ask the machine to scan the interrupt controllers for a match.
+     *
+     * For VP-specific notification, we expect at most one match and
+     * one call to the presenters is all we need (abbreviated notify
+     * sequence documented by the architecture).
+     *
+     * For VP-group notification, match_nvt() is the equivalent of the
+     * "histogram" and "poll" commands sent on the power bus to the
+     * presenters. 'count' could be more than one, but we always
+     * select the first match for now. 'precluded' tells if (at least)
+     * one thread matches but can't take the interrupt now because
+     * it's running at a more favored priority. We return the
+     * information to the router so that it can take appropriate
+     * actions (backlog, escalation, broadcast, etc...)
+     *
+     * If we were to implement a better way of dispatching the
+     * interrupt in case of multiple matches (instead of the first
+     * match), we would need a heuristic to elect a thread (for
+     * example, the hardware keeps track of an 'age' in the TIMA) and
+     * a new command to the presenters (the equivalent of the "assign"
+     * power bus command in the documented full notify sequence).
      */
     count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore,
                            priority, logic_serv, &match);
@@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
         group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0;
         trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level);
         xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level);
+    } else {
+        *precluded = match.precluded;
     }

     return !!count;
@@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
     uint8_t nvt_blk;
     uint32_t nvt_idx;
     XiveNVT nvt;
-    bool found;
+    bool found, precluded;

     uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w);
     uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w);
@@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas)
     found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx,
                                   xive_get_field32(END_W7_F0_IGNORE, end.w7),
                                   priority,
-                                  xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7));
-
+                                  xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7),
+                                  &precluded);
+    /* we don't support VP-group notification on P9, so precluded is not used */
     /* TODO: Auto EOI. */

     if (found) {
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -XXX,XX +XXX,XX @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd,
     return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc);
 }

+static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2,
+                                uint32_t vp_mask)
+{
+    return (cam1 & vp_mask) == (cam2 & vp_mask);
+}
+
 /*
  * The thread context register words are in big-endian format.
  */
@@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
     uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
     uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);

-    /*
-     * TODO (PowerNV): ignore mode. The low order bits of the NVT
-     * identifier are ignored in the "CAM" match.
-     */
+    uint32_t vp_mask = 0xFFFFFFFF;

     if (format == 0) {
-        if (cam_ignore == true) {
-            /*
-             * F=0 & i=1: Logical server notification (bits ignored at
-             * the end of the NVT identifier)
-             */
-            qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
-                          nvt_blk, nvt_idx);
-            return -1;
+        /*
+         * i=0: Specific NVT notification
+         * i=1: VP-group notification (bits ignored at the end of the
+         *      NVT identifier)
+         */
+        if (cam_ignore) {
+            vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1);
         }

-        /* F=0 & i=0: Specific NVT notification */
+        /* For VP-group notifications, threads with LGS=0 are excluded */

         /* PHYS ring */
         if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
-            cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
+            !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) &&
+            xive2_vp_match_mask(cam,
+                                xive2_tctx_hw_cam_line(xptr, tctx),
+                                vp_mask)) {
             return TM_QW3_HV_PHYS;
         }

         /* HV POOL ring */
         if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
-            cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
+            !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) &&
+            xive2_vp_match_mask(cam,
+                                xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2),
+                                vp_mask)) {
             return TM_QW2_HV_POOL;
         }

         /* OS ring */
         if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
-            cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
+            !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) &&
+            xive2_vp_match_mask(cam,
+                                xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2),
+                                vp_mask)) {
             return TM_QW1_OS;
         }
     } else {
         /* F=1 : User level Event-Based Branch (EBB) notification */

+        /* FIXME: what if cam_ignore and LGS = 0 ? */
         /* USER ring */
         if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
             (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
@@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
     return -1;
 }

+bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority)
+{
+    uint8_t *regs = &tctx->regs[ring];
+
+    /*
+     * The xive2_presenter_tctx_match() above tells if there's a match
+     * but for VP-group notification, we still need to look at the
+     * priority to know if the thread can take the interrupt now or if
+     * it is precluded.
+     */
+    if (priority < regs[TM_CPPR]) {
+        return false;
+    }
+    return true;
+}
+
 static void xive2_router_realize(DeviceState *dev, Error **errp)
 {
     Xive2Router *xrtr = XIVE2_ROUTER(dev);
@@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
     Xive2End end;
     uint8_t priority;
     uint8_t format;
-    bool found;
+    bool found, precluded;
     Xive2Nvp nvp;
     uint8_t nvp_blk;
     uint32_t nvp_idx;
@@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
     found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
                                   xive2_end_is_ignore(&end),
                                   priority,
-                                  xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));
+                                  xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7),
+                                  &precluded);

     /* TODO: Auto EOI. */

--
2.43.0
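
A quick way to see what the masked CAM comparison above does in practice: the
helper reduces group matching to an equality test under a mask. The following
standalone sketch (plain C, hypothetical CAM values, independent of the QEMU
sources) shows a group of 8 VPs matching on the high-order bits only:

#include <stdint.h>
#include <stdio.h>

/* Equality of two CAM lines under a mask, as in xive2_vp_match_mask() */
static int vp_match_mask(uint32_t cam1, uint32_t cam2, uint32_t vp_mask)
{
    return (cam1 & vp_mask) == (cam2 & vp_mask);
}

int main(void)
{
    /* Hypothetical group of 8 VPs: the low 3 bits of the index are ignored */
    uint32_t vp_mask = ~(uint32_t)(8 - 1);

    printf("%d\n", vp_match_mask(0x40, 0x43, vp_mask)); /* 1: in the group  */
    printf("%d\n", vp_match_mask(0x40, 0x48, vp_mask)); /* 0: outside of it */
    return 0;
}
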
New patch | |||
From: Frederic Barrat <fbarrat@linux.ibm.com>

The NSR has a (so far unused) grouping level field. When an interrupt
is presented, that field tells the hypervisor or OS if the interrupt
is for an individual VP or for a VP-group/crowd. This patch reworks
the presentation API to allow setting/unsetting the level when
raising/accepting an interrupt.

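As an illustration of the NSR layout this implies, here is a minimal sketch in
plain C. It assumes the P10-style byte layout used by the patch (exception
indication in the top bits, 6-bit group level in bits 2-7); the macro names
are local to the sketch, not the QEMU ones:

#include <assert.h>
#include <stdint.h>

#define NSR_EO       0x80u  /* OS ring exception bit (PPC bit 0 = MSB) */
#define NSR_GRP_LVL  0x3Fu  /* 6-bit group level (PPC bits 2-7)        */

/* Raise an OS-ring exception, tagging it with a group level */
static uint8_t nsr_os_raise(uint8_t group_level)
{
    return NSR_EO | (group_level & NSR_GRP_LVL);
}

int main(void)
{
    uint8_t nsr = nsr_os_raise(4);                /* group of 16 VPs (2^4)  */
    assert(nsr & NSR_EO);                         /* an exception is pending */
    assert((nsr & NSR_GRP_LVL) == 4);             /* and it is a group one   */
    assert((nsr_os_raise(0) & NSR_GRP_LVL) == 0); /* level 0: VP-specific    */
    return 0;
}
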
It also renames xive_tctx_ipb_update() to xive_tctx_pipr_update() as
the IPB is only used for VP-specific targets, whereas the PIPR always
needs to be updated.

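For context on the IPB/PIPR distinction: the IPB is a bitmap of pending
priorities and the PIPR caches the single most favored one. A rough
standalone model of the conversion, assuming QEMU's convention that priority
0 is the most favored and that an empty IPB maps to 0xFF:

#include <stdint.h>
#include <stdio.h>

#define XIVE_PRIORITY_MAX 7

/* Priority p (0 = most favored) sets bit (7 - p) of the 8-bit IPB */
static uint8_t priority_to_ipb(uint8_t priority)
{
    return priority > XIVE_PRIORITY_MAX ?
        0 : 1 << (XIVE_PRIORITY_MAX - priority);
}

/* PIPR holds the most favored pending priority, 0xFF when nothing pends */
static uint8_t ipb_to_pipr(uint8_t ipb)
{
    return ipb ? __builtin_clz((uint32_t)ipb << 24) : 0xFF;
}

int main(void)
{
    uint8_t ipb = priority_to_ipb(5) | priority_to_ipb(2);
    printf("PIPR = %d\n", ipb_to_pipr(ipb));  /* prints "PIPR = 2" */
    return 0;
}
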
Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
Signed-off-by: Michael Kowal <kowal@linux.ibm.com>
---
 include/hw/ppc/xive.h      |  9 +++-
 include/hw/ppc/xive_regs.h | 25 ++++++++---
 hw/intc/xive.c             | 88 ++++++++++++++++++++++----------------
 hw/intc/xive2.c            | 19 ++++----
 hw/intc/trace-events       |  2 +-
 5 files changed, 90 insertions(+), 53 deletions(-)

diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -XXX,XX +XXX,XX @@ static inline uint8_t xive_priority_to_ipb(uint8_t priority)
         0 : 1 << (XIVE_PRIORITY_MAX - priority);
 }

+static inline uint8_t xive_priority_to_pipr(uint8_t priority)
+{
+    return priority > XIVE_PRIORITY_MAX ? 0xFF : priority;
+}
+
 /*
  * Convert an Interrupt Pending Buffer (IPB) register to a Pending
  * Interrupt Priority Register (PIPR), which contains the priority of
@@ -XXX,XX +XXX,XX @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, GString *buf);
 Object *xive_tctx_create(Object *cpu, XivePresenter *xptr, Error **errp);
 void xive_tctx_reset(XiveTCTX *tctx);
 void xive_tctx_destroy(XiveTCTX *tctx);
-void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb);
+void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
+                           uint8_t group_level);
 void xive_tctx_reset_signal(XiveTCTX *tctx, uint8_t ring);
+void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level);

 /*
  * KVM XIVE device helpers
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -XXX,XX +XXX,XX @@
  * access to the different fields.
  *
  *
- * Copyright (c) 2016-2018, IBM Corporation.
+ * Copyright (c) 2016-2024, IBM Corporation.
  *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
  */

 #ifndef PPC_XIVE_REGS_H
@@ -XXX,XX +XXX,XX @@
 #define TM_SPC_PULL_PHYS_CTX_OL 0xc38 /* Pull phys ctx to odd cache line */
 /* XXX more... */

-/* NSR fields for the various QW ack types */
+/*
+ * NSR fields for the various QW ack types
+ *
+ * P10 has an extra bit in QW3 for the group level instead of the
+ * reserved 'i' bit. Since it is not used and we don't support group
+ * interrupts on P9, we use the P10 definition for the group level so
+ * that we can have common macros for the NSR
+ */
 #define TM_QW0_NSR_EB           PPC_BIT8(0)
 #define TM_QW1_NSR_EO           PPC_BIT8(0)
 #define TM_QW3_NSR_HE           PPC_BITMASK8(0, 1)
@@ -XXX,XX +XXX,XX @@
 #define TM_QW3_NSR_HE_POOL      1
 #define TM_QW3_NSR_HE_PHYS      2
 #define TM_QW3_NSR_HE_LSI       3
-#define TM_QW3_NSR_I            PPC_BIT8(2)
-#define TM_QW3_NSR_GRP_LVL      PPC_BIT8(3, 7)
+#define TM_NSR_GRP_LVL          PPC_BITMASK8(2, 7)
+/*
+ * On P10, the format of the 6-bit group level is: 2 bits for the
+ * crowd size and 4 bits for the group size. Since group/crowd size is
+ * always a power of 2, we encode the log. For example, group_level=4
+ * means crowd size = 0 and group size = 16 (2^4)
+ * Same encoding is used in the NVP and NVGC structures for
+ * PGoFirst and PGoNext fields
+ */

 /*
  * EAS (Event Assignment Structure)
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -XXX,XX +XXX,XX @@
  * XIVE Thread Interrupt Management context
  */

-
-static uint8_t exception_mask(uint8_t ring)
-{
-    switch (ring) {
-    case TM_QW1_OS:
-        return TM_QW1_NSR_EO;
-    case TM_QW3_HV_PHYS:
-        return TM_QW3_NSR_HE;
-    default:
-        g_assert_not_reached();
-    }
-}
-
 static qemu_irq xive_tctx_output(XiveTCTX *tctx, uint8_t ring)
 {
     switch (ring) {
@@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
 {
     uint8_t *regs = &tctx->regs[ring];
     uint8_t nsr = regs[TM_NSR];
-    uint8_t mask = exception_mask(ring);

     qemu_irq_lower(xive_tctx_output(tctx, ring));

-    if (regs[TM_NSR] & mask) {
+    if (regs[TM_NSR] != 0) {
         uint8_t cppr = regs[TM_PIPR];
         uint8_t alt_ring;
         uint8_t *alt_regs;
@@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)

         regs[TM_CPPR] = cppr;

-        /* Reset the pending buffer bit */
-        alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
+        /*
+         * If the interrupt was for a specific VP, reset the pending
+         * buffer bit, otherwise clear the logical server indicator
+         */
+        if (regs[TM_NSR] & TM_NSR_GRP_LVL) {
+            regs[TM_NSR] &= ~TM_NSR_GRP_LVL;
+        } else {
+            alt_regs[TM_IPB] &= ~xive_priority_to_ipb(cppr);
+        }

-        /* Drop Exception bit */
-        regs[TM_NSR] &= ~mask;
+        /* Drop the exception bit and any group/crowd */
+        regs[TM_NSR] = 0;

         trace_xive_tctx_accept(tctx->cs->cpu_index, alt_ring,
                                alt_regs[TM_IPB], regs[TM_PIPR],
@@ -XXX,XX +XXX,XX @@ static uint64_t xive_tctx_accept(XiveTCTX *tctx, uint8_t ring)
     return ((uint64_t)nsr << 8) | regs[TM_CPPR];
 }

-static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
+void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring, uint8_t group_level)
 {
     /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
     uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
@@ -XXX,XX +XXX,XX @@ static void xive_tctx_notify(XiveTCTX *tctx, uint8_t ring)
     if (alt_regs[TM_PIPR] < alt_regs[TM_CPPR]) {
         switch (ring) {
         case TM_QW1_OS:
-            regs[TM_NSR] |= TM_QW1_NSR_EO;
+            regs[TM_NSR] = TM_QW1_NSR_EO | (group_level & 0x3F);
             break;
         case TM_QW2_HV_POOL:
-            alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6);
+            alt_regs[TM_NSR] = (TM_QW3_NSR_HE_POOL << 6) | (group_level & 0x3F);
             break;
         case TM_QW3_HV_PHYS:
-            regs[TM_NSR] |= (TM_QW3_NSR_HE_PHYS << 6);
+            regs[TM_NSR] = (TM_QW3_NSR_HE_PHYS << 6) | (group_level & 0x3F);
             break;
         default:
             g_assert_not_reached();
@@ -XXX,XX +XXX,XX @@ static void xive_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr)
     regs[TM_PIPR] = pipr_min;

     /* CPPR has changed, check if we need to raise a pending exception */
-    xive_tctx_notify(tctx, ring_min);
+    xive_tctx_notify(tctx, ring_min, 0);
 }

-void xive_tctx_ipb_update(XiveTCTX *tctx, uint8_t ring, uint8_t ipb)
-{
+void xive_tctx_pipr_update(XiveTCTX *tctx, uint8_t ring, uint8_t priority,
+                           uint8_t group_level)
+{
+    /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */
+    uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring;
+    uint8_t *alt_regs = &tctx->regs[alt_ring];
     uint8_t *regs = &tctx->regs[ring];

-    regs[TM_IPB] |= ipb;
-    regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
-    xive_tctx_notify(tctx, ring);
-}
+    if (group_level == 0) {
+        /* VP-specific */
+        regs[TM_IPB] |= xive_priority_to_ipb(priority);
+        alt_regs[TM_PIPR] = xive_ipb_to_pipr(regs[TM_IPB]);
+    } else {
+        /* VP-group */
+        alt_regs[TM_PIPR] = xive_priority_to_pipr(priority);
+    }
+    xive_tctx_notify(tctx, ring, group_level);
+}

 /*
  * XIVE Thread Interrupt Management Area (TIMA)
@@ -XXX,XX +XXX,XX @@ static void xive_tm_set_os_lgs(XivePresenter *xptr, XiveTCTX *tctx,
 }

 /*
- * Adjust the IPB to allow a CPU to process event queues of other
+ * Adjust the PIPR to allow a CPU to process event queues of other
  * priorities during one physical interrupt cycle.
  */
 static void xive_tm_set_os_pending(XivePresenter *xptr, XiveTCTX *tctx,
                                    hwaddr offset, uint64_t value, unsigned size)
 {
-    xive_tctx_ipb_update(tctx, TM_QW1_OS, xive_priority_to_ipb(value & 0xff));
+    xive_tctx_pipr_update(tctx, TM_QW1_OS, value & 0xff, 0);
 }

 static void xive_os_cam_decode(uint32_t cam, uint8_t *nvt_blk,
@@ -XXX,XX +XXX,XX @@ static void xive_tctx_need_resend(XiveRouter *xrtr, XiveTCTX *tctx,
         /* Reset the NVT value */
         nvt.w4 = xive_set_field32(NVT_W4_IPB, nvt.w4, 0);
         xive_router_write_nvt(xrtr, nvt_blk, nvt_idx, &nvt, 4);
+
+        uint8_t *regs = &tctx->regs[TM_QW1_OS];
+        regs[TM_IPB] |= ipb;
     }
+
     /*
-     * Always call xive_tctx_ipb_update(). Even if there were no
+     * Always call xive_tctx_pipr_update(). Even if there were no
      * escalation triggered, there could be a pending interrupt which
      * was saved when the context was pulled and that we need to take
      * into account by recalculating the PIPR (which is not
      * saved/restored).
      * It will also raise the External interrupt signal if needed.
      */
-    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+    xive_tctx_pipr_update(tctx, TM_QW1_OS, 0xFF, 0); /* fxb */
 }

 /*
@@ -XXX,XX +XXX,XX @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
     return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
 }

+static uint8_t xive_get_group_level(uint32_t nvp_index)
+{
+    /* FIXME add crowd encoding */
+    return ctz32(~nvp_index) + 1;
+}
+
 /*
  * The thread context register words are in big-endian format.
  */
@@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
 {
     XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
     XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
+    uint8_t group_level;
     int count;

     /*
@@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,

     /* handle CPU exception delivery */
     if (count) {
-        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring);
-        xive_tctx_ipb_update(match.tctx, match.ring,
-                             xive_priority_to_ipb(priority));
+        group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0;
+        trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level);
+        xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level);
     }

     return !!count;
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/xive2.c
+++ b/hw/intc/xive2.c
@@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
                                    uint8_t nvp_blk, uint32_t nvp_idx,
                                    bool do_restore)
 {
-    Xive2Nvp nvp;
     uint8_t ipb;
+    uint8_t backlog_level;
+    uint8_t backlog_prio;
+    uint8_t *regs = &tctx->regs[TM_QW1_OS];
+    Xive2Nvp nvp;

     /*
      * Grab the associated thread interrupt context registers in the
@@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
         nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
         xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
     }
+    regs[TM_IPB] = ipb;
+    backlog_prio = xive_ipb_to_pipr(ipb);
+    backlog_level = 0;
+
     /*
-     * Always call xive_tctx_ipb_update(). Even if there were no
-     * escalation triggered, there could be a pending interrupt which
-     * was saved when the context was pulled and that we need to take
-     * into account by recalculating the PIPR (which is not
-     * saved/restored).
-     * It will also raise the External interrupt signal if needed.
+     * Compute the PIPR based on the restored state.
+     * It will raise the External interrupt signal if needed.
      */
-    xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+    xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level);
 }

 /*
diff --git a/hw/intc/trace-events b/hw/intc/trace-events
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/trace-events
+++ b/hw/intc/trace-events
@@ -XXX,XX +XXX,XX @@ xive_router_end_notify(uint8_t end_blk, uint32_t end_idx, uint32_t end_data) "EN
 xive_router_end_escalate(uint8_t end_blk, uint32_t end_idx, uint8_t esc_blk, uint32_t esc_idx, uint32_t end_data) "END 0x%02x/0x%04x -> escalate END 0x%02x/0x%04x data 0x%08x"
 xive_tctx_tm_write(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
 xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t value) "target=%d @0x%"PRIx64" sz=%d val=0x%" PRIx64
-xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring) "found NVT 0x%x/0x%x ring=0x%x"
+xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d"
 xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64

 # pnv_xive.c
--
2.43.0
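
To make the 6-bit level encoding in the xive_regs.h comment above concrete,
here is a small sketch that extracts the two logs. It assumes the crowd log
sits in the upper 2 bits and the group log in the lower 4 bits of the level,
matching the comment's example where level 4 means no crowd and a group of 16:

#include <stdint.h>
#include <stdio.h>

/* Upper 2 bits: log2 of the crowd size; lower 4 bits: log2 of the group size */
static unsigned level_crowd_log(uint8_t level) { return (level >> 4) & 0x3; }
static unsigned level_group_log(uint8_t level) { return level & 0xF; }

int main(void)
{
    uint8_t level = 4;
    /* level 4: crowd log 0 (no crowd) and group log 4, i.e. 2^4 = 16 VPs */
    printf("crowd log %u, group of %u VPs\n",
           level_crowd_log(level), 1u << level_group_log(level));
    return 0;
}
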
New patch | |||
From: Frederic Barrat <fbarrat@linux.ibm.com>

If an END has the 'i' bit set (ignore), then it targets a group of
VPs. The size of the group depends on the VP index of the target
(first 0 found when looking at the least significant bits of the
index) so a mask is applied to the VP index of a running thread to
know if we have a match.

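A standalone sketch of that index rule (plain C, hypothetical index value).
The group size follows from the run of 1s at the bottom of the index, and the
resulting mask is what gets applied to both CAM lines in the comparison:

#include <stdint.h>
#include <stdio.h>

/* First 0 from the LSB ends the run of 1s; the run length gives the size */
static uint32_t vpgroup_size(uint32_t nvp_index)
{
    return 1u << (__builtin_ctz(~nvp_index) + 1);
}

int main(void)
{
    uint32_t idx  = 0x23;               /* binary ...0100011: two trailing 1s */
    uint32_t size = vpgroup_size(idx);  /* 1 << (2 + 1) = 8                   */
    uint32_t mask = ~(size - 1);        /* used for the masked CAM match      */

    printf("size=%u mask=0x%08x\n", size, mask); /* size=8 mask=0xfffffff8 */
    return 0;
}
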
Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com>
Signed-off-by: Michael Kowal <kowal@linux.ibm.com>
---
 include/hw/ppc/xive.h  |  5 +++-
 include/hw/ppc/xive2.h |  7 ++---
 hw/intc/pnv_xive2.c    | 38 +++++++++++++++---
 hw/intc/xive.c         | 56 +++++++++++++++++++++++++-----------
 hw/intc/xive2.c        | 65 ++++++++++++++++++++++++++++++------------
 5 files changed, 118 insertions(+), 53 deletions(-)

diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas);
 typedef struct XiveTCTXMatch {
     XiveTCTX *tctx;
     uint8_t ring;
+    bool precluded;
 } XiveTCTXMatch;

 #define TYPE_XIVE_PRESENTER "xive-presenter"
@@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
 bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                            uint8_t nvt_blk, uint32_t nvt_idx,
                            bool cam_ignore, uint8_t priority,
-                           uint32_t logic_serv);
+                           uint32_t logic_serv, bool *precluded);
+
+uint32_t xive_get_vpgroup_size(uint32_t nvp_index);

 /*
  * XIVE Fabric (Interface between Interrupt Controller and Machine)
diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h
index XXXXXXX..XXXXXXX 100644
--- a/include/hw/ppc/xive2.h
+++ b/include/hw/ppc/xive2.h
@@ -XXX,XX +XXX,XX @@
 /*
  * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
  *
- * Copyright (c) 2019-2022, IBM Corporation.
- *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * Copyright (c) 2019-2024, IBM Corporation.
  *
+ * SPDX-License-Identifier: GPL-2.0-or-later
 */

 #ifndef PPC_XIVE2_H
@@ -XXX,XX +XXX,XX @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
                               hwaddr offset, unsigned size);
 void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
                              hwaddr offset, uint64_t value, unsigned size);
+bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority);
 void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx,
                             hwaddr offset, uint64_t value, unsigned size);
 void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx,
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/pnv_xive2.c
+++ b/hw/intc/pnv_xive2.c
@@ -XXX,XX +XXX,XX @@
 /*
  * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
  *
- * Copyright (c) 2019-2022, IBM Corporation.
+ * Copyright (c) 2019-2024, IBM Corporation.
  *
- * This code is licensed under the GPL version 2 or later. See the
- * COPYING file in the top-level directory.
+ * SPDX-License-Identifier: GPL-2.0-or-later
 */

 #include "qemu/osdep.h"
@@ -XXX,XX +XXX,XX @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
                                                   logic_serv);
             }

-            /*
-             * Save the context and follow on to catch duplicates,
-             * that we don't support yet.
-             */
             if (ring != -1) {
-                if (match->tctx) {
+                /*
+                 * For VP-specific match, finding more than one is a
+                 * problem. For group notification, it's possible.
+                 */
+                if (!cam_ignore && match->tctx) {
                     qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
                                   "thread context NVT %x/%x\n",
                                   nvt_blk, nvt_idx);
-                    return false;
+                    /* Should set a FIR if we ever model it */
+                    return -1;
+                }
+                /*
+                 * For a group notification, we need to know if the
+                 * match is precluded first by checking the current
+                 * thread priority. If the interrupt can be delivered,
+                 * we always notify the first match (for now).
+                 */
+                if (cam_ignore &&
+                    xive2_tm_irq_precluded(tctx, ring, priority)) {
+                    match->precluded = true;
+                } else {
+                    if (!match->tctx) {
+                        match->ring = ring;
+                        match->tctx = tctx;
+                    }
+                    count++;
                 }
-
-                match->ring = ring;
-                match->tctx = tctx;
-                count++;
             }
         }
     }
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index XXXXXXX..XXXXXXX 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -XXX,XX +XXX,XX @@ static uint32_t xive_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
     return xive_nvt_cam_line(blk, 1 << 7 | (pir & 0x7f));
 }

+uint32_t xive_get_vpgroup_size(uint32_t nvp_index)
+{
+    /*
+     * Group size is a power of 2. The position of the first 0
+     * (starting with the least significant bits) in the NVP index
+     * gives the size of the group.
+     */
+    return 1 << (ctz32(~nvp_index) + 1);
+}
+
 static uint8_t xive_get_group_level(uint32_t nvp_index)
 {
     /* FIXME add crowd encoding */
@@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
 /*
  * This is our simple Xive Presenter Engine model. It is merged in the
  * Router as it does not require an extra object.
- *
- * It receives notification requests sent by the IVRE to find one
- * matching NVT (or more) dispatched on the processor threads. In case
- * of a single NVT notification, the process is abbreviated and the
- * thread is signaled if a match is found. In case of a logical server
- * notification (bits ignored at the end of the NVT identifier), the
- * IVPE and IVRE select a winning thread using different filters. This
- * involves 2 or 3 exchanges on the PowerBus that the model does not
- * support.
- *
- * The parameters represent what is sent on the PowerBus
 */
 bool xive_presenter_notify(XiveFabric *xfb, uint8_t format,
                            uint8_t nvt_blk, uint32_t nvt_idx,
                            bool cam_ignore, uint8_t priority,
-                           uint32_t logic_serv)
+                           uint32_t logic_serv, bool *precluded)
 {
     XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb);
-    XiveTCTXMatch match = { .tctx = NULL, .ring = 0 };
+    XiveTCTXMatch match = { .tctx = NULL, .ring = 0, .precluded = false };
     uint8_t group_level;
     int count;

     /*
-     * Ask the machine to scan the interrupt controllers for a match
+     * Ask the machine to scan the interrupt controllers for a match.
+     *
+     * For VP-specific notification, we expect at most one match and
+     * one call to the presenters is all we need (abbreviated notify
+     * sequence documented by the architecture).
+     *
+     * For VP-group notification, match_nvt() is the equivalent of the
188 | + * "histogram" and "poll" commands sent to the power bus to the | ||
+     * presenters. 'count' could be more than one, but we always
+     * select the first match for now. 'precluded' tells if (at least)
+     * one thread matches but can't take the interrupt now because
+     * it's running at a more favored priority. We return the
+     * information to the router so that it can take appropriate
+     * actions (backlog, escalation, broadcast, etc...)
+     *
+     * If we were to implement a better way of dispatching the
+     * interrupt in case of multiple matches (instead of the first
+     * match), we would need a heuristic to elect a thread (for
+     * example, the hardware keeps track of an 'age' in the TIMA) and
+     * a new command to the presenters (the equivalent of the "assign"
+     * power bus command in the documented full notify sequence).
202 | */ | ||
203 | count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore, | ||
204 | priority, logic_serv, &match); | ||
205 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | ||
206 | group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0; | ||
207 | trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); | ||
208 | xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); | ||
209 | + } else { | ||
210 | + *precluded = match.precluded; | ||
211 | } | ||
212 | |||
213 | return !!count; | ||
214 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) | ||
215 | uint8_t nvt_blk; | ||
216 | uint32_t nvt_idx; | ||
217 | XiveNVT nvt; | ||
218 | - bool found; | ||
219 | + bool found, precluded; | ||
220 | |||
221 | uint8_t end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); | ||
222 | uint32_t end_idx = xive_get_field64(EAS_END_INDEX, eas->w); | ||
223 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) | ||
224 | found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, | ||
225 | xive_get_field32(END_W7_F0_IGNORE, end.w7), | ||
226 | priority, | ||
227 | - xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7)); | ||
228 | - | ||
229 | + xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), | ||
230 | + &precluded); | ||
231 | + /* we don't support VP-group notification on P9, so precluded is not used */ | ||
232 | /* TODO: Auto EOI. */ | ||
233 | |||
234 | if (found) { | ||
235 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
236 | index XXXXXXX..XXXXXXX 100644 | ||
237 | --- a/hw/intc/xive2.c | ||
238 | +++ b/hw/intc/xive2.c | ||
239 | @@ -XXX,XX +XXX,XX @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, | ||
240 | return xrc->write_nvgc(xrtr, crowd, nvgc_blk, nvgc_idx, nvgc); | ||
241 | } | ||
242 | |||
243 | +static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, | ||
244 | + uint32_t vp_mask) | ||
245 | +{ | ||
246 | + return (cam1 & vp_mask) == (cam2 & vp_mask); | ||
247 | +} | ||
248 | + | ||
249 | /* | ||
250 | * The thread context register words are in big-endian format. | ||
251 | */ | ||
252 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
253 | uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); | ||
254 | uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); | ||
255 | |||
256 | - /* | ||
257 | - * TODO (PowerNV): ignore mode. The low order bits of the NVT | ||
258 | - * identifier are ignored in the "CAM" match. | ||
259 | - */ | ||
260 | + uint32_t vp_mask = 0xFFFFFFFF; | ||
261 | |||
262 | if (format == 0) { | ||
263 | - if (cam_ignore == true) { | ||
264 | - /* | ||
265 | - * F=0 & i=1: Logical server notification (bits ignored at | ||
266 | - * the end of the NVT identifier) | ||
267 | - */ | ||
268 | - qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n", | ||
269 | - nvt_blk, nvt_idx); | ||
270 | - return -1; | ||
271 | + /* | ||
272 | + * i=0: Specific NVT notification | ||
273 | + * i=1: VP-group notification (bits ignored at the end of the | ||
274 | + * NVT identifier) | ||
275 | + */ | ||
276 | + if (cam_ignore) { | ||
277 | + vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1); | ||
278 | } | ||
279 | |||
280 | - /* F=0 & i=0: Specific NVT notification */ | ||
281 | + /* For VP-group notifications, threads with LGS=0 are excluded */ | ||
282 | |||
283 | /* PHYS ring */ | ||
284 | if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) && | ||
285 | - cam == xive2_tctx_hw_cam_line(xptr, tctx)) { | ||
286 | + !(cam_ignore && tctx->regs[TM_QW3_HV_PHYS + TM_LGS] == 0) && | ||
287 | + xive2_vp_match_mask(cam, | ||
288 | + xive2_tctx_hw_cam_line(xptr, tctx), | ||
289 | + vp_mask)) { | ||
290 | return TM_QW3_HV_PHYS; | ||
291 | } | ||
292 | |||
293 | /* HV POOL ring */ | ||
294 | if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) && | ||
295 | - cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) { | ||
296 | + !(cam_ignore && tctx->regs[TM_QW2_HV_POOL + TM_LGS] == 0) && | ||
297 | + xive2_vp_match_mask(cam, | ||
298 | + xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2), | ||
299 | + vp_mask)) { | ||
300 | return TM_QW2_HV_POOL; | ||
301 | } | ||
302 | |||
303 | /* OS ring */ | ||
304 | if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && | ||
305 | - cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) { | ||
306 | + !(cam_ignore && tctx->regs[TM_QW1_OS + TM_LGS] == 0) && | ||
307 | + xive2_vp_match_mask(cam, | ||
308 | + xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2), | ||
309 | + vp_mask)) { | ||
310 | return TM_QW1_OS; | ||
311 | } | ||
312 | } else { | ||
313 | /* F=1 : User level Event-Based Branch (EBB) notification */ | ||
314 | |||
315 | + /* FIXME: what if cam_ignore and LGS = 0 ? */ | ||
316 | /* USER ring */ | ||
317 | if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) && | ||
318 | (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) && | ||
319 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
320 | return -1; | ||
321 | } | ||
322 | |||
323 | +bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | ||
324 | +{ | ||
325 | + uint8_t *regs = &tctx->regs[ring]; | ||
326 | + | ||
327 | + /* | ||
328 | + * The xive2_presenter_tctx_match() above tells if there's a match | ||
329 | + * but for VP-group notification, we still need to look at the | ||
330 | + * priority to know if the thread can take the interrupt now or if | ||
331 | + * it is precluded. | ||
332 | + */ | ||
333 | + if (priority < regs[TM_CPPR]) { | ||
334 | + return false; | ||
335 | + } | ||
336 | + return true; | ||
337 | +} | ||
338 | + | ||
339 | static void xive2_router_realize(DeviceState *dev, Error **errp) | ||
340 | { | ||
341 | Xive2Router *xrtr = XIVE2_ROUTER(dev); | ||
342 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
343 | Xive2End end; | ||
344 | uint8_t priority; | ||
345 | uint8_t format; | ||
346 | - bool found; | ||
347 | + bool found, precluded; | ||
348 | Xive2Nvp nvp; | ||
349 | uint8_t nvp_blk; | ||
350 | uint32_t nvp_idx; | ||
351 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
352 | found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx, | ||
353 | xive2_end_is_ignore(&end), | ||
354 | priority, | ||
355 | - xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7)); | ||
356 | + xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), | ||
357 | + &precluded); | ||
358 | |||
359 | /* TODO: Auto EOI. */ | ||
360 | |||
361 | -- | ||
362 | 2.43.0 | diff view generated by jsdifflib |
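
One convention worth keeping in mind when reading xive2_tm_irq_precluded() in
the patch above: XIVE priorities are inverted, a numerically lower value is
more favored, so an interrupt is only delivered when its priority is strictly
below the thread's CPPR. A toy model of the check (plain C, not the QEMU
code):

#include <stdint.h>
#include <stdio.h>

/* Lower number = more favored; CPPR is the thread's current ceiling */
static int irq_precluded(uint8_t cppr, uint8_t priority)
{
    return priority >= cppr;  /* note: equal priority does NOT interrupt */
}

int main(void)
{
    uint8_t cppr = 4;
    printf("prio 2: %s\n", irq_precluded(cppr, 2) ? "precluded" : "delivered");
    printf("prio 4: %s\n", irq_precluded(cppr, 4) ? "precluded" : "delivered");
    return 0;
}
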
From: Frederic Barrat <fbarrat@linux.ibm.com>

When a group interrupt cannot be delivered, we need to:
- increment the backlog counter for the group in the NVG table
  (if the END is configured to keep a backlog); see the counter
  sketch after this list.
- start a broadcast operation to set the LSMFB field on matching CPUs
  which can't take the interrupt now because they're running at too
  high a priority.

9 | 9 | ||
10 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 10 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
11 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 11 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
12 | --- | 12 | --- |
13 | include/hw/ppc/xive.h | 5 ++ | 13 | include/hw/ppc/xive.h | 5 ++ |
14 | include/hw/ppc/xive2.h | 1 + | 14 | include/hw/ppc/xive2.h | 1 + |
15 | hw/intc/pnv_xive2.c | 42 +++++++++++++++++ | 15 | hw/intc/pnv_xive2.c | 42 +++++++++++++++++ |
16 | hw/intc/xive2.c | 105 +++++++++++++++++++++++++++++++++++------ | 16 | hw/intc/xive2.c | 105 +++++++++++++++++++++++++++++++++++------ |
17 | hw/ppc/pnv.c | 18 +++++++ | 17 | hw/ppc/pnv.c | 18 +++++++ |
18 | 5 files changed, 156 insertions(+), 15 deletions(-) | 18 | 5 files changed, 156 insertions(+), 15 deletions(-) |
19 | 19 | ||
20 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | 20 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h |
21 | index XXXXXXX..XXXXXXX 100644 | 21 | index XXXXXXX..XXXXXXX 100644 |
22 | --- a/include/hw/ppc/xive.h | 22 | --- a/include/hw/ppc/xive.h |
23 | +++ b/include/hw/ppc/xive.h | 23 | +++ b/include/hw/ppc/xive.h |
24 | @@ -XXX,XX +XXX,XX @@ struct XivePresenterClass { | 24 | @@ -XXX,XX +XXX,XX @@ struct XivePresenterClass { |
25 | uint32_t logic_serv, XiveTCTXMatch *match); | 25 | uint32_t logic_serv, XiveTCTXMatch *match); |
26 | bool (*in_kernel)(const XivePresenter *xptr); | 26 | bool (*in_kernel)(const XivePresenter *xptr); |
27 | uint32_t (*get_config)(XivePresenter *xptr); | 27 | uint32_t (*get_config)(XivePresenter *xptr); |
28 | + int (*broadcast)(XivePresenter *xptr, | 28 | + int (*broadcast)(XivePresenter *xptr, |
29 | + uint8_t nvt_blk, uint32_t nvt_idx, | 29 | + uint8_t nvt_blk, uint32_t nvt_idx, |
30 | + uint8_t priority); | 30 | + uint8_t priority); |
31 | }; | 31 | }; |
32 | 32 | ||
33 | int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 33 | int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
34 | @@ -XXX,XX +XXX,XX @@ struct XiveFabricClass { | 34 | @@ -XXX,XX +XXX,XX @@ struct XiveFabricClass { |
35 | uint8_t nvt_blk, uint32_t nvt_idx, | 35 | uint8_t nvt_blk, uint32_t nvt_idx, |
36 | bool cam_ignore, uint8_t priority, | 36 | bool cam_ignore, uint8_t priority, |
37 | uint32_t logic_serv, XiveTCTXMatch *match); | 37 | uint32_t logic_serv, XiveTCTXMatch *match); |
38 | + int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, | 38 | + int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, |
39 | + uint8_t priority); | 39 | + uint8_t priority); |
40 | }; | 40 | }; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | 43 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h |
44 | index XXXXXXX..XXXXXXX 100644 | 44 | index XXXXXXX..XXXXXXX 100644 |
45 | --- a/include/hw/ppc/xive2.h | 45 | --- a/include/hw/ppc/xive2.h |
46 | +++ b/include/hw/ppc/xive2.h | 46 | +++ b/include/hw/ppc/xive2.h |
47 | @@ -XXX,XX +XXX,XX @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | 47 | @@ -XXX,XX +XXX,XX @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, |
48 | void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, | 48 | void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, |
49 | hwaddr offset, uint64_t value, unsigned size); | 49 | hwaddr offset, uint64_t value, unsigned size); |
50 | bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); | 50 | bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); |
51 | +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority); | 51 | +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority); |
52 | void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, | 52 | void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, |
53 | hwaddr offset, uint64_t value, unsigned size); | 53 | hwaddr offset, uint64_t value, unsigned size); |
54 | void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, | 54 | void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, |
55 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | 55 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c |
56 | index XXXXXXX..XXXXXXX 100644 | 56 | index XXXXXXX..XXXXXXX 100644 |
57 | --- a/hw/intc/pnv_xive2.c | 57 | --- a/hw/intc/pnv_xive2.c |
58 | +++ b/hw/intc/pnv_xive2.c | 58 | +++ b/hw/intc/pnv_xive2.c |
59 | @@ -XXX,XX +XXX,XX @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) | 59 | @@ -XXX,XX +XXX,XX @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) |
60 | return cfg; | 60 | return cfg; |
61 | } | 61 | } |
62 | 62 | ||
63 | +static int pnv_xive2_broadcast(XivePresenter *xptr, | 63 | +static int pnv_xive2_broadcast(XivePresenter *xptr, |
64 | + uint8_t nvt_blk, uint32_t nvt_idx, | 64 | + uint8_t nvt_blk, uint32_t nvt_idx, |
65 | + uint8_t priority) | 65 | + uint8_t priority) |
66 | +{ | 66 | +{ |
67 | + PnvXive2 *xive = PNV_XIVE2(xptr); | 67 | + PnvXive2 *xive = PNV_XIVE2(xptr); |
68 | + PnvChip *chip = xive->chip; | 68 | + PnvChip *chip = xive->chip; |
69 | + int i, j; | 69 | + int i, j; |
70 | + bool gen1_tima_os = | 70 | + bool gen1_tima_os = |
71 | + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; | 71 | + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; |
72 | + | 72 | + |
73 | + for (i = 0; i < chip->nr_cores; i++) { | 73 | + for (i = 0; i < chip->nr_cores; i++) { |
74 | + PnvCore *pc = chip->cores[i]; | 74 | + PnvCore *pc = chip->cores[i]; |
75 | + CPUCore *cc = CPU_CORE(pc); | 75 | + CPUCore *cc = CPU_CORE(pc); |
76 | + | 76 | + |
77 | + for (j = 0; j < cc->nr_threads; j++) { | 77 | + for (j = 0; j < cc->nr_threads; j++) { |
78 | + PowerPCCPU *cpu = pc->threads[j]; | 78 | + PowerPCCPU *cpu = pc->threads[j]; |
79 | + XiveTCTX *tctx; | 79 | + XiveTCTX *tctx; |
80 | + int ring; | 80 | + int ring; |
81 | + | 81 | + |
82 | + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { | 82 | + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { |
83 | + continue; | 83 | + continue; |
84 | + } | 84 | + } |
85 | + | 85 | + |
86 | + tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); | 86 | + tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); |
87 | + | 87 | + |
88 | + if (gen1_tima_os) { | 88 | + if (gen1_tima_os) { |
89 | + ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | 89 | + ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, |
90 | + nvt_idx, true, 0); | 90 | + nvt_idx, true, 0); |
91 | + } else { | 91 | + } else { |
92 | + ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | 92 | + ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, |
93 | + nvt_idx, true, 0); | 93 | + nvt_idx, true, 0); |
94 | + } | 94 | + } |
95 | + | 95 | + |
96 | + if (ring != -1) { | 96 | + if (ring != -1) { |
97 | + xive2_tm_set_lsmfb(tctx, ring, priority); | 97 | + xive2_tm_set_lsmfb(tctx, ring, priority); |
98 | + } | 98 | + } |
99 | + } | 99 | + } |
100 | + } | 100 | + } |
101 | + return 0; | 101 | + return 0; |
102 | +} | 102 | +} |
103 | + | 103 | + |
104 | static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr) | 104 | static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr) |
105 | { | 105 | { |
106 | return pnv_xive2_block_id(PNV_XIVE2(xrtr)); | 106 | return pnv_xive2_block_id(PNV_XIVE2(xrtr)); |
107 | @@ -XXX,XX +XXX,XX @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data) | 107 | @@ -XXX,XX +XXX,XX @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data) |
108 | 108 | ||
109 | xpc->match_nvt = pnv_xive2_match_nvt; | 109 | xpc->match_nvt = pnv_xive2_match_nvt; |
110 | xpc->get_config = pnv_xive2_presenter_get_config; | 110 | xpc->get_config = pnv_xive2_presenter_get_config; |
111 | + xpc->broadcast = pnv_xive2_broadcast; | 111 | + xpc->broadcast = pnv_xive2_broadcast; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static const TypeInfo pnv_xive2_info = { | 114 | static const TypeInfo pnv_xive2_info = { |
115 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 115 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
116 | index XXXXXXX..XXXXXXX 100644 | 116 | index XXXXXXX..XXXXXXX 100644 |
117 | --- a/hw/intc/xive2.c | 117 | --- a/hw/intc/xive2.c |
118 | +++ b/hw/intc/xive2.c | 118 | +++ b/hw/intc/xive2.c |
119 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority) | 119 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority) |
120 | return val; | 120 | return val; |
121 | } | 121 | } |
122 | 122 | ||
123 | +static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, | 123 | +static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, |
124 | + uint32_t val) | 124 | + uint32_t val) |
125 | +{ | 125 | +{ |
126 | + uint8_t *ptr, i; | 126 | + uint8_t *ptr, i; |
127 | + uint32_t shift; | 127 | + uint32_t shift; |
128 | + | 128 | + |
129 | + if (priority > 7) { | 129 | + if (priority > 7) { |
130 | + return; | 130 | + return; |
131 | + } | 131 | + } |
132 | + | 132 | + |
133 | + if (val > 0xFFFFFF) { | 133 | + if (val > 0xFFFFFF) { |
134 | + val = 0xFFFFFF; | 134 | + val = 0xFFFFFF; |
135 | + } | 135 | + } |
136 | + /* | 136 | + /* |
137 | + * The per-priority backlog counters are 24-bit and the structure | 137 | + * The per-priority backlog counters are 24-bit and the structure |
138 | + * is stored in big endian | 138 | + * is stored in big endian |
139 | + */ | 139 | + */ |
140 | + ptr = (uint8_t *)&nvgc->w2 + priority * 3; | 140 | + ptr = (uint8_t *)&nvgc->w2 + priority * 3; |
141 | + for (i = 0; i < 3; i++, ptr++) { | 141 | + for (i = 0; i < 3; i++, ptr++) { |
142 | + shift = 8 * (2 - i); | 142 | + shift = 8 * (2 - i); |
143 | + *ptr = (val >> shift) & 0xFF; | 143 | + *ptr = (val >> shift) & 0xFF; |
144 | + } | 144 | + } |
145 | +} | 145 | +} |
146 | + | 146 | + |
147 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) | 147 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) |
148 | { | 148 | { |
149 | if (!xive2_eas_is_valid(eas)) { | 149 | if (!xive2_eas_is_valid(eas)) { |
150 | @@ -XXX,XX +XXX,XX @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | 150 | @@ -XXX,XX +XXX,XX @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) |
151 | return true; | 151 | return true; |
152 | } | 152 | } |
153 | 153 | ||
154 | +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority) | 154 | +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority) |
155 | +{ | 155 | +{ |
156 | + uint8_t *regs = &tctx->regs[ring]; | 156 | + uint8_t *regs = &tctx->regs[ring]; |
157 | + | 157 | + |
158 | + /* | 158 | + /* |
159 | + * Called by the router during a VP-group notification when the | 159 | + * Called by the router during a VP-group notification when the |
160 | + * thread matches but can't take the interrupt because it's | 160 | + * thread matches but can't take the interrupt because it's |
161 | + * already running at a more favored priority. It then stores the | 161 | + * already running at a more favored priority. It then stores the |
162 | + * new interrupt priority in the LSMFB field. | 162 | + * new interrupt priority in the LSMFB field. |
163 | + */ | 163 | + */ |
164 | + regs[TM_LSMFB] = priority; | 164 | + regs[TM_LSMFB] = priority; |
165 | +} | 165 | +} |
166 | + | 166 | + |
167 | static void xive2_router_realize(DeviceState *dev, Error **errp) | 167 | static void xive2_router_realize(DeviceState *dev, Error **errp) |
168 | { | 168 | { |
169 | Xive2Router *xrtr = XIVE2_ROUTER(dev); | 169 | Xive2Router *xrtr = XIVE2_ROUTER(dev); |
170 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | 170 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, |
171 | /* | 171 | /* |
172 | * If no matching NVP is dispatched on a HW thread : | 172 | * If no matching NVP is dispatched on a HW thread : |
173 | * - specific VP: update the NVP structure if backlog is activated | 173 | * - specific VP: update the NVP structure if backlog is activated |
174 | - * - logical server : forward request to IVPE (not supported) | 174 | - * - logical server : forward request to IVPE (not supported) |
175 | + * - VP-group: update the backlog counter for that priority in the NVG | 175 | + * - VP-group: update the backlog counter for that priority in the NVG |
176 | */ | 176 | */ |
177 | if (xive2_end_is_backlog(&end)) { | 177 | if (xive2_end_is_backlog(&end)) { |
178 | - uint8_t ipb; | 178 | - uint8_t ipb; |
179 | 179 | ||
180 | if (format == 1) { | 180 | if (format == 1) { |
181 | qemu_log_mask(LOG_GUEST_ERROR, | 181 | qemu_log_mask(LOG_GUEST_ERROR, |
182 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | 182 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, |
183 | return; | 183 | return; |
184 | } | 184 | } |
185 | 185 | ||
186 | - /* | 186 | - /* |
187 | - * Record the IPB in the associated NVP structure for later | 187 | - * Record the IPB in the associated NVP structure for later |
188 | - * use. The presenter will resend the interrupt when the vCPU | 188 | - * use. The presenter will resend the interrupt when the vCPU |
189 | - * is dispatched again on a HW thread. | 189 | - * is dispatched again on a HW thread. |
190 | - */ | 190 | - */ |
191 | - ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | | 191 | - ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | |
192 | - xive_priority_to_ipb(priority); | 192 | - xive_priority_to_ipb(priority); |
193 | - nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | 193 | - nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); |
194 | - xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | 194 | - xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); |
195 | - | 195 | - |
196 | - /* | 196 | - /* |
197 | - * On HW, follows a "Broadcast Backlog" to IVPEs | 197 | - * On HW, follows a "Broadcast Backlog" to IVPEs |
198 | - */ | 198 | - */ |
199 | + if (!xive2_end_is_ignore(&end)) { | 199 | + if (!xive2_end_is_ignore(&end)) { |
200 | + uint8_t ipb; | 200 | + uint8_t ipb; |
201 | + /* | 201 | + /* |
202 | + * Record the IPB in the associated NVP structure for later | 202 | + * Record the IPB in the associated NVP structure for later |
203 | + * use. The presenter will resend the interrupt when the vCPU | 203 | + * use. The presenter will resend the interrupt when the vCPU |
204 | + * is dispatched again on a HW thread. | 204 | + * is dispatched again on a HW thread. |
205 | + */ | 205 | + */ |
206 | + ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | | 206 | + ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | |
207 | + xive_priority_to_ipb(priority); | 207 | + xive_priority_to_ipb(priority); |
208 | + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | 208 | + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); |
209 | + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | 209 | + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); |
210 | + } else { | 210 | + } else { |
211 | + Xive2Nvgc nvg; | 211 | + Xive2Nvgc nvg; |
212 | + uint32_t backlog; | 212 | + uint32_t backlog; |
213 | + | 213 | + |
214 | + /* For groups, the per-priority backlog counters are in the NVG */ | 214 | + /* For groups, the per-priority backlog counters are in the NVG */ |
215 | + if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { | 215 | + if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { |
216 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", | 216 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", |
217 | + nvp_blk, nvp_idx); | 217 | + nvp_blk, nvp_idx); |
218 | + return; | 218 | + return; |
219 | + } | 219 | + } |
220 | + | 220 | + |
221 | + if (!xive2_nvgc_is_valid(&nvg)) { | 221 | + if (!xive2_nvgc_is_valid(&nvg)) { |
222 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", | 222 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", |
223 | + nvp_blk, nvp_idx); | 223 | + nvp_blk, nvp_idx); |
224 | + return; | 224 | + return; |
225 | + } | 225 | + } |
226 | + | 226 | + |
227 | + /* | 227 | + /* |
228 | + * Increment the backlog counter for that priority. | 228 | + * Increment the backlog counter for that priority. |
229 | + * For the precluded case, we only call broadcast the | 229 | + * For the precluded case, we only call broadcast the |
230 | + * first time the counter is incremented. broadcast will | 230 | + * first time the counter is incremented. broadcast will |
231 | + * set the LSMFB field of the TIMA of relevant threads so | 231 | + * set the LSMFB field of the TIMA of relevant threads so |
232 | + * that they know an interrupt is pending. | 232 | + * that they know an interrupt is pending. |
233 | + */ | 233 | + */ |
234 | + backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; | 234 | + backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; |
235 | + xive2_nvgc_set_backlog(&nvg, priority, backlog); | 235 | + xive2_nvgc_set_backlog(&nvg, priority, backlog); |
236 | + xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); | 236 | + xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); |
237 | + | 237 | + |
238 | + if (precluded && backlog == 1) { | 238 | + if (precluded && backlog == 1) { |
239 | + XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); | 239 | + XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); |
240 | + xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); | 240 | + xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); |
241 | + | 241 | + |
242 | + if (!xive2_end_is_precluded_escalation(&end)) { | 242 | + if (!xive2_end_is_precluded_escalation(&end)) { |
243 | + /* | 243 | + /* |
244 | + * The interrupt will be picked up when the | 244 | + * The interrupt will be picked up when the |
245 | + * matching thread lowers its priority level | 245 | + * matching thread lowers its priority level |
246 | + */ | 246 | + */ |
247 | + return; | 247 | + return; |
248 | + } | 248 | + } |
249 | + } | 249 | + } |
250 | + } | 250 | + } |
251 | } | 251 | } |
252 | 252 | ||
253 | do_escalation: | 253 | do_escalation: |
254 | diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c | 254 | diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c |
255 | index XXXXXXX..XXXXXXX 100644 | 255 | index XXXXXXX..XXXXXXX 100644 |
256 | --- a/hw/ppc/pnv.c | 256 | --- a/hw/ppc/pnv.c |
257 | +++ b/hw/ppc/pnv.c | 257 | +++ b/hw/ppc/pnv.c |
258 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | 258 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, |
259 | return total_count; | 259 | return total_count; |
260 | } | 260 | } |
261 | 261 | ||
262 | +static int pnv10_xive_broadcast(XiveFabric *xfb, | 262 | +static int pnv10_xive_broadcast(XiveFabric *xfb, |
263 | + uint8_t nvt_blk, uint32_t nvt_idx, | 263 | + uint8_t nvt_blk, uint32_t nvt_idx, |
264 | + uint8_t priority) | 264 | + uint8_t priority) |
265 | +{ | 265 | +{ |
266 | + PnvMachineState *pnv = PNV_MACHINE(xfb); | 266 | + PnvMachineState *pnv = PNV_MACHINE(xfb); |
267 | + int i; | 267 | + int i; |
268 | + | 268 | + |
269 | + for (i = 0; i < pnv->num_chips; i++) { | 269 | + for (i = 0; i < pnv->num_chips; i++) { |
270 | + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); | 270 | + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); |
271 | + XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); | 271 | + XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); |
272 | + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | 272 | + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); |
273 | + | 273 | + |
274 | + xpc->broadcast(xptr, nvt_blk, nvt_idx, priority); | 274 | + xpc->broadcast(xptr, nvt_blk, nvt_idx, priority); |
275 | + } | 275 | + } |
276 | + return 0; | 276 | + return 0; |
277 | +} | 277 | +} |
278 | + | 278 | + |
279 | static bool pnv_machine_get_big_core(Object *obj, Error **errp) | 279 | static bool pnv_machine_get_big_core(Object *obj, Error **errp) |
280 | { | 280 | { |
281 | PnvMachineState *pnv = PNV_MACHINE(obj); | 281 | PnvMachineState *pnv = PNV_MACHINE(obj); |
282 | @@ -XXX,XX +XXX,XX @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data) | 282 | @@ -XXX,XX +XXX,XX @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data) |
283 | pmc->dt_power_mgt = pnv_dt_power_mgt; | 283 | pmc->dt_power_mgt = pnv_dt_power_mgt; |
284 | 284 | ||
285 | xfc->match_nvt = pnv10_xive_match_nvt; | 285 | xfc->match_nvt = pnv10_xive_match_nvt; |
286 | + xfc->broadcast = pnv10_xive_broadcast; | 286 | + xfc->broadcast = pnv10_xive_broadcast; |
287 | 287 | ||
288 | machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); | 288 | machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); |
289 | } | 289 | } |
290 | -- | 290 | -- |
291 | 2.43.0 | 291 | 2.43.0
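A note on the backlog/broadcast interplay in the patch above: only the
0 -> 1 transition of a per-priority counter triggers the fabric broadcast;
later increments rely on the LSMFB already set in the matching threads. A
condensed, hypothetical rendering of that logic (not the QEMU code itself):

    #include <stdbool.h>
    #include <stdint.h>

    /* Broadcast only fires when a precluded interrupt makes the
     * counter go from 0 to 1. */
    static void backlog_add(uint32_t *counter, bool precluded,
                            void (*broadcast)(void))
    {
        *counter += 1;
        if (precluded && *counter == 1) {
            broadcast();
        }
    }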
... | ... | ||
---|---|---|---|
17 | --- a/hw/intc/xive2.c | 17 | --- a/hw/intc/xive2.c |
18 | +++ b/hw/intc/xive2.c | 18 | +++ b/hw/intc/xive2.c |
19 | @@ -XXX,XX +XXX,XX @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) | 19 | @@ -XXX,XX +XXX,XX @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) |
20 | end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); | 20 | end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); |
21 | } | 21 | } |
22 | 22 | ||
23 | +/* | 23 | +/* |
24 | + * Scan the group chain and return the highest priority and group | 24 | + * Scan the group chain and return the highest priority and group |
25 | + * level of pending group interrupts. | 25 | + * level of pending group interrupts. |
26 | + */ | 26 | + */ |
27 | +static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, | 27 | +static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, |
... | ... | ||
111 | + XivePresenter *xptr = XIVE_PRESENTER(xrtr); | 111 | + XivePresenter *xptr = XIVE_PRESENTER(xrtr); |
112 | + uint8_t ipb, backlog_level, group_level, first_group; | 112 | + uint8_t ipb, backlog_level, group_level, first_group; |
113 | + uint8_t backlog_prio, group_prio; | 113 | + uint8_t backlog_prio, group_prio; |
114 | uint8_t *regs = &tctx->regs[TM_QW1_OS]; | 114 | uint8_t *regs = &tctx->regs[TM_QW1_OS]; |
115 | Xive2Nvp nvp; | 115 | Xive2Nvp nvp; |
116 | 116 | ||
117 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, | 117 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, |
118 | backlog_prio = xive_ipb_to_pipr(ipb); | 118 | backlog_prio = xive_ipb_to_pipr(ipb); |
119 | backlog_level = 0; | 119 | backlog_level = 0; |
120 | 120 | ||
121 | + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); | 121 | + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); |
122 | + if (first_group && regs[TM_LSMFB] < backlog_prio) { | 122 | + if (first_group && regs[TM_LSMFB] < backlog_prio) { |
123 | + group_prio = xive2_presenter_backlog_check(xptr, nvp_blk, nvp_idx, | 123 | + group_prio = xive2_presenter_backlog_check(xptr, nvp_blk, nvp_idx, |
124 | + first_group, &group_level); | 124 | + first_group, &group_level); |
125 | + regs[TM_LSMFB] = group_prio; | 125 | + regs[TM_LSMFB] = group_prio; |
... | ... | ||
136 | - * Compute the PIPR based on the restored state. | 136 | - * Compute the PIPR based on the restored state. |
137 | + * Compute the PIPR based on the restored state. | 137 | + * Compute the PIPR based on the restored state. |
138 | * It will raise the External interrupt signal if needed. | 138 | * It will raise the External interrupt signal if needed. |
139 | */ | 139 | */ |
140 | xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); | 140 | xive_tctx_pipr_update(tctx, TM_QW1_OS, backlog_prio, backlog_level); |
141 | -- | 141 | -- |
142 | 2.43.0 | 142 | 2.43.0
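For reference while reading the hunks above: the IPB is a bitmask with one
bit per priority (priority 0 mapping to the most significant bit) and the
PIPR is the most favored, i.e. numerically lowest, pending priority. A
stand-alone sketch of the conversion, assuming the usual XIVE encoding
(hypothetical helpers, not the QEMU ones):

    #include <stdint.h>

    /* One IPB bit per priority; priority 0 maps to bit 7. */
    static uint8_t priority_to_ipb(uint8_t priority)
    {
        return priority <= 7 ? 1 << (7 - priority) : 0;
    }

    /* Most favored (lowest) pending priority, 0xFF if none. */
    static uint8_t ipb_to_pipr(uint8_t ipb)
    {
        for (uint8_t prio = 0; prio < 8; prio++) {
            if (ipb & (1 << (7 - prio))) {
                return prio;
            }
        }
        return 0xFF;
    }

So an IPB of 0x28 (priorities 2 and 4 pending) yields a PIPR of 2.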
New patch | |||
---|---|---|---|
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | ||
1 | 2 | ||
3 | When a group interrupt cannot be delivered, we need to: | ||
4 | - increment the backlog counter for the group in the NVG table | ||
5 | (if the END is configured to keep a backlog). | ||
6 | - start a broadcast operation to set the LSMFB field on matching CPUs | ||
7 | which can't take the interrupt now because they're running at too | ||
8 | high a priority. | ||
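
Background for reviewers: the per-priority backlog counters touched in this
patch are 24-bit values packed big endian, three bytes per priority, starting
at word 2 of the NVGC. A self-contained sketch of that packing, with
hypothetical helper names (the patch itself indexes from &nvgc->w2):

    #include <stdint.h>
    #include <stdio.h>

    /* 8 priorities, 3 bytes each, big endian, saturating at 24 bits. */
    static void backlog_set(uint8_t *w2, uint8_t prio, uint32_t val)
    {
        uint8_t *p = w2 + prio * 3;

        if (val > 0xFFFFFF) {
            val = 0xFFFFFF;
        }
        p[0] = (val >> 16) & 0xFF;
        p[1] = (val >> 8) & 0xFF;
        p[2] = val & 0xFF;
    }

    static uint32_t backlog_get(const uint8_t *w2, uint8_t prio)
    {
        const uint8_t *p = w2 + prio * 3;

        return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
    }

    int main(void)
    {
        uint8_t w2[8 * 3] = { 0 };

        backlog_set(w2, 5, 0x123456);
        printf("0x%x\n", backlog_get(w2, 5));    /* prints 0x123456 */
        return 0;
    }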
9 | |||
10 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | ||
11 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
12 | --- | ||
13 | include/hw/ppc/xive.h | 5 ++ | ||
14 | include/hw/ppc/xive2.h | 1 + | ||
15 | hw/intc/pnv_xive2.c | 42 +++++++++++++++++ | ||
16 | hw/intc/xive2.c | 105 +++++++++++++++++++++++++++++++++++------ | ||
17 | hw/ppc/pnv.c | 22 ++++++++- | ||
18 | 5 files changed, 159 insertions(+), 16 deletions(-) | ||
19 | |||
20 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | ||
21 | index XXXXXXX..XXXXXXX 100644 | ||
22 | --- a/include/hw/ppc/xive.h | ||
23 | +++ b/include/hw/ppc/xive.h | ||
24 | @@ -XXX,XX +XXX,XX @@ struct XivePresenterClass { | ||
25 | uint32_t logic_serv, XiveTCTXMatch *match); | ||
26 | bool (*in_kernel)(const XivePresenter *xptr); | ||
27 | uint32_t (*get_config)(XivePresenter *xptr); | ||
28 | + int (*broadcast)(XivePresenter *xptr, | ||
29 | + uint8_t nvt_blk, uint32_t nvt_idx, | ||
30 | + uint8_t priority); | ||
31 | }; | ||
32 | |||
33 | int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
34 | @@ -XXX,XX +XXX,XX @@ struct XiveFabricClass { | ||
35 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
36 | bool cam_ignore, uint8_t priority, | ||
37 | uint32_t logic_serv, XiveTCTXMatch *match); | ||
38 | + int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, | ||
39 | + uint8_t priority); | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | ||
44 | index XXXXXXX..XXXXXXX 100644 | ||
45 | --- a/include/hw/ppc/xive2.h | ||
46 | +++ b/include/hw/ppc/xive2.h | ||
47 | @@ -XXX,XX +XXX,XX @@ uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | ||
48 | void xive2_tm_pull_os_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, | ||
49 | hwaddr offset, uint64_t value, unsigned size); | ||
50 | bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority); | ||
51 | +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority); | ||
52 | void xive2_tm_set_hv_target(XivePresenter *xptr, XiveTCTX *tctx, | ||
53 | hwaddr offset, uint64_t value, unsigned size); | ||
54 | void xive2_tm_pull_phys_ctx_ol(XivePresenter *xptr, XiveTCTX *tctx, | ||
55 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | ||
56 | index XXXXXXX..XXXXXXX 100644 | ||
57 | --- a/hw/intc/pnv_xive2.c | ||
58 | +++ b/hw/intc/pnv_xive2.c | ||
59 | @@ -XXX,XX +XXX,XX @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) | ||
60 | return cfg; | ||
61 | } | ||
62 | |||
63 | +static int pnv_xive2_broadcast(XivePresenter *xptr, | ||
64 | + uint8_t nvt_blk, uint32_t nvt_idx, | ||
65 | + uint8_t priority) | ||
66 | +{ | ||
67 | + PnvXive2 *xive = PNV_XIVE2(xptr); | ||
68 | + PnvChip *chip = xive->chip; | ||
69 | + int i, j; | ||
70 | + bool gen1_tima_os = | ||
71 | + xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS; | ||
72 | + | ||
73 | + for (i = 0; i < chip->nr_cores; i++) { | ||
74 | + PnvCore *pc = chip->cores[i]; | ||
75 | + CPUCore *cc = CPU_CORE(pc); | ||
76 | + | ||
77 | + for (j = 0; j < cc->nr_threads; j++) { | ||
78 | + PowerPCCPU *cpu = pc->threads[j]; | ||
79 | + XiveTCTX *tctx; | ||
80 | + int ring; | ||
81 | + | ||
82 | + if (!pnv_xive2_is_cpu_enabled(xive, cpu)) { | ||
83 | + continue; | ||
84 | + } | ||
85 | + | ||
86 | + tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); | ||
87 | + | ||
88 | + if (gen1_tima_os) { | ||
89 | + ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | ||
90 | + nvt_idx, true, 0); | ||
91 | + } else { | ||
92 | + ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | ||
93 | + nvt_idx, true, 0); | ||
94 | + } | ||
95 | + | ||
96 | + if (ring != -1) { | ||
97 | + xive2_tm_set_lsmfb(tctx, ring, priority); | ||
98 | + } | ||
99 | + } | ||
100 | + } | ||
101 | + return 0; | ||
102 | +} | ||
103 | + | ||
104 | static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr) | ||
105 | { | ||
106 | return pnv_xive2_block_id(PNV_XIVE2(xrtr)); | ||
107 | @@ -XXX,XX +XXX,XX @@ static void pnv_xive2_class_init(ObjectClass *klass, void *data) | ||
108 | |||
109 | xpc->match_nvt = pnv_xive2_match_nvt; | ||
110 | xpc->get_config = pnv_xive2_presenter_get_config; | ||
111 | + xpc->broadcast = pnv_xive2_broadcast; | ||
112 | }; | ||
113 | |||
114 | static const TypeInfo pnv_xive2_info = { | ||
115 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
116 | index XXXXXXX..XXXXXXX 100644 | ||
117 | --- a/hw/intc/xive2.c | ||
118 | +++ b/hw/intc/xive2.c | ||
119 | @@ -XXX,XX +XXX,XX @@ static uint32_t xive2_nvgc_get_backlog(Xive2Nvgc *nvgc, uint8_t priority) | ||
120 | return val; | ||
121 | } | ||
122 | |||
123 | +static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, | ||
124 | + uint32_t val) | ||
125 | +{ | ||
126 | + uint8_t *ptr, i; | ||
127 | + uint32_t shift; | ||
128 | + | ||
129 | + if (priority > 7) { | ||
130 | + return; | ||
131 | + } | ||
132 | + | ||
133 | + if (val > 0xFFFFFF) { | ||
134 | + val = 0xFFFFFF; | ||
135 | + } | ||
136 | + /* | ||
137 | + * The per-priority backlog counters are 24-bit and the structure | ||
138 | + * is stored in big endian | ||
139 | + */ | ||
140 | + ptr = (uint8_t *)&nvgc->w2 + priority * 3; | ||
141 | + for (i = 0; i < 3; i++, ptr++) { | ||
142 | + shift = 8 * (2 - i); | ||
143 | + *ptr = (val >> shift) & 0xFF; | ||
144 | + } | ||
145 | +} | ||
146 | + | ||
147 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) | ||
148 | { | ||
149 | if (!xive2_eas_is_valid(eas)) { | ||
150 | @@ -XXX,XX +XXX,XX @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | ||
151 | return true; | ||
152 | } | ||
153 | |||
154 | +void xive2_tm_set_lsmfb(XiveTCTX *tctx, int ring, uint8_t priority) | ||
155 | +{ | ||
156 | + uint8_t *regs = &tctx->regs[ring]; | ||
157 | + | ||
158 | + /* | ||
159 | + * Called by the router during a VP-group notification when the | ||
160 | + * thread matches but can't take the interrupt because it's | ||
161 | + * already running at a more favored priority. It then stores the | ||
162 | + * new interrupt priority in the LSMFB field. | ||
163 | + */ | ||
164 | + regs[TM_LSMFB] = priority; | ||
165 | +} | ||
166 | + | ||
167 | static void xive2_router_realize(DeviceState *dev, Error **errp) | ||
168 | { | ||
169 | Xive2Router *xrtr = XIVE2_ROUTER(dev); | ||
170 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
171 | /* | ||
172 | * If no matching NVP is dispatched on a HW thread : | ||
173 | * - specific VP: update the NVP structure if backlog is activated | ||
174 | - * - logical server : forward request to IVPE (not supported) | ||
175 | + * - VP-group: update the backlog counter for that priority in the NVG | ||
176 | */ | ||
177 | if (xive2_end_is_backlog(&end)) { | ||
178 | - uint8_t ipb; | ||
179 | |||
180 | if (format == 1) { | ||
181 | qemu_log_mask(LOG_GUEST_ERROR, | ||
182 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
183 | return; | ||
184 | } | ||
185 | |||
186 | - /* | ||
187 | - * Record the IPB in the associated NVP structure for later | ||
188 | - * use. The presenter will resend the interrupt when the vCPU | ||
189 | - * is dispatched again on a HW thread. | ||
190 | - */ | ||
191 | - ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | | ||
192 | - xive_priority_to_ipb(priority); | ||
193 | - nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | ||
194 | - xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | ||
195 | - | ||
196 | - /* | ||
197 | - * On HW, follows a "Broadcast Backlog" to IVPEs | ||
198 | - */ | ||
199 | + if (!xive2_end_is_ignore(&end)) { | ||
200 | + uint8_t ipb; | ||
201 | + /* | ||
202 | + * Record the IPB in the associated NVP structure for later | ||
203 | + * use. The presenter will resend the interrupt when the vCPU | ||
204 | + * is dispatched again on a HW thread. | ||
205 | + */ | ||
206 | + ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) | | ||
207 | + xive_priority_to_ipb(priority); | ||
208 | + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | ||
209 | + xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | ||
210 | + } else { | ||
211 | + Xive2Nvgc nvg; | ||
212 | + uint32_t backlog; | ||
213 | + | ||
214 | + /* For groups, the per-priority backlog counters are in the NVG */ | ||
215 | + if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { | ||
216 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", | ||
217 | + nvp_blk, nvp_idx); | ||
218 | + return; | ||
219 | + } | ||
220 | + | ||
221 | + if (!xive2_nvgc_is_valid(&nvg)) { | ||
222 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", | ||
223 | + nvp_blk, nvp_idx); | ||
224 | + return; | ||
225 | + } | ||
226 | + | ||
227 | + /* | ||
228 | + * Increment the backlog counter for that priority. | ||
229 | + * For the precluded case, we only call broadcast the | ||
230 | + * first time the counter is incremented. broadcast will | ||
231 | + * set the LSMFB field of the TIMA of relevant threads so | ||
232 | + * that they know an interrupt is pending. | ||
233 | + */ | ||
234 | + backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; | ||
235 | + xive2_nvgc_set_backlog(&nvg, priority, backlog); | ||
236 | + xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); | ||
237 | + | ||
238 | + if (precluded && backlog == 1) { | ||
239 | + XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); | ||
240 | + xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); | ||
241 | + | ||
242 | + if (!xive2_end_is_precluded_escalation(&end)) { | ||
243 | + /* | ||
244 | + * The interrupt will be picked up when the | ||
245 | + * matching thread lowers its priority level | ||
246 | + */ | ||
247 | + return; | ||
248 | + } | ||
249 | + } | ||
250 | + } | ||
251 | } | ||
252 | |||
253 | do_escalation: | ||
254 | diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c | ||
255 | index XXXXXXX..XXXXXXX 100644 | ||
256 | --- a/hw/ppc/pnv.c | ||
257 | +++ b/hw/ppc/pnv.c | ||
258 | @@ -XXX,XX +XXX,XX @@ | ||
259 | /* | ||
260 | * QEMU PowerPC PowerNV machine model | ||
261 | * | ||
262 | - * Copyright (c) 2016, IBM Corporation. | ||
263 | + * Copyright (c) 2016-2024, IBM Corporation. | ||
264 | + * | ||
265 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
266 | * | ||
267 | * This library is free software; you can redistribute it and/or | ||
268 | * modify it under the terms of the GNU Lesser General Public | ||
269 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | ||
270 | return total_count; | ||
271 | } | ||
272 | |||
273 | +static int pnv10_xive_broadcast(XiveFabric *xfb, | ||
274 | + uint8_t nvt_blk, uint32_t nvt_idx, | ||
275 | + uint8_t priority) | ||
276 | +{ | ||
277 | + PnvMachineState *pnv = PNV_MACHINE(xfb); | ||
278 | + int i; | ||
279 | + | ||
280 | + for (i = 0; i < pnv->num_chips; i++) { | ||
281 | + Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]); | ||
282 | + XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); | ||
283 | + XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | ||
284 | + | ||
285 | + xpc->broadcast(xptr, nvt_blk, nvt_idx, priority); | ||
286 | + } | ||
287 | + return 0; | ||
288 | +} | ||
289 | + | ||
290 | static bool pnv_machine_get_big_core(Object *obj, Error **errp) | ||
291 | { | ||
292 | PnvMachineState *pnv = PNV_MACHINE(obj); | ||
293 | @@ -XXX,XX +XXX,XX @@ static void pnv_machine_p10_common_class_init(ObjectClass *oc, void *data) | ||
294 | pmc->dt_power_mgt = pnv_dt_power_mgt; | ||
295 | |||
296 | xfc->match_nvt = pnv10_xive_match_nvt; | ||
297 | + xfc->broadcast = pnv10_xive_broadcast; | ||
298 | |||
299 | machine_class_allow_dynamic_sysbus_dev(mc, TYPE_PNV_PHB); | ||
300 | } | ||
301 | -- | ||
302 | 2.43.0
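The broadcast plumbing added in this patch is two-level: the machine-wide
fabric fans out to each chip's presenter, and each presenter walks its
enabled threads, setting LSMFB wherever the CAM matches. In miniature, with
hypothetical types standing in for XiveFabric/XivePresenter:

    #include <stdint.h>

    #define NUM_CHIPS   2
    #define NUM_THREADS 4

    struct thread { uint8_t lsmfb; int matches; };
    struct chip   { struct thread threads[NUM_THREADS]; };

    /* Presenter level: set LSMFB on every matching thread. */
    static void chip_broadcast(struct chip *c, uint8_t priority)
    {
        for (int t = 0; t < NUM_THREADS; t++) {
            if (c->threads[t].matches) {
                c->threads[t].lsmfb = priority;
            }
        }
    }

    /* Fabric level: fan out to every chip. */
    static void fabric_broadcast(struct chip *chips, uint8_t priority)
    {
        for (int i = 0; i < NUM_CHIPS; i++) {
            chip_broadcast(&chips[i], priority);
        }
    }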
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | When the hypervisor or OS pushes a new value to the CPPR, if the LSMFB | 3 | When the hypervisor or OS pushes a new value to the CPPR, if the LSMFB |
4 | value is lower than the new CPPR value, there could be a pending group | 4 | value is lower than the new CPPR value, there could be a pending group |
5 | interrupt in the backlog, so it needs to be scanned. | 5 | interrupt in the backlog, so it needs to be scanned. |
6 | 6 | ||
7 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 7 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
8 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 8 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
9 | --- | 9 | --- |
10 | include/hw/ppc/xive2.h | 4 + | 10 | include/hw/ppc/xive2.h | 4 + |
11 | hw/intc/xive.c | 4 +- | 11 | hw/intc/xive.c | 4 +- |
12 | hw/intc/xive2.c | 173 ++++++++++++++++++++++++++++++++++++++++- | 12 | hw/intc/xive2.c | 173 ++++++++++++++++++++++++++++++++++++++++- |
13 | 3 files changed, 177 insertions(+), 4 deletions(-) | 13 | 3 files changed, 177 insertions(+), 4 deletions(-) |
14 | 14 | ||
15 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | 15 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h |
16 | index XXXXXXX..XXXXXXX 100644 | 16 | index XXXXXXX..XXXXXXX 100644 |
17 | --- a/include/hw/ppc/xive2.h | 17 | --- a/include/hw/ppc/xive2.h |
18 | +++ b/include/hw/ppc/xive2.h | 18 | +++ b/include/hw/ppc/xive2.h |
19 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2EndSource { | 19 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2EndSource { |
20 | * XIVE2 Thread Interrupt Management Area (POWER10) | 20 | * XIVE2 Thread Interrupt Management Area (POWER10) |
21 | */ | 21 | */ |
22 | 22 | ||
23 | +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, | 23 | +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, |
24 | + hwaddr offset, uint64_t value, unsigned size); | 24 | + hwaddr offset, uint64_t value, unsigned size); |
25 | +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, | 25 | +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, |
26 | + hwaddr offset, uint64_t value, unsigned size); | 26 | + hwaddr offset, uint64_t value, unsigned size); |
27 | void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, | 27 | void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, |
28 | uint64_t value, unsigned size); | 28 | uint64_t value, unsigned size); |
29 | uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | 29 | uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, |
30 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | 30 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c |
31 | index XXXXXXX..XXXXXXX 100644 | 31 | index XXXXXXX..XXXXXXX 100644 |
32 | --- a/hw/intc/xive.c | 32 | --- a/hw/intc/xive.c |
33 | +++ b/hw/intc/xive.c | 33 | +++ b/hw/intc/xive.c |
34 | @@ -XXX,XX +XXX,XX @@ static const XiveTmOp xive2_tm_operations[] = { | 34 | @@ -XXX,XX +XXX,XX @@ static const XiveTmOp xive2_tm_operations[] = { |
35 | * MMIOs below 2K : raw values and special operations without side | 35 | * MMIOs below 2K : raw values and special operations without side |
36 | * effects | 36 | * effects |
37 | */ | 37 | */ |
38 | - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, | 38 | - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, |
39 | + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, | 39 | + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, |
40 | NULL }, | 40 | NULL }, |
41 | { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, | 41 | { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, |
42 | NULL }, | 42 | NULL }, |
43 | @@ -XXX,XX +XXX,XX @@ static const XiveTmOp xive2_tm_operations[] = { | 43 | @@ -XXX,XX +XXX,XX @@ static const XiveTmOp xive2_tm_operations[] = { |
44 | NULL }, | 44 | NULL }, |
45 | { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, | 45 | { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, |
46 | NULL }, | 46 | NULL }, |
47 | - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, | 47 | - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, |
48 | + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, | 48 | + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, |
49 | NULL }, | 49 | NULL }, |
50 | { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, | 50 | { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, |
51 | NULL }, | 51 | NULL }, |
52 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 52 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
53 | index XXXXXXX..XXXXXXX 100644 | 53 | index XXXXXXX..XXXXXXX 100644 |
54 | --- a/hw/intc/xive2.c | 54 | --- a/hw/intc/xive2.c |
55 | +++ b/hw/intc/xive2.c | 55 | +++ b/hw/intc/xive2.c |
56 | @@ -XXX,XX +XXX,XX @@ | 56 | @@ -XXX,XX +XXX,XX @@ |
57 | #include "hw/ppc/xive.h" | 57 | #include "hw/ppc/xive.h" |
58 | #include "hw/ppc/xive2.h" | 58 | #include "hw/ppc/xive2.h" |
59 | #include "hw/ppc/xive2_regs.h" | 59 | #include "hw/ppc/xive2_regs.h" |
60 | +#include "trace.h" | 60 | +#include "trace.h" |
61 | 61 | ||
62 | uint32_t xive2_router_get_config(Xive2Router *xrtr) | 62 | uint32_t xive2_router_get_config(Xive2Router *xrtr) |
63 | { | 63 | { |
64 | @@ -XXX,XX +XXX,XX @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | 64 | @@ -XXX,XX +XXX,XX @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, |
65 | } | 65 | } |
66 | } | 66 | } |
67 | 67 | ||
68 | +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, | 68 | +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, |
69 | + uint32_t *nvp_blk, uint32_t *nvp_idx) | 69 | + uint32_t *nvp_blk, uint32_t *nvp_idx) |
70 | +{ | 70 | +{ |
71 | + uint32_t w2, cam; | 71 | + uint32_t w2, cam; |
72 | + | 72 | + |
73 | + w2 = xive_tctx_word2(&tctx->regs[ring]); | 73 | + w2 = xive_tctx_word2(&tctx->regs[ring]); |
74 | + switch (ring) { | 74 | + switch (ring) { |
75 | + case TM_QW1_OS: | 75 | + case TM_QW1_OS: |
76 | + if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { | 76 | + if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { |
77 | + return -1; | 77 | + return -1; |
78 | + } | 78 | + } |
79 | + cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); | 79 | + cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); |
80 | + break; | 80 | + break; |
81 | + case TM_QW2_HV_POOL: | 81 | + case TM_QW2_HV_POOL: |
82 | + if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { | 82 | + if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { |
83 | + return -1; | 83 | + return -1; |
84 | + } | 84 | + } |
85 | + cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); | 85 | + cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); |
86 | + break; | 86 | + break; |
87 | + case TM_QW3_HV_PHYS: | 87 | + case TM_QW3_HV_PHYS: |
88 | + if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { | 88 | + if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { |
89 | + return -1; | 89 | + return -1; |
90 | + } | 90 | + } |
91 | + cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); | 91 | + cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); |
92 | + break; | 92 | + break; |
93 | + default: | 93 | + default: |
94 | + return -1; | 94 | + return -1; |
95 | + } | 95 | + } |
96 | + *nvp_blk = xive2_nvp_blk(cam); | 96 | + *nvp_blk = xive2_nvp_blk(cam); |
97 | + *nvp_idx = xive2_nvp_idx(cam); | 97 | + *nvp_idx = xive2_nvp_idx(cam); |
98 | + return 0; | 98 | + return 0; |
99 | +} | 99 | +} |
100 | + | 100 | + |
101 | +static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | 101 | +static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) |
102 | +{ | 102 | +{ |
103 | + uint8_t *regs = &tctx->regs[ring]; | 103 | + uint8_t *regs = &tctx->regs[ring]; |
104 | + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); | 104 | + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); |
105 | + uint8_t old_cppr, backlog_prio, first_group, group_level = 0; | 105 | + uint8_t old_cppr, backlog_prio, first_group, group_level = 0; |
106 | + uint8_t pipr_min, lsmfb_min, ring_min; | 106 | + uint8_t pipr_min, lsmfb_min, ring_min; |
107 | + bool group_enabled; | 107 | + bool group_enabled; |
108 | + uint32_t nvp_blk, nvp_idx; | 108 | + uint32_t nvp_blk, nvp_idx; |
109 | + Xive2Nvp nvp; | 109 | + Xive2Nvp nvp; |
110 | + int rc; | 110 | + int rc; |
111 | + | 111 | + |
112 | + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, | 112 | + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, |
113 | + regs[TM_IPB], regs[TM_PIPR], | 113 | + regs[TM_IPB], regs[TM_PIPR], |
114 | + cppr, regs[TM_NSR]); | 114 | + cppr, regs[TM_NSR]); |
115 | + | 115 | + |
116 | + if (cppr > XIVE_PRIORITY_MAX) { | 116 | + if (cppr > XIVE_PRIORITY_MAX) { |
117 | + cppr = 0xff; | 117 | + cppr = 0xff; |
118 | + } | 118 | + } |
119 | + | 119 | + |
120 | + old_cppr = regs[TM_CPPR]; | 120 | + old_cppr = regs[TM_CPPR]; |
121 | + regs[TM_CPPR] = cppr; | 121 | + regs[TM_CPPR] = cppr; |
122 | + | 122 | + |
123 | + /* | 123 | + /* |
124 | + * Recompute the PIPR based on local pending interrupts. It will | 124 | + * Recompute the PIPR based on local pending interrupts. It will |
125 | + * be adjusted below if needed in case of pending group interrupts. | 125 | + * be adjusted below if needed in case of pending group interrupts. |
126 | + */ | 126 | + */ |
127 | + pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); | 127 | + pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); |
128 | + group_enabled = !!regs[TM_LGS]; | 128 | + group_enabled = !!regs[TM_LGS]; |
129 | + lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; | 129 | + lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; |
130 | + ring_min = ring; | 130 | + ring_min = ring; |
131 | + | 131 | + |
132 | + /* PHYS updates also depend on POOL values */ | 132 | + /* PHYS updates also depend on POOL values */ |
133 | + if (ring == TM_QW3_HV_PHYS) { | 133 | + if (ring == TM_QW3_HV_PHYS) { |
134 | + uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; | 134 | + uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; |
135 | + | 135 | + |
136 | + /* POOL values only matter if POOL ctx is valid */ | 136 | + /* POOL values only matter if POOL ctx is valid */ |
137 | + if (pregs[TM_WORD2] & 0x80) { | 137 | + if (pregs[TM_WORD2] & 0x80) { |
138 | + | 138 | + |
139 | + uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); | 139 | + uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); |
140 | + uint8_t pool_lsmfb = pregs[TM_LSMFB]; | 140 | + uint8_t pool_lsmfb = pregs[TM_LSMFB]; |
141 | + | 141 | + |
142 | + /* | 142 | + /* |
143 | + * Determine highest priority interrupt and | 143 | + * Determine highest priority interrupt and |
144 | + * remember which ring has it. | 144 | + * remember which ring has it. |
145 | + */ | 145 | + */ |
146 | + if (pool_pipr < pipr_min) { | 146 | + if (pool_pipr < pipr_min) { |
147 | + pipr_min = pool_pipr; | 147 | + pipr_min = pool_pipr; |
148 | + if (pool_pipr < lsmfb_min) { | 148 | + if (pool_pipr < lsmfb_min) { |
149 | + ring_min = TM_QW2_HV_POOL; | 149 | + ring_min = TM_QW2_HV_POOL; |
150 | + } | 150 | + } |
151 | + } | 151 | + } |
152 | + | 152 | + |
153 | + /* Values needed for group priority calculation */ | 153 | + /* Values needed for group priority calculation */ |
154 | + if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { | 154 | + if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { |
155 | + group_enabled = true; | 155 | + group_enabled = true; |
156 | + lsmfb_min = pool_lsmfb; | 156 | + lsmfb_min = pool_lsmfb; |
157 | + if (lsmfb_min < pipr_min) { | 157 | + if (lsmfb_min < pipr_min) { |
158 | + ring_min = TM_QW2_HV_POOL; | 158 | + ring_min = TM_QW2_HV_POOL; |
159 | + } | 159 | + } |
160 | + } | 160 | + } |
161 | + } | 161 | + } |
162 | + } | 162 | + } |
163 | + regs[TM_PIPR] = pipr_min; | 163 | + regs[TM_PIPR] = pipr_min; |
164 | + | 164 | + |
165 | + rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); | 165 | + rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); |
166 | + if (rc) { | 166 | + if (rc) { |
167 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); | 167 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); |
168 | + return; | 168 | + return; |
169 | + } | 169 | + } |
170 | + | 170 | + |
171 | + if (cppr < old_cppr) { | 171 | + if (cppr < old_cppr) { |
172 | + /* | 172 | + /* |
173 | + * FIXME: check if there's a group interrupt being presented | 173 | + * FIXME: check if there's a group interrupt being presented |
174 | + * and if the new cppr prevents it. If so, then the group | 174 | + * and if the new cppr prevents it. If so, then the group |
175 | + * interrupt needs to be re-added to the backlog and | 175 | + * interrupt needs to be re-added to the backlog and |
176 | + * re-triggered (see re-trigger END info in the NVGC | 176 | + * re-triggered (see re-trigger END info in the NVGC |
177 | + * structure) | 177 | + * structure) |
178 | + */ | 178 | + */ |
179 | + } | 179 | + } |
180 | + | 180 | + |
181 | + if (group_enabled && | 181 | + if (group_enabled && |
182 | + lsmfb_min < cppr && | 182 | + lsmfb_min < cppr && |
183 | + lsmfb_min < regs[TM_PIPR]) { | 183 | + lsmfb_min < regs[TM_PIPR]) { |
184 | + /* | 184 | + /* |
185 | + * Thread has seen a group interrupt with a higher priority | 185 | + * Thread has seen a group interrupt with a higher priority |
186 | + * than the new cppr or pending local interrupt. Check the | 186 | + * than the new cppr or pending local interrupt. Check the |
187 | + * backlog | 187 | + * backlog |
188 | + */ | 188 | + */ |
189 | + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { | 189 | + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { |
190 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", | 190 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", |
191 | + nvp_blk, nvp_idx); | 191 | + nvp_blk, nvp_idx); |
192 | + return; | 192 | + return; |
193 | + } | 193 | + } |
194 | + | 194 | + |
195 | + if (!xive2_nvp_is_valid(&nvp)) { | 195 | + if (!xive2_nvp_is_valid(&nvp)) { |
196 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", | 196 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", |
197 | + nvp_blk, nvp_idx); | 197 | + nvp_blk, nvp_idx); |
198 | + return; | 198 | + return; |
199 | + } | 199 | + } |
200 | + | 200 | + |
201 | + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); | 201 | + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); |
202 | + if (!first_group) { | 202 | + if (!first_group) { |
203 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", | 203 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", |
204 | + nvp_blk, nvp_idx); | 204 | + nvp_blk, nvp_idx); |
205 | + return; | 205 | + return; |
206 | + } | 206 | + } |
207 | + | 207 | + |
208 | + backlog_prio = xive2_presenter_backlog_check(tctx->xptr, | 208 | + backlog_prio = xive2_presenter_backlog_check(tctx->xptr, |
209 | + nvp_blk, nvp_idx, | 209 | + nvp_blk, nvp_idx, |
210 | + first_group, &group_level); | 210 | + first_group, &group_level); |
211 | + tctx->regs[ring_min + TM_LSMFB] = backlog_prio; | 211 | + tctx->regs[ring_min + TM_LSMFB] = backlog_prio; |
212 | + if (backlog_prio != 0xFF) { | 212 | + if (backlog_prio != 0xFF) { |
213 | + xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, | 213 | + xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, |
214 | + backlog_prio, group_level); | 214 | + backlog_prio, group_level); |
215 | + regs[TM_PIPR] = backlog_prio; | 215 | + regs[TM_PIPR] = backlog_prio; |
216 | + } | 216 | + } |
217 | + } | 217 | + } |
218 | + /* CPPR has changed, check if we need to raise a pending exception */ | 218 | + /* CPPR has changed, check if we need to raise a pending exception */ |
219 | + xive_tctx_notify(tctx, ring_min, group_level); | 219 | + xive_tctx_notify(tctx, ring_min, group_level); |
220 | +} | 220 | +} |
221 | + | 221 | + |
222 | +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, | 222 | +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, |
223 | + hwaddr offset, uint64_t value, unsigned size) | 223 | + hwaddr offset, uint64_t value, unsigned size) |
224 | +{ | 224 | +{ |
225 | + xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); | 225 | + xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); |
226 | +} | 226 | +} |
227 | + | 227 | + |
228 | +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, | 228 | +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, |
229 | + hwaddr offset, uint64_t value, unsigned size) | 229 | + hwaddr offset, uint64_t value, unsigned size) |
230 | +{ | 230 | +{ |
231 | + xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); | 231 | + xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); |
232 | +} | 232 | +} |
233 | + | 233 | + |
234 | static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) | 234 | static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) |
235 | { | 235 | { |
236 | uint8_t *regs = &tctx->regs[ring]; | 236 | uint8_t *regs = &tctx->regs[ring]; |
237 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 237 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
238 | 238 | ||
239 | bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | 239 | bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) |
240 | { | 240 | { |
241 | - uint8_t *regs = &tctx->regs[ring]; | 241 | - uint8_t *regs = &tctx->regs[ring]; |
242 | + /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ | 242 | + /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ |
243 | + uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; | 243 | + uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; |
244 | + uint8_t *alt_regs = &tctx->regs[alt_ring]; | 244 | + uint8_t *alt_regs = &tctx->regs[alt_ring]; |
245 | 245 | ||
246 | /* | 246 | /* |
247 | * The xive2_presenter_tctx_match() above tells if there's a match | 247 | * The xive2_presenter_tctx_match() above tells if there's a match |
248 | @@ -XXX,XX +XXX,XX @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | 248 | @@ -XXX,XX +XXX,XX @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) |
249 | * priority to know if the thread can take the interrupt now or if | 249 | * priority to know if the thread can take the interrupt now or if |
250 | * it is precluded. | 250 | * it is precluded. |
251 | */ | 251 | */ |
252 | - if (priority < regs[TM_CPPR]) { | 252 | - if (priority < regs[TM_CPPR]) { |
253 | + if (priority < alt_regs[TM_CPPR]) { | 253 | + if (priority < alt_regs[TM_CPPR]) { |
254 | return false; | 254 | return false; |
255 | } | 255 | } |
256 | return true; | 256 | return true; |
257 | -- | 257 | -- |
258 | 2.43.0 | 258 | 2.43.0
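One subtlety from the last hunk above: the pool ring has no NSR/CPPR/PIPR of
its own, so preclusion for TM_QW2_HV_POOL is judged against the physical
ring's registers. A one-liner capturing that aliasing, with the TIMA ring
offsets assumed to be the usual ones from hw/ppc/xive_regs.h:

    #define TM_QW2_HV_POOL 0x20
    #define TM_QW3_HV_PHYS 0x30

    /* HV_POOL borrows the HV_PHYS NSR, CPPR and PIPR registers. */
    static inline int xive_alt_ring(int ring)
    {
        return ring == TM_QW2_HV_POOL ? TM_QW3_HV_PHYS : ring;
    }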
New patch | |||
---|---|---|---|
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | ||
1 | 2 | ||
3 | When pushing an OS context, we were already checking if there was a | ||
4 | pending interrupt in the IPB and sending a notification if needed. We | ||
5 | also need to check if there is a pending group interrupt stored in the | ||
6 | NVG table. To avoid useless backlog scans, we only scan if the NVP | ||
7 | belongs to a group. | ||
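
The scan added below derives each NVG index from the NVP index: at group
level N, the low N bits of the VP index are replaced by a 0 followed by
N-1 ones. A worked sketch of that computation (hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>

    /* NVG index for a VP at a given group level. */
    static uint32_t nvgc_index(uint32_t nvp_idx, uint8_t level)
    {
        uint32_t mask = (1u << level) - 1;

        return (nvp_idx & ~mask) | (mask >> 1);
    }

    int main(void)
    {
        /* VP 0x54 at level 3: mask = 0x7, index = 0x50 | 0x3 = 0x53 */
        printf("0x%x\n", nvgc_index(0x54, 3));
        return 0;
    }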
8 | |||
9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | ||
10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
11 | --- | ||
12 | hw/intc/xive2.c | 95 ++++++++++++++++++++++++++++++++++++++++++++++++
13 | 1 file changed, 95 insertions(+)
14 | |||
15 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/hw/intc/xive2.c | ||
18 | +++ b/hw/intc/xive2.c | ||
19 | @@ -XXX,XX +XXX,XX @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) | ||
20 | end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); | ||
21 | } | ||
22 | |||
23 | +/* | ||
24 | + * Scan the group chain and return the highest priority and group | ||
25 | + * level of pending group interrupts. | ||
26 | + */ | ||
27 | +static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, | ||
28 | + uint8_t nvp_blk, uint32_t nvp_idx, | ||
29 | + uint8_t first_group, | ||
30 | + uint8_t *out_level) | ||
31 | +{ | ||
32 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); | ||
33 | + uint32_t nvgc_idx, mask; | ||
34 | + uint32_t current_level, count; | ||
35 | + uint8_t prio; | ||
36 | + Xive2Nvgc nvgc; | ||
37 | + | ||
38 | + for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { | ||
39 | + current_level = first_group & 0xF; | ||
40 | + | ||
41 | + while (current_level) { | ||
42 | + mask = (1 << current_level) - 1; | ||
43 | + nvgc_idx = nvp_idx & ~mask; | ||
44 | + nvgc_idx |= mask >> 1; | ||
45 | + qemu_log("fxb %s checking backlog for prio %d group idx %x\n", | ||
46 | + __func__, prio, nvgc_idx); | ||
47 | + | ||
48 | + if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { | ||
49 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", | ||
50 | + nvp_blk, nvgc_idx); | ||
51 | + return 0xFF; | ||
52 | + } | ||
53 | + if (!xive2_nvgc_is_valid(&nvgc)) { | ||
54 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", | ||
55 | + nvp_blk, nvgc_idx); | ||
56 | + return 0xFF; | ||
57 | + } | ||
58 | + | ||
59 | + count = xive2_nvgc_get_backlog(&nvgc, prio); | ||
60 | + if (count) { | ||
61 | + *out_level = current_level; | ||
62 | + return prio; | ||
63 | + } | ||
64 | + current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF; | ||
65 | + } | ||
66 | + } | ||
67 | + return 0xFF; | ||
68 | +} | ||
69 | + | ||
70 | +static void xive2_presenter_backlog_decr(XivePresenter *xptr, | ||
71 | + uint8_t nvp_blk, uint32_t nvp_idx, | ||
72 | + uint8_t group_prio, | ||
73 | + uint8_t group_level) | ||
74 | +{ | ||
75 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); | ||
76 | + uint32_t nvgc_idx, mask, count; | ||
77 | + Xive2Nvgc nvgc; | ||
78 | + | ||
79 | + group_level &= 0xF; | ||
80 | + mask = (1 << group_level) - 1; | ||
81 | + nvgc_idx = nvp_idx & ~mask; | ||
82 | + nvgc_idx |= mask >> 1; | ||
83 | + | ||
84 | + if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { | ||
85 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", | ||
86 | + nvp_blk, nvgc_idx); | ||
87 | + return; | ||
88 | + } | ||
89 | + if (!xive2_nvgc_is_valid(&nvgc)) { | ||
90 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", | ||
91 | + nvp_blk, nvgc_idx); | ||
92 | + return; | ||
93 | + } | ||
94 | + count = xive2_nvgc_get_backlog(&nvgc, group_prio); | ||
95 | + if (!count) { | ||
96 | + return; | ||
97 | + } | ||
98 | + xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1); | ||
99 | + xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc); | ||
100 | +} | ||
101 | + | ||
102 | /* | ||
103 | * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode | ||
104 | * | ||
105 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, | ||
106 | uint8_t nvp_blk, uint32_t nvp_idx, | ||
107 | bool do_restore) | ||
108 | { | ||
109 | + XivePresenter *xptr = XIVE_PRESENTER(xrtr); | ||
110 | uint8_t ipb; | ||
111 | uint8_t backlog_level; | ||
112 | + uint8_t group_level; | ||
113 | + uint8_t first_group; | ||
114 | uint8_t backlog_prio; | ||
115 | + uint8_t group_prio; | ||
116 | uint8_t *regs = &tctx->regs[TM_QW1_OS]; | ||
117 | Xive2Nvp nvp; | ||
118 | |||
119 | @@ -XXX,XX +XXX,XX @@ static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx, | ||
120 | backlog_prio = xive_ipb_to_pipr(ipb); | ||
121 | backlog_level = 0; | ||
122 | |||
123 | + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); | ||
124 | + if (first_group && regs[TM_LSMFB] < backlog_prio) { | ||
125 | + group_prio = xive2_presenter_backlog_scan(xptr, nvp_blk, nvp_idx, | ||
126 | + first_group, &group_level); | ||
127 | + regs[TM_LSMFB] = group_prio; | ||
128 | + if (regs[TM_LGS] && group_prio < backlog_prio) { | ||
129 | + /* VP can take a group interrupt */ | ||
130 | + xive2_presenter_backlog_decr(xptr, nvp_blk, nvp_idx, | ||
131 | + group_prio, group_level); | ||
132 | + backlog_prio = group_prio; | ||
133 | + backlog_level = group_level; | ||
134 | + } | ||
135 | + } | ||
136 | + | ||
137 | /* | ||
138 | * Compute the PIPR based on the restored state. | ||
139 | * It will raise the External interrupt signal if needed. | ||
140 | -- | ||
141 | 2.43.0
New patch | |||
---|---|---|---|
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | ||
1 | 2 | ||
3 | When the hypervisor or OS pushes a new value to the CPPR, if the LSMFB | ||
4 | value is lower than the new CPPR value, there could be a pending group | ||
5 | interrupt in the backlog, so it needs to be scanned. | ||
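For the PHYS-ring path in this patch: when CPPR is written on the physical
ring, the effective PIPR is the minimum over the PHYS and (valid) POOL
candidates, and the ring owning that minimum is remembered so the eventual
notification targets the right context. A condensed sketch under
illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    struct ring_state {
        uint8_t pipr;    /* most favored pending local priority */
        uint8_t lsmfb;   /* most favored group priority seen */
        bool lgs;        /* group interrupts enabled */
        bool valid;      /* ring context is valid */
    };

    /* Returns 0 for PHYS, 1 for POOL; fills in the two minima. */
    static int pick_ring(const struct ring_state *phys,
                         const struct ring_state *pool,
                         uint8_t *pipr_min, uint8_t *lsmfb_min)
    {
        int ring = 0;

        *pipr_min = phys->pipr;
        *lsmfb_min = phys->lgs ? phys->lsmfb : 0xff;

        if (pool->valid) {
            if (pool->pipr < *pipr_min) {
                *pipr_min = pool->pipr;
                if (pool->pipr < *lsmfb_min) {
                    ring = 1;
                }
            }
            if (pool->lgs && pool->lsmfb < *lsmfb_min) {
                *lsmfb_min = pool->lsmfb;
                if (*lsmfb_min < *pipr_min) {
                    ring = 1;
                }
            }
        }
        return ring;
    }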
6 | |||
7 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | ||
8 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
9 | --- | ||
10 | include/hw/ppc/xive2.h | 4 + | ||
11 | hw/intc/xive.c | 4 +- | ||
12 | hw/intc/xive2.c | 173 ++++++++++++++++++++++++++++++++++++++++- | ||
13 | 3 files changed, 177 insertions(+), 4 deletions(-) | ||
14 | |||
15 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | ||
16 | index XXXXXXX..XXXXXXX 100644 | ||
17 | --- a/include/hw/ppc/xive2.h | ||
18 | +++ b/include/hw/ppc/xive2.h | ||
19 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2EndSource { | ||
20 | * XIVE2 Thread Interrupt Management Area (POWER10) | ||
21 | */ | ||
22 | |||
23 | +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, | ||
24 | + hwaddr offset, uint64_t value, unsigned size); | ||
25 | +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, | ||
26 | + hwaddr offset, uint64_t value, unsigned size); | ||
27 | void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, hwaddr offset, | ||
28 | uint64_t value, unsigned size); | ||
29 | uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | ||
30 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | ||
31 | index XXXXXXX..XXXXXXX 100644 | ||
32 | --- a/hw/intc/xive.c | ||
33 | +++ b/hw/intc/xive.c | ||
34 | @@ -XXX,XX +XXX,XX @@ static const XiveTmOp xive2_tm_operations[] = { | ||
35 | * MMIOs below 2K : raw values and special operations without side | ||
36 | * effects | ||
37 | */ | ||
38 | - { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive_tm_set_os_cppr, | ||
39 | + { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_CPPR, 1, xive2_tm_set_os_cppr, | ||
40 | NULL }, | ||
41 | { XIVE_TM_HV_PAGE, TM_QW1_OS + TM_WORD2, 4, xive2_tm_push_os_ctx, | ||
42 | NULL }, | ||
43 | @@ -XXX,XX +XXX,XX @@ static const XiveTmOp xive2_tm_operations[] = { | ||
44 | NULL }, | ||
45 | { XIVE_TM_OS_PAGE, TM_QW1_OS + TM_LGS, 1, xive_tm_set_os_lgs, | ||
46 | NULL }, | ||
47 | - { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive_tm_set_hv_cppr, | ||
48 | + { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_CPPR, 1, xive2_tm_set_hv_cppr, | ||
49 | NULL }, | ||
50 | { XIVE_TM_HV_PAGE, TM_QW3_HV_PHYS + TM_WORD2, 1, xive_tm_vt_push, | ||
51 | NULL }, | ||
52 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
53 | index XXXXXXX..XXXXXXX 100644 | ||
54 | --- a/hw/intc/xive2.c | ||
55 | +++ b/hw/intc/xive2.c | ||
56 | @@ -XXX,XX +XXX,XX @@ | ||
57 | #include "hw/ppc/xive.h" | ||
58 | #include "hw/ppc/xive2.h" | ||
59 | #include "hw/ppc/xive2_regs.h" | ||
60 | +#include "trace.h" | ||
61 | |||
62 | uint32_t xive2_router_get_config(Xive2Router *xrtr) | ||
63 | { | ||
64 | @@ -XXX,XX +XXX,XX @@ void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx, | ||
65 | } | ||
66 | } | ||
67 | |||
68 | +static int xive2_tctx_get_nvp_indexes(XiveTCTX *tctx, uint8_t ring, | ||
69 | + uint32_t *nvp_blk, uint32_t *nvp_idx) | ||
70 | +{ | ||
71 | + uint32_t w2, cam; | ||
72 | + | ||
73 | + w2 = xive_tctx_word2(&tctx->regs[ring]); | ||
74 | + switch (ring) { | ||
75 | + case TM_QW1_OS: | ||
76 | + if (!(be32_to_cpu(w2) & TM2_QW1W2_VO)) { | ||
77 | + return -1; | ||
78 | + } | ||
79 | + cam = xive_get_field32(TM2_QW1W2_OS_CAM, w2); | ||
80 | + break; | ||
81 | + case TM_QW2_HV_POOL: | ||
82 | + if (!(be32_to_cpu(w2) & TM2_QW2W2_VP)) { | ||
83 | + return -1; | ||
84 | + } | ||
85 | + cam = xive_get_field32(TM2_QW2W2_POOL_CAM, w2); | ||
86 | + break; | ||
87 | + case TM_QW3_HV_PHYS: | ||
88 | + if (!(be32_to_cpu(w2) & TM2_QW3W2_VT)) { | ||
89 | + return -1; | ||
90 | + } | ||
91 | + cam = xive2_tctx_hw_cam_line(tctx->xptr, tctx); | ||
92 | + break; | ||
93 | + default: | ||
94 | + return -1; | ||
95 | + } | ||
96 | + *nvp_blk = xive2_nvp_blk(cam); | ||
97 | + *nvp_idx = xive2_nvp_idx(cam); | ||
98 | + return 0; | ||
99 | +} | ||
100 | + | ||
101 | +static void xive2_tctx_set_cppr(XiveTCTX *tctx, uint8_t ring, uint8_t cppr) | ||
102 | +{ | ||
103 | + uint8_t *regs = &tctx->regs[ring]; | ||
104 | + Xive2Router *xrtr = XIVE2_ROUTER(tctx->xptr); | ||
105 | + uint8_t old_cppr, backlog_prio, first_group, group_level = 0; | ||
106 | + uint8_t pipr_min, lsmfb_min, ring_min; | ||
107 | + bool group_enabled; | ||
108 | + uint32_t nvp_blk, nvp_idx; | ||
109 | + Xive2Nvp nvp; | ||
110 | + int rc; | ||
111 | + | ||
112 | + trace_xive_tctx_set_cppr(tctx->cs->cpu_index, ring, | ||
113 | + regs[TM_IPB], regs[TM_PIPR], | ||
114 | + cppr, regs[TM_NSR]); | ||
115 | + | ||
116 | + if (cppr > XIVE_PRIORITY_MAX) { | ||
117 | + cppr = 0xff; | ||
118 | + } | ||
119 | + | ||
120 | + old_cppr = regs[TM_CPPR]; | ||
121 | + regs[TM_CPPR] = cppr; | ||
122 | + | ||
123 | + /* | ||
124 | + * Recompute the PIPR based on local pending interrupts. It will | ||
125 | + * be adjusted below if needed in case of pending group interrupts. | ||
126 | + */ | ||
127 | + pipr_min = xive_ipb_to_pipr(regs[TM_IPB]); | ||
128 | + group_enabled = !!regs[TM_LGS]; | ||
129 | + lsmfb_min = (group_enabled) ? regs[TM_LSMFB] : 0xff; | ||
130 | + ring_min = ring; | ||
131 | + | ||
132 | + /* PHYS updates also depend on POOL values */ | ||
133 | + if (ring == TM_QW3_HV_PHYS) { | ||
134 | + uint8_t *pregs = &tctx->regs[TM_QW2_HV_POOL]; | ||
135 | + | ||
136 | + /* POOL values only matter if POOL ctx is valid */ | ||
137 | + if (pregs[TM_WORD2] & 0x80) { | ||
138 | + | ||
139 | + uint8_t pool_pipr = xive_ipb_to_pipr(pregs[TM_IPB]); | ||
140 | + uint8_t pool_lsmfb = pregs[TM_LSMFB]; | ||
141 | + | ||
142 | + /* | ||
143 | + * Determine highest priority interrupt and | ||
144 | + * remember which ring has it. | ||
145 | + */ | ||
146 | + if (pool_pipr < pipr_min) { | ||
147 | + pipr_min = pool_pipr; | ||
148 | + if (pool_pipr < lsmfb_min) { | ||
149 | + ring_min = TM_QW2_HV_POOL; | ||
150 | + } | ||
151 | + } | ||
152 | + | ||
153 | + /* Values needed for group priority calculation */ | ||
154 | + if (pregs[TM_LGS] && (pool_lsmfb < lsmfb_min)) { | ||
155 | + group_enabled = true; | ||
156 | + lsmfb_min = pool_lsmfb; | ||
157 | + if (lsmfb_min < pipr_min) { | ||
158 | + ring_min = TM_QW2_HV_POOL; | ||
159 | + } | ||
160 | + } | ||
161 | + } | ||
162 | + } | ||
163 | + regs[TM_PIPR] = pipr_min; | ||
164 | + | ||
165 | + rc = xive2_tctx_get_nvp_indexes(tctx, ring_min, &nvp_blk, &nvp_idx); | ||
166 | + if (rc) { | ||
167 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: set CPPR on invalid context\n"); | ||
168 | + return; | ||
169 | + } | ||
170 | + | ||
171 | + if (cppr < old_cppr) { | ||
172 | + /* | ||
173 | + * FIXME: check if there's a group interrupt being presented | ||
174 | + * and if the new cppr prevents it. If so, then the group | ||
175 | + * interrupt needs to be re-added to the backlog and | ||
176 | + * re-triggered (see re-trigger END info in the NVGC | ||
177 | + * structure) | ||
178 | + */ | ||
179 | + } | ||
180 | + | ||
181 | + if (group_enabled && | ||
182 | + lsmfb_min < cppr && | ||
183 | + lsmfb_min < regs[TM_PIPR]) { | ||
184 | + /* | ||
185 | + * Thread has seen a group interrupt with a higher priority | ||
186 | + * than the new cppr or pending local interrupt. Check the | ||
187 | + * backlog | ||
188 | + */ | ||
189 | + if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) { | ||
190 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", | ||
191 | + nvp_blk, nvp_idx); | ||
192 | + return; | ||
193 | + } | ||
194 | + | ||
195 | + if (!xive2_nvp_is_valid(&nvp)) { | ||
196 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", | ||
197 | + nvp_blk, nvp_idx); | ||
198 | + return; | ||
199 | + } | ||
200 | + | ||
201 | + first_group = xive_get_field32(NVP2_W0_PGOFIRST, nvp.w0); | ||
202 | + if (!first_group) { | ||
203 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n", | ||
204 | + nvp_blk, nvp_idx); | ||
205 | + return; | ||
206 | + } | ||
207 | + | ||
208 | + backlog_prio = xive2_presenter_backlog_scan(tctx->xptr, | ||
209 | + nvp_blk, nvp_idx, | ||
210 | + first_group, &group_level); | ||
211 | + tctx->regs[ring_min + TM_LSMFB] = backlog_prio; | ||
212 | + if (backlog_prio != 0xFF) { | ||
213 | + xive2_presenter_backlog_decr(tctx->xptr, nvp_blk, nvp_idx, | ||
214 | + backlog_prio, group_level); | ||
215 | + regs[TM_PIPR] = backlog_prio; | ||
216 | + } | ||
217 | + } | ||
218 | + /* CPPR has changed, check if we need to raise a pending exception */ | ||
219 | + xive_tctx_notify(tctx, ring_min, group_level); | ||
220 | +} | ||
221 | + | ||
222 | +void xive2_tm_set_hv_cppr(XivePresenter *xptr, XiveTCTX *tctx, | ||
223 | + hwaddr offset, uint64_t value, unsigned size) | ||
224 | +{ | ||
225 | + xive2_tctx_set_cppr(tctx, TM_QW3_HV_PHYS, value & 0xff); | ||
226 | +} | ||
227 | + | ||
228 | +void xive2_tm_set_os_cppr(XivePresenter *xptr, XiveTCTX *tctx, | ||
229 | + hwaddr offset, uint64_t value, unsigned size) | ||
230 | +{ | ||
231 | + xive2_tctx_set_cppr(tctx, TM_QW1_OS, value & 0xff); | ||
232 | +} | ||
233 | + | ||
234 | static void xive2_tctx_set_target(XiveTCTX *tctx, uint8_t ring, uint8_t target) | ||
235 | { | ||
236 | uint8_t *regs = &tctx->regs[ring]; | ||
237 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
238 | |||
239 | bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | ||
240 | { | ||
241 | - uint8_t *regs = &tctx->regs[ring]; | ||
242 | + /* HV_POOL ring uses HV_PHYS NSR, CPPR and PIPR registers */ | ||
243 | + uint8_t alt_ring = (ring == TM_QW2_HV_POOL) ? TM_QW3_HV_PHYS : ring; | ||
244 | + uint8_t *alt_regs = &tctx->regs[alt_ring]; | ||
245 | |||
246 | /* | ||
247 | * The xive2_presenter_tctx_match() above tells if there's a match | ||
248 | @@ -XXX,XX +XXX,XX @@ bool xive2_tm_irq_precluded(XiveTCTX *tctx, int ring, uint8_t priority) | ||
249 | * priority to know if the thread can take the interrupt now or if | ||
250 | * it is precluded. | ||
251 | */ | ||
252 | - if (priority < regs[TM_CPPR]) { | ||
253 | + if (priority < alt_regs[TM_CPPR]) { | ||
254 | return false; | ||
255 | } | ||
256 | return true; | ||
257 | -- | ||
258 | 2.43.0 | ||
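As a reading aid for the hunks above: xive2_tm_irq_precluded() ultimately reduces to one comparison against the CPPR of the ring that signals the exception (the PHYS ring when the POOL ring matched). Below is a minimal standalone sketch, not QEMU code and with an invented helper name, of that gate, assuming the XIVE convention that a numerically smaller priority is more favored:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustration only -- models the CPPR gate shown above: an interrupt
     * is deliverable only when its priority is strictly more favored
     * (numerically smaller) than the current CPPR. */
    static bool irq_precluded(uint8_t cppr, uint8_t priority)
    {
        return !(priority < cppr);
    }

    int main(void)
    {
        assert(!irq_precluded(0xff, 6)); /* reset CPPR 0xff accepts prio 0..7 */
        assert(irq_precluded(3, 3));     /* CPPR 3 blocks priority 3...       */
        assert(!irq_precluded(3, 2));    /* ...but lets priority 2 through    */
        return 0;
    }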
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | Add XIVE2 tests for group interrupts and group interrupts that have | 3 | Add XIVE2 tests for group interrupts and group interrupts that have |
4 | been backlogged. | 4 | been backlogged. |
5 | 5 | ||
6 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 6 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
7 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 7 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
8 | --- | 8 | --- |
9 | tests/qtest/pnv-xive2-test.c | 160 +++++++++++++++++++++++++++++++++++ | 9 | tests/qtest/pnv-xive2-test.c | 160 +++++++++++++++++++++++++++++++++++ |
10 | 1 file changed, 160 insertions(+) | 10 | 1 file changed, 160 insertions(+) |
11 | 11 | ||
12 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c | 12 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c |
13 | index XXXXXXX..XXXXXXX 100644 | 13 | index XXXXXXX..XXXXXXX 100644 |
14 | --- a/tests/qtest/pnv-xive2-test.c | 14 | --- a/tests/qtest/pnv-xive2-test.c |
15 | +++ b/tests/qtest/pnv-xive2-test.c | 15 | +++ b/tests/qtest/pnv-xive2-test.c |
16 | @@ -XXX,XX +XXX,XX @@ | 16 | @@ -XXX,XX +XXX,XX @@ |
17 | * QTest testcase for PowerNV 10 interrupt controller (xive2) | 17 | * QTest testcase for PowerNV 10 interrupt controller (xive2) |
18 | * - Test irq to hardware thread | 18 | * - Test irq to hardware thread |
19 | * - Test 'Pull Thread Context to Odd Thread Reporting Line' | 19 | * - Test 'Pull Thread Context to Odd Thread Reporting Line' |
20 | + * - Test irq to hardware group | 20 | + * - Test irq to hardware group |
21 | + * - Test irq to hardware group going through backlog | 21 | + * - Test irq to hardware group going through backlog |
22 | * | 22 | * |
23 | * Copyright (c) 2024, IBM Corporation. | 23 | * Copyright (c) 2024, IBM Corporation. |
24 | * | 24 | * |
25 | @@ -XXX,XX +XXX,XX @@ static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) | 25 | @@ -XXX,XX +XXX,XX @@ static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) |
26 | word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2); | 26 | word2 = get_tima32(qts, target_pir, TM_QW3_HV_PHYS + TM_WORD2); |
27 | g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0); | 27 | g_assert_cmphex(xive_get_field32(TM_QW3W2_VT, word2), ==, 0); |
28 | } | 28 | } |
29 | + | 29 | + |
30 | +static void test_hw_group_irq(QTestState *qts) | 30 | +static void test_hw_group_irq(QTestState *qts) |
31 | +{ | 31 | +{ |
32 | + uint32_t irq = 100; | 32 | + uint32_t irq = 100; |
33 | + uint32_t irq_data = 0xdeadbeef; | 33 | + uint32_t irq_data = 0xdeadbeef; |
34 | + uint32_t end_index = 23; | 34 | + uint32_t end_index = 23; |
35 | + uint32_t chosen_one; | 35 | + uint32_t chosen_one; |
36 | + uint32_t target_nvp = 0x81; /* group size = 4 */ | 36 | + uint32_t target_nvp = 0x81; /* group size = 4 */ |
37 | + uint8_t priority = 6; | 37 | + uint8_t priority = 6; |
38 | + uint32_t reg32; | 38 | + uint32_t reg32; |
39 | + uint16_t reg16; | 39 | + uint16_t reg16; |
40 | + uint8_t pq, nsr, cppr; | 40 | + uint8_t pq, nsr, cppr; |
41 | + | 41 | + |
42 | + printf("# ============================================================\n"); | 42 | + printf("# ============================================================\n"); |
43 | + printf("# Testing irq %d to hardware group of size 4\n", irq); | 43 | + printf("# Testing irq %d to hardware group of size 4\n", irq); |
44 | + | 44 | + |
45 | + /* irq config */ | 45 | + /* irq config */ |
46 | + set_eas(qts, irq, end_index, irq_data); | 46 | + set_eas(qts, irq, end_index, irq_data); |
47 | + set_end(qts, end_index, target_nvp, priority, true /* group */); | 47 | + set_end(qts, end_index, target_nvp, priority, true /* group */); |
48 | + | 48 | + |
49 | + /* enable and trigger irq */ | 49 | + /* enable and trigger irq */ |
50 | + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); | 50 | + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); |
51 | + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); | 51 | + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); |
52 | + | 52 | + |
53 | + /* check irq is raised on cpu */ | 53 | + /* check irq is raised on cpu */ |
54 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); | 54 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); |
55 | + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); | 55 | + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); |
56 | + | 56 | + |
57 | + /* find the targeted vCPU */ | 57 | + /* find the targeted vCPU */ |
58 | + for (chosen_one = 0; chosen_one < SMT; chosen_one++) { | 58 | + for (chosen_one = 0; chosen_one < SMT; chosen_one++) { |
59 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); | 59 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); |
60 | + nsr = reg32 >> 24; | 60 | + nsr = reg32 >> 24; |
61 | + if (nsr == 0x82) { | 61 | + if (nsr == 0x82) { |
62 | + break; | 62 | + break; |
63 | + } | 63 | + } |
64 | + } | 64 | + } |
65 | + g_assert_cmphex(chosen_one, <, SMT); | 65 | + g_assert_cmphex(chosen_one, <, SMT); |
66 | + cppr = (reg32 >> 16) & 0xFF; | 66 | + cppr = (reg32 >> 16) & 0xFF; |
67 | + g_assert_cmphex(nsr, ==, 0x82); | 67 | + g_assert_cmphex(nsr, ==, 0x82); |
68 | + g_assert_cmphex(cppr, ==, 0xFF); | 68 | + g_assert_cmphex(cppr, ==, 0xFF); |
69 | + | 69 | + |
70 | + /* ack the irq */ | 70 | + /* ack the irq */ |
71 | + reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG); | 71 | + reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG); |
72 | + nsr = reg16 >> 8; | 72 | + nsr = reg16 >> 8; |
73 | + cppr = reg16 & 0xFF; | 73 | + cppr = reg16 & 0xFF; |
74 | + g_assert_cmphex(nsr, ==, 0x82); | 74 | + g_assert_cmphex(nsr, ==, 0x82); |
75 | + g_assert_cmphex(cppr, ==, priority); | 75 | + g_assert_cmphex(cppr, ==, priority); |
76 | + | 76 | + |
77 | + /* check irq data is what was configured */ | 77 | + /* check irq data is what was configured */ |
78 | + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); | 78 | + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); |
79 | + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); | 79 | + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); |
80 | + | 80 | + |
81 | + /* End Of Interrupt */ | 81 | + /* End Of Interrupt */ |
82 | + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); | 82 | + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); |
83 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); | 83 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); |
84 | + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); | 84 | + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); |
85 | + | 85 | + |
86 | + /* reset CPPR */ | 86 | + /* reset CPPR */ |
87 | + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); | 87 | + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); |
88 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); | 88 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); |
89 | + nsr = reg32 >> 24; | 89 | + nsr = reg32 >> 24; |
90 | + cppr = (reg32 >> 16) & 0xFF; | 90 | + cppr = (reg32 >> 16) & 0xFF; |
91 | + g_assert_cmphex(nsr, ==, 0x00); | 91 | + g_assert_cmphex(nsr, ==, 0x00); |
92 | + g_assert_cmphex(cppr, ==, 0xFF); | 92 | + g_assert_cmphex(cppr, ==, 0xFF); |
93 | +} | 93 | +} |
94 | + | 94 | + |
95 | +static void test_hw_group_irq_backlog(QTestState *qts) | 95 | +static void test_hw_group_irq_backlog(QTestState *qts) |
96 | +{ | 96 | +{ |
97 | + uint32_t irq = 31; | 97 | + uint32_t irq = 31; |
98 | + uint32_t irq_data = 0x01234567; | 98 | + uint32_t irq_data = 0x01234567; |
99 | + uint32_t end_index = 129; | 99 | + uint32_t end_index = 129; |
100 | + uint32_t target_nvp = 0x81; /* group size = 4 */ | 100 | + uint32_t target_nvp = 0x81; /* group size = 4 */ |
101 | + uint32_t chosen_one = 3; | 101 | + uint32_t chosen_one = 3; |
102 | + uint8_t blocking_priority, priority = 3; | 102 | + uint8_t blocking_priority, priority = 3; |
103 | + uint32_t reg32; | 103 | + uint32_t reg32; |
104 | + uint16_t reg16; | 104 | + uint16_t reg16; |
105 | + uint8_t pq, nsr, cppr, lsmfb, i; | 105 | + uint8_t pq, nsr, cppr, lsmfb, i; |
106 | + | 106 | + |
107 | + printf("# ============================================================\n"); | 107 | + printf("# ============================================================\n"); |
108 | + printf("# Testing irq %d to hardware group of size 4 going through " \ | 108 | + printf("# Testing irq %d to hardware group of size 4 going through " \ |
109 | + "backlog\n", | 109 | + "backlog\n", |
110 | + irq); | 110 | + irq); |
111 | + | 111 | + |
112 | + /* | 112 | + /* |
113 | + * set current priority of all threads in the group to something | 113 | + * set current priority of all threads in the group to something |
114 | + * higher than what we're about to trigger | 114 | + * higher than what we're about to trigger |
115 | + */ | 115 | + */ |
116 | + blocking_priority = priority - 1; | 116 | + blocking_priority = priority - 1; |
117 | + for (i = 0; i < SMT; i++) { | 117 | + for (i = 0; i < SMT; i++) { |
118 | + set_tima8(qts, i, TM_QW3_HV_PHYS + TM_CPPR, blocking_priority); | 118 | + set_tima8(qts, i, TM_QW3_HV_PHYS + TM_CPPR, blocking_priority); |
119 | + } | 119 | + } |
120 | + | 120 | + |
121 | + /* irq config */ | 121 | + /* irq config */ |
122 | + set_eas(qts, irq, end_index, irq_data); | 122 | + set_eas(qts, irq, end_index, irq_data); |
123 | + set_end(qts, end_index, target_nvp, priority, true /* group */); | 123 | + set_end(qts, end_index, target_nvp, priority, true /* group */); |
124 | + | 124 | + |
125 | + /* enable and trigger irq */ | 125 | + /* enable and trigger irq */ |
126 | + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); | 126 | + get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); |
127 | + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); | 127 | + set_esb(qts, irq, XIVE_TRIGGER_PAGE, 0, 0); |
128 | + | 128 | + |
129 | + /* check irq is raised on cpu */ | 129 | + /* check irq is raised on cpu */ |
130 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); | 130 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); |
131 | + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); | 131 | + g_assert_cmpuint(pq, ==, XIVE_ESB_PENDING); |
132 | + | 132 | + |
133 | + /* check no interrupt is pending on the 2 possible targets */ | 133 | + /* check no interrupt is pending on the 2 possible targets */ |
134 | + for (i = 0; i < SMT; i++) { | 134 | + for (i = 0; i < SMT; i++) { |
135 | + reg32 = get_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0); | 135 | + reg32 = get_tima32(qts, i, TM_QW3_HV_PHYS + TM_WORD0); |
136 | + nsr = reg32 >> 24; | 136 | + nsr = reg32 >> 24; |
137 | + cppr = (reg32 >> 16) & 0xFF; | 137 | + cppr = (reg32 >> 16) & 0xFF; |
138 | + lsmfb = reg32 & 0xFF; | 138 | + lsmfb = reg32 & 0xFF; |
139 | + g_assert_cmphex(nsr, ==, 0x0); | 139 | + g_assert_cmphex(nsr, ==, 0x0); |
140 | + g_assert_cmphex(cppr, ==, blocking_priority); | 140 | + g_assert_cmphex(cppr, ==, blocking_priority); |
141 | + g_assert_cmphex(lsmfb, ==, priority); | 141 | + g_assert_cmphex(lsmfb, ==, priority); |
142 | + } | 142 | + } |
143 | + | 143 | + |
144 | + /* lower priority of one thread */ | 144 | + /* lower priority of one thread */ |
145 | + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, priority + 1); | 145 | + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, priority + 1); |
146 | + | 146 | + |
147 | + /* check backlogged interrupt is presented */ | 147 | + /* check backlogged interrupt is presented */ |
148 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); | 148 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); |
149 | + nsr = reg32 >> 24; | 149 | + nsr = reg32 >> 24; |
150 | + cppr = (reg32 >> 16) & 0xFF; | 150 | + cppr = (reg32 >> 16) & 0xFF; |
151 | + g_assert_cmphex(nsr, ==, 0x82); | 151 | + g_assert_cmphex(nsr, ==, 0x82); |
152 | + g_assert_cmphex(cppr, ==, priority + 1); | 152 | + g_assert_cmphex(cppr, ==, priority + 1); |
153 | + | 153 | + |
154 | + /* ack the irq */ | 154 | + /* ack the irq */ |
155 | + reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG); | 155 | + reg16 = get_tima16(qts, chosen_one, TM_SPC_ACK_HV_REG); |
156 | + nsr = reg16 >> 8; | 156 | + nsr = reg16 >> 8; |
157 | + cppr = reg16 & 0xFF; | 157 | + cppr = reg16 & 0xFF; |
158 | + g_assert_cmphex(nsr, ==, 0x82); | 158 | + g_assert_cmphex(nsr, ==, 0x82); |
159 | + g_assert_cmphex(cppr, ==, priority); | 159 | + g_assert_cmphex(cppr, ==, priority); |
160 | + | 160 | + |
161 | + /* check irq data is what was configured */ | 161 | + /* check irq data is what was configured */ |
162 | + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); | 162 | + reg32 = qtest_readl(qts, xive_get_queue_addr(end_index)); |
163 | + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); | 163 | + g_assert_cmphex((reg32 & 0x7fffffff), ==, (irq_data & 0x7fffffff)); |
164 | + | 164 | + |
165 | + /* End Of Interrupt */ | 165 | + /* End Of Interrupt */ |
166 | + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); | 166 | + set_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_STORE_EOI, 0); |
167 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); | 167 | + pq = get_esb(qts, irq, XIVE_EOI_PAGE, XIVE_ESB_GET); |
168 | + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); | 168 | + g_assert_cmpuint(pq, ==, XIVE_ESB_RESET); |
169 | + | 169 | + |
170 | + /* reset CPPR */ | 170 | + /* reset CPPR */ |
171 | + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); | 171 | + set_tima8(qts, chosen_one, TM_QW3_HV_PHYS + TM_CPPR, 0xFF); |
172 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); | 172 | + reg32 = get_tima32(qts, chosen_one, TM_QW3_HV_PHYS + TM_WORD0); |
173 | + nsr = reg32 >> 24; | 173 | + nsr = reg32 >> 24; |
174 | + cppr = (reg32 >> 16) & 0xFF; | 174 | + cppr = (reg32 >> 16) & 0xFF; |
175 | + lsmfb = reg32 & 0xFF; | 175 | + lsmfb = reg32 & 0xFF; |
176 | + g_assert_cmphex(nsr, ==, 0x00); | 176 | + g_assert_cmphex(nsr, ==, 0x00); |
177 | + g_assert_cmphex(cppr, ==, 0xFF); | 177 | + g_assert_cmphex(cppr, ==, 0xFF); |
178 | + g_assert_cmphex(lsmfb, ==, 0xFF); | 178 | + g_assert_cmphex(lsmfb, ==, 0xFF); |
179 | +} | 179 | +} |
180 | + | 180 | + |
181 | static void test_xive(void) | 181 | static void test_xive(void) |
182 | { | 182 | { |
183 | QTestState *qts; | 183 | QTestState *qts; |
184 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) | 184 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) |
185 | /* omit reset_state here and use settings from test_hw_irq */ | 185 | /* omit reset_state here and use settings from test_hw_irq */ |
186 | test_pull_thread_ctx_to_odd_thread_cl(qts); | 186 | test_pull_thread_ctx_to_odd_thread_cl(qts); |
187 | 187 | ||
188 | + reset_state(qts); | 188 | + reset_state(qts); |
189 | + test_hw_group_irq(qts); | 189 | + test_hw_group_irq(qts); |
190 | + | 190 | + |
191 | + reset_state(qts); | 191 | + reset_state(qts); |
192 | + test_hw_group_irq_backlog(qts); | 192 | + test_hw_group_irq_backlog(qts); |
193 | + | 193 | + |
194 | reset_state(qts); | 194 | reset_state(qts); |
195 | test_flush_sync_inject(qts); | 195 | test_flush_sync_inject(qts); |
196 | 196 | ||
197 | -- | 197 | -- |
198 | 2.43.0 | 198 | 2.43.0
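The group tests above repeatedly derive the same three bytes from a 32-bit read at TM_QW3_HV_PHYS + TM_WORD0. Here is a small standalone sketch, illustration only and not part of the patch (the struct and helper names are invented), of that unpacking, assuming the standard TIMA byte order NSR/CPPR/IPB/LSMFB within the word:

    #include <assert.h>
    #include <stdint.h>

    /* Illustration only -- byte layout of a big-endian TM_WORD0 read. */
    typedef struct {
        uint8_t nsr;   /* notification source reg; the 0x82 checked above is
                        * a physical-ring exception with a nonzero group level */
        uint8_t cppr;  /* current processor priority */
        uint8_t ipb;   /* interrupt pending buffer (TM_IPB byte) */
        uint8_t lsmfb; /* lowest backlogged group priority, 0xFF = none */
    } TimaWord0;

    static TimaWord0 decode_word0(uint32_t reg32)
    {
        TimaWord0 w;
        w.nsr   = reg32 >> 24;          /* nsr   = reg32 >> 24 in the tests */
        w.cppr  = (reg32 >> 16) & 0xFF; /* cppr  = (reg32 >> 16) & 0xFF     */
        w.ipb   = (reg32 >> 8) & 0xFF;
        w.lsmfb = reg32 & 0xFF;         /* lsmfb = reg32 & 0xFF             */
        return w;
    }

    int main(void)
    {
        TimaWord0 w = decode_word0(0x82FF0006);
        assert(w.nsr == 0x82 && w.cppr == 0xFF && w.lsmfb == 0x06);
        return 0;
    }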
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | Add support for the NVPG and NVC BARs. Accesses to the BAR pages | 3 | Add support for the NVPG and NVC BARs. Accesses to the BAR pages
4 | trigger backlog counter operations that either increment or | 4 | trigger backlog counter operations that either increment or
5 | decrement the counter. | 5 | decrement the counter.
6 | 6 | ||
7 | Also add qtests covering these operations. | 7 | Also add qtests covering these operations.
8 | 8 | ||
9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
11 | --- | 11 | --- |
12 | include/hw/ppc/xive2.h | 9 ++ | 12 | include/hw/ppc/xive2.h | 9 ++ |
13 | include/hw/ppc/xive2_regs.h | 3 + | 13 | include/hw/ppc/xive2_regs.h | 3 + |
14 | tests/qtest/pnv-xive2-common.h | 1 + | 14 | tests/qtest/pnv-xive2-common.h | 1 + |
15 | hw/intc/pnv_xive2.c | 80 +++++++++++++--- | 15 | hw/intc/pnv_xive2.c | 80 +++++++++++++--- |
16 | hw/intc/xive2.c | 87 +++++++++++++++++ | 16 | hw/intc/xive2.c | 87 +++++++++++++++++ |
17 | tests/qtest/pnv-xive2-nvpg_bar.c | 154 +++++++++++++++++++++++++++++++ | 17 | tests/qtest/pnv-xive2-nvpg_bar.c | 154 +++++++++++++++++++++++++++++++ |
18 | tests/qtest/pnv-xive2-test.c | 3 + | 18 | tests/qtest/pnv-xive2-test.c | 3 + |
19 | hw/intc/trace-events | 4 + | 19 | hw/intc/trace-events | 4 + |
20 | tests/qtest/meson.build | 3 +- | 20 | tests/qtest/meson.build | 3 +- |
21 | 9 files changed, 329 insertions(+), 15 deletions(-) | 21 | 9 files changed, 329 insertions(+), 15 deletions(-) |
22 | create mode 100644 tests/qtest/pnv-xive2-nvpg_bar.c | 22 | create mode 100644 tests/qtest/pnv-xive2-nvpg_bar.c |
23 | 23 | ||
24 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | 24 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h |
25 | index XXXXXXX..XXXXXXX 100644 | 25 | index XXXXXXX..XXXXXXX 100644 |
26 | --- a/include/hw/ppc/xive2.h | 26 | --- a/include/hw/ppc/xive2.h |
27 | +++ b/include/hw/ppc/xive2.h | 27 | +++ b/include/hw/ppc/xive2.h |
28 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 28 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
29 | uint8_t nvt_blk, uint32_t nvt_idx, | 29 | uint8_t nvt_blk, uint32_t nvt_idx, |
30 | bool cam_ignore, uint32_t logic_serv); | 30 | bool cam_ignore, uint32_t logic_serv); |
31 | 31 | ||
32 | +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, | 32 | +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, |
33 | + uint8_t blk, uint32_t idx, | 33 | + uint8_t blk, uint32_t idx, |
34 | + uint16_t offset); | 34 | + uint16_t offset); |
35 | + | 35 | + |
36 | +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, | 36 | +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, |
37 | + bool crowd, | 37 | + bool crowd, |
38 | + uint8_t blk, uint32_t idx, | 38 | + uint8_t blk, uint32_t idx, |
39 | + uint16_t offset, uint16_t val); | 39 | + uint16_t offset, uint16_t val); |
40 | + | 40 | + |
41 | /* | 41 | /* |
42 | * XIVE2 END ESBs (POWER10) | 42 | * XIVE2 END ESBs (POWER10) |
43 | */ | 43 | */ |
44 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h | 44 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h |
45 | index XXXXXXX..XXXXXXX 100644 | 45 | index XXXXXXX..XXXXXXX 100644 |
46 | --- a/include/hw/ppc/xive2_regs.h | 46 | --- a/include/hw/ppc/xive2_regs.h |
47 | +++ b/include/hw/ppc/xive2_regs.h | 47 | +++ b/include/hw/ppc/xive2_regs.h |
48 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2Nvgc { | 48 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2Nvgc { |
49 | void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, | 49 | void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, |
50 | GString *buf); | 50 | GString *buf); |
51 | 51 | ||
52 | +#define NVx_BACKLOG_OP PPC_BITMASK(52, 53) | 52 | +#define NVx_BACKLOG_OP PPC_BITMASK(52, 53) |
53 | +#define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) | 53 | +#define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) |
54 | + | 54 | + |
55 | #endif /* PPC_XIVE2_REGS_H */ | 55 | #endif /* PPC_XIVE2_REGS_H */ |
56 | diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h | 56 | diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h |
57 | index XXXXXXX..XXXXXXX 100644 | 57 | index XXXXXXX..XXXXXXX 100644 |
58 | --- a/tests/qtest/pnv-xive2-common.h | 58 | --- a/tests/qtest/pnv-xive2-common.h |
59 | +++ b/tests/qtest/pnv-xive2-common.h | 59 | +++ b/tests/qtest/pnv-xive2-common.h |
60 | @@ -XXX,XX +XXX,XX @@ extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, | 60 | @@ -XXX,XX +XXX,XX @@ extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, |
61 | 61 | ||
62 | 62 | ||
63 | void test_flush_sync_inject(QTestState *qts); | 63 | void test_flush_sync_inject(QTestState *qts); |
64 | +void test_nvpg_bar(QTestState *qts); | 64 | +void test_nvpg_bar(QTestState *qts); |
65 | 65 | ||
66 | #endif /* TEST_PNV_XIVE2_COMMON_H */ | 66 | #endif /* TEST_PNV_XIVE2_COMMON_H */ |
67 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | 67 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c |
68 | index XXXXXXX..XXXXXXX 100644 | 68 | index XXXXXXX..XXXXXXX 100644 |
69 | --- a/hw/intc/pnv_xive2.c | 69 | --- a/hw/intc/pnv_xive2.c |
70 | +++ b/hw/intc/pnv_xive2.c | 70 | +++ b/hw/intc/pnv_xive2.c |
71 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_tm_ops = { | 71 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_tm_ops = { |
72 | }, | 72 | }, |
73 | }; | 73 | }; |
74 | 74 | ||
75 | -static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset, | 75 | -static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset, |
76 | +static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr, | 76 | +static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr, |
77 | unsigned size) | 77 | unsigned size) |
78 | { | 78 | { |
79 | PnvXive2 *xive = PNV_XIVE2(opaque); | 79 | PnvXive2 *xive = PNV_XIVE2(opaque); |
80 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | 80 | + XivePresenter *xptr = XIVE_PRESENTER(xive); |
81 | + uint32_t page = addr >> xive->nvc_shift; | 81 | + uint32_t page = addr >> xive->nvc_shift;
82 | + uint16_t op = addr & 0xFFF; | 82 | + uint16_t op = addr & 0xFFF; |
83 | + uint8_t blk = pnv_xive2_block_id(xive); | 83 | + uint8_t blk = pnv_xive2_block_id(xive); |
84 | 84 | ||
85 | - xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset); | 85 | - xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset); |
86 | - return -1; | 86 | - return -1; |
87 | + if (size != 2) { | 87 | + if (size != 2) { |
88 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n", | 88 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n", |
89 | + size); | 89 | + size); |
90 | + return -1; | 90 | + return -1; |
91 | + } | 91 | + } |
92 | + | 92 | + |
93 | + return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1); | 93 | + return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1); |
94 | } | 94 | } |
95 | 95 | ||
96 | -static void pnv_xive2_nvc_write(void *opaque, hwaddr offset, | 96 | -static void pnv_xive2_nvc_write(void *opaque, hwaddr offset, |
97 | +static void pnv_xive2_nvc_write(void *opaque, hwaddr addr, | 97 | +static void pnv_xive2_nvc_write(void *opaque, hwaddr addr, |
98 | uint64_t val, unsigned size) | 98 | uint64_t val, unsigned size) |
99 | { | 99 | { |
100 | PnvXive2 *xive = PNV_XIVE2(opaque); | 100 | PnvXive2 *xive = PNV_XIVE2(opaque); |
101 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | 101 | + XivePresenter *xptr = XIVE_PRESENTER(xive); |
102 | + uint32_t page = addr >> xive->nvc_shift; | 102 | + uint32_t page = addr >> xive->nvc_shift; |
103 | + uint16_t op = addr & 0xFFF; | 103 | + uint16_t op = addr & 0xFFF; |
104 | + uint8_t blk = pnv_xive2_block_id(xive); | 104 | + uint8_t blk = pnv_xive2_block_id(xive); |
105 | 105 | ||
106 | - xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset); | 106 | - xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset); |
107 | + if (size != 1) { | 107 | + if (size != 1) { |
108 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n", | 108 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n", |
109 | + size); | 109 | + size); |
110 | + return; | 110 | + return; |
111 | + } | 111 | + } |
112 | + | 112 | + |
113 | + (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val); | 113 | + (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val); |
114 | } | 114 | } |
115 | 115 | ||
116 | static const MemoryRegionOps pnv_xive2_nvc_ops = { | 116 | static const MemoryRegionOps pnv_xive2_nvc_ops = { |
117 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_nvc_ops = { | 117 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_nvc_ops = { |
118 | .write = pnv_xive2_nvc_write, | 118 | .write = pnv_xive2_nvc_write, |
119 | .endianness = DEVICE_BIG_ENDIAN, | 119 | .endianness = DEVICE_BIG_ENDIAN, |
120 | .valid = { | 120 | .valid = { |
121 | - .min_access_size = 8, | 121 | - .min_access_size = 8, |
122 | + .min_access_size = 1, | 122 | + .min_access_size = 1, |
123 | .max_access_size = 8, | 123 | .max_access_size = 8, |
124 | }, | 124 | }, |
125 | .impl = { | 125 | .impl = { |
126 | - .min_access_size = 8, | 126 | - .min_access_size = 8, |
127 | + .min_access_size = 1, | 127 | + .min_access_size = 1, |
128 | .max_access_size = 8, | 128 | .max_access_size = 8, |
129 | }, | 129 | }, |
130 | }; | 130 | }; |
131 | 131 | ||
132 | -static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset, | 132 | -static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset, |
133 | +static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr, | 133 | +static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr, |
134 | unsigned size) | 134 | unsigned size) |
135 | { | 135 | { |
136 | PnvXive2 *xive = PNV_XIVE2(opaque); | 136 | PnvXive2 *xive = PNV_XIVE2(opaque); |
137 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | 137 | + XivePresenter *xptr = XIVE_PRESENTER(xive); |
138 | + uint32_t page = addr >> xive->nvpg_shift; | 138 | + uint32_t page = addr >> xive->nvpg_shift; |
139 | + uint16_t op = addr & 0xFFF; | 139 | + uint16_t op = addr & 0xFFF; |
140 | + uint32_t index = page >> 1; | 140 | + uint32_t index = page >> 1; |
141 | + uint8_t blk = pnv_xive2_block_id(xive); | 141 | + uint8_t blk = pnv_xive2_block_id(xive); |
142 | 142 | ||
143 | - xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset); | 143 | - xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset); |
144 | - return -1; | 144 | - return -1; |
145 | + if (size != 2) { | 145 | + if (size != 2) { |
146 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n", | 146 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n", |
147 | + size); | 147 | + size); |
148 | + return -1; | 148 | + return -1; |
149 | + } | 149 | + } |
150 | + | 150 | + |
151 | + if (page % 2) { | 151 | + if (page % 2) { |
152 | + /* odd page - NVG */ | 152 | + /* odd page - NVG */ |
153 | + return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1); | 153 | + return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1); |
154 | + } else { | 154 | + } else { |
155 | + /* even page - NVP */ | 155 | + /* even page - NVP */ |
156 | + return xive2_presenter_nvp_backlog_op(xptr, blk, index, op); | 156 | + return xive2_presenter_nvp_backlog_op(xptr, blk, index, op); |
157 | + } | 157 | + } |
158 | } | 158 | } |
159 | 159 | ||
160 | -static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset, | 160 | -static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset, |
161 | +static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr, | 161 | +static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr, |
162 | uint64_t val, unsigned size) | 162 | uint64_t val, unsigned size) |
163 | { | 163 | { |
164 | PnvXive2 *xive = PNV_XIVE2(opaque); | 164 | PnvXive2 *xive = PNV_XIVE2(opaque); |
165 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | 165 | + XivePresenter *xptr = XIVE_PRESENTER(xive); |
166 | + uint32_t page = addr >> xive->nvpg_shift; | 166 | + uint32_t page = addr >> xive->nvpg_shift; |
167 | + uint16_t op = addr & 0xFFF; | 167 | + uint16_t op = addr & 0xFFF; |
168 | + uint32_t index = page >> 1; | 168 | + uint32_t index = page >> 1; |
169 | + uint8_t blk = pnv_xive2_block_id(xive); | 169 | + uint8_t blk = pnv_xive2_block_id(xive); |
170 | 170 | ||
171 | - xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset); | 171 | - xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset); |
172 | + if (size != 1) { | 172 | + if (size != 1) { |
173 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n", | 173 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n", |
174 | + size); | 174 | + size); |
175 | + return; | 175 | + return; |
176 | + } | 176 | + } |
177 | + | 177 | + |
178 | + if (page % 2) { | 178 | + if (page % 2) { |
179 | + /* odd page - NVG */ | 179 | + /* odd page - NVG */ |
180 | + (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val); | 180 | + (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val); |
181 | + } else { | 181 | + } else { |
182 | + /* even page - NVP */ | 182 | + /* even page - NVP */ |
183 | + (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op); | 183 | + (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op); |
184 | + } | 184 | + } |
185 | } | 185 | } |
186 | 186 | ||
187 | static const MemoryRegionOps pnv_xive2_nvpg_ops = { | 187 | static const MemoryRegionOps pnv_xive2_nvpg_ops = { |
188 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_nvpg_ops = { | 188 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_nvpg_ops = { |
189 | .write = pnv_xive2_nvpg_write, | 189 | .write = pnv_xive2_nvpg_write, |
190 | .endianness = DEVICE_BIG_ENDIAN, | 190 | .endianness = DEVICE_BIG_ENDIAN, |
191 | .valid = { | 191 | .valid = { |
192 | - .min_access_size = 8, | 192 | - .min_access_size = 8, |
193 | + .min_access_size = 1, | 193 | + .min_access_size = 1, |
194 | .max_access_size = 8, | 194 | .max_access_size = 8, |
195 | }, | 195 | }, |
196 | .impl = { | 196 | .impl = { |
197 | - .min_access_size = 8, | 197 | - .min_access_size = 8, |
198 | + .min_access_size = 1, | 198 | + .min_access_size = 1, |
199 | .max_access_size = 8, | 199 | .max_access_size = 8, |
200 | }, | 200 | }, |
201 | }; | 201 | }; |
202 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 202 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
203 | index XXXXXXX..XXXXXXX 100644 | 203 | index XXXXXXX..XXXXXXX 100644 |
204 | --- a/hw/intc/xive2.c | 204 | --- a/hw/intc/xive2.c |
205 | +++ b/hw/intc/xive2.c | 205 | +++ b/hw/intc/xive2.c |
206 | @@ -XXX,XX +XXX,XX @@ static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, | 206 | @@ -XXX,XX +XXX,XX @@ static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, |
207 | } | 207 | } |
208 | } | 208 | } |
209 | 209 | ||
210 | +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, | 210 | +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, |
211 | + bool crowd, | 211 | + bool crowd, |
212 | + uint8_t blk, uint32_t idx, | 212 | + uint8_t blk, uint32_t idx, |
213 | + uint16_t offset, uint16_t val) | 213 | + uint16_t offset, uint16_t val) |
214 | +{ | 214 | +{ |
215 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); | 215 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); |
216 | + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); | 216 | + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); |
217 | + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); | 217 | + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); |
218 | + Xive2Nvgc nvgc; | 218 | + Xive2Nvgc nvgc; |
219 | + uint32_t count, old_count; | 219 | + uint32_t count, old_count; |
220 | + | 220 | + |
221 | + if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) { | 221 | + if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) { |
222 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n", | 222 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n", |
223 | + crowd ? "NVC" : "NVG", blk, idx); | 223 | + crowd ? "NVC" : "NVG", blk, idx); |
224 | + return -1; | 224 | + return -1; |
225 | + } | 225 | + } |
226 | + if (!xive2_nvgc_is_valid(&nvgc)) { | 226 | + if (!xive2_nvgc_is_valid(&nvgc)) { |
227 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx); | 227 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx); |
228 | + return -1; | 228 | + return -1; |
229 | + } | 229 | + } |
230 | + | 230 | + |
231 | + old_count = xive2_nvgc_get_backlog(&nvgc, priority); | 231 | + old_count = xive2_nvgc_get_backlog(&nvgc, priority); |
232 | + count = old_count; | 232 | + count = old_count; |
233 | + /* | 233 | + /* |
234 | + * op: | 234 | + * op: |
235 | + * 0b00 => increment | 235 | + * 0b00 => increment |
236 | + * 0b01 => decrement | 236 | + * 0b01 => decrement |
237 | + * 0b1- => read | 237 | + * 0b1- => read |
238 | + */ | 238 | + */ |
239 | + if (op == 0b00 || op == 0b01) { | 239 | + if (op == 0b00 || op == 0b01) { |
240 | + if (op == 0b00) { | 240 | + if (op == 0b00) { |
241 | + count += val; | 241 | + count += val; |
242 | + } else { | 242 | + } else { |
243 | + if (count > val) { | 243 | + if (count > val) { |
244 | + count -= val; | 244 | + count -= val; |
245 | + } else { | 245 | + } else { |
246 | + count = 0; | 246 | + count = 0; |
247 | + } | 247 | + } |
248 | + } | 248 | + } |
249 | + xive2_nvgc_set_backlog(&nvgc, priority, count); | 249 | + xive2_nvgc_set_backlog(&nvgc, priority, count); |
250 | + xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc); | 250 | + xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc); |
251 | + } | 251 | + } |
252 | + trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count); | 252 | + trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count); |
253 | + return old_count; | 253 | + return old_count; |
254 | +} | 254 | +} |
255 | + | 255 | + |
256 | +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, | 256 | +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, |
257 | + uint8_t blk, uint32_t idx, | 257 | + uint8_t blk, uint32_t idx, |
258 | + uint16_t offset) | 258 | + uint16_t offset) |
259 | +{ | 259 | +{ |
260 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); | 260 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); |
261 | + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); | 261 | + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); |
262 | + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); | 262 | + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); |
263 | + Xive2Nvp nvp; | 263 | + Xive2Nvp nvp; |
264 | + uint8_t ipb, old_ipb, rc; | 264 | + uint8_t ipb, old_ipb, rc; |
265 | + | 265 | + |
266 | + if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) { | 266 | + if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) { |
267 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx); | 267 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx); |
268 | + return -1; | 268 | + return -1; |
269 | + } | 269 | + } |
270 | + if (!xive2_nvp_is_valid(&nvp)) { | 270 | + if (!xive2_nvp_is_valid(&nvp)) { |
271 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx); | 271 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx); |
272 | + return -1; | 272 | + return -1; |
273 | + } | 273 | + } |
274 | + | 274 | + |
275 | + old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); | 275 | + old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); |
276 | + ipb = old_ipb; | 276 | + ipb = old_ipb; |
277 | + /* | 277 | + /* |
278 | + * op: | 278 | + * op: |
279 | + * 0b00 => set priority bit | 279 | + * 0b00 => set priority bit |
280 | + * 0b01 => reset priority bit | 280 | + * 0b01 => reset priority bit |
281 | + * 0b1- => read | 281 | + * 0b1- => read |
282 | + */ | 282 | + */ |
283 | + if (op == 0b00 || op == 0b01) { | 283 | + if (op == 0b00 || op == 0b01) { |
284 | + if (op == 0b00) { | 284 | + if (op == 0b00) { |
285 | + ipb |= xive_priority_to_ipb(priority); | 285 | + ipb |= xive_priority_to_ipb(priority); |
286 | + } else { | 286 | + } else { |
287 | + ipb &= ~xive_priority_to_ipb(priority); | 287 | + ipb &= ~xive_priority_to_ipb(priority); |
288 | + } | 288 | + } |
289 | + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | 289 | + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); |
290 | + xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2); | 290 | + xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2); |
291 | + } | 291 | + } |
292 | + rc = !!(old_ipb & xive_priority_to_ipb(priority)); | 292 | + rc = !!(old_ipb & xive_priority_to_ipb(priority)); |
293 | + trace_xive_nvp_backlog_op(blk, idx, op, priority, rc); | 293 | + trace_xive_nvp_backlog_op(blk, idx, op, priority, rc); |
294 | + return rc; | 294 | + return rc; |
295 | +} | 295 | +} |
296 | + | 296 | + |
297 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) | 297 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) |
298 | { | 298 | { |
299 | if (!xive2_eas_is_valid(eas)) { | 299 | if (!xive2_eas_is_valid(eas)) { |
300 | diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c | 300 | diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c |
301 | new file mode 100644 | 301 | new file mode 100644 |
302 | index XXXXXXX..XXXXXXX | 302 | index XXXXXXX..XXXXXXX |
303 | --- /dev/null | 303 | --- /dev/null |
304 | +++ b/tests/qtest/pnv-xive2-nvpg_bar.c | 304 | +++ b/tests/qtest/pnv-xive2-nvpg_bar.c |
305 | @@ -XXX,XX +XXX,XX @@ | 305 | @@ -XXX,XX +XXX,XX @@ |
306 | +/* | 306 | +/* |
307 | + * QTest testcase for PowerNV 10 interrupt controller (xive2) | 307 | + * QTest testcase for PowerNV 10 interrupt controller (xive2) |
308 | + * - Test NVPG BAR MMIO operations | 308 | + * - Test NVPG BAR MMIO operations |
309 | + * | 309 | + * |
310 | + * Copyright (c) 2024, IBM Corporation. | 310 | + * Copyright (c) 2024, IBM Corporation. |
311 | + * | 311 | + * |
312 | + * This work is licensed under the terms of the GNU GPL, version 2 or | 312 | + * This work is licensed under the terms of the GNU GPL, version 2 or |
313 | + * later. See the COPYING file in the top-level directory. | 313 | + * later. See the COPYING file in the top-level directory. |
314 | + */ | 314 | + */ |
315 | +#include "qemu/osdep.h" | 315 | +#include "qemu/osdep.h" |
316 | +#include "libqtest.h" | 316 | +#include "libqtest.h" |
317 | + | 317 | + |
318 | +#include "pnv-xive2-common.h" | 318 | +#include "pnv-xive2-common.h" |
319 | + | 319 | + |
320 | +#define NVPG_BACKLOG_OP_SHIFT 10 | 320 | +#define NVPG_BACKLOG_OP_SHIFT 10 |
321 | +#define NVPG_BACKLOG_PRIO_SHIFT 4 | 321 | +#define NVPG_BACKLOG_PRIO_SHIFT 4 |
322 | + | 322 | + |
323 | +#define XIVE_PRIORITY_MAX 7 | 323 | +#define XIVE_PRIORITY_MAX 7 |
324 | + | 324 | + |
325 | +enum NVx { | 325 | +enum NVx { |
326 | + NVP, | 326 | + NVP, |
327 | + NVG, | 327 | + NVG, |
328 | + NVC | 328 | + NVC |
329 | +}; | 329 | +}; |
330 | + | 330 | + |
331 | +typedef enum { | 331 | +typedef enum { |
332 | + INCR_STORE = 0b100, | 332 | + INCR_STORE = 0b100, |
333 | + INCR_LOAD = 0b000, | 333 | + INCR_LOAD = 0b000, |
334 | + DECR_STORE = 0b101, | 334 | + DECR_STORE = 0b101, |
335 | + DECR_LOAD = 0b001, | 335 | + DECR_LOAD = 0b001, |
336 | + READ_x = 0b010, | 336 | + READ_x = 0b010, |
337 | + READ_y = 0b011, | 337 | + READ_y = 0b011, |
338 | +} backlog_op; | 338 | +} backlog_op; |
339 | + | 339 | + |
340 | +static uint32_t nvpg_backlog_op(QTestState *qts, backlog_op op, | 340 | +static uint32_t nvpg_backlog_op(QTestState *qts, backlog_op op, |
341 | + enum NVx type, uint64_t index, | 341 | + enum NVx type, uint64_t index, |
342 | + uint8_t priority, uint8_t delta) | 342 | + uint8_t priority, uint8_t delta) |
343 | +{ | 343 | +{ |
344 | + uint64_t addr, offset; | 344 | + uint64_t addr, offset; |
345 | + uint32_t count = 0; | 345 | + uint32_t count = 0; |
346 | + | 346 | + |
347 | + switch (type) { | 347 | + switch (type) { |
348 | + case NVP: | 348 | + case NVP: |
349 | + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)); | 349 | + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)); |
350 | + break; | 350 | + break; |
351 | + case NVG: | 351 | + case NVG: |
352 | + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)) + | 352 | + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)) + |
353 | + (1 << XIVE_PAGE_SHIFT); | 353 | + (1 << XIVE_PAGE_SHIFT); |
354 | + break; | 354 | + break; |
355 | + case NVC: | 355 | + case NVC: |
356 | + addr = XIVE_NVC_ADDR + (index << XIVE_PAGE_SHIFT); | 356 | + addr = XIVE_NVC_ADDR + (index << XIVE_PAGE_SHIFT); |
357 | + break; | 357 | + break; |
358 | + default: | 358 | + default: |
359 | + g_assert_not_reached(); | 359 | + g_assert_not_reached(); |
360 | + } | 360 | + } |
361 | + | 361 | + |
362 | + offset = (op & 0b11) << NVPG_BACKLOG_OP_SHIFT; | 362 | + offset = (op & 0b11) << NVPG_BACKLOG_OP_SHIFT; |
363 | + offset |= priority << NVPG_BACKLOG_PRIO_SHIFT; | 363 | + offset |= priority << NVPG_BACKLOG_PRIO_SHIFT; |
364 | + if (op >> 2) { | 364 | + if (op >> 2) { |
365 | + qtest_writeb(qts, addr + offset, delta); | 365 | + qtest_writeb(qts, addr + offset, delta); |
366 | + } else { | 366 | + } else { |
367 | + count = qtest_readw(qts, addr + offset); | 367 | + count = qtest_readw(qts, addr + offset); |
368 | + } | 368 | + } |
369 | + return count; | 369 | + return count; |
370 | +} | 370 | +} |
371 | + | 371 | + |
372 | +void test_nvpg_bar(QTestState *qts) | 372 | +void test_nvpg_bar(QTestState *qts) |
373 | +{ | 373 | +{ |
374 | + uint32_t nvp_target = 0x11; | 374 | + uint32_t nvp_target = 0x11; |
375 | + uint32_t group_target = 0x17; /* size 16 */ | 375 | + uint32_t group_target = 0x17; /* size 16 */ |
376 | + uint32_t vp_irq = 33, group_irq = 47; | 376 | + uint32_t vp_irq = 33, group_irq = 47; |
377 | + uint32_t vp_end = 3, group_end = 97; | 377 | + uint32_t vp_end = 3, group_end = 97; |
378 | + uint32_t vp_irq_data = 0x33333333; | 378 | + uint32_t vp_irq_data = 0x33333333; |
379 | + uint32_t group_irq_data = 0x66666666; | 379 | + uint32_t group_irq_data = 0x66666666; |
380 | + uint8_t vp_priority = 0, group_priority = 5; | 380 | + uint8_t vp_priority = 0, group_priority = 5; |
381 | + uint32_t vp_count[XIVE_PRIORITY_MAX + 1] = { 0 }; | 381 | + uint32_t vp_count[XIVE_PRIORITY_MAX + 1] = { 0 }; |
382 | + uint32_t group_count[XIVE_PRIORITY_MAX + 1] = { 0 }; | 382 | + uint32_t group_count[XIVE_PRIORITY_MAX + 1] = { 0 }; |
383 | + uint32_t count, delta; | 383 | + uint32_t count, delta; |
384 | + uint8_t i; | 384 | + uint8_t i; |
385 | + | 385 | + |
386 | + printf("# ============================================================\n"); | 386 | + printf("# ============================================================\n"); |
387 | + printf("# Testing NVPG BAR operations\n"); | 387 | + printf("# Testing NVPG BAR operations\n"); |
388 | + | 388 | + |
389 | + set_nvg(qts, group_target, 0); | 389 | + set_nvg(qts, group_target, 0); |
390 | + set_nvp(qts, nvp_target, 0x04); | 390 | + set_nvp(qts, nvp_target, 0x04); |
391 | + set_nvp(qts, group_target, 0x04); | 391 | + set_nvp(qts, group_target, 0x04); |
392 | + | 392 | + |
393 | + /* | 393 | + /* |
394 | + * Setup: trigger a VP-specific interrupt and a group interrupt | 394 | + * Setup: trigger a VP-specific interrupt and a group interrupt |
395 | + * so that the backlog counters are initialized to something other | 395 | + * so that the backlog counters are initialized to something other
396 | + * than 0 for at least one priority level | 396 | + * than 0 for at least one priority level |
397 | + */ | 397 | + */ |
398 | + set_eas(qts, vp_irq, vp_end, vp_irq_data); | 398 | + set_eas(qts, vp_irq, vp_end, vp_irq_data); |
399 | + set_end(qts, vp_end, nvp_target, vp_priority, false /* group */); | 399 | + set_end(qts, vp_end, nvp_target, vp_priority, false /* group */); |
400 | + | 400 | + |
401 | + set_eas(qts, group_irq, group_end, group_irq_data); | 401 | + set_eas(qts, group_irq, group_end, group_irq_data); |
402 | + set_end(qts, group_end, group_target, group_priority, true /* group */); | 402 | + set_end(qts, group_end, group_target, group_priority, true /* group */); |
403 | + | 403 | + |
404 | + get_esb(qts, vp_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); | 404 | + get_esb(qts, vp_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); |
405 | + set_esb(qts, vp_irq, XIVE_TRIGGER_PAGE, 0, 0); | 405 | + set_esb(qts, vp_irq, XIVE_TRIGGER_PAGE, 0, 0); |
406 | + vp_count[vp_priority]++; | 406 | + vp_count[vp_priority]++; |
407 | + | 407 | + |
408 | + get_esb(qts, group_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); | 408 | + get_esb(qts, group_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); |
409 | + set_esb(qts, group_irq, XIVE_TRIGGER_PAGE, 0, 0); | 409 | + set_esb(qts, group_irq, XIVE_TRIGGER_PAGE, 0, 0); |
410 | + group_count[group_priority]++; | 410 | + group_count[group_priority]++; |
411 | + | 411 | + |
412 | + /* check the initial counters */ | 412 | + /* check the initial counters */ |
413 | + for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { | 413 | + for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { |
414 | + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, i, 0); | 414 | + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, i, 0); |
415 | + g_assert_cmpuint(count, ==, vp_count[i]); | 415 | + g_assert_cmpuint(count, ==, vp_count[i]); |
416 | + | 416 | + |
417 | + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, i, 0); | 417 | + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, i, 0); |
418 | + g_assert_cmpuint(count, ==, group_count[i]); | 418 | + g_assert_cmpuint(count, ==, group_count[i]); |
419 | + } | 419 | + } |
420 | + | 420 | + |
421 | + /* do a few ops on the VP. Counter can only be 0 or 1 */ | 421 | + /* do a few ops on the VP. Counter can only be 0 or 1 */ |
422 | + vp_priority = 2; | 422 | + vp_priority = 2; |
423 | + delta = 7; | 423 | + delta = 7; |
424 | + nvpg_backlog_op(qts, INCR_STORE, NVP, nvp_target, vp_priority, delta); | 424 | + nvpg_backlog_op(qts, INCR_STORE, NVP, nvp_target, vp_priority, delta); |
425 | + vp_count[vp_priority] = 1; | 425 | + vp_count[vp_priority] = 1; |
426 | + count = nvpg_backlog_op(qts, INCR_LOAD, NVP, nvp_target, vp_priority, 0); | 426 | + count = nvpg_backlog_op(qts, INCR_LOAD, NVP, nvp_target, vp_priority, 0); |
427 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | 427 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); |
428 | + count = nvpg_backlog_op(qts, READ_y, NVP, nvp_target, vp_priority, 0); | 428 | + count = nvpg_backlog_op(qts, READ_y, NVP, nvp_target, vp_priority, 0); |
429 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | 429 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); |
430 | + | 430 | + |
431 | + count = nvpg_backlog_op(qts, DECR_LOAD, NVP, nvp_target, vp_priority, 0); | 431 | + count = nvpg_backlog_op(qts, DECR_LOAD, NVP, nvp_target, vp_priority, 0); |
432 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | 432 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); |
433 | + vp_count[vp_priority] = 0; | 433 | + vp_count[vp_priority] = 0; |
434 | + nvpg_backlog_op(qts, DECR_STORE, NVP, nvp_target, vp_priority, delta); | 434 | + nvpg_backlog_op(qts, DECR_STORE, NVP, nvp_target, vp_priority, delta); |
435 | + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, vp_priority, 0); | 435 | + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, vp_priority, 0); |
436 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | 436 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); |
437 | + | 437 | + |
438 | + /* do a few ops on the group */ | 438 | + /* do a few ops on the group */ |
439 | + group_priority = 2; | 439 | + group_priority = 2; |
440 | + delta = 9; | 440 | + delta = 9; |
441 | + /* can't go negative */ | 441 | + /* can't go negative */ |
442 | + nvpg_backlog_op(qts, DECR_STORE, NVG, group_target, group_priority, delta); | 442 | + nvpg_backlog_op(qts, DECR_STORE, NVG, group_target, group_priority, delta); |
443 | + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, group_priority, 0); | 443 | + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, group_priority, 0); |
444 | + g_assert_cmpuint(count, ==, 0); | 444 | + g_assert_cmpuint(count, ==, 0); |
445 | + nvpg_backlog_op(qts, INCR_STORE, NVG, group_target, group_priority, delta); | 445 | + nvpg_backlog_op(qts, INCR_STORE, NVG, group_target, group_priority, delta); |
446 | + group_count[group_priority] += delta; | 446 | + group_count[group_priority] += delta; |
447 | + count = nvpg_backlog_op(qts, INCR_LOAD, NVG, group_target, | 447 | + count = nvpg_backlog_op(qts, INCR_LOAD, NVG, group_target, |
448 | + group_priority, delta); | 448 | + group_priority, delta); |
449 | + g_assert_cmpuint(count, ==, group_count[group_priority]); | 449 | + g_assert_cmpuint(count, ==, group_count[group_priority]); |
450 | + group_count[group_priority]++; | 450 | + group_count[group_priority]++; |
451 | + | 451 | + |
452 | + count = nvpg_backlog_op(qts, DECR_LOAD, NVG, group_target, | 452 | + count = nvpg_backlog_op(qts, DECR_LOAD, NVG, group_target, |
453 | + group_priority, delta); | 453 | + group_priority, delta); |
454 | + g_assert_cmpuint(count, ==, group_count[group_priority]); | 454 | + g_assert_cmpuint(count, ==, group_count[group_priority]); |
455 | + group_count[group_priority]--; | 455 | + group_count[group_priority]--; |
456 | + count = nvpg_backlog_op(qts, READ_x, NVG, group_target, group_priority, 0); | 456 | + count = nvpg_backlog_op(qts, READ_x, NVG, group_target, group_priority, 0); |
457 | + g_assert_cmpuint(count, ==, group_count[group_priority]); | 457 | + g_assert_cmpuint(count, ==, group_count[group_priority]); |
458 | +} | 458 | +} |
459 | + | 459 | + |
460 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c | 460 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c |
461 | index XXXXXXX..XXXXXXX 100644 | 461 | index XXXXXXX..XXXXXXX 100644 |
462 | --- a/tests/qtest/pnv-xive2-test.c | 462 | --- a/tests/qtest/pnv-xive2-test.c |
463 | +++ b/tests/qtest/pnv-xive2-test.c | 463 | +++ b/tests/qtest/pnv-xive2-test.c |
464 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) | 464 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) |
465 | reset_state(qts); | 465 | reset_state(qts); |
466 | test_flush_sync_inject(qts); | 466 | test_flush_sync_inject(qts); |
467 | 467 | ||
468 | + reset_state(qts); | 468 | + reset_state(qts); |
469 | + test_nvpg_bar(qts); | 469 | + test_nvpg_bar(qts); |
470 | + | 470 | + |
471 | qtest_quit(qts); | 471 | qtest_quit(qts); |
472 | } | 472 | } |
473 | 473 | ||
474 | diff --git a/hw/intc/trace-events b/hw/intc/trace-events | 474 | diff --git a/hw/intc/trace-events b/hw/intc/trace-events |
475 | index XXXXXXX..XXXXXXX 100644 | 475 | index XXXXXXX..XXXXXXX 100644 |
476 | --- a/hw/intc/trace-events | 476 | --- a/hw/intc/trace-events |
477 | +++ b/hw/intc/trace-events | 477 | +++ b/hw/intc/trace-events |
478 | @@ -XXX,XX +XXX,XX @@ xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t v | 478 | @@ -XXX,XX +XXX,XX @@ xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t v |
479 | xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d" | 479 | xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d" |
480 | xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 | 480 | xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 |
481 | 481 | ||
482 | +# xive2.c | 482 | +# xive2.c |
483 | +xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d" | 483 | +xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d" |
484 | +xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d" | 484 | +xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d" |
485 | + | 485 | + |
486 | # pnv_xive.c | 486 | # pnv_xive.c |
487 | pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 | 487 | pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 |
488 | 488 | ||
489 | diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build | 489 | diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build |
490 | index XXXXXXX..XXXXXXX 100644 | 490 | index XXXXXXX..XXXXXXX 100644 |
491 | --- a/tests/qtest/meson.build | 491 | --- a/tests/qtest/meson.build |
492 | +++ b/tests/qtest/meson.build | 492 | +++ b/tests/qtest/meson.build |
493 | @@ -XXX,XX +XXX,XX @@ qtests = { | 493 | @@ -XXX,XX +XXX,XX @@ qtests = { |
494 | 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], | 494 | 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], |
495 | 'migration-test': migration_files, | 495 | 'migration-test': migration_files, |
496 | 'pxe-test': files('boot-sector.c'), | 496 | 'pxe-test': files('boot-sector.c'), |
497 | - 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c'), | 497 | - 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c'), |
498 | + 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c', | 498 | + 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c', |
499 | + 'pnv-xive2-nvpg_bar.c'), | 499 | + 'pnv-xive2-nvpg_bar.c'), |
500 | 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], | 500 | 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], |
501 | 'tpm-crb-swtpm-test': [io, tpmemu_files], | 501 | 'tpm-crb-swtpm-test': [io, tpmemu_files], |
502 | 'tpm-crb-test': [io, tpmemu_files], | 502 | 'tpm-crb-test': [io, tpmemu_files], |
503 | -- | 503 | -- |
504 | 2.43.0 | 504 | 2.43.0 |
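A note on the addressing used by the nvpg_backlog_op() helper above: in the NVPG BAR, each index owns a pair of pages (the even page targets the NVP, the odd page the matching NVG), while the NVC BAR uses a single page per index. Below is a minimal sketch of the page selection, assuming the qtest's XIVE_NVPG_ADDR, XIVE_NVC_ADDR and XIVE_PAGE_SHIFT constants; the NVP arm is inferred by symmetry from the NVG arm shown above, not copied from the patch:

    /* Base address of the backlog page for each structure type */
    static uint64_t nvx_page_addr(int type, uint32_t index)
    {
        switch (type) {
        case NVP: /* even page of the pair (inferred, mirrors the NVG case) */
            return XIVE_NVPG_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1));
        case NVG: /* odd page of the pair */
            return XIVE_NVPG_ADDR + ((uint64_t)index << (XIVE_PAGE_SHIFT + 1)) +
                   (1ULL << XIVE_PAGE_SHIFT);
        case NVC: /* one page per index in the NVC BAR */
            return XIVE_NVC_ADDR + ((uint64_t)index << XIVE_PAGE_SHIFT);
        default:
            g_assert_not_reached();
        }
    }

On the device side, the pnv_xive2_nvpg_read()/write() handlers later in the series undo this pairing with page % 2 and index = page >> 1.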
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | If an END is defined with the 'crowd' bit set, then a target can be | 3 | If an END is defined with the 'crowd' bit set, then a target can be |
4 | running on different blocks. It means that some bits from the block | 4 | running on different blocks. It means that some bits from the block |
5 | VP are masked when looking for a match. It is similar to groups, but | 5 | VP are masked when looking for a match. It is similar to groups, but |
6 | on the block instead of the VP index. | 6 | on the block instead of the VP index. |
7 | 7 | ||
8 | Most of the changes are due to passing the extra argument 'crowd' all | 8 | Most of the changes are due to passing the extra argument 'crowd' all |
9 | the way to the function checking for matches. | 9 | the way to the function checking for matches. |
10 | 10 | ||
11 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 11 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
12 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 12 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
13 | --- | 13 | --- |
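A standalone sketch of the mask arithmetic, mirroring the xive2_get_vp_block_mask() and xive2_get_vp_index_mask() helpers added below; the main() harness and the sample block/index values are illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Size of a group/crowd: trailing 1 bits of the ID, plus one, as 2^n */
    static uint32_t vpgroup_size(uint32_t nvx)
    {
        return 1U << (__builtin_ctz(~nvx) + 1);
    }

    int main(void)
    {
        uint8_t  nvt_blk = 0x4;   /* low bit 0 => crowd of size 2  */
        uint32_t nvt_idx = 0x17;  /* 0b10111  => group of size 16  */
        uint8_t  block_mask = ~(vpgroup_size(nvt_blk) - 1) & 0b1111;
        uint32_t index_mask = ~(vpgroup_size(nvt_idx) - 1) & 0xFFFFFF;

        /* Blocks 0x4-0x5 and indexes 0x10-0x1f all match this END */
        printf("block_mask=0x%x index_mask=0x%06x\n", block_mask, index_mask);
        return 0;
    }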
14 | include/hw/ppc/xive.h | 10 +++--- | 14 | include/hw/ppc/xive.h | 10 +++--- |
15 | include/hw/ppc/xive2.h | 3 +- | 15 | include/hw/ppc/xive2.h | 3 +- |
16 | hw/intc/pnv_xive.c | 5 +-- | 16 | hw/intc/pnv_xive.c | 5 +-- |
17 | hw/intc/pnv_xive2.c | 12 +++---- | 17 | hw/intc/pnv_xive2.c | 12 +++---- |
18 | hw/intc/spapr_xive.c | 3 +- | 18 | hw/intc/spapr_xive.c | 3 +- |
19 | hw/intc/xive.c | 21 ++++++++---- | 19 | hw/intc/xive.c | 21 ++++++++---- |
20 | hw/intc/xive2.c | 78 +++++++++++++++++++++++++++++++++--------- | 20 | hw/intc/xive2.c | 78 +++++++++++++++++++++++++++++++++--------- |
21 | hw/ppc/pnv.c | 15 ++++---- | 21 | hw/ppc/pnv.c | 15 ++++---- |
22 | hw/ppc/spapr.c | 4 +-- | 22 | hw/ppc/spapr.c | 4 +-- |
23 | 9 files changed, 105 insertions(+), 46 deletions(-) | 23 | 9 files changed, 105 insertions(+), 46 deletions(-) |
24 | 24 | ||
25 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | 25 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h |
26 | index XXXXXXX..XXXXXXX 100644 | 26 | index XXXXXXX..XXXXXXX 100644 |
27 | --- a/include/hw/ppc/xive.h | 27 | --- a/include/hw/ppc/xive.h |
28 | +++ b/include/hw/ppc/xive.h | 28 | +++ b/include/hw/ppc/xive.h |
29 | @@ -XXX,XX +XXX,XX @@ struct XivePresenterClass { | 29 | @@ -XXX,XX +XXX,XX @@ struct XivePresenterClass { |
30 | InterfaceClass parent; | 30 | InterfaceClass parent; |
31 | int (*match_nvt)(XivePresenter *xptr, uint8_t format, | 31 | int (*match_nvt)(XivePresenter *xptr, uint8_t format, |
32 | uint8_t nvt_blk, uint32_t nvt_idx, | 32 | uint8_t nvt_blk, uint32_t nvt_idx, |
33 | - bool cam_ignore, uint8_t priority, | 33 | - bool cam_ignore, uint8_t priority, |
34 | + bool crowd, bool cam_ignore, uint8_t priority, | 34 | + bool crowd, bool cam_ignore, uint8_t priority, |
35 | uint32_t logic_serv, XiveTCTXMatch *match); | 35 | uint32_t logic_serv, XiveTCTXMatch *match); |
36 | bool (*in_kernel)(const XivePresenter *xptr); | 36 | bool (*in_kernel)(const XivePresenter *xptr); |
37 | uint32_t (*get_config)(XivePresenter *xptr); | 37 | uint32_t (*get_config)(XivePresenter *xptr); |
38 | int (*broadcast)(XivePresenter *xptr, | 38 | int (*broadcast)(XivePresenter *xptr, |
39 | uint8_t nvt_blk, uint32_t nvt_idx, | 39 | uint8_t nvt_blk, uint32_t nvt_idx, |
40 | - uint8_t priority); | 40 | - uint8_t priority); |
41 | + bool crowd, bool cam_ignore, uint8_t priority); | 41 | + bool crowd, bool cam_ignore, uint8_t priority); |
42 | }; | 42 | }; |
43 | 43 | ||
44 | int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 44 | int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
45 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 45 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
46 | bool cam_ignore, uint32_t logic_serv); | 46 | bool cam_ignore, uint32_t logic_serv); |
47 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 47 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
48 | uint8_t nvt_blk, uint32_t nvt_idx, | 48 | uint8_t nvt_blk, uint32_t nvt_idx, |
49 | - bool cam_ignore, uint8_t priority, | 49 | - bool cam_ignore, uint8_t priority, |
50 | + bool crowd, bool cam_ignore, uint8_t priority, | 50 | + bool crowd, bool cam_ignore, uint8_t priority, |
51 | uint32_t logic_serv, bool *precluded); | 51 | uint32_t logic_serv, bool *precluded); |
52 | 52 | ||
53 | uint32_t xive_get_vpgroup_size(uint32_t nvp_index); | 53 | uint32_t xive_get_vpgroup_size(uint32_t nvp_index); |
54 | @@ -XXX,XX +XXX,XX @@ struct XiveFabricClass { | 54 | @@ -XXX,XX +XXX,XX @@ struct XiveFabricClass { |
55 | InterfaceClass parent; | 55 | InterfaceClass parent; |
56 | int (*match_nvt)(XiveFabric *xfb, uint8_t format, | 56 | int (*match_nvt)(XiveFabric *xfb, uint8_t format, |
57 | uint8_t nvt_blk, uint32_t nvt_idx, | 57 | uint8_t nvt_blk, uint32_t nvt_idx, |
58 | - bool cam_ignore, uint8_t priority, | 58 | - bool cam_ignore, uint8_t priority, |
59 | + bool crowd, bool cam_ignore, uint8_t priority, | 59 | + bool crowd, bool cam_ignore, uint8_t priority, |
60 | uint32_t logic_serv, XiveTCTXMatch *match); | 60 | uint32_t logic_serv, XiveTCTXMatch *match); |
61 | int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, | 61 | int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, |
62 | - uint8_t priority); | 62 | - uint8_t priority); |
63 | + bool crowd, bool cam_ignore, uint8_t priority); | 63 | + bool crowd, bool cam_ignore, uint8_t priority); |
64 | }; | 64 | }; |
65 | 65 | ||
66 | /* | 66 | /* |
67 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | 67 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h |
68 | index XXXXXXX..XXXXXXX 100644 | 68 | index XXXXXXX..XXXXXXX 100644 |
69 | --- a/include/hw/ppc/xive2.h | 69 | --- a/include/hw/ppc/xive2.h |
70 | +++ b/include/hw/ppc/xive2.h | 70 | +++ b/include/hw/ppc/xive2.h |
71 | @@ -XXX,XX +XXX,XX @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); | 71 | @@ -XXX,XX +XXX,XX @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); |
72 | int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 72 | int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
73 | uint8_t format, | 73 | uint8_t format, |
74 | uint8_t nvt_blk, uint32_t nvt_idx, | 74 | uint8_t nvt_blk, uint32_t nvt_idx, |
75 | - bool cam_ignore, uint32_t logic_serv); | 75 | - bool cam_ignore, uint32_t logic_serv); |
76 | + bool crowd, bool cam_ignore, | 76 | + bool crowd, bool cam_ignore, |
77 | + uint32_t logic_serv); | 77 | + uint32_t logic_serv); |
78 | 78 | ||
79 | uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, | 79 | uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, |
80 | uint8_t blk, uint32_t idx, | 80 | uint8_t blk, uint32_t idx, |
81 | diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c | 81 | diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c |
82 | index XXXXXXX..XXXXXXX 100644 | 82 | index XXXXXXX..XXXXXXX 100644 |
83 | --- a/hw/intc/pnv_xive.c | 83 | --- a/hw/intc/pnv_xive.c |
84 | +++ b/hw/intc/pnv_xive.c | 84 | +++ b/hw/intc/pnv_xive.c |
85 | @@ -XXX,XX +XXX,XX @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) | 85 | @@ -XXX,XX +XXX,XX @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) |
86 | 86 | ||
87 | static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, | 87 | static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, |
88 | uint8_t nvt_blk, uint32_t nvt_idx, | 88 | uint8_t nvt_blk, uint32_t nvt_idx, |
89 | - bool cam_ignore, uint8_t priority, | 89 | - bool cam_ignore, uint8_t priority, |
90 | + bool crowd, bool cam_ignore, uint8_t priority, | 90 | + bool crowd, bool cam_ignore, uint8_t priority, |
91 | uint32_t logic_serv, XiveTCTXMatch *match) | 91 | uint32_t logic_serv, XiveTCTXMatch *match) |
92 | { | 92 | { |
93 | PnvXive *xive = PNV_XIVE(xptr); | 93 | PnvXive *xive = PNV_XIVE(xptr); |
94 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, | 94 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, |
95 | * Check the thread context CAM lines and record matches. | 95 | * Check the thread context CAM lines and record matches. |
96 | */ | 96 | */ |
97 | ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, | 97 | ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, |
98 | - nvt_idx, cam_ignore, logic_serv); | 98 | - nvt_idx, cam_ignore, logic_serv); |
99 | + nvt_idx, cam_ignore, | 99 | + nvt_idx, cam_ignore, |
100 | + logic_serv); | 100 | + logic_serv); |
101 | /* | 101 | /* |
102 | * Save the context and follow on to catch duplicates, that we | 102 | * Save the context and follow on to catch duplicates, that we |
103 | * don't support yet. | 103 | * don't support yet. |
104 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | 104 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c |
105 | index XXXXXXX..XXXXXXX 100644 | 105 | index XXXXXXX..XXXXXXX 100644 |
106 | --- a/hw/intc/pnv_xive2.c | 106 | --- a/hw/intc/pnv_xive2.c |
107 | +++ b/hw/intc/pnv_xive2.c | 107 | +++ b/hw/intc/pnv_xive2.c |
108 | @@ -XXX,XX +XXX,XX @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) | 108 | @@ -XXX,XX +XXX,XX @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) |
109 | 109 | ||
110 | static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, | 110 | static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, |
111 | uint8_t nvt_blk, uint32_t nvt_idx, | 111 | uint8_t nvt_blk, uint32_t nvt_idx, |
112 | - bool cam_ignore, uint8_t priority, | 112 | - bool cam_ignore, uint8_t priority, |
113 | + bool crowd, bool cam_ignore, uint8_t priority, | 113 | + bool crowd, bool cam_ignore, uint8_t priority, |
114 | uint32_t logic_serv, XiveTCTXMatch *match) | 114 | uint32_t logic_serv, XiveTCTXMatch *match) |
115 | { | 115 | { |
116 | PnvXive2 *xive = PNV_XIVE2(xptr); | 116 | PnvXive2 *xive = PNV_XIVE2(xptr); |
117 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, | 117 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, |
118 | logic_serv); | 118 | logic_serv); |
119 | } else { | 119 | } else { |
120 | ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk, | 120 | ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk, |
121 | - nvt_idx, cam_ignore, | 121 | - nvt_idx, cam_ignore, |
122 | - logic_serv); | 122 | - logic_serv); |
123 | + nvt_idx, crowd, cam_ignore, | 123 | + nvt_idx, crowd, cam_ignore, |
124 | + logic_serv); | 124 | + logic_serv); |
125 | } | 125 | } |
126 | 126 | ||
127 | if (ring != -1) { | 127 | if (ring != -1) { |
128 | @@ -XXX,XX +XXX,XX @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) | 128 | @@ -XXX,XX +XXX,XX @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) |
129 | 129 | ||
130 | static int pnv_xive2_broadcast(XivePresenter *xptr, | 130 | static int pnv_xive2_broadcast(XivePresenter *xptr, |
131 | uint8_t nvt_blk, uint32_t nvt_idx, | 131 | uint8_t nvt_blk, uint32_t nvt_idx, |
132 | - uint8_t priority) | 132 | - uint8_t priority) |
133 | + bool crowd, bool ignore, uint8_t priority) | 133 | + bool crowd, bool ignore, uint8_t priority) |
134 | { | 134 | { |
135 | PnvXive2 *xive = PNV_XIVE2(xptr); | 135 | PnvXive2 *xive = PNV_XIVE2(xptr); |
136 | PnvChip *chip = xive->chip; | 136 | PnvChip *chip = xive->chip; |
137 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_broadcast(XivePresenter *xptr, | 137 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_broadcast(XivePresenter *xptr, |
138 | 138 | ||
139 | if (gen1_tima_os) { | 139 | if (gen1_tima_os) { |
140 | ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | 140 | ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, |
141 | - nvt_idx, true, 0); | 141 | - nvt_idx, true, 0); |
142 | + nvt_idx, ignore, 0); | 142 | + nvt_idx, ignore, 0); |
143 | } else { | 143 | } else { |
144 | ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | 144 | ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, |
145 | - nvt_idx, true, 0); | 145 | - nvt_idx, true, 0); |
146 | + nvt_idx, crowd, ignore, 0); | 146 | + nvt_idx, crowd, ignore, 0); |
147 | } | 147 | } |
148 | 148 | ||
149 | if (ring != -1) { | 149 | if (ring != -1) { |
150 | diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c | 150 | diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c |
151 | index XXXXXXX..XXXXXXX 100644 | 151 | index XXXXXXX..XXXXXXX 100644 |
152 | --- a/hw/intc/spapr_xive.c | 152 | --- a/hw/intc/spapr_xive.c |
153 | +++ b/hw/intc/spapr_xive.c | 153 | +++ b/hw/intc/spapr_xive.c |
154 | @@ -XXX,XX +XXX,XX @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, | 154 | @@ -XXX,XX +XXX,XX @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, |
155 | 155 | ||
156 | static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, | 156 | static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, |
157 | uint8_t nvt_blk, uint32_t nvt_idx, | 157 | uint8_t nvt_blk, uint32_t nvt_idx, |
158 | - bool cam_ignore, uint8_t priority, | 158 | - bool cam_ignore, uint8_t priority, |
159 | + bool crowd, bool cam_ignore, | 159 | + bool crowd, bool cam_ignore, |
160 | + uint8_t priority, | 160 | + uint8_t priority, |
161 | uint32_t logic_serv, XiveTCTXMatch *match) | 161 | uint32_t logic_serv, XiveTCTXMatch *match) |
162 | { | 162 | { |
163 | CPUState *cs; | 163 | CPUState *cs; |
164 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | 164 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c |
165 | index XXXXXXX..XXXXXXX 100644 | 165 | index XXXXXXX..XXXXXXX 100644 |
166 | --- a/hw/intc/xive.c | 166 | --- a/hw/intc/xive.c |
167 | +++ b/hw/intc/xive.c | 167 | +++ b/hw/intc/xive.c |
168 | @@ -XXX,XX +XXX,XX @@ uint32_t xive_get_vpgroup_size(uint32_t nvp_index) | 168 | @@ -XXX,XX +XXX,XX @@ uint32_t xive_get_vpgroup_size(uint32_t nvp_index) |
169 | return 1 << (ctz32(~nvp_index) + 1); | 169 | return 1 << (ctz32(~nvp_index) + 1); |
170 | } | 170 | } |
171 | 171 | ||
172 | -static uint8_t xive_get_group_level(uint32_t nvp_index) | 172 | -static uint8_t xive_get_group_level(uint32_t nvp_index) |
173 | +static uint8_t xive_get_group_level(bool crowd, bool ignore, | 173 | +static uint8_t xive_get_group_level(bool crowd, bool ignore, |
174 | + uint32_t nvp_blk, uint32_t nvp_index) | 174 | + uint32_t nvp_blk, uint32_t nvp_index) |
175 | { | 175 | { |
176 | - /* FIXME add crowd encoding */ | 176 | - /* FIXME add crowd encoding */ |
177 | - return ctz32(~nvp_index) + 1; | 177 | - return ctz32(~nvp_index) + 1; |
178 | + uint8_t level = 0; | 178 | + uint8_t level = 0; |
179 | + | 179 | + |
180 | + if (crowd) { | 180 | + if (crowd) { |
181 | + level = ((ctz32(~nvp_blk) + 1) & 0b11) << 4; | 181 | + level = ((ctz32(~nvp_blk) + 1) & 0b11) << 4; |
182 | + } | 182 | + } |
183 | + if (ignore) { | 183 | + if (ignore) { |
184 | + level |= (ctz32(~nvp_index) + 1) & 0b1111; | 184 | + level |= (ctz32(~nvp_index) + 1) & 0b1111; |
185 | + } | 185 | + } |
186 | + return level; | 186 | + return level; |
187 | } | 187 | } |
188 | 188 | ||
189 | /* | 189 | /* |
190 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 190 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
191 | */ | 191 | */ |
192 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 192 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
193 | uint8_t nvt_blk, uint32_t nvt_idx, | 193 | uint8_t nvt_blk, uint32_t nvt_idx, |
194 | - bool cam_ignore, uint8_t priority, | 194 | - bool cam_ignore, uint8_t priority, |
195 | + bool crowd, bool cam_ignore, uint8_t priority, | 195 | + bool crowd, bool cam_ignore, uint8_t priority, |
196 | uint32_t logic_serv, bool *precluded) | 196 | uint32_t logic_serv, bool *precluded) |
197 | { | 197 | { |
198 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); | 198 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); |
199 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 199 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
200 | * a new command to the presenters (the equivalent of the "assign" | 200 | * a new command to the presenters (the equivalent of the "assign" |
201 | * power bus command in the documented full notify sequence. | 201 | * power bus command in the documented full notify sequence. |
202 | */ | 202 | */ |
203 | - count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore, | 203 | - count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore, |
204 | + count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, | 204 | + count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, |
205 | priority, logic_serv, &match); | 205 | priority, logic_serv, &match); |
206 | if (count < 0) { | 206 | if (count < 0) { |
207 | return false; | 207 | return false; |
208 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | 208 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, |
209 | 209 | ||
210 | /* handle CPU exception delivery */ | 210 | /* handle CPU exception delivery */ |
211 | if (count) { | 211 | if (count) { |
212 | - group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0; | 212 | - group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0; |
213 | + group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx); | 213 | + group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx); |
214 | trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); | 214 | trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); |
215 | xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); | 215 | xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); |
216 | } else { | 216 | } else { |
217 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) | 217 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) |
218 | } | 218 | } |
219 | 219 | ||
220 | found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, | 220 | found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, |
221 | + false /* crowd */, | 221 | + false /* crowd */, |
222 | xive_get_field32(END_W7_F0_IGNORE, end.w7), | 222 | xive_get_field32(END_W7_F0_IGNORE, end.w7), |
223 | priority, | 223 | priority, |
224 | xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), | 224 | xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), |
225 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 225 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
226 | index XXXXXXX..XXXXXXX 100644 | 226 | index XXXXXXX..XXXXXXX 100644 |
227 | --- a/hw/intc/xive2.c | 227 | --- a/hw/intc/xive2.c |
228 | +++ b/hw/intc/xive2.c | 228 | +++ b/hw/intc/xive2.c |
229 | @@ -XXX,XX +XXX,XX @@ static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, | 229 | @@ -XXX,XX +XXX,XX @@ static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, |
230 | return (cam1 & vp_mask) == (cam2 & vp_mask); | 230 | return (cam1 & vp_mask) == (cam2 & vp_mask); |
231 | } | 231 | } |
232 | 232 | ||
233 | +static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd) | 233 | +static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd) |
234 | +{ | 234 | +{ |
235 | + uint8_t size, block_mask = 0b1111; | 235 | + uint8_t size, block_mask = 0b1111; |
236 | + | 236 | + |
237 | + /* 3 supported crowd sizes: 2, 4, 16 */ | 237 | + /* 3 supported crowd sizes: 2, 4, 16 */ |
238 | + if (crowd) { | 238 | + if (crowd) { |
239 | + size = xive_get_vpgroup_size(nvt_blk); | 239 | + size = xive_get_vpgroup_size(nvt_blk); |
240 | + if (size == 8) { | 240 | + if (size == 8) { |
241 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of 8\n"); | 241 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of 8\n"); |
242 | + return block_mask; | 242 | + return block_mask; |
243 | + } | 243 | + } |
244 | + block_mask = ~(size - 1); | 244 | + block_mask = ~(size - 1); |
245 | + block_mask &= 0b1111; | 245 | + block_mask &= 0b1111; |
246 | + } | 246 | + } |
247 | + return block_mask; | 247 | + return block_mask; |
248 | +} | 248 | +} |
249 | + | 249 | + |
250 | +static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore) | 250 | +static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore) |
251 | +{ | 251 | +{ |
252 | + uint32_t index_mask = 0xFFFFFF; /* 24 bits */ | 252 | + uint32_t index_mask = 0xFFFFFF; /* 24 bits */ |
253 | + | 253 | + |
254 | + if (cam_ignore) { | 254 | + if (cam_ignore) { |
255 | + index_mask = ~(xive_get_vpgroup_size(nvt_index) - 1); | 255 | + index_mask = ~(xive_get_vpgroup_size(nvt_index) - 1); |
256 | + index_mask &= 0xFFFFFF; | 256 | + index_mask &= 0xFFFFFF; |
257 | + } | 257 | + } |
258 | + return index_mask; | 258 | + return index_mask; |
259 | +} | 259 | +} |
260 | + | 260 | + |
261 | /* | 261 | /* |
262 | * The thread context register words are in big-endian format. | 262 | * The thread context register words are in big-endian format. |
263 | */ | 263 | */ |
264 | int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 264 | int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
265 | uint8_t format, | 265 | uint8_t format, |
266 | uint8_t nvt_blk, uint32_t nvt_idx, | 266 | uint8_t nvt_blk, uint32_t nvt_idx, |
267 | - bool cam_ignore, uint32_t logic_serv) | 267 | - bool cam_ignore, uint32_t logic_serv) |
268 | + bool crowd, bool cam_ignore, | 268 | + bool crowd, bool cam_ignore, |
269 | + uint32_t logic_serv) | 269 | + uint32_t logic_serv) |
270 | { | 270 | { |
271 | uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); | 271 | uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); |
272 | uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); | 272 | uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); |
273 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 273 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
274 | uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); | 274 | uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); |
275 | uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); | 275 | uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); |
276 | 276 | ||
277 | - uint32_t vp_mask = 0xFFFFFFFF; | 277 | - uint32_t vp_mask = 0xFFFFFFFF; |
278 | + uint32_t index_mask, vp_mask; | 278 | + uint32_t index_mask, vp_mask; |
279 | + uint8_t block_mask; | 279 | + uint8_t block_mask; |
280 | 280 | ||
281 | if (format == 0) { | 281 | if (format == 0) { |
282 | /* | 282 | /* |
283 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | 283 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, |
284 | * i=1: VP-group notification (bits ignored at the end of the | 284 | * i=1: VP-group notification (bits ignored at the end of the |
285 | * NVT identifier) | 285 | * NVT identifier) |
286 | */ | 286 | */ |
287 | - if (cam_ignore) { | 287 | - if (cam_ignore) { |
288 | - vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1); | 288 | - vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1); |
289 | - } | 289 | - } |
290 | + block_mask = xive2_get_vp_block_mask(nvt_blk, crowd); | 290 | + block_mask = xive2_get_vp_block_mask(nvt_blk, crowd); |
291 | + index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore); | 291 | + index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore); |
292 | + vp_mask = xive2_nvp_cam_line(block_mask, index_mask); | 292 | + vp_mask = xive2_nvp_cam_line(block_mask, index_mask); |
293 | 293 | ||
294 | /* For VP-group notifications, threads with LGS=0 are excluded */ | 294 | /* For VP-group notifications, threads with LGS=0 are excluded */ |
295 | 295 | ||
296 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | 296 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, |
297 | return; | 297 | return; |
298 | } | 298 | } |
299 | 299 | ||
300 | + if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) { | 300 | + if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) { |
301 | + qemu_log_mask(LOG_GUEST_ERROR, | 301 | + qemu_log_mask(LOG_GUEST_ERROR, |
302 | + "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n"); | 302 | + "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n"); |
303 | + return; | 303 | + return; |
304 | + } | 304 | + } |
305 | + | 305 | + |
306 | if (xive2_end_is_enqueue(&end)) { | 306 | if (xive2_end_is_enqueue(&end)) { |
307 | xive2_end_enqueue(&end, end_data); | 307 | xive2_end_enqueue(&end, end_data); |
308 | /* Enqueuing event data modifies the EQ toggle and index */ | 308 | /* Enqueuing event data modifies the EQ toggle and index */ |
309 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | 309 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, |
310 | } | 310 | } |
311 | 311 | ||
312 | found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx, | 312 | found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx, |
313 | - xive2_end_is_ignore(&end), | 313 | - xive2_end_is_ignore(&end), |
314 | + xive2_end_is_crowd(&end), xive2_end_is_ignore(&end), | 314 | + xive2_end_is_crowd(&end), xive2_end_is_ignore(&end), |
315 | priority, | 315 | priority, |
316 | xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), | 316 | xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), |
317 | &precluded); | 317 | &precluded); |
318 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | 318 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, |
319 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | 319 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); |
320 | xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | 320 | xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); |
321 | } else { | 321 | } else { |
322 | - Xive2Nvgc nvg; | 322 | - Xive2Nvgc nvg; |
323 | + Xive2Nvgc nvgc; | 323 | + Xive2Nvgc nvgc; |
324 | uint32_t backlog; | 324 | uint32_t backlog; |
325 | + bool crowd; | 325 | + bool crowd; |
326 | 326 | ||
327 | - /* For groups, the per-priority backlog counters are in the NVG */ | 327 | - /* For groups, the per-priority backlog counters are in the NVG */ |
328 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { | 328 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { |
329 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", | 329 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", |
330 | - nvp_blk, nvp_idx); | 330 | - nvp_blk, nvp_idx); |
331 | + crowd = xive2_end_is_crowd(&end); | 331 | + crowd = xive2_end_is_crowd(&end); |
332 | + | 332 | + |
333 | + /* | 333 | + /* |
334 | + * For groups and crowds, the per-priority backlog | 334 | + * For groups and crowds, the per-priority backlog |
335 | + * counters are stored in the NVG/NVC structures | 335 | + * counters are stored in the NVG/NVC structures |
336 | + */ | 336 | + */ |
337 | + if (xive2_router_get_nvgc(xrtr, crowd, | 337 | + if (xive2_router_get_nvgc(xrtr, crowd, |
338 | + nvp_blk, nvp_idx, &nvgc)) { | 338 | + nvp_blk, nvp_idx, &nvgc)) { |
339 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", | 339 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", |
340 | + crowd ? "NVC" : "NVG", nvp_blk, nvp_idx); | 340 | + crowd ? "NVC" : "NVG", nvp_blk, nvp_idx); |
341 | return; | 341 | return; |
342 | } | 342 | } |
343 | 343 | ||
344 | - if (!xive2_nvgc_is_valid(&nvg)) { | 344 | - if (!xive2_nvgc_is_valid(&nvg)) { |
345 | + if (!xive2_nvgc_is_valid(&nvgc)) { | 345 | + if (!xive2_nvgc_is_valid(&nvgc)) { |
346 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", | 346 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", |
347 | nvp_blk, nvp_idx); | 347 | nvp_blk, nvp_idx); |
348 | return; | 348 | return; |
349 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | 349 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, |
350 | * set the LSMFB field of the TIMA of relevant threads so | 350 | * set the LSMFB field of the TIMA of relevant threads so |
351 | * that they know an interrupt is pending. | 351 | * that they know an interrupt is pending. |
352 | */ | 352 | */ |
353 | - backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; | 353 | - backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; |
354 | - xive2_nvgc_set_backlog(&nvg, priority, backlog); | 354 | - xive2_nvgc_set_backlog(&nvg, priority, backlog); |
355 | - xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); | 355 | - xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); |
356 | + backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1; | 356 | + backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1; |
357 | + xive2_nvgc_set_backlog(&nvgc, priority, backlog); | 357 | + xive2_nvgc_set_backlog(&nvgc, priority, backlog); |
358 | + xive2_router_write_nvgc(xrtr, crowd, nvp_blk, nvp_idx, &nvgc); | 358 | + xive2_router_write_nvgc(xrtr, crowd, nvp_blk, nvp_idx, &nvgc); |
359 | 359 | ||
360 | if (precluded && backlog == 1) { | 360 | if (precluded && backlog == 1) { |
361 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); | 361 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); |
362 | - xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); | 362 | - xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); |
363 | + xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, | 363 | + xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, |
364 | + xive2_end_is_crowd(&end), | 364 | + xive2_end_is_crowd(&end), |
365 | + xive2_end_is_ignore(&end), | 365 | + xive2_end_is_ignore(&end), |
366 | + priority); | 366 | + priority); |
367 | 367 | ||
368 | if (!xive2_end_is_precluded_escalation(&end)) { | 368 | if (!xive2_end_is_precluded_escalation(&end)) { |
369 | /* | 369 | /* |
370 | diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c | 370 | diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c |
371 | index XXXXXXX..XXXXXXX 100644 | 371 | index XXXXXXX..XXXXXXX 100644 |
372 | --- a/hw/ppc/pnv.c | 372 | --- a/hw/ppc/pnv.c |
373 | +++ b/hw/ppc/pnv.c | 373 | +++ b/hw/ppc/pnv.c |
374 | @@ -XXX,XX +XXX,XX @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf) | 374 | @@ -XXX,XX +XXX,XX @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf) |
375 | 375 | ||
376 | static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, | 376 | static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, |
377 | uint8_t nvt_blk, uint32_t nvt_idx, | 377 | uint8_t nvt_blk, uint32_t nvt_idx, |
378 | - bool cam_ignore, uint8_t priority, | 378 | - bool cam_ignore, uint8_t priority, |
379 | + bool crowd, bool cam_ignore, uint8_t priority, | 379 | + bool crowd, bool cam_ignore, uint8_t priority, |
380 | uint32_t logic_serv, | 380 | uint32_t logic_serv, |
381 | XiveTCTXMatch *match) | 381 | XiveTCTXMatch *match) |
382 | { | 382 | { |
383 | @@ -XXX,XX +XXX,XX @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, | 383 | @@ -XXX,XX +XXX,XX @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, |
384 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | 384 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); |
385 | int count; | 385 | int count; |
386 | 386 | ||
387 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, | 387 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, |
388 | - priority, logic_serv, match); | 388 | - priority, logic_serv, match); |
389 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, | 389 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, |
390 | + cam_ignore, priority, logic_serv, match); | 390 | + cam_ignore, priority, logic_serv, match); |
391 | 391 | ||
392 | if (count < 0) { | 392 | if (count < 0) { |
393 | return count; | 393 | return count; |
394 | @@ -XXX,XX +XXX,XX @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, | 394 | @@ -XXX,XX +XXX,XX @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, |
395 | 395 | ||
396 | static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | 396 | static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, |
397 | uint8_t nvt_blk, uint32_t nvt_idx, | 397 | uint8_t nvt_blk, uint32_t nvt_idx, |
398 | - bool cam_ignore, uint8_t priority, | 398 | - bool cam_ignore, uint8_t priority, |
399 | + bool crowd, bool cam_ignore, uint8_t priority, | 399 | + bool crowd, bool cam_ignore, uint8_t priority, |
400 | uint32_t logic_serv, | 400 | uint32_t logic_serv, |
401 | XiveTCTXMatch *match) | 401 | XiveTCTXMatch *match) |
402 | { | 402 | { |
403 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | 403 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, |
404 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | 404 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); |
405 | int count; | 405 | int count; |
406 | 406 | ||
407 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, | 407 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, |
408 | - priority, logic_serv, match); | 408 | - priority, logic_serv, match); |
409 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, | 409 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, |
410 | + cam_ignore, priority, logic_serv, match); | 410 | + cam_ignore, priority, logic_serv, match); |
411 | 411 | ||
412 | if (count < 0) { | 412 | if (count < 0) { |
413 | return count; | 413 | return count; |
414 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | 414 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, |
415 | 415 | ||
416 | static int pnv10_xive_broadcast(XiveFabric *xfb, | 416 | static int pnv10_xive_broadcast(XiveFabric *xfb, |
417 | uint8_t nvt_blk, uint32_t nvt_idx, | 417 | uint8_t nvt_blk, uint32_t nvt_idx, |
418 | + bool crowd, bool cam_ignore, | 418 | + bool crowd, bool cam_ignore, |
419 | uint8_t priority) | 419 | uint8_t priority) |
420 | { | 420 | { |
421 | PnvMachineState *pnv = PNV_MACHINE(xfb); | 421 | PnvMachineState *pnv = PNV_MACHINE(xfb); |
422 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_broadcast(XiveFabric *xfb, | 422 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_broadcast(XiveFabric *xfb, |
423 | XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); | 423 | XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); |
424 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | 424 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); |
425 | 425 | ||
426 | - xpc->broadcast(xptr, nvt_blk, nvt_idx, priority); | 426 | - xpc->broadcast(xptr, nvt_blk, nvt_idx, priority); |
427 | + xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority); | 427 | + xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority); |
428 | } | 428 | } |
429 | return 0; | 429 | return 0; |
430 | } | 430 | } |
431 | diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c | 431 | diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c |
432 | index XXXXXXX..XXXXXXX 100644 | 432 | index XXXXXXX..XXXXXXX 100644 |
433 | --- a/hw/ppc/spapr.c | 433 | --- a/hw/ppc/spapr.c |
434 | +++ b/hw/ppc/spapr.c | 434 | +++ b/hw/ppc/spapr.c |
435 | @@ -XXX,XX +XXX,XX @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf) | 435 | @@ -XXX,XX +XXX,XX @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf) |
436 | */ | 436 | */ |
437 | static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, | 437 | static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, |
438 | uint8_t nvt_blk, uint32_t nvt_idx, | 438 | uint8_t nvt_blk, uint32_t nvt_idx, |
439 | - bool cam_ignore, uint8_t priority, | 439 | - bool cam_ignore, uint8_t priority, |
440 | + bool crowd, bool cam_ignore, uint8_t priority, | 440 | + bool crowd, bool cam_ignore, uint8_t priority, |
441 | uint32_t logic_serv, XiveTCTXMatch *match) | 441 | uint32_t logic_serv, XiveTCTXMatch *match) |
442 | { | 442 | { |
443 | SpaprMachineState *spapr = SPAPR_MACHINE(xfb); | 443 | SpaprMachineState *spapr = SPAPR_MACHINE(xfb); |
444 | @@ -XXX,XX +XXX,XX @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, | 444 | @@ -XXX,XX +XXX,XX @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, |
445 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | 445 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); |
446 | int count; | 446 | int count; |
447 | 447 | ||
448 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, | 448 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, |
449 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore, | 449 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore, |
450 | priority, logic_serv, match); | 450 | priority, logic_serv, match); |
451 | if (count < 0) { | 451 | if (count < 0) { |
452 | return count; | 452 | return count; |
453 | -- | 453 | -- |
454 | 2.43.0 | 454 | 2.43.0 |
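As a worked example of the xive_get_group_level() encoding added in the patch above, where the crowd level occupies bits 4-5 and the group level bits 0-3 of the value reported to the thread context (the sample IDs are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t nvp_blk = 0x1;   /* ends in 0b01   => crowd of size 4  */
        uint32_t nvp_idx = 0x17;  /* ends in 0b0111 => group of size 16 */
        uint8_t level;

        level  = ((__builtin_ctz(~nvp_blk) + 1) & 0b11) << 4;  /* crowd: 0x20 */
        level |= (__builtin_ctz(~nvp_idx) + 1) & 0b1111;       /* group: 0x04 */
        printf("group_level=0x%02x\n", level);                 /* prints 0x24 */
        return 0;
    }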
New patch | |||
---|---|---|---|
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | ||
1 | 2 | ||
3 | Add support for the NVPG and NVC BARs. Access to the BAR pages will | ||
4 | cause backlog counter operations to either increment or decrement
5 | the counter. | ||
6 | |||
7 | Also add qtests covering these operations.
8 | |||
9 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | ||
10 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
11 | --- | ||
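For reference, the backlog operation and priority are carried in the low 12 bits of the BAR access address. Below is a sketch of the encoding; the shift values are our derivation from the NVx_BACKLOG_OP/NVx_BACKLOG_PRIO PPC_BITMASK definitions added in this patch (IBM bits 52-53 of a 64-bit doubleword are in-page bits 10-11, bits 57-59 are bits 4-6), not constants taken from the patch itself:

    #include <stdint.h>

    #define BACKLOG_OP_SHIFT    10  /* 0b00 incr/set, 0b01 decr/reset, 0b1- read */
    #define BACKLOG_PRIO_SHIFT   4  /* priority 0-7 */

    /* Build the in-page offset selecting a backlog operation */
    static uint16_t backlog_offset(uint8_t op, uint8_t priority)
    {
        return (uint16_t)(((op & 0b11) << BACKLOG_OP_SHIFT) |
                          ((priority & 0b111) << BACKLOG_PRIO_SHIFT));
    }

For instance, a 2-byte load at page_base + backlog_offset(0b10, 5) reads the priority-5 counter without modifying it, matching the '0b1- => read' case in xive2_presenter_nvgc_backlog_op().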
12 | include/hw/ppc/xive2.h | 9 ++ | ||
13 | include/hw/ppc/xive2_regs.h | 3 + | ||
14 | tests/qtest/pnv-xive2-common.h | 1 + | ||
15 | hw/intc/pnv_xive2.c | 80 +++++++++++++--- | ||
16 | hw/intc/xive2.c | 87 +++++++++++++++++ | ||
17 | tests/qtest/pnv-xive2-nvpg_bar.c | 154 +++++++++++++++++++++++++++++++ | ||
18 | tests/qtest/pnv-xive2-test.c | 3 + | ||
19 | hw/intc/trace-events | 4 + | ||
20 | tests/qtest/meson.build | 3 +- | ||
21 | 9 files changed, 329 insertions(+), 15 deletions(-) | ||
22 | create mode 100644 tests/qtest/pnv-xive2-nvpg_bar.c | ||
23 | |||
24 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | ||
25 | index XXXXXXX..XXXXXXX 100644 | ||
26 | --- a/include/hw/ppc/xive2.h | ||
27 | +++ b/include/hw/ppc/xive2.h | ||
28 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
29 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
30 | bool cam_ignore, uint32_t logic_serv); | ||
31 | |||
32 | +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, | ||
33 | + uint8_t blk, uint32_t idx, | ||
34 | + uint16_t offset); | ||
35 | + | ||
36 | +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, | ||
37 | + bool crowd, | ||
38 | + uint8_t blk, uint32_t idx, | ||
39 | + uint16_t offset, uint16_t val); | ||
40 | + | ||
41 | /* | ||
42 | * XIVE2 END ESBs (POWER10) | ||
43 | */ | ||
44 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h | ||
45 | index XXXXXXX..XXXXXXX 100644 | ||
46 | --- a/include/hw/ppc/xive2_regs.h | ||
47 | +++ b/include/hw/ppc/xive2_regs.h | ||
48 | @@ -XXX,XX +XXX,XX @@ typedef struct Xive2Nvgc { | ||
49 | void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, | ||
50 | GString *buf); | ||
51 | |||
52 | +#define NVx_BACKLOG_OP PPC_BITMASK(52, 53) | ||
53 | +#define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) | ||
54 | + | ||
55 | #endif /* PPC_XIVE2_REGS_H */ | ||
56 | diff --git a/tests/qtest/pnv-xive2-common.h b/tests/qtest/pnv-xive2-common.h | ||
57 | index XXXXXXX..XXXXXXX 100644 | ||
58 | --- a/tests/qtest/pnv-xive2-common.h | ||
59 | +++ b/tests/qtest/pnv-xive2-common.h | ||
60 | @@ -XXX,XX +XXX,XX @@ extern void set_end(QTestState *qts, uint32_t index, uint32_t nvp_index, | ||
61 | |||
62 | |||
63 | void test_flush_sync_inject(QTestState *qts); | ||
64 | +void test_nvpg_bar(QTestState *qts); | ||
65 | |||
66 | #endif /* TEST_PNV_XIVE2_COMMON_H */ | ||
67 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | ||
68 | index XXXXXXX..XXXXXXX 100644 | ||
69 | --- a/hw/intc/pnv_xive2.c | ||
70 | +++ b/hw/intc/pnv_xive2.c | ||
71 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_tm_ops = { | ||
72 | }, | ||
73 | }; | ||
74 | |||
75 | -static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset, | ||
76 | +static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr addr, | ||
77 | unsigned size) | ||
78 | { | ||
79 | PnvXive2 *xive = PNV_XIVE2(opaque); | ||
80 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | ||
81 | + uint32_t page = addr >> xive->nvc_shift;
82 | + uint16_t op = addr & 0xFFF; | ||
83 | + uint8_t blk = pnv_xive2_block_id(xive); | ||
84 | |||
85 | - xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset); | ||
86 | - return -1; | ||
87 | + if (size != 2) { | ||
88 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc load size %d\n", | ||
89 | + size); | ||
90 | + return -1; | ||
91 | + } | ||
92 | + | ||
93 | + return xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, 1); | ||
94 | } | ||
95 | |||
96 | -static void pnv_xive2_nvc_write(void *opaque, hwaddr offset, | ||
97 | +static void pnv_xive2_nvc_write(void *opaque, hwaddr addr, | ||
98 | uint64_t val, unsigned size) | ||
99 | { | ||
100 | PnvXive2 *xive = PNV_XIVE2(opaque); | ||
101 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | ||
102 | + uint32_t page = addr >> xive->nvc_shift; | ||
103 | + uint16_t op = addr & 0xFFF; | ||
104 | + uint8_t blk = pnv_xive2_block_id(xive); | ||
105 | |||
106 | - xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset); | ||
107 | + if (size != 1) { | ||
108 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvc write size %d\n", | ||
109 | + size); | ||
110 | + return; | ||
111 | + } | ||
112 | + | ||
113 | + (void)xive2_presenter_nvgc_backlog_op(xptr, true, blk, page, op, val); | ||
114 | } | ||
115 | |||
116 | static const MemoryRegionOps pnv_xive2_nvc_ops = { | ||
117 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_nvc_ops = { | ||
118 | .write = pnv_xive2_nvc_write, | ||
119 | .endianness = DEVICE_BIG_ENDIAN, | ||
120 | .valid = { | ||
121 | - .min_access_size = 8, | ||
122 | + .min_access_size = 1, | ||
123 | .max_access_size = 8, | ||
124 | }, | ||
125 | .impl = { | ||
126 | - .min_access_size = 8, | ||
127 | + .min_access_size = 1, | ||
128 | .max_access_size = 8, | ||
129 | }, | ||
130 | }; | ||
131 | |||
132 | -static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset, | ||
133 | +static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr addr, | ||
134 | unsigned size) | ||
135 | { | ||
136 | PnvXive2 *xive = PNV_XIVE2(opaque); | ||
137 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | ||
138 | + uint32_t page = addr >> xive->nvpg_shift; | ||
139 | + uint16_t op = addr & 0xFFF; | ||
140 | + uint32_t index = page >> 1; | ||
141 | + uint8_t blk = pnv_xive2_block_id(xive); | ||
142 | |||
143 | - xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset); | ||
144 | - return -1; | ||
145 | + if (size != 2) { | ||
146 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg load size %d\n", | ||
147 | + size); | ||
148 | + return -1; | ||
149 | + } | ||
150 | + | ||
151 | + if (page % 2) { | ||
152 | + /* odd page - NVG */ | ||
153 | + return xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, 1); | ||
154 | + } else { | ||
155 | + /* even page - NVP */ | ||
156 | + return xive2_presenter_nvp_backlog_op(xptr, blk, index, op); | ||
157 | + } | ||
158 | } | ||
159 | |||
160 | -static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset, | ||
161 | +static void pnv_xive2_nvpg_write(void *opaque, hwaddr addr, | ||
162 | uint64_t val, unsigned size) | ||
163 | { | ||
164 | PnvXive2 *xive = PNV_XIVE2(opaque); | ||
165 | + XivePresenter *xptr = XIVE_PRESENTER(xive); | ||
166 | + uint32_t page = addr >> xive->nvpg_shift; | ||
167 | + uint16_t op = addr & 0xFFF; | ||
168 | + uint32_t index = page >> 1; | ||
169 | + uint8_t blk = pnv_xive2_block_id(xive); | ||
170 | |||
171 | - xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset); | ||
172 | + if (size != 1) { | ||
173 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid nvpg write size %d\n", | ||
174 | + size); | ||
175 | + return; | ||
176 | + } | ||
177 | + | ||
178 | + if (page % 2) { | ||
179 | + /* odd page - NVG */ | ||
180 | + (void)xive2_presenter_nvgc_backlog_op(xptr, false, blk, index, op, val); | ||
181 | + } else { | ||
182 | + /* even page - NVP */ | ||
183 | + (void)xive2_presenter_nvp_backlog_op(xptr, blk, index, op); | ||
184 | + } | ||
185 | } | ||
186 | |||
187 | static const MemoryRegionOps pnv_xive2_nvpg_ops = { | ||
188 | @@ -XXX,XX +XXX,XX @@ static const MemoryRegionOps pnv_xive2_nvpg_ops = { | ||
189 | .write = pnv_xive2_nvpg_write, | ||
190 | .endianness = DEVICE_BIG_ENDIAN, | ||
191 | .valid = { | ||
192 | - .min_access_size = 8, | ||
193 | + .min_access_size = 1, | ||
194 | .max_access_size = 8, | ||
195 | }, | ||
196 | .impl = { | ||
197 | - .min_access_size = 8, | ||
198 | + .min_access_size = 1, | ||
199 | .max_access_size = 8, | ||
200 | }, | ||
201 | }; | ||
202 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
203 | index XXXXXXX..XXXXXXX 100644 | ||
204 | --- a/hw/intc/xive2.c | ||
205 | +++ b/hw/intc/xive2.c | ||
206 | @@ -XXX,XX +XXX,XX @@ static void xive2_nvgc_set_backlog(Xive2Nvgc *nvgc, uint8_t priority, | ||
207 | } | ||
208 | } | ||
209 | |||
210 | +uint64_t xive2_presenter_nvgc_backlog_op(XivePresenter *xptr, | ||
211 | + bool crowd, | ||
212 | + uint8_t blk, uint32_t idx, | ||
213 | + uint16_t offset, uint16_t val) | ||
214 | +{ | ||
215 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); | ||
216 | + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); | ||
217 | + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); | ||
218 | + Xive2Nvgc nvgc; | ||
219 | + uint32_t count, old_count; | ||
220 | + | ||
221 | + if (xive2_router_get_nvgc(xrtr, crowd, blk, idx, &nvgc)) { | ||
222 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No %s %x/%x\n", | ||
223 | + crowd ? "NVC" : "NVG", blk, idx); | ||
224 | + return -1; | ||
225 | + } | ||
226 | + if (!xive2_nvgc_is_valid(&nvgc)) { | ||
227 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", blk, idx); | ||
228 | + return -1; | ||
229 | + } | ||
230 | + | ||
231 | + old_count = xive2_nvgc_get_backlog(&nvgc, priority); | ||
232 | + count = old_count; | ||
233 | + /* | ||
234 | + * op: | ||
235 | + * 0b00 => increment | ||
236 | + * 0b01 => decrement | ||
237 | + * 0b1- => read | ||
238 | + */ | ||
239 | + if (op == 0b00 || op == 0b01) { | ||
240 | + if (op == 0b00) { | ||
241 | + count += val; | ||
242 | + } else { | ||
243 | + if (count > val) { | ||
244 | + count -= val; | ||
245 | + } else { | ||
246 | + count = 0; | ||
247 | + } | ||
248 | + } | ||
249 | + xive2_nvgc_set_backlog(&nvgc, priority, count); | ||
250 | + xive2_router_write_nvgc(xrtr, crowd, blk, idx, &nvgc); | ||
251 | + } | ||
252 | + trace_xive_nvgc_backlog_op(crowd, blk, idx, op, priority, old_count); | ||
253 | + return old_count; | ||
254 | +} | ||
255 | + | ||
256 | +uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, | ||
257 | + uint8_t blk, uint32_t idx, | ||
258 | + uint16_t offset) | ||
259 | +{ | ||
260 | + Xive2Router *xrtr = XIVE2_ROUTER(xptr); | ||
261 | + uint8_t priority = GETFIELD(NVx_BACKLOG_PRIO, offset); | ||
262 | + uint8_t op = GETFIELD(NVx_BACKLOG_OP, offset); | ||
263 | + Xive2Nvp nvp; | ||
264 | + uint8_t ipb, old_ipb, rc; | ||
265 | + | ||
266 | + if (xive2_router_get_nvp(xrtr, blk, idx, &nvp)) { | ||
267 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n", blk, idx); | ||
268 | + return -1; | ||
269 | + } | ||
270 | + if (!xive2_nvp_is_valid(&nvp)) { | ||
271 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVP %x/%x\n", blk, idx); | ||
272 | + return -1; | ||
273 | + } | ||
274 | + | ||
275 | + old_ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2); | ||
276 | + ipb = old_ipb; | ||
277 | + /* | ||
278 | + * op: | ||
279 | + * 0b00 => set priority bit | ||
280 | + * 0b01 => reset priority bit | ||
281 | + * 0b1- => read | ||
282 | + */ | ||
283 | + if (op == 0b00 || op == 0b01) { | ||
284 | + if (op == 0b00) { | ||
285 | + ipb |= xive_priority_to_ipb(priority); | ||
286 | + } else { | ||
287 | + ipb &= ~xive_priority_to_ipb(priority); | ||
288 | + } | ||
289 | + nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | ||
290 | + xive2_router_write_nvp(xrtr, blk, idx, &nvp, 2); | ||
291 | + } | ||
292 | + rc = !!(old_ipb & xive_priority_to_ipb(priority)); | ||
293 | + trace_xive_nvp_backlog_op(blk, idx, op, priority, rc); | ||
294 | + return rc; | ||
295 | +} | ||
296 | + | ||
297 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf) | ||
298 | { | ||
299 | if (!xive2_eas_is_valid(eas)) { | ||
300 | diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c | ||
301 | new file mode 100644 | ||
302 | index XXXXXXX..XXXXXXX | ||
303 | --- /dev/null | ||
304 | +++ b/tests/qtest/pnv-xive2-nvpg_bar.c | ||
305 | @@ -XXX,XX +XXX,XX @@ | ||
306 | +/* | ||
307 | + * QTest testcase for PowerNV 10 interrupt controller (xive2) | ||
308 | + * - Test NVPG BAR MMIO operations | ||
309 | + * | ||
310 | + * Copyright (c) 2024, IBM Corporation. | ||
311 | + * | ||
312 | + * This work is licensed under the terms of the GNU GPL, version 2 or | ||
313 | + * later. See the COPYING file in the top-level directory. | ||
314 | + */ | ||
315 | +#include "qemu/osdep.h" | ||
316 | +#include "libqtest.h" | ||
317 | + | ||
318 | +#include "pnv-xive2-common.h" | ||
319 | + | ||
320 | +#define NVPG_BACKLOG_OP_SHIFT 10 | ||
321 | +#define NVPG_BACKLOG_PRIO_SHIFT 4 | ||
322 | + | ||
323 | +#define XIVE_PRIORITY_MAX 7 | ||
324 | + | ||
325 | +enum NVx { | ||
326 | + NVP, | ||
327 | + NVG, | ||
328 | + NVC | ||
329 | +}; | ||
330 | + | ||
331 | +typedef enum { | ||
332 | + INCR_STORE = 0b100, | ||
333 | + INCR_LOAD = 0b000, | ||
334 | + DECR_STORE = 0b101, | ||
335 | + DECR_LOAD = 0b001, | ||
336 | + READ_x = 0b010, | ||
337 | + READ_y = 0b011, | ||
338 | +} backlog_op; | ||
339 | + | ||
340 | +static uint32_t nvpg_backlog_op(QTestState *qts, backlog_op op, | ||
341 | + enum NVx type, uint64_t index, | ||
342 | + uint8_t priority, uint8_t delta) | ||
343 | +{ | ||
344 | + uint64_t addr, offset; | ||
345 | + uint32_t count = 0; | ||
346 | + | ||
347 | + switch (type) { | ||
348 | + case NVP: | ||
349 | + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)); | ||
350 | + break; | ||
351 | + case NVG: | ||
352 | + addr = XIVE_NVPG_ADDR + (index << (XIVE_PAGE_SHIFT + 1)) + | ||
353 | + (1 << XIVE_PAGE_SHIFT); | ||
354 | + break; | ||
355 | + case NVC: | ||
356 | + addr = XIVE_NVC_ADDR + (index << XIVE_PAGE_SHIFT); | ||
357 | + break; | ||
358 | + default: | ||
359 | + g_assert_not_reached(); | ||
360 | + } | ||
361 | + | ||
362 | + offset = (op & 0b11) << NVPG_BACKLOG_OP_SHIFT; | ||
363 | + offset |= priority << NVPG_BACKLOG_PRIO_SHIFT; | ||
364 | + if (op >> 2) { | ||
365 | + qtest_writeb(qts, addr + offset, delta); | ||
366 | + } else { | ||
367 | + count = qtest_readw(qts, addr + offset); | ||
368 | + } | ||
369 | + return count; | ||
370 | +} | ||
371 | + | ||
372 | +void test_nvpg_bar(QTestState *qts) | ||
373 | +{ | ||
374 | + uint32_t nvp_target = 0x11; | ||
375 | + uint32_t group_target = 0x17; /* size 16 */ | ||
376 | + uint32_t vp_irq = 33, group_irq = 47; | ||
377 | + uint32_t vp_end = 3, group_end = 97; | ||
378 | + uint32_t vp_irq_data = 0x33333333; | ||
379 | + uint32_t group_irq_data = 0x66666666; | ||
380 | + uint8_t vp_priority = 0, group_priority = 5; | ||
381 | + uint32_t vp_count[XIVE_PRIORITY_MAX + 1] = { 0 }; | ||
382 | + uint32_t group_count[XIVE_PRIORITY_MAX + 1] = { 0 }; | ||
383 | + uint32_t count, delta; | ||
384 | + uint8_t i; | ||
385 | + | ||
386 | + printf("# ============================================================\n"); | ||
387 | + printf("# Testing NVPG BAR operations\n"); | ||
388 | + | ||
389 | + set_nvg(qts, group_target, 0); | ||
390 | + set_nvp(qts, nvp_target, 0x04); | ||
391 | + set_nvp(qts, group_target, 0x04); | ||
392 | + | ||
393 | + /* | ||
394 | + * Setup: trigger a VP-specific interrupt and a group interrupt | ||
395 | + * so that the backlog counters are initialized to something else | ||
396 | + * than 0 for at least one priority level | ||
397 | + */ | ||
398 | + set_eas(qts, vp_irq, vp_end, vp_irq_data); | ||
399 | + set_end(qts, vp_end, nvp_target, vp_priority, false /* group */); | ||
400 | + | ||
401 | + set_eas(qts, group_irq, group_end, group_irq_data); | ||
402 | + set_end(qts, group_end, group_target, group_priority, true /* group */); | ||
403 | + | ||
404 | + get_esb(qts, vp_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); | ||
405 | + set_esb(qts, vp_irq, XIVE_TRIGGER_PAGE, 0, 0); | ||
406 | + vp_count[vp_priority]++; | ||
407 | + | ||
408 | + get_esb(qts, group_irq, XIVE_EOI_PAGE, XIVE_ESB_SET_PQ_00); | ||
409 | + set_esb(qts, group_irq, XIVE_TRIGGER_PAGE, 0, 0); | ||
410 | + group_count[group_priority]++; | ||
411 | + | ||
412 | + /* check the initial counters */ | ||
413 | + for (i = 0; i <= XIVE_PRIORITY_MAX; i++) { | ||
414 | + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, i, 0); | ||
415 | + g_assert_cmpuint(count, ==, vp_count[i]); | ||
416 | + | ||
417 | + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, i, 0); | ||
418 | + g_assert_cmpuint(count, ==, group_count[i]); | ||
419 | + } | ||
420 | + | ||
421 | +    /* do a few ops on the VP. Counter can only be 0 or 1 */ | ||
422 | + vp_priority = 2; | ||
423 | + delta = 7; | ||
424 | + nvpg_backlog_op(qts, INCR_STORE, NVP, nvp_target, vp_priority, delta); | ||
425 | + vp_count[vp_priority] = 1; | ||
426 | + count = nvpg_backlog_op(qts, INCR_LOAD, NVP, nvp_target, vp_priority, 0); | ||
427 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | ||
428 | + count = nvpg_backlog_op(qts, READ_y, NVP, nvp_target, vp_priority, 0); | ||
429 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | ||
430 | + | ||
431 | + count = nvpg_backlog_op(qts, DECR_LOAD, NVP, nvp_target, vp_priority, 0); | ||
432 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | ||
433 | + vp_count[vp_priority] = 0; | ||
434 | + nvpg_backlog_op(qts, DECR_STORE, NVP, nvp_target, vp_priority, delta); | ||
435 | + count = nvpg_backlog_op(qts, READ_x, NVP, nvp_target, vp_priority, 0); | ||
436 | + g_assert_cmpuint(count, ==, vp_count[vp_priority]); | ||
437 | + | ||
438 | + /* do a few ops on the group */ | ||
439 | + group_priority = 2; | ||
440 | + delta = 9; | ||
441 | + /* can't go negative */ | ||
442 | + nvpg_backlog_op(qts, DECR_STORE, NVG, group_target, group_priority, delta); | ||
443 | + count = nvpg_backlog_op(qts, READ_y, NVG, group_target, group_priority, 0); | ||
444 | + g_assert_cmpuint(count, ==, 0); | ||
445 | + nvpg_backlog_op(qts, INCR_STORE, NVG, group_target, group_priority, delta); | ||
446 | + group_count[group_priority] += delta; | ||
447 | + count = nvpg_backlog_op(qts, INCR_LOAD, NVG, group_target, | ||
448 | + group_priority, delta); | ||
449 | + g_assert_cmpuint(count, ==, group_count[group_priority]); | ||
450 | + group_count[group_priority]++; | ||
451 | + | ||
452 | + count = nvpg_backlog_op(qts, DECR_LOAD, NVG, group_target, | ||
453 | + group_priority, delta); | ||
454 | + g_assert_cmpuint(count, ==, group_count[group_priority]); | ||
455 | + group_count[group_priority]--; | ||
456 | + count = nvpg_backlog_op(qts, READ_x, NVG, group_target, group_priority, 0); | ||
457 | + g_assert_cmpuint(count, ==, group_count[group_priority]); | ||
458 | +} | ||
459 | + | ||
460 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c | ||
461 | index XXXXXXX..XXXXXXX 100644 | ||
462 | --- a/tests/qtest/pnv-xive2-test.c | ||
463 | +++ b/tests/qtest/pnv-xive2-test.c | ||
464 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) | ||
465 | reset_state(qts); | ||
466 | test_flush_sync_inject(qts); | ||
467 | |||
468 | + reset_state(qts); | ||
469 | + test_nvpg_bar(qts); | ||
470 | + | ||
471 | qtest_quit(qts); | ||
472 | } | ||
473 | |||
474 | diff --git a/hw/intc/trace-events b/hw/intc/trace-events | ||
475 | index XXXXXXX..XXXXXXX 100644 | ||
476 | --- a/hw/intc/trace-events | ||
477 | +++ b/hw/intc/trace-events | ||
478 | @@ -XXX,XX +XXX,XX @@ xive_tctx_tm_read(uint32_t index, uint64_t offset, unsigned int size, uint64_t v | ||
479 | xive_presenter_notify(uint8_t nvt_blk, uint32_t nvt_idx, uint8_t ring, uint8_t group_level) "found NVT 0x%x/0x%x ring=0x%x group_level=%d" | ||
480 | xive_end_source_read(uint8_t end_blk, uint32_t end_idx, uint64_t addr) "END 0x%x/0x%x @0x%"PRIx64 | ||
481 | |||
482 | +# xive2.c | ||
483 | +xive_nvp_backlog_op(uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint8_t rc) "NVP 0x%x/0x%x operation=%d priority=%d rc=%d" | ||
484 | +xive_nvgc_backlog_op(bool c, uint8_t blk, uint32_t idx, uint8_t op, uint8_t priority, uint32_t rc) "NVGC crowd=%d 0x%x/0x%x operation=%d priority=%d rc=%d" | ||
485 | + | ||
486 | # pnv_xive.c | ||
487 | pnv_xive_ic_hw_trigger(uint64_t addr, uint64_t val) "@0x%"PRIx64" val=0x%"PRIx64 | ||
488 | |||
489 | diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build | ||
490 | index XXXXXXX..XXXXXXX 100644 | ||
491 | --- a/tests/qtest/meson.build | ||
492 | +++ b/tests/qtest/meson.build | ||
493 | @@ -XXX,XX +XXX,XX @@ qtests = { | ||
494 | 'ivshmem-test': [rt, '../../contrib/ivshmem-server/ivshmem-server.c'], | ||
495 | 'migration-test': migration_files, | ||
496 | 'pxe-test': files('boot-sector.c'), | ||
497 | - 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c'), | ||
498 | + 'pnv-xive2-test': files('pnv-xive2-common.c', 'pnv-xive2-flush-sync.c', | ||
499 | + 'pnv-xive2-nvpg_bar.c'), | ||
500 | 'qos-test': [chardev, io, qos_test_ss.apply({}).sources()], | ||
501 | 'tpm-crb-swtpm-test': [io, tpmemu_files], | ||
502 | 'tpm-crb-test': [io, tpmemu_files], | ||
503 | -- | ||
504 | 2.43.0 | ||
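A note on the NVPG/NVC BAR operations above: the whole command is encoded in
the load/store address. NVx_BACKLOG_OP is PPC_BITMASK(52, 53) and
NVx_BACKLOG_PRIO is PPC_BITMASK(57, 59), i.e. address bits 10-11 and 4-6,
which is why the qtest shifts the op by 10 and the priority by 4. A minimal
sketch of a helper forming such an offset (the helper is illustrative, not
part of the series):

    /* Illustration only: build the in-page offset of a backlog MMIO op.
     * Bits 10-11 carry the 2-bit op, bits 4-6 the 3-bit priority. */
    static inline uint16_t nvx_backlog_offset(uint8_t op, uint8_t prio)
    {
        return ((uint16_t)(op & 0x3) << 10) | ((uint16_t)(prio & 0x7) << 4);
    }

A 2-byte load at page_base + nvx_backlog_offset(0b10, 5) then reads the
priority 5 backlog counter without modifying it, which is the READ_x case of
nvpg_backlog_op() in the qtest.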
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | 1 | From: Frederic Barrat <fbarrat@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | When processing a backlog scan for group interrupts, also take | 3 | When processing a backlog scan for group interrupts, also take |
4 | into account crowd interrupts. | 4 | into account crowd interrupts. |
5 | 5 | ||
6 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | 6 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> |
7 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 7 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
8 | --- | 8 | --- |
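To illustrate the 6-bit level split introduced here, using the
NVx_CROWD_LVL/NVx_GROUP_LVL macros added to xive2_regs.h below (the level
value 0x33 is made up for the example):

    uint8_t level = 0x33;                   /* 0b110011 */
    assert(NVx_CROWD_LVL(level) == 0b11);   /* crowd level 3 */
    assert(NVx_GROUP_LVL(level) == 0b0011); /* group level 3: 8 VPs */

A crowd level of 0b11 stands for a crowd of 16 blocks, since a crowd size of
8 is not supported (see the comment in xive2_pgofnext() below).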
9 | include/hw/ppc/xive2_regs.h | 4 ++ | 9 | include/hw/ppc/xive2_regs.h | 4 ++ |
10 | hw/intc/xive2.c | 82 +++++++++++++++++++++++++------------ | 10 | hw/intc/xive2.c | 82 +++++++++++++++++++++++++------------ |
11 | 2 files changed, 60 insertions(+), 26 deletions(-) | 11 | 2 files changed, 60 insertions(+), 26 deletions(-) |
12 | 12 | ||
13 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h | 13 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h |
14 | index XXXXXXX..XXXXXXX 100644 | 14 | index XXXXXXX..XXXXXXX 100644 |
15 | --- a/include/hw/ppc/xive2_regs.h | 15 | --- a/include/hw/ppc/xive2_regs.h |
16 | +++ b/include/hw/ppc/xive2_regs.h | 16 | +++ b/include/hw/ppc/xive2_regs.h |
17 | @@ -XXX,XX +XXX,XX @@ void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, | 17 | @@ -XXX,XX +XXX,XX @@ void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, |
18 | #define NVx_BACKLOG_OP PPC_BITMASK(52, 53) | 18 | #define NVx_BACKLOG_OP PPC_BITMASK(52, 53) |
19 | #define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) | 19 | #define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) |
20 | 20 | ||
21 | +/* split the 6-bit crowd/group level */ | 21 | +/* split the 6-bit crowd/group level */ |
22 | +#define NVx_CROWD_LVL(level) ((level >> 4) & 0b11) | 22 | +#define NVx_CROWD_LVL(level) ((level >> 4) & 0b11) |
23 | +#define NVx_GROUP_LVL(level) (level & 0b1111) | 23 | +#define NVx_GROUP_LVL(level) (level & 0b1111) |
24 | + | 24 | + |
25 | #endif /* PPC_XIVE2_REGS_H */ | 25 | #endif /* PPC_XIVE2_REGS_H */ |
26 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 26 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
27 | index XXXXXXX..XXXXXXX 100644 | 27 | index XXXXXXX..XXXXXXX 100644 |
28 | --- a/hw/intc/xive2.c | 28 | --- a/hw/intc/xive2.c |
29 | +++ b/hw/intc/xive2.c | 29 | +++ b/hw/intc/xive2.c |
30 | @@ -XXX,XX +XXX,XX @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) | 30 | @@ -XXX,XX +XXX,XX @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) |
31 | end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); | 31 | end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); |
32 | } | 32 | } |
33 | 33 | ||
34 | +static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, | 34 | +static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, |
35 | + uint8_t next_level) | 35 | + uint8_t next_level) |
36 | +{ | 36 | +{ |
37 | + uint32_t mask, next_idx; | 37 | + uint32_t mask, next_idx; |
38 | + uint8_t next_blk; | 38 | + uint8_t next_blk; |
39 | + | 39 | + |
40 | + /* | 40 | + /* |
41 | + * Adjust the block and index of a VP for the next group/crowd | 41 | + * Adjust the block and index of a VP for the next group/crowd |
42 | + * size (PGofFirst/PGofNext field in the NVP and NVGC structures). | 42 | + * size (PGofFirst/PGofNext field in the NVP and NVGC structures). |
43 | + * | 43 | + * |
44 | + * The 6-bit group level is split into a 2-bit crowd and 4-bit | 44 | + * The 6-bit group level is split into a 2-bit crowd and 4-bit |
45 | + * group levels. Encoding is similar. However, we don't support | 45 | + * group levels. Encoding is similar. However, we don't support |
46 | + * crowd size of 8. So a crowd level of 0b11 is bumped to a crowd | 46 | + * crowd size of 8. So a crowd level of 0b11 is bumped to a crowd |
47 | + * size of 16. | 47 | + * size of 16. |
48 | + */ | 48 | + */ |
49 | + next_blk = NVx_CROWD_LVL(next_level); | 49 | + next_blk = NVx_CROWD_LVL(next_level); |
50 | + if (next_blk == 3) { | 50 | + if (next_blk == 3) { |
51 | + next_blk = 4; | 51 | + next_blk = 4; |
52 | + } | 52 | + } |
53 | + mask = (1 << next_blk) - 1; | 53 | + mask = (1 << next_blk) - 1; |
54 | + *nvgc_blk &= ~mask; | 54 | + *nvgc_blk &= ~mask; |
55 | + *nvgc_blk |= mask >> 1; | 55 | + *nvgc_blk |= mask >> 1; |
56 | + | 56 | + |
57 | + next_idx = NVx_GROUP_LVL(next_level); | 57 | + next_idx = NVx_GROUP_LVL(next_level); |
58 | + mask = (1 << next_idx) - 1; | 58 | + mask = (1 << next_idx) - 1; |
59 | + *nvgc_idx &= ~mask; | 59 | + *nvgc_idx &= ~mask; |
60 | + *nvgc_idx |= mask >> 1; | 60 | + *nvgc_idx |= mask >> 1; |
61 | +} | 61 | +} |
62 | + | 62 | + |
63 | /* | 63 | /* |
64 | * Scan the group chain and return the highest priority and group | 64 | * Scan the group chain and return the highest priority and group |
65 | * level of pending group interrupts. | 65 | * level of pending group interrupts. |
66 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, | 66 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, |
67 | uint8_t *out_level) | 67 | uint8_t *out_level) |
68 | { | 68 | { |
69 | Xive2Router *xrtr = XIVE2_ROUTER(xptr); | 69 | Xive2Router *xrtr = XIVE2_ROUTER(xptr); |
70 | - uint32_t nvgc_idx, mask; | 70 | - uint32_t nvgc_idx, mask; |
71 | + uint32_t nvgc_idx; | 71 | + uint32_t nvgc_idx; |
72 | uint32_t current_level, count; | 72 | uint32_t current_level, count; |
73 | - uint8_t prio; | 73 | - uint8_t prio; |
74 | + uint8_t nvgc_blk, prio; | 74 | + uint8_t nvgc_blk, prio; |
75 | Xive2Nvgc nvgc; | 75 | Xive2Nvgc nvgc; |
76 | 76 | ||
77 | for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { | 77 | for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { |
78 | - current_level = first_group & 0xF; | 78 | - current_level = first_group & 0xF; |
79 | + current_level = first_group & 0x3F; | 79 | + current_level = first_group & 0x3F; |
80 | + nvgc_blk = nvp_blk; | 80 | + nvgc_blk = nvp_blk; |
81 | + nvgc_idx = nvp_idx; | 81 | + nvgc_idx = nvp_idx; |
82 | 82 | ||
83 | while (current_level) { | 83 | while (current_level) { |
84 | - mask = (1 << current_level) - 1; | 84 | - mask = (1 << current_level) - 1; |
85 | - nvgc_idx = nvp_idx & ~mask; | 85 | - nvgc_idx = nvp_idx & ~mask; |
86 | - nvgc_idx |= mask >> 1; | 86 | - nvgc_idx |= mask >> 1; |
87 | - qemu_log("fxb %s checking backlog for prio %d group idx %x\n", | 87 | - qemu_log("fxb %s checking backlog for prio %d group idx %x\n", |
88 | - __func__, prio, nvgc_idx); | 88 | - __func__, prio, nvgc_idx); |
89 | - | 89 | - |
90 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { | 90 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { |
91 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", | 91 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", |
92 | - nvp_blk, nvgc_idx); | 92 | - nvp_blk, nvgc_idx); |
93 | + xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); | 93 | + xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); |
94 | + | 94 | + |
95 | + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level), | 95 | + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level), |
96 | + nvgc_blk, nvgc_idx, &nvgc)) { | 96 | + nvgc_blk, nvgc_idx, &nvgc)) { |
97 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", | 97 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", |
98 | + nvgc_blk, nvgc_idx); | 98 | + nvgc_blk, nvgc_idx); |
99 | return 0xFF; | 99 | return 0xFF; |
100 | } | 100 | } |
101 | if (!xive2_nvgc_is_valid(&nvgc)) { | 101 | if (!xive2_nvgc_is_valid(&nvgc)) { |
102 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", | 102 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", |
103 | - nvp_blk, nvgc_idx); | 103 | - nvp_blk, nvgc_idx); |
104 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", | 104 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", |
105 | + nvgc_blk, nvgc_idx); | 105 | + nvgc_blk, nvgc_idx); |
106 | return 0xFF; | 106 | return 0xFF; |
107 | } | 107 | } |
108 | 108 | ||
109 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, | 109 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, |
110 | *out_level = current_level; | 110 | *out_level = current_level; |
111 | return prio; | 111 | return prio; |
112 | } | 112 | } |
113 | - current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF; | 113 | - current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF; |
114 | + current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F; | 114 | + current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F; |
115 | } | 115 | } |
116 | } | 116 | } |
117 | return 0xFF; | 117 | return 0xFF; |
118 | @@ -XXX,XX +XXX,XX @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, | 118 | @@ -XXX,XX +XXX,XX @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, |
119 | uint8_t group_level) | 119 | uint8_t group_level) |
120 | { | 120 | { |
121 | Xive2Router *xrtr = XIVE2_ROUTER(xptr); | 121 | Xive2Router *xrtr = XIVE2_ROUTER(xptr); |
122 | - uint32_t nvgc_idx, mask, count; | 122 | - uint32_t nvgc_idx, mask, count; |
123 | + uint32_t nvgc_idx, count; | 123 | + uint32_t nvgc_idx, count; |
124 | + uint8_t nvgc_blk; | 124 | + uint8_t nvgc_blk; |
125 | Xive2Nvgc nvgc; | 125 | Xive2Nvgc nvgc; |
126 | 126 | ||
127 | - group_level &= 0xF; | 127 | - group_level &= 0xF; |
128 | - mask = (1 << group_level) - 1; | 128 | - mask = (1 << group_level) - 1; |
129 | - nvgc_idx = nvp_idx & ~mask; | 129 | - nvgc_idx = nvp_idx & ~mask; |
130 | - nvgc_idx |= mask >> 1; | 130 | - nvgc_idx |= mask >> 1; |
131 | + nvgc_blk = nvp_blk; | 131 | + nvgc_blk = nvp_blk; |
132 | + nvgc_idx = nvp_idx; | 132 | + nvgc_idx = nvp_idx; |
133 | + xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level); | 133 | + xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level); |
134 | 134 | ||
135 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { | 135 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { |
136 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", | 136 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", |
137 | - nvp_blk, nvgc_idx); | 137 | - nvp_blk, nvgc_idx); |
138 | + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level), | 138 | + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level), |
139 | + nvgc_blk, nvgc_idx, &nvgc)) { | 139 | + nvgc_blk, nvgc_idx, &nvgc)) { |
140 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", | 140 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", |
141 | + nvgc_blk, nvgc_idx); | 141 | + nvgc_blk, nvgc_idx); |
142 | return; | 142 | return; |
143 | } | 143 | } |
144 | if (!xive2_nvgc_is_valid(&nvgc)) { | 144 | if (!xive2_nvgc_is_valid(&nvgc)) { |
145 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", | 145 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", |
146 | - nvp_blk, nvgc_idx); | 146 | - nvp_blk, nvgc_idx); |
147 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", | 147 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", |
148 | + nvgc_blk, nvgc_idx); | 148 | + nvgc_blk, nvgc_idx); |
149 | return; | 149 | return; |
150 | } | 150 | } |
151 | count = xive2_nvgc_get_backlog(&nvgc, group_prio); | 151 | count = xive2_nvgc_get_backlog(&nvgc, group_prio); |
152 | @@ -XXX,XX +XXX,XX @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, | 152 | @@ -XXX,XX +XXX,XX @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, |
153 | return; | 153 | return; |
154 | } | 154 | } |
155 | xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1); | 155 | xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1); |
156 | - xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc); | 156 | - xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc); |
157 | + xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level), | 157 | + xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level), |
158 | + nvgc_blk, nvgc_idx, &nvgc); | 158 | + nvgc_blk, nvgc_idx, &nvgc); |
159 | } | 159 | } |
160 | 160 | ||
161 | /* | 161 | /* |
162 | -- | 162 | -- |
163 | 2.43.0 | 163 | 2.43.0 | ||
New patch | |||
---|---|---|---|
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | ||
1 | 2 | ||
3 | XIVE crowd sizes are encoded into a 2-bit field as follows: | ||
4 | 0: 0b00 | ||
5 | 2: 0b01 | ||
6 | 4: 0b10 | ||
7 | 16: 0b11 | ||
8 | |||
9 | A crowd size of 8 is not supported. | ||
10 | |||
11 | If an END is defined with the 'crowd' bit set, then a target can be | ||
12 | running on different blocks. It means that some bits of the VP's | ||
13 | block ID are masked when looking for a match. It is similar to | ||
14 | group matching, but on the block instead of the VP index. | ||
15 | |||
16 | Most of the changes are due to passing the extra argument 'crowd' all | ||
17 | the way to the function checking for matches. | ||
18 | |||
19 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | ||
20 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> | ||
21 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
22 | --- | ||
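To make the block masking concrete, a sketch of the arithmetic done by
xive2_get_vp_block_mask() below (values made up; ctz32() is QEMU's
count-trailing-zeros helper):

    uint8_t blk = 5;                            /* low bits 0b01: crowd of 4 */
    uint8_t size = 1 << (ctz32(~blk) + 1);      /* 4 */
    uint8_t block_mask = ~(size - 1) & 0b1111;  /* 0b1100 */
    /* any thread on blocks 4..7 matches the event: */
    assert((6 & block_mask) == (blk & block_mask));

The VP index is masked exactly as for group matching, so a crowd event is a
group match with some block bits ignored as well.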
23 | include/hw/ppc/xive.h | 10 +++--- | ||
24 | include/hw/ppc/xive2.h | 3 +- | ||
25 | hw/intc/pnv_xive.c | 10 +++--- | ||
26 | hw/intc/pnv_xive2.c | 12 +++---- | ||
27 | hw/intc/spapr_xive.c | 8 ++--- | ||
28 | hw/intc/xive.c | 40 ++++++++++++++++++---- | ||
29 | hw/intc/xive2.c | 78 +++++++++++++++++++++++++++++++++--------- | ||
30 | hw/ppc/pnv.c | 15 ++++---- | ||
31 | hw/ppc/spapr.c | 7 ++-- | ||
32 | 9 files changed, 131 insertions(+), 52 deletions(-) | ||
33 | |||
34 | diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h | ||
35 | index XXXXXXX..XXXXXXX 100644 | ||
36 | --- a/include/hw/ppc/xive.h | ||
37 | +++ b/include/hw/ppc/xive.h | ||
38 | @@ -XXX,XX +XXX,XX @@ struct XivePresenterClass { | ||
39 | InterfaceClass parent; | ||
40 | int (*match_nvt)(XivePresenter *xptr, uint8_t format, | ||
41 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
42 | - bool cam_ignore, uint8_t priority, | ||
43 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
44 | uint32_t logic_serv, XiveTCTXMatch *match); | ||
45 | bool (*in_kernel)(const XivePresenter *xptr); | ||
46 | uint32_t (*get_config)(XivePresenter *xptr); | ||
47 | int (*broadcast)(XivePresenter *xptr, | ||
48 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
49 | - uint8_t priority); | ||
50 | + bool crowd, bool cam_ignore, uint8_t priority); | ||
51 | }; | ||
52 | |||
53 | int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
54 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
55 | bool cam_ignore, uint32_t logic_serv); | ||
56 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | ||
57 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
58 | - bool cam_ignore, uint8_t priority, | ||
59 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
60 | uint32_t logic_serv, bool *precluded); | ||
61 | |||
62 | uint32_t xive_get_vpgroup_size(uint32_t nvp_index); | ||
63 | @@ -XXX,XX +XXX,XX @@ struct XiveFabricClass { | ||
64 | InterfaceClass parent; | ||
65 | int (*match_nvt)(XiveFabric *xfb, uint8_t format, | ||
66 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
67 | - bool cam_ignore, uint8_t priority, | ||
68 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
69 | uint32_t logic_serv, XiveTCTXMatch *match); | ||
70 | int (*broadcast)(XiveFabric *xfb, uint8_t nvt_blk, uint32_t nvt_idx, | ||
71 | - uint8_t priority); | ||
72 | + bool crowd, bool cam_ignore, uint8_t priority); | ||
73 | }; | ||
74 | |||
75 | /* | ||
76 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | ||
77 | index XXXXXXX..XXXXXXX 100644 | ||
78 | --- a/include/hw/ppc/xive2.h | ||
79 | +++ b/include/hw/ppc/xive2.h | ||
80 | @@ -XXX,XX +XXX,XX @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); | ||
81 | int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
82 | uint8_t format, | ||
83 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
84 | - bool cam_ignore, uint32_t logic_serv); | ||
85 | + bool crowd, bool cam_ignore, | ||
86 | + uint32_t logic_serv); | ||
87 | |||
88 | uint64_t xive2_presenter_nvp_backlog_op(XivePresenter *xptr, | ||
89 | uint8_t blk, uint32_t idx, | ||
90 | diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c | ||
91 | index XXXXXXX..XXXXXXX 100644 | ||
92 | --- a/hw/intc/pnv_xive.c | ||
93 | +++ b/hw/intc/pnv_xive.c | ||
94 | @@ -XXX,XX +XXX,XX @@ | ||
95 | /* | ||
96 | * QEMU PowerPC XIVE interrupt controller model | ||
97 | * | ||
98 | - * Copyright (c) 2017-2019, IBM Corporation. | ||
99 | + * Copyright (c) 2017-2024, IBM Corporation. | ||
100 | * | ||
101 | - * This code is licensed under the GPL version 2 or later. See the | ||
102 | - * COPYING file in the top-level directory. | ||
103 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
104 | */ | ||
105 | |||
106 | #include "qemu/osdep.h" | ||
107 | @@ -XXX,XX +XXX,XX @@ static bool pnv_xive_is_cpu_enabled(PnvXive *xive, PowerPCCPU *cpu) | ||
108 | |||
109 | static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, | ||
110 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
111 | - bool cam_ignore, uint8_t priority, | ||
112 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
113 | uint32_t logic_serv, XiveTCTXMatch *match) | ||
114 | { | ||
115 | PnvXive *xive = PNV_XIVE(xptr); | ||
116 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive_match_nvt(XivePresenter *xptr, uint8_t format, | ||
117 | * Check the thread context CAM lines and record matches. | ||
118 | */ | ||
119 | ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk, | ||
120 | - nvt_idx, cam_ignore, logic_serv); | ||
121 | + nvt_idx, cam_ignore, | ||
122 | + logic_serv); | ||
123 | /* | ||
124 | * Save the context and follow on to catch duplicates, that we | ||
125 | * don't support yet. | ||
126 | diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c | ||
127 | index XXXXXXX..XXXXXXX 100644 | ||
128 | --- a/hw/intc/pnv_xive2.c | ||
129 | +++ b/hw/intc/pnv_xive2.c | ||
130 | @@ -XXX,XX +XXX,XX @@ static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu) | ||
131 | |||
132 | static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, | ||
133 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
134 | - bool cam_ignore, uint8_t priority, | ||
135 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
136 | uint32_t logic_serv, XiveTCTXMatch *match) | ||
137 | { | ||
138 | PnvXive2 *xive = PNV_XIVE2(xptr); | ||
139 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format, | ||
140 | logic_serv); | ||
141 | } else { | ||
142 | ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk, | ||
143 | - nvt_idx, cam_ignore, | ||
144 | - logic_serv); | ||
145 | + nvt_idx, crowd, cam_ignore, | ||
146 | + logic_serv); | ||
147 | } | ||
148 | |||
149 | if (ring != -1) { | ||
150 | @@ -XXX,XX +XXX,XX @@ static uint32_t pnv_xive2_presenter_get_config(XivePresenter *xptr) | ||
151 | |||
152 | static int pnv_xive2_broadcast(XivePresenter *xptr, | ||
153 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
154 | - uint8_t priority) | ||
155 | + bool crowd, bool ignore, uint8_t priority) | ||
156 | { | ||
157 | PnvXive2 *xive = PNV_XIVE2(xptr); | ||
158 | PnvChip *chip = xive->chip; | ||
159 | @@ -XXX,XX +XXX,XX @@ static int pnv_xive2_broadcast(XivePresenter *xptr, | ||
160 | |||
161 | if (gen1_tima_os) { | ||
162 | ring = xive_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | ||
163 | - nvt_idx, true, 0); | ||
164 | + nvt_idx, ignore, 0); | ||
165 | } else { | ||
166 | ring = xive2_presenter_tctx_match(xptr, tctx, 0, nvt_blk, | ||
167 | - nvt_idx, true, 0); | ||
168 | + nvt_idx, crowd, ignore, 0); | ||
169 | } | ||
170 | |||
171 | if (ring != -1) { | ||
172 | diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c | ||
173 | index XXXXXXX..XXXXXXX 100644 | ||
174 | --- a/hw/intc/spapr_xive.c | ||
175 | +++ b/hw/intc/spapr_xive.c | ||
176 | @@ -XXX,XX +XXX,XX @@ | ||
177 | /* | ||
178 | * QEMU PowerPC sPAPR XIVE interrupt controller model | ||
179 | * | ||
180 | - * Copyright (c) 2017-2018, IBM Corporation. | ||
181 | + * Copyright (c) 2017-2024, IBM Corporation. | ||
182 | * | ||
183 | - * This code is licensed under the GPL version 2 or later. See the | ||
184 | - * COPYING file in the top-level directory. | ||
185 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
186 | */ | ||
187 | |||
188 | #include "qemu/osdep.h" | ||
189 | @@ -XXX,XX +XXX,XX @@ static int spapr_xive_write_nvt(XiveRouter *xrtr, uint8_t nvt_blk, | ||
190 | |||
191 | static int spapr_xive_match_nvt(XivePresenter *xptr, uint8_t format, | ||
192 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
193 | - bool cam_ignore, uint8_t priority, | ||
194 | + bool crowd, bool cam_ignore, | ||
195 | + uint8_t priority, | ||
196 | uint32_t logic_serv, XiveTCTXMatch *match) | ||
197 | { | ||
198 | CPUState *cs; | ||
199 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | ||
200 | index XXXXXXX..XXXXXXX 100644 | ||
201 | --- a/hw/intc/xive.c | ||
202 | +++ b/hw/intc/xive.c | ||
203 | @@ -XXX,XX +XXX,XX @@ uint32_t xive_get_vpgroup_size(uint32_t nvp_index) | ||
204 | return 1 << (ctz32(~nvp_index) + 1); | ||
205 | } | ||
206 | |||
207 | -static uint8_t xive_get_group_level(uint32_t nvp_index) | ||
208 | +static uint8_t xive_get_group_level(bool crowd, bool ignore, | ||
209 | + uint32_t nvp_blk, uint32_t nvp_index) | ||
210 | { | ||
211 | - /* FIXME add crowd encoding */ | ||
212 | - return ctz32(~nvp_index) + 1; | ||
213 | + uint8_t level = 0; | ||
214 | + | ||
215 | + if (crowd) { | ||
216 | + /* crowd level is bit position of first 0 from the right in nvp_blk */ | ||
217 | + level = ctz32(~nvp_blk) + 1; | ||
218 | + | ||
219 | + /* | ||
220 | + * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported. | ||
221 | + * HW will encode level 4 as the value 3. See xive2_pgofnext(). | ||
222 | + */ | ||
223 | + switch (level) { | ||
224 | + case 1: | ||
225 | + case 2: | ||
226 | + break; | ||
227 | + case 4: | ||
228 | + level = 3; | ||
229 | + break; | ||
230 | + default: | ||
231 | + g_assert_not_reached(); | ||
232 | + } | ||
233 | + | ||
234 | + /* Crowd level bits reside in upper 2 bits of the 6 bit group level */ | ||
235 | + level <<= 4; | ||
236 | + } | ||
237 | + if (ignore) { | ||
238 | + level |= (ctz32(~nvp_index) + 1) & 0b1111; | ||
239 | + } | ||
240 | + return level; | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | @@ -XXX,XX +XXX,XX @@ int xive_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
245 | */ | ||
246 | bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | ||
247 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
248 | - bool cam_ignore, uint8_t priority, | ||
249 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
250 | uint32_t logic_serv, bool *precluded) | ||
251 | { | ||
252 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xfb); | ||
253 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | ||
254 | * a new command to the presenters (the equivalent of the "assign" | ||
255 | * power bus command in the documented full notify sequence. | ||
256 | */ | ||
257 | - count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, cam_ignore, | ||
258 | + count = xfc->match_nvt(xfb, format, nvt_blk, nvt_idx, crowd, cam_ignore, | ||
259 | priority, logic_serv, &match); | ||
260 | if (count < 0) { | ||
261 | return false; | ||
262 | @@ -XXX,XX +XXX,XX @@ bool xive_presenter_notify(XiveFabric *xfb, uint8_t format, | ||
263 | |||
264 | /* handle CPU exception delivery */ | ||
265 | if (count) { | ||
266 | - group_level = cam_ignore ? xive_get_group_level(nvt_idx) : 0; | ||
267 | + group_level = xive_get_group_level(crowd, cam_ignore, nvt_blk, nvt_idx); | ||
268 | trace_xive_presenter_notify(nvt_blk, nvt_idx, match.ring, group_level); | ||
269 | xive_tctx_pipr_update(match.tctx, match.ring, priority, group_level); | ||
270 | } else { | ||
271 | @@ -XXX,XX +XXX,XX @@ void xive_router_end_notify(XiveRouter *xrtr, XiveEAS *eas) | ||
272 | } | ||
273 | |||
274 | found = xive_presenter_notify(xrtr->xfb, format, nvt_blk, nvt_idx, | ||
275 | + false /* crowd */, | ||
276 | xive_get_field32(END_W7_F0_IGNORE, end.w7), | ||
277 | priority, | ||
278 | xive_get_field32(END_W7_F1_LOG_SERVER_ID, end.w7), | ||
279 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
280 | index XXXXXXX..XXXXXXX 100644 | ||
281 | --- a/hw/intc/xive2.c | ||
282 | +++ b/hw/intc/xive2.c | ||
283 | @@ -XXX,XX +XXX,XX @@ static bool xive2_vp_match_mask(uint32_t cam1, uint32_t cam2, | ||
284 | return (cam1 & vp_mask) == (cam2 & vp_mask); | ||
285 | } | ||
286 | |||
287 | +static uint8_t xive2_get_vp_block_mask(uint32_t nvt_blk, bool crowd) | ||
288 | +{ | ||
289 | + uint8_t size, block_mask = 0b1111; | ||
290 | + | ||
291 | + /* 3 supported crowd sizes: 2, 4, 16 */ | ||
292 | + if (crowd) { | ||
293 | + size = xive_get_vpgroup_size(nvt_blk); | ||
294 | + if (size == 8) { | ||
295 | +            qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid crowd size of 8\n"); | ||
296 | + return block_mask; | ||
297 | + } | ||
298 | + block_mask = ~(size - 1); | ||
299 | + block_mask &= 0b1111; | ||
300 | + } | ||
301 | + return block_mask; | ||
302 | +} | ||
303 | + | ||
304 | +static uint32_t xive2_get_vp_index_mask(uint32_t nvt_index, bool cam_ignore) | ||
305 | +{ | ||
306 | + uint32_t index_mask = 0xFFFFFF; /* 24 bits */ | ||
307 | + | ||
308 | + if (cam_ignore) { | ||
309 | + index_mask = ~(xive_get_vpgroup_size(nvt_index) - 1); | ||
310 | + index_mask &= 0xFFFFFF; | ||
311 | + } | ||
312 | + return index_mask; | ||
313 | +} | ||
314 | + | ||
315 | /* | ||
316 | * The thread context register words are in big-endian format. | ||
317 | */ | ||
318 | int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
319 | uint8_t format, | ||
320 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
321 | - bool cam_ignore, uint32_t logic_serv) | ||
322 | + bool crowd, bool cam_ignore, | ||
323 | + uint32_t logic_serv) | ||
324 | { | ||
325 | uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx); | ||
326 | uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]); | ||
327 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
328 | uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]); | ||
329 | uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]); | ||
330 | |||
331 | - uint32_t vp_mask = 0xFFFFFFFF; | ||
332 | + uint32_t index_mask, vp_mask; | ||
333 | + uint8_t block_mask; | ||
334 | |||
335 | if (format == 0) { | ||
336 | /* | ||
337 | @@ -XXX,XX +XXX,XX @@ int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx, | ||
338 | * i=1: VP-group notification (bits ignored at the end of the | ||
339 | * NVT identifier) | ||
340 | */ | ||
341 | - if (cam_ignore) { | ||
342 | - vp_mask = ~(xive_get_vpgroup_size(nvt_idx) - 1); | ||
343 | - } | ||
344 | + block_mask = xive2_get_vp_block_mask(nvt_blk, crowd); | ||
345 | + index_mask = xive2_get_vp_index_mask(nvt_idx, cam_ignore); | ||
346 | + vp_mask = xive2_nvp_cam_line(block_mask, index_mask); | ||
347 | |||
348 | /* For VP-group notifications, threads with LGS=0 are excluded */ | ||
349 | |||
350 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
351 | return; | ||
352 | } | ||
353 | |||
354 | +    if (xive2_end_is_crowd(&end) && !xive2_end_is_ignore(&end)) { | ||
355 | + qemu_log_mask(LOG_GUEST_ERROR, | ||
356 | + "XIVE: invalid END, 'crowd' bit requires 'ignore' bit\n"); | ||
357 | + return; | ||
358 | + } | ||
359 | + | ||
360 | if (xive2_end_is_enqueue(&end)) { | ||
361 | xive2_end_enqueue(&end, end_data); | ||
362 | /* Enqueuing event data modifies the EQ toggle and index */ | ||
363 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
364 | } | ||
365 | |||
366 | found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx, | ||
367 | - xive2_end_is_ignore(&end), | ||
368 | + xive2_end_is_crowd(&end), xive2_end_is_ignore(&end), | ||
369 | priority, | ||
370 | xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7), | ||
371 | &precluded); | ||
372 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
373 | nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb); | ||
374 | xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2); | ||
375 | } else { | ||
376 | - Xive2Nvgc nvg; | ||
377 | + Xive2Nvgc nvgc; | ||
378 | uint32_t backlog; | ||
379 | + bool crowd; | ||
380 | |||
381 | - /* For groups, the per-priority backlog counters are in the NVG */ | ||
382 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg)) { | ||
383 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVG %x/%x\n", | ||
384 | - nvp_blk, nvp_idx); | ||
385 | + crowd = xive2_end_is_crowd(&end); | ||
386 | + | ||
387 | + /* | ||
388 | + * For groups and crowds, the per-priority backlog | ||
389 | + * counters are stored in the NVG/NVC structures | ||
390 | + */ | ||
391 | + if (xive2_router_get_nvgc(xrtr, crowd, | ||
392 | + nvp_blk, nvp_idx, &nvgc)) { | ||
393 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no %s %x/%x\n", | ||
394 | + crowd ? "NVC" : "NVG", nvp_blk, nvp_idx); | ||
395 | return; | ||
396 | } | ||
397 | |||
398 | - if (!xive2_nvgc_is_valid(&nvg)) { | ||
399 | + if (!xive2_nvgc_is_valid(&nvgc)) { | ||
400 | qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVG %x/%x is invalid\n", | ||
401 | nvp_blk, nvp_idx); | ||
402 | return; | ||
403 | @@ -XXX,XX +XXX,XX @@ static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk, | ||
404 | * set the LSMFB field of the TIMA of relevant threads so | ||
405 | * that they know an interrupt is pending. | ||
406 | */ | ||
407 | - backlog = xive2_nvgc_get_backlog(&nvg, priority) + 1; | ||
408 | - xive2_nvgc_set_backlog(&nvg, priority, backlog); | ||
409 | - xive2_router_write_nvgc(xrtr, false, nvp_blk, nvp_idx, &nvg); | ||
410 | + backlog = xive2_nvgc_get_backlog(&nvgc, priority) + 1; | ||
411 | + xive2_nvgc_set_backlog(&nvgc, priority, backlog); | ||
412 | + xive2_router_write_nvgc(xrtr, crowd, nvp_blk, nvp_idx, &nvgc); | ||
413 | |||
414 | if (precluded && backlog == 1) { | ||
415 | XiveFabricClass *xfc = XIVE_FABRIC_GET_CLASS(xrtr->xfb); | ||
416 | - xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, priority); | ||
417 | + xfc->broadcast(xrtr->xfb, nvp_blk, nvp_idx, | ||
418 | + xive2_end_is_crowd(&end), | ||
419 | + xive2_end_is_ignore(&end), | ||
420 | + priority); | ||
421 | |||
422 | if (!xive2_end_is_precluded_escalation(&end)) { | ||
423 | /* | ||
424 | diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c | ||
425 | index XXXXXXX..XXXXXXX 100644 | ||
426 | --- a/hw/ppc/pnv.c | ||
427 | +++ b/hw/ppc/pnv.c | ||
428 | @@ -XXX,XX +XXX,XX @@ static void pnv_pic_print_info(InterruptStatsProvider *obj, GString *buf) | ||
429 | |||
430 | static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, | ||
431 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
432 | - bool cam_ignore, uint8_t priority, | ||
433 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
434 | uint32_t logic_serv, | ||
435 | XiveTCTXMatch *match) | ||
436 | { | ||
437 | @@ -XXX,XX +XXX,XX @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, | ||
438 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | ||
439 | int count; | ||
440 | |||
441 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, | ||
442 | - priority, logic_serv, match); | ||
443 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, | ||
444 | + cam_ignore, priority, logic_serv, match); | ||
445 | |||
446 | if (count < 0) { | ||
447 | return count; | ||
448 | @@ -XXX,XX +XXX,XX @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format, | ||
449 | |||
450 | static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | ||
451 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
452 | - bool cam_ignore, uint8_t priority, | ||
453 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
454 | uint32_t logic_serv, | ||
455 | XiveTCTXMatch *match) | ||
456 | { | ||
457 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | ||
458 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | ||
459 | int count; | ||
460 | |||
461 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, | ||
462 | - priority, logic_serv, match); | ||
463 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, | ||
464 | + cam_ignore, priority, logic_serv, match); | ||
465 | |||
466 | if (count < 0) { | ||
467 | return count; | ||
468 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format, | ||
469 | |||
470 | static int pnv10_xive_broadcast(XiveFabric *xfb, | ||
471 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
472 | + bool crowd, bool cam_ignore, | ||
473 | uint8_t priority) | ||
474 | { | ||
475 | PnvMachineState *pnv = PNV_MACHINE(xfb); | ||
476 | @@ -XXX,XX +XXX,XX @@ static int pnv10_xive_broadcast(XiveFabric *xfb, | ||
477 | XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive); | ||
478 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | ||
479 | |||
480 | - xpc->broadcast(xptr, nvt_blk, nvt_idx, priority); | ||
481 | + xpc->broadcast(xptr, nvt_blk, nvt_idx, crowd, cam_ignore, priority); | ||
482 | } | ||
483 | return 0; | ||
484 | } | ||
485 | diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c | ||
486 | index XXXXXXX..XXXXXXX 100644 | ||
487 | --- a/hw/ppc/spapr.c | ||
488 | +++ b/hw/ppc/spapr.c | ||
489 | @@ -XXX,XX +XXX,XX @@ | ||
490 | * Copyright (c) 2004-2007 Fabrice Bellard | ||
491 | * Copyright (c) 2007 Jocelyn Mayer | ||
492 | * Copyright (c) 2010 David Gibson, IBM Corporation. | ||
493 | + * Copyright (c) 2010-2024, IBM Corporation. | ||
494 | + * | ||
495 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
496 | * | ||
497 | * Permission is hereby granted, free of charge, to any person obtaining a copy | ||
498 | * of this software and associated documentation files (the "Software"), to deal | ||
499 | @@ -XXX,XX +XXX,XX @@ static void spapr_pic_print_info(InterruptStatsProvider *obj, GString *buf) | ||
500 | */ | ||
501 | static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, | ||
502 | uint8_t nvt_blk, uint32_t nvt_idx, | ||
503 | - bool cam_ignore, uint8_t priority, | ||
504 | + bool crowd, bool cam_ignore, uint8_t priority, | ||
505 | uint32_t logic_serv, XiveTCTXMatch *match) | ||
506 | { | ||
507 | SpaprMachineState *spapr = SPAPR_MACHINE(xfb); | ||
508 | @@ -XXX,XX +XXX,XX @@ static int spapr_match_nvt(XiveFabric *xfb, uint8_t format, | ||
509 | XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr); | ||
510 | int count; | ||
511 | |||
512 | - count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore, | ||
513 | + count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, crowd, cam_ignore, | ||
514 | priority, logic_serv, match); | ||
515 | if (count < 0) { | ||
516 | return count; | ||
517 | -- | ||
518 | 2.43.0 | ||
1 | From: Glenn Miles <milesg@linux.vnet.ibm.com> | 1 | From: Glenn Miles <milesg@linux.vnet.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | XIVE crowd sizes are encoded into a 2-bit field as follows: | 3 | XIVE crowd sizes are encoded into a 2-bit field as follows: |
4 | 0: 0b00 | 4 | 0: 0b00 |
5 | 2: 0b01 | 5 | 2: 0b01 |
6 | 4: 0b10 | 6 | 4: 0b10 |
7 | 16: 0b11 | 7 | 16: 0b11 |
8 | 8 | ||
9 | A crowd size of 8 is not supported. | 9 | A crowd size of 8 is not supported. |
10 | 10 | ||
11 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> | 11 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> |
12 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 12 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
13 | --- | 13 | --- |
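A worked example of the encoding below (values made up; this would only
compile inside hw/intc/xive.c since xive_get_group_level() is static):

    /* crowd of 16 blocks: ctz32(~0x7) + 1 = 4, encoded as 0b11
     * group of 8 VPs:     ctz32(~0x3) + 1 = 3, kept as 0b0011 */
    assert(xive_get_group_level(true, true, 0x7, 0x3) == 0x33);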
14 | hw/intc/xive.c | 21 ++++++++++++++++++++- | 14 | hw/intc/xive.c | 21 ++++++++++++++++++++- |
15 | 1 file changed, 20 insertions(+), 1 deletion(-) | 15 | 1 file changed, 20 insertions(+), 1 deletion(-) |
16 | 16 | ||
17 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c | 17 | diff --git a/hw/intc/xive.c b/hw/intc/xive.c |
18 | index XXXXXXX..XXXXXXX 100644 | 18 | index XXXXXXX..XXXXXXX 100644 |
19 | --- a/hw/intc/xive.c | 19 | --- a/hw/intc/xive.c |
20 | +++ b/hw/intc/xive.c | 20 | +++ b/hw/intc/xive.c |
21 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive_get_group_level(bool crowd, bool ignore, | 21 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive_get_group_level(bool crowd, bool ignore, |
22 | uint8_t level = 0; | 22 | uint8_t level = 0; |
23 | 23 | ||
24 | if (crowd) { | 24 | if (crowd) { |
25 | - level = ((ctz32(~nvp_blk) + 1) & 0b11) << 4; | 25 | - level = ((ctz32(~nvp_blk) + 1) & 0b11) << 4; |
26 | + /* crowd level is bit position of first 0 from the right in nvp_blk */ | 26 | + /* crowd level is bit position of first 0 from the right in nvp_blk */ |
27 | + level = ctz32(~nvp_blk) + 1; | 27 | + level = ctz32(~nvp_blk) + 1; |
28 | + | 28 | + |
29 | + /* | 29 | + /* |
30 | + * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported. | 30 | + * Supported crowd sizes are 2^1, 2^2, and 2^4. 2^3 is not supported. |
31 | + * HW will encode level 4 as the value 3. See xive2_pgofnext(). | 31 | + * HW will encode level 4 as the value 3. See xive2_pgofnext(). |
32 | + */ | 32 | + */ |
33 | + switch (level) { | 33 | + switch (level) { |
34 | + case 1: | 34 | + case 1: |
35 | + case 2: | 35 | + case 2: |
36 | + break; | 36 | + break; |
37 | + case 4: | 37 | + case 4: |
38 | + level = 3; | 38 | + level = 3; |
39 | + break; | 39 | + break; |
40 | + default: | 40 | + default: |
41 | + g_assert_not_reached(); | 41 | + g_assert_not_reached(); |
42 | + } | 42 | + } |
43 | + | 43 | + |
44 | + /* Crowd level bits reside in upper 2 bits of the 6 bit group level */ | 44 | + /* Crowd level bits reside in upper 2 bits of the 6 bit group level */ |
45 | + level <<= 4; | 45 | + level <<= 4; |
46 | } | 46 | } |
47 | if (ignore) { | 47 | if (ignore) { |
48 | level |= (ctz32(~nvp_index) + 1) & 0b1111; | 48 | level |= (ctz32(~nvp_index) + 1) & 0b1111; |
49 | -- | 49 | -- |
50 | 2.43.0 | 50 | 2.43.0 | ||
New patch | |||
---|---|---|---|
1 | From: Frederic Barrat <fbarrat@linux.ibm.com> | ||
1 | 2 | ||
3 | When processing a backlog scan for group interrupts, also take | ||
4 | into account crowd interrupts. | ||
5 | |||
6 | Signed-off-by: Frederic Barrat <fbarrat@linux.ibm.com> | ||
7 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | ||
8 | --- | ||
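For reference, a worked example of the PGofNext adjustment below (values made
up; xive2_pgofnext() is static to hw/intc/xive2.c):

    uint8_t blk = 2;
    uint32_t idx = 0x45;
    xive2_pgofnext(&blk, &idx, 0x33);
    /* crowd level 0b11 is bumped to 4: blk = (2 & ~0b1111) | 0b0111 = 7 */
    /* group level 0b0011: idx = (0x45 & ~0b111) | 0b011 = 0x43 */
    assert(blk == 7 && idx == 0x43);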
9 | include/hw/ppc/xive2_regs.h | 4 ++ | ||
10 | hw/intc/xive2.c | 82 +++++++++++++++++++++++++------------ | ||
11 | 2 files changed, 60 insertions(+), 26 deletions(-) | ||
12 | |||
13 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/include/hw/ppc/xive2_regs.h | ||
16 | +++ b/include/hw/ppc/xive2_regs.h | ||
17 | @@ -XXX,XX +XXX,XX @@ void xive2_nvgc_pic_print_info(Xive2Nvgc *nvgc, uint32_t nvgc_idx, | ||
18 | #define NVx_BACKLOG_OP PPC_BITMASK(52, 53) | ||
19 | #define NVx_BACKLOG_PRIO PPC_BITMASK(57, 59) | ||
20 | |||
21 | +/* split the 6-bit crowd/group level */ | ||
22 | +#define NVx_CROWD_LVL(level) ((level >> 4) & 0b11) | ||
23 | +#define NVx_GROUP_LVL(level) (level & 0b1111) | ||
24 | + | ||
25 | #endif /* PPC_XIVE2_REGS_H */ | ||
26 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | ||
27 | index XXXXXXX..XXXXXXX 100644 | ||
28 | --- a/hw/intc/xive2.c | ||
29 | +++ b/hw/intc/xive2.c | ||
30 | @@ -XXX,XX +XXX,XX @@ static void xive2_end_enqueue(Xive2End *end, uint32_t data) | ||
31 | end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex); | ||
32 | } | ||
33 | |||
34 | +static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, | ||
35 | + uint8_t next_level) | ||
36 | +{ | ||
37 | + uint32_t mask, next_idx; | ||
38 | + uint8_t next_blk; | ||
39 | + | ||
40 | + /* | ||
41 | + * Adjust the block and index of a VP for the next group/crowd | ||
42 | + * size (PGofFirst/PGofNext field in the NVP and NVGC structures). | ||
43 | + * | ||
44 | + * The 6-bit group level is split into a 2-bit crowd and 4-bit | ||
45 | + * group levels. Encoding is similar. However, we don't support | ||
46 | + * crowd size of 8. So a crowd level of 0b11 is bumped to a crowd | ||
47 | + * size of 16. | ||
48 | + */ | ||
49 | + next_blk = NVx_CROWD_LVL(next_level); | ||
50 | + if (next_blk == 3) { | ||
51 | + next_blk = 4; | ||
52 | + } | ||
53 | + mask = (1 << next_blk) - 1; | ||
54 | + *nvgc_blk &= ~mask; | ||
55 | + *nvgc_blk |= mask >> 1; | ||
56 | + | ||
57 | + next_idx = NVx_GROUP_LVL(next_level); | ||
58 | + mask = (1 << next_idx) - 1; | ||
59 | + *nvgc_idx &= ~mask; | ||
60 | + *nvgc_idx |= mask >> 1; | ||
61 | +} | ||
62 | + | ||
63 | /* | ||
64 | * Scan the group chain and return the highest priority and group | ||
65 | * level of pending group interrupts. | ||
66 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, | ||
67 | uint8_t *out_level) | ||
68 | { | ||
69 | Xive2Router *xrtr = XIVE2_ROUTER(xptr); | ||
70 | - uint32_t nvgc_idx, mask; | ||
71 | + uint32_t nvgc_idx; | ||
72 | uint32_t current_level, count; | ||
73 | - uint8_t prio; | ||
74 | + uint8_t nvgc_blk, prio; | ||
75 | Xive2Nvgc nvgc; | ||
76 | |||
77 | for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { | ||
78 | - current_level = first_group & 0xF; | ||
79 | + current_level = first_group & 0x3F; | ||
80 | + nvgc_blk = nvp_blk; | ||
81 | + nvgc_idx = nvp_idx; | ||
82 | |||
83 | while (current_level) { | ||
84 | - mask = (1 << current_level) - 1; | ||
85 | - nvgc_idx = nvp_idx & ~mask; | ||
86 | - nvgc_idx |= mask >> 1; | ||
87 | - qemu_log("fxb %s checking backlog for prio %d group idx %x\n", | ||
88 | - __func__, prio, nvgc_idx); | ||
89 | - | ||
90 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { | ||
91 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", | ||
92 | - nvp_blk, nvgc_idx); | ||
93 | + xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); | ||
94 | + | ||
95 | + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(current_level), | ||
96 | + nvgc_blk, nvgc_idx, &nvgc)) { | ||
97 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", | ||
98 | + nvgc_blk, nvgc_idx); | ||
99 | return 0xFF; | ||
100 | } | ||
101 | if (!xive2_nvgc_is_valid(&nvgc)) { | ||
102 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", | ||
103 | - nvp_blk, nvgc_idx); | ||
104 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", | ||
105 | + nvgc_blk, nvgc_idx); | ||
106 | return 0xFF; | ||
107 | } | ||
108 | |||
109 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, | ||
110 | *out_level = current_level; | ||
111 | return prio; | ||
112 | } | ||
113 | - current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0xF; | ||
114 | + current_level = xive_get_field32(NVGC2_W0_PGONEXT, nvgc.w0) & 0x3F; | ||
115 | } | ||
116 | } | ||
117 | return 0xFF; | ||
118 | @@ -XXX,XX +XXX,XX @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, | ||
119 | uint8_t group_level) | ||
120 | { | ||
121 | Xive2Router *xrtr = XIVE2_ROUTER(xptr); | ||
122 | - uint32_t nvgc_idx, mask, count; | ||
123 | + uint32_t nvgc_idx, count; | ||
124 | + uint8_t nvgc_blk; | ||
125 | Xive2Nvgc nvgc; | ||
126 | |||
127 | - group_level &= 0xF; | ||
128 | - mask = (1 << group_level) - 1; | ||
129 | - nvgc_idx = nvp_idx & ~mask; | ||
130 | - nvgc_idx |= mask >> 1; | ||
131 | + nvgc_blk = nvp_blk; | ||
132 | + nvgc_idx = nvp_idx; | ||
133 | + xive2_pgofnext(&nvgc_blk, &nvgc_idx, group_level); | ||
134 | |||
135 | - if (xive2_router_get_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc)) { | ||
136 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVG %x/%x\n", | ||
137 | - nvp_blk, nvgc_idx); | ||
138 | + if (xive2_router_get_nvgc(xrtr, NVx_CROWD_LVL(group_level), | ||
139 | + nvgc_blk, nvgc_idx, &nvgc)) { | ||
140 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVGC %x/%x\n", | ||
141 | + nvgc_blk, nvgc_idx); | ||
142 | return; | ||
143 | } | ||
144 | if (!xive2_nvgc_is_valid(&nvgc)) { | ||
145 | - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVG %x/%x\n", | ||
146 | - nvp_blk, nvgc_idx); | ||
147 | + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid NVGC %x/%x\n", | ||
148 | + nvgc_blk, nvgc_idx); | ||
149 | return; | ||
150 | } | ||
151 | count = xive2_nvgc_get_backlog(&nvgc, group_prio); | ||
152 | @@ -XXX,XX +XXX,XX @@ static void xive2_presenter_backlog_decr(XivePresenter *xptr, | ||
153 | return; | ||
154 | } | ||
155 | xive2_nvgc_set_backlog(&nvgc, group_prio, count - 1); | ||
156 | - xive2_router_write_nvgc(xrtr, false, nvp_blk, nvgc_idx, &nvgc); | ||
157 | + xive2_router_write_nvgc(xrtr, NVx_CROWD_LVL(group_level), | ||
158 | + nvgc_blk, nvgc_idx, &nvgc); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | -- | ||
163 | 2.43.0 | ||
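A quick aside on the arithmetic in xive2_pgofnext() above, since the same hunk also widens the PGofNext/PGofFirst level masks from 0xF to 0x3F: the 6-bit level now carries a 2-bit crowd level on top of the 4-bit group level, and a group spanning 2^level VPs is named by the NVGC whose index is the VP index with its low 'level' bits replaced by a 0 followed by ones (mask >> 1). Below is a minimal standalone distillation of the group half of that derivation; the helper name, the level-2 example and the printf scaffolding are illustrative, not part of the patch.

    /*
     * Sketch of the group-index derivation in xive2_pgofnext()
     * (group half only; the crowd/block half uses the same pattern).
     * All 2^level VPs of a group share one NVGC index: the VP index
     * with its low 'level' bits replaced by 0b01...1, i.e. mask >> 1.
     */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t nvgc_idx_for(uint32_t vp_idx, uint8_t level)
    {
        uint32_t mask = (1u << level) - 1;

        vp_idx &= ~mask;     /* keep the bits that identify the group  */
        vp_idx |= mask >> 1; /* set the group's reserved index pattern */
        return vp_idx;
    }

    int main(void)
    {
        /* All four VPs of a level-2 group map to NVGC index 0x31. */
        for (uint32_t vp = 0x30; vp <= 0x33; vp++) {
            printf("vp 0x%02" PRIx32 " -> nvgc 0x%02" PRIx32 "\n",
                   vp, nvgc_idx_for(vp, 2));
        }
        return 0;
    }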
1 | From: Glenn Miles <milesg@linux.vnet.ibm.com> | 1 | From: Glenn Miles <milesg@linux.vnet.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | END notification processing has an escalation path. The escalation is | 3 | END notification processing has an escalation path. The escalation is |
4 | not always an END escalation but can be an ESB escalation. | 4 | not always an END escalation but can be an ESB escalation. |
5 | 5 | ||
6 | Also added a check for 'resume' processing, which logs a message stating | 6 | Also added a check for 'resume' processing, which logs a message stating | ||
7 | that it needs to be implemented. This is not needed at this time but is | 7 | that it needs to be implemented. This is not needed at this time but is | ||
8 | part of the END notification processing. | 8 | part of the END notification processing. | ||
9 | 9 | ||
10 | This change was taken from a patch provided by Michael Kowal. | 10 | This change was taken from a patch provided by Michael Kowal. | ||
11 | 11 | ||
12 | Suggested-by: Michael Kowal <kowal@us.ibm.com> | 12 | Suggested-by: Michael Kowal <kowal@us.ibm.com> |
13 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> | 13 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> |
14 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 14 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
15 | --- | 15 | --- |
16 | include/hw/ppc/xive2.h | 1 + | 16 | include/hw/ppc/xive2.h | 1 + |
17 | include/hw/ppc/xive2_regs.h | 13 +++++--- | 17 | include/hw/ppc/xive2_regs.h | 13 +++++--- |
18 | hw/intc/xive2.c | 61 +++++++++++++++++++++++++++++-------- | 18 | hw/intc/xive2.c | 61 +++++++++++++++++++++++++++++-------- |
19 | 3 files changed, 58 insertions(+), 17 deletions(-) | 19 | 3 files changed, 58 insertions(+), 17 deletions(-) |
20 | 20 | ||
21 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h | 21 | diff --git a/include/hw/ppc/xive2.h b/include/hw/ppc/xive2.h |
22 | index XXXXXXX..XXXXXXX 100644 | 22 | index XXXXXXX..XXXXXXX 100644 |
23 | --- a/include/hw/ppc/xive2.h | 23 | --- a/include/hw/ppc/xive2.h |
24 | +++ b/include/hw/ppc/xive2.h | 24 | +++ b/include/hw/ppc/xive2.h |
25 | @@ -XXX,XX +XXX,XX @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, | 25 | @@ -XXX,XX +XXX,XX @@ int xive2_router_write_nvgc(Xive2Router *xrtr, bool crowd, |
26 | uint32_t xive2_router_get_config(Xive2Router *xrtr); | 26 | uint32_t xive2_router_get_config(Xive2Router *xrtr); |
27 | 27 | ||
28 | void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); | 28 | void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked); |
29 | +void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked); | 29 | +void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked); |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * XIVE2 Presenter (POWER10) | 32 | * XIVE2 Presenter (POWER10) |
33 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h | 33 | diff --git a/include/hw/ppc/xive2_regs.h b/include/hw/ppc/xive2_regs.h |
34 | index XXXXXXX..XXXXXXX 100644 | 34 | index XXXXXXX..XXXXXXX 100644 |
35 | --- a/include/hw/ppc/xive2_regs.h | 35 | --- a/include/hw/ppc/xive2_regs.h |
36 | +++ b/include/hw/ppc/xive2_regs.h | 36 | +++ b/include/hw/ppc/xive2_regs.h |
37 | @@ -XXX,XX +XXX,XX @@ | 37 | @@ -XXX,XX +XXX,XX @@ |
38 | 38 | ||
39 | typedef struct Xive2Eas { | 39 | typedef struct Xive2Eas { |
40 | uint64_t w; | 40 | uint64_t w; |
41 | -#define EAS2_VALID PPC_BIT(0) | 41 | -#define EAS2_VALID PPC_BIT(0) |
42 | -#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ | 42 | -#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ |
43 | -#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ | 43 | -#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ |
44 | -#define EAS2_MASKED PPC_BIT(32) /* Masked */ | 44 | -#define EAS2_MASKED PPC_BIT(32) /* Masked */ |
45 | -#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ | 45 | -#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ |
46 | +#define EAS2_VALID PPC_BIT(0) | 46 | +#define EAS2_VALID PPC_BIT(0) |
47 | +#define EAS2_QOS PPC_BITMASK(1, 2) /* Quality of Service (unimp) */ | 47 | +#define EAS2_QOS PPC_BITMASK(1, 2) /* Quality of Service (unimp) */ | ||
48 | +#define EAS2_RESUME PPC_BIT(3) /* END Resume(unimp) */ | 48 | +#define EAS2_RESUME PPC_BIT(3) /* END Resume(unimp) */ |
49 | +#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ | 49 | +#define EAS2_END_BLOCK PPC_BITMASK(4, 7) /* Destination EQ block# */ |
50 | +#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ | 50 | +#define EAS2_END_INDEX PPC_BITMASK(8, 31) /* Destination EQ index */ |
51 | +#define EAS2_MASKED PPC_BIT(32) /* Masked */ | 51 | +#define EAS2_MASKED PPC_BIT(32) /* Masked */ |
52 | +#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ | 52 | +#define EAS2_END_DATA PPC_BITMASK(33, 63) /* written to the EQ */ |
53 | } Xive2Eas; | 53 | } Xive2Eas; |
54 | 54 | ||
55 | #define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID) | 55 | #define xive2_eas_is_valid(eas) (be64_to_cpu((eas)->w) & EAS2_VALID) |
56 | #define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED) | 56 | #define xive2_eas_is_masked(eas) (be64_to_cpu((eas)->w) & EAS2_MASKED) |
57 | +#define xive2_eas_is_resume(eas) (be64_to_cpu((eas)->w) & EAS2_RESUME) | 57 | +#define xive2_eas_is_resume(eas) (be64_to_cpu((eas)->w) & EAS2_RESUME) |
58 | 58 | ||
59 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf); | 59 | void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, GString *buf); |
60 | 60 | ||
61 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c | 61 | diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c |
62 | index XXXXXXX..XXXXXXX 100644 | 62 | index XXXXXXX..XXXXXXX 100644 |
63 | --- a/hw/intc/xive2.c | 63 | --- a/hw/intc/xive2.c |
64 | +++ b/hw/intc/xive2.c | 64 | +++ b/hw/intc/xive2.c |
65 | @@ -XXX,XX +XXX,XX @@ do_escalation: | 65 | @@ -XXX,XX +XXX,XX @@ do_escalation: |
66 | } | 66 | } |
67 | } | 67 | } |
68 | 68 | ||
69 | - /* | 69 | - /* |
70 | - * The END trigger becomes an Escalation trigger | 70 | - * The END trigger becomes an Escalation trigger |
71 | - */ | 71 | - */ |
72 | - xive2_router_end_notify(xrtr, | 72 | - xive2_router_end_notify(xrtr, |
73 | - xive_get_field32(END2_W4_END_BLOCK, end.w4), | 73 | - xive_get_field32(END2_W4_END_BLOCK, end.w4), |
74 | - xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), | 74 | - xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), |
75 | - xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); | 75 | - xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); |
76 | + if (xive2_end_is_escalate_end(&end)) { | 76 | + if (xive2_end_is_escalate_end(&end)) { |
77 | + /* | 77 | + /* |
78 | + * Perform END Adaptive escalation processing | 78 | + * Perform END Adaptive escalation processing |
79 | + * The END trigger becomes an Escalation trigger | 79 | + * The END trigger becomes an Escalation trigger |
80 | + */ | 80 | + */ |
81 | + xive2_router_end_notify(xrtr, | 81 | + xive2_router_end_notify(xrtr, |
82 | + xive_get_field32(END2_W4_END_BLOCK, end.w4), | 82 | + xive_get_field32(END2_W4_END_BLOCK, end.w4), |
83 | + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), | 83 | + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4), |
84 | + xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); | 84 | + xive_get_field32(END2_W5_ESC_END_DATA, end.w5)); |
85 | + } /* end END adaptive escalation */ | 85 | + } /* end END adaptive escalation */ |
86 | + | 86 | + |
87 | + else { | 87 | + else { |
88 | + uint32_t lisn; /* Logical Interrupt Source Number */ | 88 | + uint32_t lisn; /* Logical Interrupt Source Number */ |
89 | + | 89 | + |
90 | + /* | 90 | + /* |
91 | + * Perform ESB escalation processing | 91 | + * Perform ESB escalation processing |
92 | + * E[N] == 1 --> N | 92 | + * E[N] == 1 --> N |
93 | + * Req[Block] <- E[ESB_Block] | 93 | + * Req[Block] <- E[ESB_Block] |
94 | + * Req[Index] <- E[ESB_Index] | 94 | + * Req[Index] <- E[ESB_Index] |
95 | + * Req[Offset] <- 0x000 | 95 | + * Req[Offset] <- 0x000 |
96 | + * Execute <ESB Store> Req command | 96 | + * Execute <ESB Store> Req command |
97 | + */ | 97 | + */ |
98 | + lisn = XIVE_EAS(xive_get_field32(END2_W4_END_BLOCK, end.w4), | 98 | + lisn = XIVE_EAS(xive_get_field32(END2_W4_END_BLOCK, end.w4), |
99 | + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4)); | 99 | + xive_get_field32(END2_W4_ESC_END_INDEX, end.w4)); |
100 | + | 100 | + |
101 | + xive2_notify(xrtr, lisn, true /* pq_checked */); | 101 | + xive2_notify(xrtr, lisn, true /* pq_checked */); |
102 | + } | 102 | + } |
103 | + | 103 | + |
104 | + return; | 104 | + return; |
105 | } | 105 | } |
106 | 106 | ||
107 | -void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) | 107 | -void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) |
108 | +void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked) | 108 | +void xive2_notify(Xive2Router *xrtr, uint32_t lisn, bool pq_checked) | ||
109 | { | 109 | { |
110 | - Xive2Router *xrtr = XIVE2_ROUTER(xn); | 110 | - Xive2Router *xrtr = XIVE2_ROUTER(xn); |
111 | uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); | 111 | uint8_t eas_blk = XIVE_EAS_BLOCK(lisn); |
112 | uint32_t eas_idx = XIVE_EAS_INDEX(lisn); | 112 | uint32_t eas_idx = XIVE_EAS_INDEX(lisn); |
113 | Xive2Eas eas; | 113 | Xive2Eas eas; |
114 | @@ -XXX,XX +XXX,XX @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) | 114 | @@ -XXX,XX +XXX,XX @@ void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) |
115 | return; | 115 | return; |
116 | } | 116 | } |
117 | 117 | ||
118 | + /* TODO: add support for EAS resume if ever needed */ | 118 | + /* TODO: add support for EAS resume if ever needed */ |
119 | + if (xive2_eas_is_resume(&eas)) { | 119 | + if (xive2_eas_is_resume(&eas)) { |
120 | + qemu_log_mask(LOG_UNIMP, | 120 | + qemu_log_mask(LOG_UNIMP, |
121 | + "XIVE: EAS resume processing unimplemented - LISN %x\n", | 121 | + "XIVE: EAS resume processing unimplemented - LISN %x\n", |
122 | + lisn); | 122 | + lisn); |
123 | + return; | 123 | + return; |
124 | + } | 124 | + } |
125 | + | 125 | + |
126 | /* | 126 | /* |
127 | * The event trigger becomes an END trigger | 127 | * The event trigger becomes an END trigger |
128 | */ | 128 | */ |
129 | xive2_router_end_notify(xrtr, | 129 | xive2_router_end_notify(xrtr, |
130 | - xive_get_field64(EAS2_END_BLOCK, eas.w), | 130 | - xive_get_field64(EAS2_END_BLOCK, eas.w), |
131 | - xive_get_field64(EAS2_END_INDEX, eas.w), | 131 | - xive_get_field64(EAS2_END_INDEX, eas.w), |
132 | - xive_get_field64(EAS2_END_DATA, eas.w)); | 132 | - xive_get_field64(EAS2_END_DATA, eas.w)); |
133 | + xive_get_field64(EAS2_END_BLOCK, eas.w), | 133 | + xive_get_field64(EAS2_END_BLOCK, eas.w), |
134 | + xive_get_field64(EAS2_END_INDEX, eas.w), | 134 | + xive_get_field64(EAS2_END_INDEX, eas.w), |
135 | + xive_get_field64(EAS2_END_DATA, eas.w)); | 135 | + xive_get_field64(EAS2_END_DATA, eas.w)); |
136 | +} | 136 | +} |
137 | + | 137 | + |
138 | +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) | 138 | +void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked) |
139 | +{ | 139 | +{ |
140 | + Xive2Router *xrtr = XIVE2_ROUTER(xn); | 140 | + Xive2Router *xrtr = XIVE2_ROUTER(xn); |
141 | + | 141 | + |
142 | + xive2_notify(xrtr, lisn, pq_checked); | 142 | + xive2_notify(xrtr, lisn, pq_checked); |
143 | + return; | 143 | + return; |
144 | } | 144 | } |
145 | 145 | ||
146 | static Property xive2_router_properties[] = { | 146 | static Property xive2_router_properties[] = { |
147 | -- | 147 | -- |
148 | 2.43.0 | 148 | 2.43.0 | ||
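The EAS2 field definitions in this patch use QEMU's IBM bit-numbering helpers, in which bit 0 is the most significant bit of the 64-bit word; a two-bit field such as the QoS bits therefore needs the range form PPC_BITMASK() rather than the single-bit PPC_BIT(). A short reference, with the macro bodies reproduced from QEMU's PPC headers and the QoS value worked out as a compile-time check:

    /* IBM bit numbering: bit 0 is the MSB of the 64-bit word. */
    #define PPC_BIT(bit)         (0x8000000000000000ULL >> (bit))
    #define PPC_BITMASK(bs, be)  ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

    /*
     * EAS2_QOS = PPC_BITMASK(1, 2)
     *          = (0x4000... - 0x2000...) | 0x4000...
     *          = 0x6000000000000000ULL, i.e. bits 1..2 from the MSB,
     * sitting right after the valid bit (0) and before the resume
     * bit (3). PPC_BIT() takes a single bit number, so it cannot
     * express a two-bit field.
     */
    _Static_assert(PPC_BITMASK(1, 2) == 0x6000000000000000ULL,
                   "two-bit QoS field next to the valid bit");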
... | ... | ||
---|---|---|---|
6 | confusion in the future and now we delay loading the NVP until | 6 | confusion in the future and now we delay loading the NVP until |
7 | the point where we know that the block and index actually point to | 7 | the point where we know that the block and index actually point to |
8 | an NVP. | 8 | an NVP. | ||
9 | 9 | ||
10 | Suggested-by: Michael Kowal <kowal@us.ibm.com> | 10 | Suggested-by: Michael Kowal <kowal@us.ibm.com> |
11 | Fixes: 6d4c4f70262 ("ppc/xive2: Support crowd-matching when looking for target") | 11 | Fixes: ("ppc/xive2: Support crowd-matching when looking for target") |
12 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> | 12 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> |
13 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 13 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
14 | --- | 14 | --- |
15 | hw/intc/xive2.c | 78 ++++++++++++++++++++++++------------------------- | 15 | hw/intc/xive2.c | 78 ++++++++++++++++++++++++------------------------- |
16 | 1 file changed, 39 insertions(+), 39 deletions(-) | 16 | 1 file changed, 39 insertions(+), 39 deletions(-) |
... | ... | ||
40 | if (qaddr_base) { | 40 | if (qaddr_base) { |
41 | g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", | 41 | g_string_append_printf(buf, " eq:@%08"PRIx64"% 6d/%5d ^%d", |
42 | @@ -XXX,XX +XXX,XX @@ static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, | 42 | @@ -XXX,XX +XXX,XX @@ static void xive2_pgofnext(uint8_t *nvgc_blk, uint32_t *nvgc_idx, |
43 | * level of pending group interrupts. | 43 | * level of pending group interrupts. |
44 | */ | 44 | */ |
45 | static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, | 45 | static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, |
46 | - uint8_t nvp_blk, uint32_t nvp_idx, | 46 | - uint8_t nvp_blk, uint32_t nvp_idx, |
47 | + uint8_t nvx_blk, uint32_t nvx_idx, | 47 | + uint8_t nvx_blk, uint32_t nvx_idx, |
48 | uint8_t first_group, | 48 | uint8_t first_group, |
49 | uint8_t *out_level) | 49 | uint8_t *out_level) |
50 | { | 50 | { |
51 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, | 51 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, |
52 | 52 | ||
53 | for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { | 53 | for (prio = 0; prio <= XIVE_PRIORITY_MAX; prio++) { |
54 | current_level = first_group & 0x3F; | 54 | current_level = first_group & 0x3F; |
55 | - nvgc_blk = nvp_blk; | 55 | - nvgc_blk = nvp_blk; |
56 | - nvgc_idx = nvp_idx; | 56 | - nvgc_idx = nvp_idx; |
57 | + nvgc_blk = nvx_blk; | 57 | + nvgc_blk = nvx_blk; |
58 | + nvgc_idx = nvx_idx; | 58 | + nvgc_idx = nvx_idx; |
59 | 59 | ||
60 | while (current_level) { | 60 | while (current_level) { |
61 | xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); | 61 | xive2_pgofnext(&nvgc_blk, &nvgc_idx, current_level); |
62 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_check(XivePresenter *xptr, | 62 | @@ -XXX,XX +XXX,XX @@ static uint8_t xive2_presenter_backlog_scan(XivePresenter *xptr, |
63 | } | 63 | } |
64 | 64 | ||
65 | static void xive2_presenter_backlog_decr(XivePresenter *xptr, | 65 | static void xive2_presenter_backlog_decr(XivePresenter *xptr, |
66 | - uint8_t nvp_blk, uint32_t nvp_idx, | 66 | - uint8_t nvp_blk, uint32_t nvp_idx, |
67 | + uint8_t nvx_blk, uint32_t nvx_idx, | 67 | + uint8_t nvx_blk, uint32_t nvx_idx, |
... | ... | ||
1 | From: Glenn Miles <milesg@linux.ibm.com> | 1 | From: Glenn Miles <milesg@linux.ibm.com> |
---|---|---|---|
2 | 2 | ||
3 | Added a new test for pool interrupts. | 3 | Added a new test for pool interrupts. Removed all printfs from the pnv-xive2-* qtests. | ||
4 | 4 | ||
5 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> | 5 | Signed-off-by: Glenn Miles <milesg@linux.vnet.ibm.com> |
6 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> | 6 | Signed-off-by: Michael Kowal <kowal@linux.ibm.com> |
7 | --- | 7 | --- |
8 | tests/qtest/pnv-xive2-test.c | 77 ++++++++++++++++++++++++++++++++++++ | 8 | tests/qtest/pnv-xive2-flush-sync.c | 6 +- |
9 | 1 file changed, 77 insertions(+) | 9 | tests/qtest/pnv-xive2-nvpg_bar.c | 7 +-- |
10 | tests/qtest/pnv-xive2-test.c | 98 +++++++++++++++++++++++++++--- | ||
11 | 3 files changed, 94 insertions(+), 17 deletions(-) | ||
10 | 12 | ||
13 | diff --git a/tests/qtest/pnv-xive2-flush-sync.c b/tests/qtest/pnv-xive2-flush-sync.c | ||
14 | index XXXXXXX..XXXXXXX 100644 | ||
15 | --- a/tests/qtest/pnv-xive2-flush-sync.c | ||
16 | +++ b/tests/qtest/pnv-xive2-flush-sync.c | ||
17 | @@ -XXX,XX +XXX,XX @@ void test_flush_sync_inject(QTestState *qts) | ||
18 | int test_nr; | ||
19 | uint8_t byte; | ||
20 | |||
21 | - printf("# ============================================================\n"); | ||
22 | - printf("# Starting cache flush/queue sync injection tests...\n"); | ||
23 | + g_test_message("========================================================="); | ||
24 | + g_test_message("Starting cache flush/queue sync injection tests..."); | ||
25 | |||
26 | for (test_nr = 0; test_nr < sizeof(xive_inject_tests); | ||
27 | test_nr++) { | ||
28 | int op_type = xive_inject_tests[test_nr]; | ||
29 | |||
30 | - printf("# Running test %d\n", test_nr); | ||
31 | + g_test_message("Running test %d", test_nr); | ||
32 | |||
33 | /* start with status byte set to 0 */ | ||
34 | clr_sync(qts, src_pir, ic_topo_id, op_type); | ||
35 | diff --git a/tests/qtest/pnv-xive2-nvpg_bar.c b/tests/qtest/pnv-xive2-nvpg_bar.c | ||
36 | index XXXXXXX..XXXXXXX 100644 | ||
37 | --- a/tests/qtest/pnv-xive2-nvpg_bar.c | ||
38 | +++ b/tests/qtest/pnv-xive2-nvpg_bar.c | ||
39 | @@ -XXX,XX +XXX,XX @@ | ||
40 | * | ||
41 | * Copyright (c) 2024, IBM Corporation. | ||
42 | * | ||
43 | - * This work is licensed under the terms of the GNU GPL, version 2 or | ||
44 | - * later. See the COPYING file in the top-level directory. | ||
45 | + * SPDX-License-Identifier: GPL-2.0-or-later | ||
46 | */ | ||
47 | #include "qemu/osdep.h" | ||
48 | #include "libqtest.h" | ||
49 | @@ -XXX,XX +XXX,XX @@ void test_nvpg_bar(QTestState *qts) | ||
50 | uint32_t count, delta; | ||
51 | uint8_t i; | ||
52 | |||
53 | - printf("# ============================================================\n"); | ||
54 | - printf("# Testing NVPG BAR operations\n"); | ||
55 | + g_test_message("========================================================="); | ||
56 | + g_test_message("Testing NVPG BAR operations"); | ||
57 | |||
58 | set_nvg(qts, group_target, 0); | ||
59 | set_nvp(qts, nvp_target, 0x04); | ||
11 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c | 60 | diff --git a/tests/qtest/pnv-xive2-test.c b/tests/qtest/pnv-xive2-test.c |
12 | index XXXXXXX..XXXXXXX 100644 | 61 | index XXXXXXX..XXXXXXX 100644 |
13 | --- a/tests/qtest/pnv-xive2-test.c | 62 | --- a/tests/qtest/pnv-xive2-test.c |
14 | +++ b/tests/qtest/pnv-xive2-test.c | 63 | +++ b/tests/qtest/pnv-xive2-test.c |
15 | @@ -XXX,XX +XXX,XX @@ | 64 | @@ -XXX,XX +XXX,XX @@ |
... | ... | ||
19 | + * - Test irq to pool thread | 68 | + * - Test irq to pool thread |
20 | * | 69 | * |
21 | * Copyright (c) 2024, IBM Corporation. | 70 | * Copyright (c) 2024, IBM Corporation. |
22 | * | 71 | * |
23 | @@ -XXX,XX +XXX,XX @@ static void test_hw_irq(QTestState *qts) | 72 | @@ -XXX,XX +XXX,XX @@ static void test_hw_irq(QTestState *qts) |
73 | uint16_t reg16; | ||
74 | uint8_t pq, nsr, cppr; | ||
75 | |||
76 | - printf("# ============================================================\n"); | ||
77 | - printf("# Testing irq %d to hardware thread %d\n", irq, target_pir); | ||
78 | + g_test_message("========================================================="); | ||
79 | + g_test_message("Testing irq %d to hardware thread %d", irq, target_pir); | ||
80 | |||
81 | /* irq config */ | ||
82 | set_eas(qts, irq, end_index, irq_data); | ||
83 | @@ -XXX,XX +XXX,XX @@ static void test_hw_irq(QTestState *qts) | ||
24 | g_assert_cmphex(cppr, ==, 0xFF); | 84 | g_assert_cmphex(cppr, ==, 0xFF); |
25 | } | 85 | } |
26 | 86 | ||
27 | +static void test_pool_irq(QTestState *qts) | 87 | +static void test_pool_irq(QTestState *qts) |
28 | +{ | 88 | +{ |
... | ... | ||
34 | + uint8_t priority = 5; | 94 | + uint8_t priority = 5; |
35 | + uint32_t reg32; | 95 | + uint32_t reg32; |
36 | + uint16_t reg16; | 96 | + uint16_t reg16; |
37 | + uint8_t pq, nsr, cppr, ipb; | 97 | + uint8_t pq, nsr, cppr, ipb; |
38 | + | 98 | + |
39 | + printf("# ============================================================\n"); | 99 | + g_test_message("========================================================="); |
40 | + printf("# Testing irq %d to pool thread %d\n", irq, target_pir); | 100 | + g_test_message("Testing irq %d to pool thread %d", irq, target_pir); |
41 | + | 101 | + |
42 | + /* irq config */ | 102 | + /* irq config */ |
43 | + set_eas(qts, irq, end_index, irq_data); | 103 | + set_eas(qts, irq, end_index, irq_data); |
44 | + set_end(qts, end_index, target_nvp, priority, false /* group */); | 104 | + set_end(qts, end_index, target_nvp, priority, false /* group */); |
45 | + | 105 | + |
... | ... | ||
98 | +} | 158 | +} |
99 | + | 159 | + |
100 | #define XIVE_ODD_CL 0x80 | 160 | #define XIVE_ODD_CL 0x80 |
101 | static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) | 161 | static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) |
102 | { | 162 | { |
163 | @@ -XXX,XX +XXX,XX @@ static void test_pull_thread_ctx_to_odd_thread_cl(QTestState *qts) | ||
164 | uint32_t cl_word; | ||
165 | uint32_t word2; | ||
166 | |||
167 | - printf("# ============================================================\n"); | ||
168 | - printf("# Testing 'Pull Thread Context to Odd Thread Reporting Line'\n"); | ||
169 | + g_test_message("========================================================="); | ||
170 | + g_test_message("Testing 'Pull Thread Context to Odd Thread Reporting " \ | ||
171 | + "Line'"); | ||
172 | |||
173 | /* clear odd cache line prior to pull operation */ | ||
174 | memset(cl_pair, 0, sizeof(cl_pair)); | ||
175 | @@ -XXX,XX +XXX,XX @@ static void test_hw_group_irq(QTestState *qts) | ||
176 | uint16_t reg16; | ||
177 | uint8_t pq, nsr, cppr; | ||
178 | |||
179 | - printf("# ============================================================\n"); | ||
180 | - printf("# Testing irq %d to hardware group of size 4\n", irq); | ||
181 | + g_test_message("========================================================="); | ||
182 | + g_test_message("Testing irq %d to hardware group of size 4", irq); | ||
183 | |||
184 | /* irq config */ | ||
185 | set_eas(qts, irq, end_index, irq_data); | ||
186 | @@ -XXX,XX +XXX,XX @@ static void test_hw_group_irq_backlog(QTestState *qts) | ||
187 | uint16_t reg16; | ||
188 | uint8_t pq, nsr, cppr, lsmfb, i; | ||
189 | |||
190 | - printf("# ============================================================\n"); | ||
191 | - printf("# Testing irq %d to hardware group of size 4 going through " \ | ||
192 | - "backlog\n", | ||
193 | - irq); | ||
194 | + g_test_message("========================================================="); | ||
195 | + g_test_message("Testing irq %d to hardware group of size 4 going " \ | ||
196 | + "through backlog", | ||
197 | + irq); | ||
198 | |||
199 | /* | ||
200 | * set current priority of all threads in the group to something | ||
103 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) | 201 | @@ -XXX,XX +XXX,XX @@ static void test_xive(void) |
104 | /* omit reset_state here and use settings from test_hw_irq */ | 202 | /* omit reset_state here and use settings from test_hw_irq */ |
105 | test_pull_thread_ctx_to_odd_thread_cl(qts); | 203 | test_pull_thread_ctx_to_odd_thread_cl(qts); |
106 | 204 | ||
107 | + reset_state(qts); | 205 | + reset_state(qts); |
... | ... | ||
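On the printf()-to-g_test_message() conversion running through these qtest hunks: under the TAP output mode used by QEMU's test harness, g_test_message() emits each line as a '# ' diagnostic, so the harness rather than the test takes care of the TAP prefix; that is why the old printf() calls had to hand-prefix "# " and why the replacements drop it. A minimal standalone sketch of the pattern (the test path and values are made up for illustration; only GLib is required):

    /*
     * Sketch of the g_test_message() pattern used in the qtests
     * above. Build roughly with:
     *   cc sketch.c $(pkg-config --cflags --libs glib-2.0)
     */
    #include <glib.h>

    static void test_pool_irq(void)
    {
        int irq = 2, target_pir = 1;   /* made-up values */

        g_test_message("=====================================================");
        g_test_message("Testing irq %d to pool thread %d", irq, target_pir);
        g_assert_cmphex(0xFF, ==, 0xFF);   /* placeholder check */
    }

    int main(int argc, char **argv)
    {
        g_test_init(&argc, &argv, NULL);
        g_test_add_func("/pnv-xive2/pool-irq", test_pool_irq);
        return g_test_run();
    }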