Expose two helper functions to support emulated ITS in the hypervisor.
These allow the KVM layer to notify the driver when hypervisor
initialization is complete.
The caller is expected to use the functions as follows:
1. its_start_deprivilege(): Acquire the ITS locks.
2. on_each_cpu(_kvm_host_prot_finalize, ...): Finalizes pKVM init
3. its_end_deprivilege(): Shadow the ITS structures, invoke the KVM
callback, and release locks.
Specifically, this shadows the ITS command queue and the 1st level
indirect tables. These shadow buffers will be used by the driver after
host deprivilege, while the hypervisor unmaps and takes ownership of the
original structures.
Signed-off-by: Sebastian Ene <sebastianene@google.com>
---
drivers/irqchip/irq-gic-v3-its.c | 165 +++++++++++++++++++++++++++--
include/linux/irqchip/arm-gic-v3.h | 24 +++++
2 files changed, 178 insertions(+), 11 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 291d7668cc8d..278dbc56f962 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -78,17 +78,6 @@ struct its_collection {
u16 col_id;
};
-/*
- * The ITS_BASER structure - contains memory information, cached
- * value of BASER register configuration and ITS page size.
- */
-struct its_baser {
- void *base;
- u64 val;
- u32 order;
- u32 psz;
-};
-
struct its_device;
/*
@@ -5232,6 +5221,160 @@ static int __init its_compute_its_list_map(struct its_node *its)
return its_number;
}
+static void its_free_shadow_tables(struct its_shadow_tables *shadow)
+{
+ int i;
+
+ if (shadow->cmd_shadow)
+ its_free_pages(shadow->cmd_shadow, get_order(ITS_CMD_QUEUE_SZ));
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (!shadow->tables[i].shadow)
+ continue;
+
+ its_free_pages(shadow->tables[i].shadow, 0);
+ }
+
+ its_free_pages(shadow, 0);
+}
+
+static struct its_shadow_tables *its_get_shadow_tables(struct its_node *its)
+{
+ void *page;
+ struct its_shadow_tables *shadow;
+ int i;
+
+ page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 0);
+ if (!page)
+ return NULL;
+
+ shadow = (void *)page_address(page);
+ page = its_alloc_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ get_order(ITS_CMD_QUEUE_SZ));
+ if (!page)
+ goto err_alloc_shadow;
+
+ shadow->cmd_shadow = page_address(page);
+ shadow->cmdq_len = ITS_CMD_QUEUE_SZ;
+ shadow->cmd_original = its->cmd_base;
+
+ memcpy(shadow->tables, its->tables, sizeof(struct its_baser) * GITS_BASER_NR_REGS);
+
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ if (!(shadow->tables[i].val & GITS_BASER_VALID))
+ continue;
+
+ if (!(shadow->tables[i].val & GITS_BASER_INDIRECT))
+ continue;
+
+ page = its_alloc_pages_node(its->numa_node,
+ GFP_KERNEL | __GFP_ZERO,
+ shadow->tables[i].order);
+ if (!page)
+ goto err_alloc_shadow;
+
+ shadow->tables[i].shadow = page_address(page);
+
+ memcpy(shadow->tables[i].shadow, shadow->tables[i].base,
+ PAGE_ORDER_TO_SIZE(shadow->tables[i].order));
+ }
+
+ return shadow;
+
+err_alloc_shadow:
+ its_free_shadow_tables(shadow);
+ return NULL;
+}
+
+void *its_start_depriviledge(void)
+{
+ struct its_node *its;
+ int num_nodes = 0, i = 0;
+ unsigned long *flags;
+
+ raw_spin_lock(&its_lock);
+ list_for_each_entry(its, &its_nodes, entry) {
+ num_nodes++;
+ }
+
+ flags = kzalloc(num_nodes * sizeof(unsigned long), GFP_KERNEL_ACCOUNT);
+ if (!flags) {
+ raw_spin_unlock(&its_lock);
+ return NULL;
+ }
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ raw_spin_lock_irqsave(&its->lock, flags[i++]);
+ }
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(its_start_depriviledge);
+
+static int its_switch_to_shadow_locked(struct its_node *its, its_init_emulate init_emulate_cb)
+{
+ struct its_shadow_tables *hyp_shadow, shadow;
+ int i, ret;
+ u64 baser, baser_phys;
+
+ hyp_shadow = its_get_shadow_tables(its);
+ if (!hyp_shadow)
+ return -ENOMEM;
+
+ memcpy(&shadow, hyp_shadow, sizeof(shadow));
+ ret = init_emulate_cb(its->phys_base, hyp_shadow);
+ if (ret) {
+ its_free_shadow_tables(hyp_shadow);
+ return ret;
+ }
+
+ /* Switch the driver command queue to use the shadow and save the original */
+ its->cmd_write = (its->cmd_write - its->cmd_base) +
+ (struct its_cmd_block *)shadow.cmd_shadow;
+ its->cmd_base = shadow.cmd_shadow;
+
+ /* Shadow the first level of the indirect tables */
+ for (i = 0; i < GITS_BASER_NR_REGS; i++) {
+ baser = shadow.tables[i].val;
+
+ if (!shadow.tables[i].shadow)
+ continue;
+
+ baser_phys = virt_to_phys(shadow.tables[i].shadow);
+ if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48))
+ baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
+
+ its->tables[i].val &= ~GENMASK(47, 12);
+ its->tables[i].val |= baser_phys;
+ its->tables[i].base = shadow.tables[i].shadow;
+ }
+
+ return 0;
+}
+
+int its_end_depriviledge(int ret_pkvm_finalize, unsigned long *flags, its_init_emulate cb)
+{
+ struct its_node *its;
+ int i = 0, ret = 0;
+
+ if (!flags || !cb)
+ return -EINVAL;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!ret_pkvm_finalize && !ret)
+ ret = its_switch_to_shadow_locked(its, cb);
+
+ raw_spin_unlock_irqrestore(&its->lock, flags[i++]);
+ }
+
+ kfree(flags);
+ raw_spin_unlock(&its_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(its_end_depriviledge);
+
static int __init its_probe_one(struct its_node *its)
{
u64 baser, tmp;
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 0225121f3013..40457a4375d4 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -657,6 +657,30 @@ static inline bool gic_enable_sre(void)
return !!(val & ICC_SRE_EL1_SRE);
}
+/*
+ * The ITS_BASER structure - contains memory information, cached
+ * value of BASER register configuration and ITS page size.
+ */
+struct its_baser {
+ void *base;
+ void *shadow;
+ u64 val;
+ u32 order;
+ u32 psz;
+};
+
+struct its_shadow_tables {
+ struct its_baser tables[GITS_BASER_NR_REGS];
+ void *cmd_shadow;
+ void *cmd_original;
+ size_t cmdq_len;
+};
+
+typedef int (*its_init_emulate)(phys_addr_t its_phys_base, struct its_shadow_tables *shadow);
+
+void *its_start_depriviledge(void);
+int its_end_depriviledge(int ret, unsigned long *flags, its_init_emulate cb);
+
#endif
#endif
--
2.53.0.473.g4a7958ca14-goog
Hi Sebastian,
On Tue, 10 Mar 2026 at 12:49, Sebastian Ene <sebastianene@google.com> wrote:
>
> Expose two helper functions to support emulated ITS in the hypervisor.
> These allow the KVM layer to notify the driver when hypervisor
> initialization is complete.
> The caller is expected to use the functions as follows:
> 1. its_start_deprivilege(): Acquire the ITS locks.
> 2. on_each_cpu(_kvm_host_prot_finalize, ...): Finalizes pKVM init
> 3. its_end_deprivilege(): Shadow the ITS structures, invoke the KVM
> callback, and release locks.
> Specifically, this shadows the ITS command queue and the 1st level
> indirect tables. These shadow buffers will be used by the driver after
> host deprivilege, while the hypervisor unmaps and takes ownership of the
> original structures.
Just a note again on preferring not to use the "shadow" terminology. I
thought about it a bit more, since these are not at the host, perhaps
"proxy" is a better term, to convey that the host is writing to a
middle-man buffer.
Another term is "staging," which is common in DMA: the host "stages"
the commands here, and EL2 "commits" them to the hardware.
>
> Signed-off-by: Sebastian Ene <sebastianene@google.com>
> ---
> drivers/irqchip/irq-gic-v3-its.c | 165 +++++++++++++++++++++++++++--
> include/linux/irqchip/arm-gic-v3.h | 24 +++++
> 2 files changed, 178 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
> index 291d7668cc8d..278dbc56f962 100644
> --- a/drivers/irqchip/irq-gic-v3-its.c
> +++ b/drivers/irqchip/irq-gic-v3-its.c
> @@ -78,17 +78,6 @@ struct its_collection {
> u16 col_id;
> };
>
> -/*
> - * The ITS_BASER structure - contains memory information, cached
> - * value of BASER register configuration and ITS page size.
> - */
> -struct its_baser {
> - void *base;
> - u64 val;
> - u32 order;
> - u32 psz;
> -};
> -
> struct its_device;
>
> /*
> @@ -5232,6 +5221,160 @@ static int __init its_compute_its_list_map(struct its_node *its)
> return its_number;
> }
>
> +static void its_free_shadow_tables(struct its_shadow_tables *shadow)
> +{
> + int i;
> +
> + if (shadow->cmd_shadow)
> + its_free_pages(shadow->cmd_shadow, get_order(ITS_CMD_QUEUE_SZ));
> +
> + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> + if (!shadow->tables[i].shadow)
> + continue;
> +
> + its_free_pages(shadow->tables[i].shadow, 0);
> + }
> +
> + its_free_pages(shadow, 0);
> +}
> +
> +static struct its_shadow_tables *its_get_shadow_tables(struct its_node *its)
> +{
> + void *page;
> + struct its_shadow_tables *shadow;
> + int i;
Prefer RCT declarations.
> +
> + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 0);
This is called with the raw_spin_lock_irqsave held, and GFP_KERNEL can
sleep. You have one of two options: either use GFP_ATOMIC, but that's
more likely to fail. The alternative is to move this to
its_start_deprivilege(), before any lock is held.
> + if (!page)
> + return NULL;
> +
> + shadow = (void *)page_address(page);
> + page = its_alloc_pages_node(its->numa_node,
> + GFP_KERNEL | __GFP_ZERO,
> + get_order(ITS_CMD_QUEUE_SZ));
> + if (!page)
> + goto err_alloc_shadow;
> +
> + shadow->cmd_shadow = page_address(page);
> + shadow->cmdq_len = ITS_CMD_QUEUE_SZ;
> + shadow->cmd_original = its->cmd_base;
> +
> + memcpy(shadow->tables, its->tables, sizeof(struct its_baser) * GITS_BASER_NR_REGS);
> +
> + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> + if (!(shadow->tables[i].val & GITS_BASER_VALID))
> + continue;
> +
> + if (!(shadow->tables[i].val & GITS_BASER_INDIRECT))
> + continue;
> +
> + page = its_alloc_pages_node(its->numa_node,
> + GFP_KERNEL | __GFP_ZERO,
> + shadow->tables[i].order);
> + if (!page)
> + goto err_alloc_shadow;
> +
> + shadow->tables[i].shadow = page_address(page);
> +
> + memcpy(shadow->tables[i].shadow, shadow->tables[i].base,
> + PAGE_ORDER_TO_SIZE(shadow->tables[i].order));
> + }
> +
> + return shadow;
> +
> +err_alloc_shadow:
> + its_free_shadow_tables(shadow);
> + return NULL;
> +}
> +
> +void *its_start_depriviledge(void)
Typo here and elsewhere in this patch:
s/depriviledge/deprivilege/g
This is particularly important because it also appears in exported
symbols as well (later in this patch).
> +{
> + struct its_node *its;
> + int num_nodes = 0, i = 0;
> + unsigned long *flags;
RCT declaration order, and please untangle them, i.e., don't declare
the num_nodes and the iterator in the same line.
> +
> + raw_spin_lock(&its_lock);
> + list_for_each_entry(its, &its_nodes, entry) {
> + num_nodes++;
> + }
> +
> + flags = kzalloc(num_nodes * sizeof(unsigned long), GFP_KERNEL_ACCOUNT);
Same as the other allocation. This can sleep. I think that for this as
well, it's better to move it before lock acquisition. Even if you use
a different allocator, it's still better to keep the critical section
short.
> + if (!flags) {
> + raw_spin_unlock(&its_lock);
> + return NULL;
> + }
> +
> + list_for_each_entry(its, &its_nodes, entry) {
> + raw_spin_lock_irqsave(&its->lock, flags[i++]);
> + }
> +
> + return flags;
> +}
> +EXPORT_SYMBOL_GPL(its_start_depriviledge);
> +
> +static int its_switch_to_shadow_locked(struct its_node *its, its_init_emulate init_emulate_cb)
> +{
> + struct its_shadow_tables *hyp_shadow, shadow;
> + int i, ret;
> + u64 baser, baser_phys;
> +
> + hyp_shadow = its_get_shadow_tables(its);
> + if (!hyp_shadow)
> + return -ENOMEM;
> +
> + memcpy(&shadow, hyp_shadow, sizeof(shadow));
> + ret = init_emulate_cb(its->phys_base, hyp_shadow);
You are performing this callback with the lock held and local
interrupts disabled. The HVC call is by itself expensive, especially
since it's going to do stage-2 manipulations.
You should decouple the synchronous pointer swapping (which must be
locked) from the hypervisor notification (which can be done outside
the lock). Instead of executing the callback inside the critical
section, its_end_deprivilege should:
- Lock everything.
- Perform the pointer swaps in the host driver structures.
- Save the hyp_shadow pointers to a temporary array.
- Unlock everything.
- Loop through the temporary array and call the KVM cb to notify EL2.
You should probably split this patch into two. The first patch would
implement the freeze/unfreeze locking mechanism, and the second would
swap the driver's internal memory pointers to the shadow structures,
and invoke the KVM callback to lock down the real hardware.
Cheers,
/fuad
> + if (ret) {
> + its_free_shadow_tables(hyp_shadow);
> + return ret;
> + }
> +
> + /* Switch the driver command queue to use the shadow and save the original */
> + its->cmd_write = (its->cmd_write - its->cmd_base) +
> + (struct its_cmd_block *)shadow.cmd_shadow;
> + its->cmd_base = shadow.cmd_shadow;
> +
> + /* Shadow the first level of the indirect tables */
> + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> + baser = shadow.tables[i].val;
> +
> + if (!shadow.tables[i].shadow)
> + continue;
> +
> + baser_phys = virt_to_phys(shadow.tables[i].shadow);
> + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48))
> + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
> +
> + its->tables[i].val &= ~GENMASK(47, 12);
> + its->tables[i].val |= baser_phys;
> + its->tables[i].base = shadow.tables[i].shadow;
> + }
> +
> + return 0;
> +}
> +
> +int its_end_depriviledge(int ret_pkvm_finalize, unsigned long *flags, its_init_emulate cb)
> +{
> + struct its_node *its;
> + int i = 0, ret = 0;
> +
> + if (!flags || !cb)
> + return -EINVAL;
> +
> + list_for_each_entry(its, &its_nodes, entry) {
> + if (!ret_pkvm_finalize && !ret)
> + ret = its_switch_to_shadow_locked(its, cb);
> +
> + raw_spin_unlock_irqrestore(&its->lock, flags[i++]);
> + }
> +
> + kfree(flags);
> + raw_spin_unlock(&its_lock);
> +
> + return ret;
> +}
> +EXPORT_SYMBOL_GPL(its_end_depriviledge);
> +
> static int __init its_probe_one(struct its_node *its)
> {
> u64 baser, tmp;
> diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
> index 0225121f3013..40457a4375d4 100644
> --- a/include/linux/irqchip/arm-gic-v3.h
> +++ b/include/linux/irqchip/arm-gic-v3.h
> @@ -657,6 +657,30 @@ static inline bool gic_enable_sre(void)
> return !!(val & ICC_SRE_EL1_SRE);
> }
>
> +/*
> + * The ITS_BASER structure - contains memory information, cached
> + * value of BASER register configuration and ITS page size.
> + */
> +struct its_baser {
> + void *base;
> + void *shadow;
> + u64 val;
> + u32 order;
> + u32 psz;
> +};
> +
> +struct its_shadow_tables {
> + struct its_baser tables[GITS_BASER_NR_REGS];
> + void *cmd_shadow;
> + void *cmd_original;
> + size_t cmdq_len;
> +};
> +
> +typedef int (*its_init_emulate)(phys_addr_t its_phys_base, struct its_shadow_tables *shadow);
> +
> +void *its_start_depriviledge(void);
> +int its_end_depriviledge(int ret, unsigned long *flags, its_init_emulate cb);
> +
> #endif
>
> #endif
> --
> 2.53.0.473.g4a7958ca14-goog
>
On Fri, Mar 13, 2026 at 11:26:04AM +0000, Fuad Tabba wrote:
Hi Fuad,
> Hi Sebastian,
>
> On Tue, 10 Mar 2026 at 12:49, Sebastian Ene <sebastianene@google.com> wrote:
> >
> > Expose two helper functions to support emulated ITS in the hypervisor.
> > These allow the KVM layer to notify the driver when hypervisor
> > initialization is complete.
> > The caller is expected to use the functions as follows:
> > 1. its_start_deprivilege(): Acquire the ITS locks.
> > 2. on_each_cpu(_kvm_host_prot_finalize, ...): Finalizes pKVM init
> > 3. its_end_deprivilege(): Shadow the ITS structures, invoke the KVM
> > callback, and release locks.
> > Specifically, this shadows the ITS command queue and the 1st level
> > indirect tables. These shadow buffers will be used by the driver after
> > host deprivilege, while the hypervisor unmaps and takes ownership of the
> > original structures.
>
> Just a note again on preferring not to use the "shadow" terminology. I
> thought about it a bit more, since these are not at the host, perhaps
> "proxy" is a better term, to convey that the host is writing to a
> middle-man buffer.
>
> Another term is "staging," which is common in DMA: the host "stages"
> the commands here, and EL2 "commits" them to the hardware.
Sure, happy to use one of the two indicated ones.
>
> >
> > Signed-off-by: Sebastian Ene <sebastianene@google.com>
> > ---
> > drivers/irqchip/irq-gic-v3-its.c | 165 +++++++++++++++++++++++++++--
> > include/linux/irqchip/arm-gic-v3.h | 24 +++++
> > 2 files changed, 178 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
> > index 291d7668cc8d..278dbc56f962 100644
> > --- a/drivers/irqchip/irq-gic-v3-its.c
> > +++ b/drivers/irqchip/irq-gic-v3-its.c
> > @@ -78,17 +78,6 @@ struct its_collection {
> > u16 col_id;
> > };
> >
> > -/*
> > - * The ITS_BASER structure - contains memory information, cached
> > - * value of BASER register configuration and ITS page size.
> > - */
> > -struct its_baser {
> > - void *base;
> > - u64 val;
> > - u32 order;
> > - u32 psz;
> > -};
> > -
> > struct its_device;
> >
> > /*
> > @@ -5232,6 +5221,160 @@ static int __init its_compute_its_list_map(struct its_node *its)
> > return its_number;
> > }
> >
> > +static void its_free_shadow_tables(struct its_shadow_tables *shadow)
> > +{
> > + int i;
> > +
> > + if (shadow->cmd_shadow)
> > + its_free_pages(shadow->cmd_shadow, get_order(ITS_CMD_QUEUE_SZ));
> > +
> > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > + if (!shadow->tables[i].shadow)
> > + continue;
> > +
> > + its_free_pages(shadow->tables[i].shadow, 0);
> > + }
> > +
> > + its_free_pages(shadow, 0);
> > +}
> > +
> > +static struct its_shadow_tables *its_get_shadow_tables(struct its_node *its)
> > +{
> > + void *page;
> > + struct its_shadow_tables *shadow;
> > + int i;
>
> Prefer RCT declarations.
>
> > +
> > + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 0);
>
> This is called with the raw_spin_lock_irqsave held, and GFP_KERNEL can
> sleep. You have one of two options, either use GFP_ATOMIC, but that's
> more likely to fail. The alternative is to move this to
> its_start_deprivilege(), before any lock is held.
>
Thanks, I will try to move the allocation before the lock.
> > + if (!page)
> > + return NULL;
> > +
> > + shadow = (void *)page_address(page);
> > + page = its_alloc_pages_node(its->numa_node,
> > + GFP_KERNEL | __GFP_ZERO,
> > + get_order(ITS_CMD_QUEUE_SZ));
> > + if (!page)
> > + goto err_alloc_shadow;
> > +
> > + shadow->cmd_shadow = page_address(page);
> > + shadow->cmdq_len = ITS_CMD_QUEUE_SZ;
> > + shadow->cmd_original = its->cmd_base;
> > +
> > + memcpy(shadow->tables, its->tables, sizeof(struct its_baser) * GITS_BASER_NR_REGS);
> > +
> > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > + if (!(shadow->tables[i].val & GITS_BASER_VALID))
> > + continue;
> > +
> > + if (!(shadow->tables[i].val & GITS_BASER_INDIRECT))
> > + continue;
> > +
> > + page = its_alloc_pages_node(its->numa_node,
> > + GFP_KERNEL | __GFP_ZERO,
> > + shadow->tables[i].order);
> > + if (!page)
> > + goto err_alloc_shadow;
> > +
> > + shadow->tables[i].shadow = page_address(page);
> > +
> > + memcpy(shadow->tables[i].shadow, shadow->tables[i].base,
> > + PAGE_ORDER_TO_SIZE(shadow->tables[i].order));
> > + }
> > +
> > + return shadow;
> > +
> > +err_alloc_shadow:
> > + its_free_shadow_tables(shadow);
> > + return NULL;
> > +}
> > +
> > +void *its_start_depriviledge(void)
>
> Typo here and elsewhere in this patch:
>
> s/depriviledge/deprivilege/g
>
> This is particularly important because it also appears in exported
> symbols as well (later in this patch).
>
Ack, will fix this.
> > +{
> > + struct its_node *its;
> > + int num_nodes = 0, i = 0;
> > + unsigned long *flags;
>
> RCT declaration order, and please untagle them, i.e., don't declare
> the num_nodes and the iterator in the same line.
>
Ack,
> > +
> > + raw_spin_lock(&its_lock);
> > + list_for_each_entry(its, &its_nodes, entry) {
> > + num_nodes++;
> > + }
> > +
> > + flags = kzalloc(num_nodes * sizeof(unsigned long), GFP_KERNEL_ACCOUNT);
>
> Same as the other allocation. This can sleep. I think that for this as
> well, it's better to move it before lock acquisition. Even if you use
> a different allocator, it's still better to keep the critical section
> short.
>
> > + if (!flags) {
> > + raw_spin_unlock(&its_lock);
> > + return NULL;
> > + }
> > +
> > + list_for_each_entry(its, &its_nodes, entry) {
> > + raw_spin_lock_irqsave(&its->lock, flags[i++]);
> > + }
> > +
> > + return flags;
> > +}
> > +EXPORT_SYMBOL_GPL(its_start_depriviledge);
> > +
> > +static int its_switch_to_shadow_locked(struct its_node *its, its_init_emulate init_emulate_cb)
> > +{
> > + struct its_shadow_tables *hyp_shadow, shadow;
> > + int i, ret;
> > + u64 baser, baser_phys;
> > +
> > + hyp_shadow = its_get_shadow_tables(its);
> > + if (!hyp_shadow)
> > + return -ENOMEM;
> > +
> > + memcpy(&shadow, hyp_shadow, sizeof(shadow));
> > + ret = init_emulate_cb(its->phys_base, hyp_shadow);
>
> You are performing this callback with the lock held and local
> interrupts disabled. The hvc call is byitself expensive, especially
> since it's going to do stage-2 manipulations.
>
> You should decouple the synchronous pointer swapping (which must be
> locked) from the hypervisor notification (which can be done outside
> the lock). Instead of executing the callback inside the critical
> section, its_end_deprivilege should:
> - Lock everything.
> - Perform the pointer swaps in the host driver structures.
> - Save the hyp_shadow pointers to a temporary array.
> - Unlock everything.
I am afraid you can't do that because you can have dropped commands &
timeouts between these two steps. The driver might put commands in the
swapped queue and they will timeout.
> - Loop through the temporary array and call the KVM cb to notify EL2.
>
> You should probably split this patch into two. The first patch would
> implement the freeze/unfreeze locking mechanism, and the second would
> swap the driver's internal memory pointers to the shadow structures,
> and invoke the KVM callback to lock down the real hardware.
>
> Cheers,
> /fuad
>
Thanks,
Sebastian
> > + if (ret) {
> > + its_free_shadow_tables(hyp_shadow);
> > + return ret;
> > + }
> > +
> > + /* Switch the driver command queue to use the shadow and save the original */
> > + its->cmd_write = (its->cmd_write - its->cmd_base) +
> > + (struct its_cmd_block *)shadow.cmd_shadow;
> > + its->cmd_base = shadow.cmd_shadow;
> > +
> > + /* Shadow the first level of the indirect tables */
> > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > + baser = shadow.tables[i].val;
> > +
> > + if (!shadow.tables[i].shadow)
> > + continue;
> > +
> > + baser_phys = virt_to_phys(shadow.tables[i].shadow);
> > + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48))
> > + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
> > +
> > + its->tables[i].val &= ~GENMASK(47, 12);
> > + its->tables[i].val |= baser_phys;
> > + its->tables[i].base = shadow.tables[i].shadow;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +int its_end_depriviledge(int ret_pkvm_finalize, unsigned long *flags, its_init_emulate cb)
> > +{
> > + struct its_node *its;
> > + int i = 0, ret = 0;
> > +
> > + if (!flags || !cb)
> > + return -EINVAL;
> > +
> > + list_for_each_entry(its, &its_nodes, entry) {
> > + if (!ret_pkvm_finalize && !ret)
> > + ret = its_switch_to_shadow_locked(its, cb);
> > +
> > + raw_spin_unlock_irqrestore(&its->lock, flags[i++]);
> > + }
> > +
> > + kfree(flags);
> > + raw_spin_unlock(&its_lock);
> > +
> > + return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(its_end_depriviledge);
> > +
> > static int __init its_probe_one(struct its_node *its)
> > {
> > u64 baser, tmp;
> > diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
> > index 0225121f3013..40457a4375d4 100644
> > --- a/include/linux/irqchip/arm-gic-v3.h
> > +++ b/include/linux/irqchip/arm-gic-v3.h
> > @@ -657,6 +657,30 @@ static inline bool gic_enable_sre(void)
> > return !!(val & ICC_SRE_EL1_SRE);
> > }
> >
> > +/*
> > + * The ITS_BASER structure - contains memory information, cached
> > + * value of BASER register configuration and ITS page size.
> > + */
> > +struct its_baser {
> > + void *base;
> > + void *shadow;
> > + u64 val;
> > + u32 order;
> > + u32 psz;
> > +};
> > +
> > +struct its_shadow_tables {
> > + struct its_baser tables[GITS_BASER_NR_REGS];
> > + void *cmd_shadow;
> > + void *cmd_original;
> > + size_t cmdq_len;
> > +};
> > +
> > +typedef int (*its_init_emulate)(phys_addr_t its_phys_base, struct its_shadow_tables *shadow);
> > +
> > +void *its_start_depriviledge(void);
> > +int its_end_depriviledge(int ret, unsigned long *flags, its_init_emulate cb);
> > +
> > #endif
> >
> > #endif
> > --
> > 2.53.0.473.g4a7958ca14-goog
> >
Hi Seb,
<snip>
> > You should decouple the synchronous pointer swapping (which must be
> > locked) from the hypervisor notification (which can be done outside
> > the lock). Instead of executing the callback inside the critical
> > section, its_end_deprivilege should:
> > - Lock everything.
> > - Perform the pointer swaps in the host driver structures.
> > - Save the hyp_shadow pointers to a temporary array.
> > - Unlock everything.
>
> I am afraid you can't do that because you can have dropped commands &
> timeouts between these two steps. The driver might put commands in the
> swapped queue and they will timeout.
You're right, that won't work. Simply releasing the lock between the
pointer swap and the hypercall (HVC) isn't safe for two reasons:
- Timeouts (your point): If we swap the pointers to the new shadow
queue and drop the lock, another CPU might immediately try to queue a
command. It will trap to EL2 (cwriter_write), but because the HVC
hasn't finished initializing the hypervisor's internal state
(region->priv), EL2 will
drop the MMIO write. The host will then spin in
its_wait_for_range_completion waiting for the hardware to process a
command it never saw, resulting in a timeout.
- Stage-2 Aborts: Conversely, if we try to run the HVC before swapping
the pointers, the HVC will actively unmap (donate) the original
queue's memory to EL2. If the host is not locked and tries to write to
the old queue during this window, it will trigger a Stage-2 Data Abort
and boom!
We must pause host command submission to the affected ITS while the
HVC is running. That said, I still don't think it's acceptable to do
what you propose in this patch. This holds the raw_spin_lock_irqsave
for way too long, keeping local interrupts disabled while performing
slow hypercalls for the entire system.
I had a bit of a think, and I have two ideas, one is an improvement,
but not a full solution. It's simple though, and it might be enough
for now and I am more confident that it works. The second one is a
better solution I think, assuming it works and I haven't missed
anything :)
Option A: Granular Locking (Per-ITS Lock & HVC)
In this patch its_start_depriviledge effectively locks *all* ITS nodes
in the system simultaneously. Then, its_end_depriviledge calls the HVC
for every single ITS sequentially, while the CPU is still holding all
the locks with interrupts globally disabled. This makes this critical
section very long. Making it shorter is pretty straightforward I
think...
Instead of trying to decouple the pointer swap from the HVC, we can
start by reducing the scope of the lock. We remove the global locking
in its_start_depriviledge. Inside its_end_depriviledge, we process one
ITS at a time:
1. Disable local interrupts and lock one specific ITS
(raw_spin_lock_irqsave(&its->lock)).
2. Perform the pointer swap AND the HVC for this specific ITS.
3. Unlock and re-enable local interrupts (raw_spin_unlock_irqrestore).
4. Move to the next ITS in the list.
This should be simple to implement. It guarantees zero dropped
commands and zero aborts because the swap and HVC remain atomic.
However, the CPU executing the deprivilege still holds a raw spinlock
during a hypercall, but the duration is reduced to a single ITS node
rather than the entire thing.
Option B: Software Quiescence (Driver-Level Pausing)
To make it so that the HVC runs outside of an atomic context (with
local interrupts enabled), maybe we can teach the ITS driver to
voluntarily pause command submission without holding the raw spinlock.
I was thinking that we introduce a new state flag, e.g.,
is_vmm_migrating, to struct its_node. Every ITS command goes through
the BUILD_SINGLE_CMD_FUNC macro. We modify this macro so that if a CPU
tries to send a command and sees this flag is true, it temporarily
drops the lock, re-enables its interrupts, spins (cpu_relax()), and
retries.
The deprivilege sequence per ITS then becomes:
1. Lock: Acquire its->lock.
2. Swap & Pause: Swap the pointers to the shadow queue and set
its->is_vmm_migrating = true.
3. Unlock: Drop its->lock and re-enable interrupts. (Any other CPU
trying to send a command to this ITS will now safely spin and wait).
4. The HVC: Execute the slow hypercall safely outside of atomic context.
5. Resume: Re-acquire its->lock, set its->is_vmm_migrating = false,
and drop the lock. (This wakes up any spinning CPUs, and they
immediately send their commands to the newly registered shadow queue).
The HVC runs safely with local interrupts enabled, guaranteeing that
no commands are dropped or sent to unmapped memory. If a hardware
interrupt fires on another CPU that requires sending an ITS command
exactly while the HVC is running, that CPU will be forced to spin.
However, this is no worse than acquiring locks, where that CPU would
have been spinning waiting for the raw spinlock anyway.
What do you think?
If you like, I could hack something and we could discuss it some more.
Cheers,
/fuad
>
> > - Loop through the temporary array and call the KVM cb to notify EL2.
> >
> > You should probably split this patch into two. The first patch would
> > implement the freeze/unfreeze locking mechanism, and the second would
> > swap the driver's internal memory pointers to the shadow structures,
> > and invoke the KVM callback to lock down the real hardware.
> >
> > Cheers,
> > /fuad
> >
>
> Thanks,
> Sebastian
>
> > > + if (ret) {
> > > + its_free_shadow_tables(hyp_shadow);
> > > + return ret;
> > > + }
> > > +
> > > + /* Switch the driver command queue to use the shadow and save the original */
> > > + its->cmd_write = (its->cmd_write - its->cmd_base) +
> > > + (struct its_cmd_block *)shadow.cmd_shadow;
> > > + its->cmd_base = shadow.cmd_shadow;
> > > +
> > > + /* Shadow the first level of the indirect tables */
> > > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > > + baser = shadow.tables[i].val;
> > > +
> > > + if (!shadow.tables[i].shadow)
> > > + continue;
> > > +
> > > + baser_phys = virt_to_phys(shadow.tables[i].shadow);
> > > + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48))
> > > + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
> > > +
> > > + its->tables[i].val &= ~GENMASK(47, 12);
> > > + its->tables[i].val |= baser_phys;
> > > + its->tables[i].base = shadow.tables[i].shadow;
> > > + }
> > > +
> > > + return 0;
> > > +}
> > > +
> > > +int its_end_depriviledge(int ret_pkvm_finalize, unsigned long *flags, its_init_emulate cb)
> > > +{
> > > + struct its_node *its;
> > > + int i = 0, ret = 0;
> > > +
> > > + if (!flags || !cb)
> > > + return -EINVAL;
> > > +
> > > + list_for_each_entry(its, &its_nodes, entry) {
> > > + if (!ret_pkvm_finalize && !ret)
> > > + ret = its_switch_to_shadow_locked(its, cb);
> > > +
> > > + raw_spin_unlock_irqrestore(&its->lock, flags[i++]);
> > > + }
> > > +
> > > + kfree(flags);
> > > + raw_spin_unlock(&its_lock);
> > > +
> > > + return ret;
> > > +}
> > > +EXPORT_SYMBOL_GPL(its_end_depriviledge);
> > > +
> > > static int __init its_probe_one(struct its_node *its)
> > > {
> > > u64 baser, tmp;
> > > diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
> > > index 0225121f3013..40457a4375d4 100644
> > > --- a/include/linux/irqchip/arm-gic-v3.h
> > > +++ b/include/linux/irqchip/arm-gic-v3.h
> > > @@ -657,6 +657,30 @@ static inline bool gic_enable_sre(void)
> > > return !!(val & ICC_SRE_EL1_SRE);
> > > }
> > >
> > > +/*
> > > + * The ITS_BASER structure - contains memory information, cached
> > > + * value of BASER register configuration and ITS page size.
> > > + */
> > > +struct its_baser {
> > > + void *base;
> > > + void *shadow;
> > > + u64 val;
> > > + u32 order;
> > > + u32 psz;
> > > +};
> > > +
> > > +struct its_shadow_tables {
> > > + struct its_baser tables[GITS_BASER_NR_REGS];
> > > + void *cmd_shadow;
> > > + void *cmd_original;
> > > + size_t cmdq_len;
> > > +};
> > > +
> > > +typedef int (*its_init_emulate)(phys_addr_t its_phys_base, struct its_shadow_tables *shadow);
> > > +
> > > +void *its_start_depriviledge(void);
> > > +int its_end_depriviledge(int ret, unsigned long *flags, its_init_emulate cb);
> > > +
> > > #endif
> > >
> > > #endif
> > > --
> > > 2.53.0.473.g4a7958ca14-goog
> > >
On Fri, 13 Mar 2026 at 11:26, Fuad Tabba <tabba@google.com> wrote:
>
> Hi Sebastian,
>
> On Tue, 10 Mar 2026 at 12:49, Sebastian Ene <sebastianene@google.com> wrote:
> >
> > Expose two helper functions to support emulated ITS in the hypervisor.
> > These allow the KVM layer to notify the driver when hypervisor
> > initialization is complete.
> > The caller is expected to use the functions as follows:
> > 1. its_start_deprivilege(): Acquire the ITS locks.
> > 2. on_each_cpu(_kvm_host_prot_finalize, ...): Finalizes pKVM init
> > 3. its_end_deprivilege(): Shadow the ITS structures, invoke the KVM
> > callback, and release locks.
> > Specifically, this shadows the ITS command queue and the 1st level
> > indirect tables. These shadow buffers will be used by the driver after
> > host deprivilege, while the hypervisor unmaps and takes ownership of the
> > original structures.
>
> Just a note again on preferring not to use the "shadow" terminology. I
> thought about it a bit more, since these are not at the host, perhaps
> "proxy" is a better term, to convey that the host is writing to a
> middle-man buffer.
I meant to say that, since these are not at the "hypervisor" — or rather, since
they *are* allocated in the host.....
>
> Another term is "staging," which is common in DMA: the host "stages"
> the commands here, and EL2 "commits" them to the hardware.
>
> >
> > Signed-off-by: Sebastian Ene <sebastianene@google.com>
> > ---
> > drivers/irqchip/irq-gic-v3-its.c | 165 +++++++++++++++++++++++++++--
> > include/linux/irqchip/arm-gic-v3.h | 24 +++++
> > 2 files changed, 178 insertions(+), 11 deletions(-)
> >
> > diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
> > index 291d7668cc8d..278dbc56f962 100644
> > --- a/drivers/irqchip/irq-gic-v3-its.c
> > +++ b/drivers/irqchip/irq-gic-v3-its.c
> > @@ -78,17 +78,6 @@ struct its_collection {
> > u16 col_id;
> > };
> >
> > -/*
> > - * The ITS_BASER structure - contains memory information, cached
> > - * value of BASER register configuration and ITS page size.
> > - */
> > -struct its_baser {
> > - void *base;
> > - u64 val;
> > - u32 order;
> > - u32 psz;
> > -};
> > -
> > struct its_device;
> >
> > /*
> > @@ -5232,6 +5221,160 @@ static int __init its_compute_its_list_map(struct its_node *its)
> > return its_number;
> > }
> >
> > +static void its_free_shadow_tables(struct its_shadow_tables *shadow)
> > +{
> > + int i;
> > +
> > + if (shadow->cmd_shadow)
> > + its_free_pages(shadow->cmd_shadow, get_order(ITS_CMD_QUEUE_SZ));
> > +
> > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > + if (!shadow->tables[i].shadow)
> > + continue;
> > +
> > + its_free_pages(shadow->tables[i].shadow, 0);
> > + }
> > +
> > + its_free_pages(shadow, 0);
> > +}
> > +
> > +static struct its_shadow_tables *its_get_shadow_tables(struct its_node *its)
> > +{
> > + void *page;
> > + struct its_shadow_tables *shadow;
> > + int i;
>
> Prefer RCT declarations.
>
> > +
> > + page = its_alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, 0);
>
> This is called with the raw_spin_lock_irqsave held, and GFP_KERNEL can
> sleep. You have one of two options, either use GFP_ATOMIC, but that's
> more likely to fail. The alternative is to move this to
> its_start_deprivilege(), before any lock is held.
>
> > + if (!page)
> > + return NULL;
> > +
> > + shadow = (void *)page_address(page);
> > + page = its_alloc_pages_node(its->numa_node,
> > + GFP_KERNEL | __GFP_ZERO,
> > + get_order(ITS_CMD_QUEUE_SZ));
> > + if (!page)
> > + goto err_alloc_shadow;
> > +
> > + shadow->cmd_shadow = page_address(page);
> > + shadow->cmdq_len = ITS_CMD_QUEUE_SZ;
> > + shadow->cmd_original = its->cmd_base;
> > +
> > + memcpy(shadow->tables, its->tables, sizeof(struct its_baser) * GITS_BASER_NR_REGS);
> > +
> > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > + if (!(shadow->tables[i].val & GITS_BASER_VALID))
> > + continue;
> > +
> > + if (!(shadow->tables[i].val & GITS_BASER_INDIRECT))
> > + continue;
> > +
> > + page = its_alloc_pages_node(its->numa_node,
> > + GFP_KERNEL | __GFP_ZERO,
> > + shadow->tables[i].order);
> > + if (!page)
> > + goto err_alloc_shadow;
> > +
> > + shadow->tables[i].shadow = page_address(page);
> > +
> > + memcpy(shadow->tables[i].shadow, shadow->tables[i].base,
> > + PAGE_ORDER_TO_SIZE(shadow->tables[i].order));
> > + }
> > +
> > + return shadow;
> > +
> > +err_alloc_shadow:
> > + its_free_shadow_tables(shadow);
> > + return NULL;
> > +}
> > +
> > +void *its_start_depriviledge(void)
>
> Typo here and elsewhere in this patch:
>
> s/depriviledge/deprivilege/g
>
> This is particularly important because it also appears in exported
> symbols (later in this patch).
>
> > +{
> > + struct its_node *its;
> > + int num_nodes = 0, i = 0;
> > + unsigned long *flags;
>
> RCT declaration order, and please untangle them, i.e., don't declare
> the num_nodes and the iterator in the same line.
>
> > +
> > + raw_spin_lock(&its_lock);
> > + list_for_each_entry(its, &its_nodes, entry) {
> > + num_nodes++;
> > + }
> > +
> > + flags = kzalloc(num_nodes * sizeof(unsigned long), GFP_KERNEL_ACCOUNT);
>
> Same as the other allocation. This can sleep. I think that for this as
> well, it's better to move it before lock acquisition. Even if you use
> a different allocator, it's still better to keep the critical section
> short.
>
> > + if (!flags) {
> > + raw_spin_unlock(&its_lock);
> > + return NULL;
> > + }
> > +
> > + list_for_each_entry(its, &its_nodes, entry) {
> > + raw_spin_lock_irqsave(&its->lock, flags[i++]);
> > + }
> > +
> > + return flags;
> > +}
> > +EXPORT_SYMBOL_GPL(its_start_depriviledge);
> > +
> > +static int its_switch_to_shadow_locked(struct its_node *its, its_init_emulate init_emulate_cb)
> > +{
> > + struct its_shadow_tables *hyp_shadow, shadow;
> > + int i, ret;
> > + u64 baser, baser_phys;
> > +
> > + hyp_shadow = its_get_shadow_tables(its);
> > + if (!hyp_shadow)
> > + return -ENOMEM;
> > +
> > + memcpy(&shadow, hyp_shadow, sizeof(shadow));
> > + ret = init_emulate_cb(its->phys_base, hyp_shadow);
>
> You are performing this callback with the lock held and local
> interrupts disabled. The hvc call is byitself expensive, especially
> since it's going to do stage-2 manipulations.
>
> You should decouple the synchronous pointer swapping (which must be
> locked) from the hypervisor notification (which can be done outside
> the lock). Instead of executing the callback inside the critical
> section, its_end_deprivilege should:
> - Lock everything.
> - Perform the pointer swaps in the host driver structures.
> - Save the hyp_shadow pointers to a temporary array.
> - Unlock everything.
> - Loop through the temporary array and call the KVM cb to notify EL2.
>
> You should probably split this patch into two. The first patch would
> implement the freeze/unfreeze locking mechanism, and the second would
> swap the driver's internal memory pointers to the shadow structures,
> and invoke the KVM callback to lock down the real hardware.
>
> Cheers,
> /fuad
>
> > + if (ret) {
> > + its_free_shadow_tables(hyp_shadow);
> > + return ret;
> > + }
> > +
> > + /* Switch the driver command queue to use the shadow and save the original */
> > + its->cmd_write = (its->cmd_write - its->cmd_base) +
> > + (struct its_cmd_block *)shadow.cmd_shadow;
> > + its->cmd_base = shadow.cmd_shadow;
> > +
> > + /* Shadow the first level of the indirect tables */
> > + for (i = 0; i < GITS_BASER_NR_REGS; i++) {
> > + baser = shadow.tables[i].val;
> > +
> > + if (!shadow.tables[i].shadow)
> > + continue;
> > +
> > + baser_phys = virt_to_phys(shadow.tables[i].shadow);
> > + if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48))
> > + baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
> > +
> > + its->tables[i].val &= ~GENMASK(47, 12);
> > + its->tables[i].val |= baser_phys;
> > + its->tables[i].base = shadow.tables[i].shadow;
> > + }
> > +
> > + return 0;
> > +}
> > +
> > +int its_end_depriviledge(int ret_pkvm_finalize, unsigned long *flags, its_init_emulate cb)
> > +{
> > + struct its_node *its;
> > + int i = 0, ret = 0;
> > +
> > + if (!flags || !cb)
> > + return -EINVAL;
> > +
> > + list_for_each_entry(its, &its_nodes, entry) {
> > + if (!ret_pkvm_finalize && !ret)
> > + ret = its_switch_to_shadow_locked(its, cb);
> > +
> > + raw_spin_unlock_irqrestore(&its->lock, flags[i++]);
> > + }
> > +
> > + kfree(flags);
> > + raw_spin_unlock(&its_lock);
> > +
> > + return ret;
> > +}
> > +EXPORT_SYMBOL_GPL(its_end_depriviledge);
> > +
> > static int __init its_probe_one(struct its_node *its)
> > {
> > u64 baser, tmp;
> > diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
> > index 0225121f3013..40457a4375d4 100644
> > --- a/include/linux/irqchip/arm-gic-v3.h
> > +++ b/include/linux/irqchip/arm-gic-v3.h
> > @@ -657,6 +657,30 @@ static inline bool gic_enable_sre(void)
> > return !!(val & ICC_SRE_EL1_SRE);
> > }
> >
> > +/*
> > + * The ITS_BASER structure - contains memory information, cached
> > + * value of BASER register configuration and ITS page size.
> > + */
> > +struct its_baser {
> > + void *base;
> > + void *shadow;
> > + u64 val;
> > + u32 order;
> > + u32 psz;
> > +};
> > +
> > +struct its_shadow_tables {
> > + struct its_baser tables[GITS_BASER_NR_REGS];
> > + void *cmd_shadow;
> > + void *cmd_original;
> > + size_t cmdq_len;
> > +};
> > +
> > +typedef int (*its_init_emulate)(phys_addr_t its_phys_base, struct its_shadow_tables *shadow);
> > +
> > +void *its_start_depriviledge(void);
> > +int its_end_depriviledge(int ret, unsigned long *flags, its_init_emulate cb);
> > +
> > #endif
> >
> > #endif
> > --
> > 2.53.0.473.g4a7958ca14-goog
> >
© 2016 - 2026 Red Hat, Inc.