[PATCH v9 4/4] irqchip/irq-loongarch-ir:Add Redirect irqchip support
Posted by Tianyang Zhang 1 week, 4 days ago
The main function of the redirected interrupt controller is to manage
the redirected-interrupt table, which consists of many redirected entries.
When an MSI interrupt is requested, the driver creates a corresponding
redirected entry that describes the target CPU, the vector number and the
operating mode of the interrupt. The redirected interrupt module has an
independent cache, and during interrupt routing it preferentially uses
redirected entries that hit this cache. The driver invalidates the cached
copies of individual entries via a command queue.

Co-developed-by: Liupu Wang <wangliupu@loongson.cn>
Signed-off-by: Liupu Wang <wangliupu@loongson.cn>
Signed-off-by: Tianyang Zhang <zhangtianyang@loongson.cn>
---
 drivers/irqchip/Makefile             |   2 +-
 drivers/irqchip/irq-loongarch-avec.c |   6 +-
 drivers/irqchip/irq-loongarch-ir.c   | 534 +++++++++++++++++++++++++++
 3 files changed, 540 insertions(+), 2 deletions(-)
 create mode 100644 drivers/irqchip/irq-loongarch-ir.c

diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 6a229443efe0..2fc15a8e6ada 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -118,7 +118,7 @@ obj-$(CONFIG_LS1X_IRQ)			+= irq-ls1x.o
 obj-$(CONFIG_TI_SCI_INTR_IRQCHIP)	+= irq-ti-sci-intr.o
 obj-$(CONFIG_TI_SCI_INTA_IRQCHIP)	+= irq-ti-sci-inta.o
 obj-$(CONFIG_TI_PRUSS_INTC)		+= irq-pruss-intc.o
-obj-$(CONFIG_IRQ_LOONGARCH_CPU)		+= irq-loongarch-cpu.o irq-loongarch-avec.o
+obj-$(CONFIG_IRQ_LOONGARCH_CPU)		+= irq-loongarch-cpu.o irq-loongarch-avec.o irq-loongarch-ir.o
 obj-$(CONFIG_LOONGSON_LIOINTC)		+= irq-loongson-liointc.o
 obj-$(CONFIG_LOONGSON_EIOINTC)		+= irq-loongson-eiointc.o
 obj-$(CONFIG_LOONGSON_HTPIC)		+= irq-loongson-htpic.o
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
index 7bdf3f678db4..972203182079 100644
--- a/drivers/irqchip/irq-loongarch-avec.c
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -111,7 +111,8 @@ static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *de
 		adata->cpu = cpu;
 		adata->vec = vector;
 		per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
-		avecintc_sync(adata);
+		if (!cpu_has_redirectint)
+			avecintc_sync(adata);
 	}
 
 	irq_data_update_effective_affinity(data, cpumask_of(cpu));
@@ -403,6 +404,9 @@ static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
 
 static inline int __init acpi_cascade_irqdomain_init(void)
 {
+	if (cpu_has_redirectint)
+		return redirect_acpi_init(loongarch_avec.domain);
+
 	return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
 }
 
diff --git a/drivers/irqchip/irq-loongarch-ir.c b/drivers/irqchip/irq-loongarch-ir.c
new file mode 100644
index 000000000000..0ee7c37487f7
--- /dev/null
+++ b/drivers/irqchip/irq-loongarch-ir.c
@@ -0,0 +1,534 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Loongson Technologies, Inc.
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-msi-lib.h>
+#include <linux/irqdomain.h>
+#include <linux/spinlock.h>
+#include <linux/msi.h>
+
+#include <asm/irq.h>
+#include <asm/loongarch.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/setup.h>
+#include <larchintrin.h>
+
+#include "irq-loongson.h"
+
+#define LOONGARCH_IOCSR_REDIRECT_CFG           0x15e0
+#define LOONGARCH_IOCSR_REDIRECT_TBR           0x15e8  /* IRT BASE REG*/
+#define LOONGARCH_IOCSR_REDIRECT_CQB           0x15f0  /* IRT CACHE QUEUE BASE */
+#define LOONGARCH_IOCSR_REDIRECT_CQH           0x15f8  /* IRT CACHE QUEUE HEAD, 32bit */
+#define LOONGARCH_IOCSR_REDIRECT_CQT           0x15fc  /* IRT CACHE QUEUE TAIL, 32bit */
+
+#define GPID_ADDR_MASK			GENMASK(47, 6)
+#define GPID_ADDR_SHIFT 		6
+
+#define CQB_SIZE_MASK			0xf
+#define CQB_ADDR_MASK			GENMASK(47, 12)
+
+#define CFG_DISABLE_IDLE		2
+#define INVALID_INDEX			0
+
+#define MAX_IR_ENGINES			16
+
+struct redirect_entry {
+	struct  {
+		u64	valid	: 1,
+			res1	: 5,
+			gpid	: 42,
+			res2	: 8,
+			vector	: 8;
+	}	lo;
+	u64	hi;
+};
+#define IRD_ENTRY_SIZE			sizeof(struct redirect_entry)
+#define IRD_ENTRIES			65536
+#define IRD_TABLE_PAGE_ORDER		get_order(IRD_ENTRIES * IRD_ENTRY_SIZE)
+
+struct redirect_gpid {
+	u64	pir[4];      // Pending interrupt requested
+	u8	en	: 1, // doorbell
+		res0	: 7;
+	u8	irqnum;
+	u16	res1;
+	u32	dst;
+	u32	rsvd[6];
+};
+
+struct irde_inv_cmd {
+	union {
+		u64	cmd_info;
+		struct {
+			u64	res1		: 4,
+				type		: 1,
+				need_notice	: 1,
+				pad		: 2,
+				index		: 16,
+				pad2		: 40;
+		}	index;
+	};
+	u64		notice_addr;
+};
+#define IRD_CMD_SIZE			sizeof(struct irde_inv_cmd)
+#define INVALID_QUEUE_SIZE		4096
+#define INV_QUEUE_PAGE_ORDER		get_order(INVALID_QUEUE_SIZE * IRD_CMD_SIZE)
+
+struct redirect_table {
+	struct redirect_entry	*table;
+	unsigned long		*bitmap;
+	raw_spinlock_t		lock;
+};
+
+struct redirect_queue {
+	struct irde_inv_cmd	*cmd_base;
+	int			head;
+	int			tail;
+	raw_spinlock_t		lock;
+};
+
+struct irde_desc {
+	struct	redirect_table	ird_table;
+	struct	redirect_queue	inv_queue;
+	int			node;
+};
+
+struct redirect_item {
+	int			index;
+	struct irde_desc	*irde;
+	struct redirect_gpid	*gpid;
+};
+
+
+static struct irq_domain *redirect_domain;
+static struct irde_desc irde_descs[MAX_IR_ENGINES];
+static phys_addr_t msi_base_addr;
+static phys_addr_t redirect_reg_base = LOONGSON_REG_BASE;
+
+#define REDIRECT_REG(reg, node) \
+	((void __iomem *)(IO_BASE | redirect_reg_base | (u64)(node) << NODE_ADDRSPACE_SHIFT | (reg)))
+
+static inline void redirect_write_reg64(u32 node, u64 val, u32 reg)
+{
+	void __iomem *reg_addr = REDIRECT_REG(reg, node);
+
+	return writeq(val, reg_addr);
+}
+
+static inline void redirect_write_reg32(u32 node, u32 val, u32 reg)
+{
+	void __iomem *reg_addr = REDIRECT_REG(reg, node);
+
+	return writel(val, reg_addr);
+}
+
+static inline u32 redirect_read_reg32(u32 node, u32 reg)
+{
+	void __iomem *reg_addr = REDIRECT_REG(reg, node);
+
+	return readl(reg_addr);
+}
+
+static inline struct redirect_entry *item_get_entry(struct redirect_item *item)
+{
+	return item->irde->ird_table.table + item->index;
+}
+
+static inline bool invalid_queue_is_full(int node, u32 *tail)
+{
+	u32 head = redirect_read_reg32(node, LOONGARCH_IOCSR_REDIRECT_CQH);
+
+	*tail = redirect_read_reg32(node, LOONGARCH_IOCSR_REDIRECT_CQT);
+
+	return head == ((*tail + 1) % INVALID_QUEUE_SIZE);
+}
+
+static void invalid_enqueue(struct redirect_item *item, struct irde_inv_cmd *cmd)
+{
+	struct redirect_queue *inv_queue = &item->irde->inv_queue;
+	u32 tail;
+
+	guard(raw_spinlock_irqsave)(&inv_queue->lock);
+
+	while (invalid_queue_is_full(item->irde->node, &tail))
+		cpu_relax();
+
+	memcpy(&inv_queue->cmd_base[tail], cmd, sizeof(*cmd));
+
+	redirect_write_reg32(item->irde->node, (tail + 1) % INVALID_QUEUE_SIZE,
+			     LOONGARCH_IOCSR_REDIRECT_CQT);
+}
+
+static void irde_invalid_entry(struct redirect_item *item)
+{
+	struct irde_inv_cmd cmd;
+	u64 raddr = 0;
+
+	cmd.cmd_info = 0;
+	cmd.index.type = INVALID_INDEX;
+	cmd.index.need_notice = 1;
+	cmd.index.index = item->index;
+	cmd.notice_addr = (u64)(__pa(&raddr));
+
+	invalid_enqueue(item, &cmd);
+
+	/*
+	 * CPU needs to wait here for cmd to complete, and it determines this
+	 * by checking whether invalid queue has already written a valid value
+	 * to cmd.notice_addr.
+	 */
+	while (!raddr)
+		cpu_relax();
+
+}
+
+static inline struct avecintc_data *irq_data_get_avec_data(struct irq_data *data)
+{
+	return data->parent_data->chip_data;
+}
+
+static int redirect_table_alloc(int node, u32 nr_irqs)
+{
+	struct redirect_table *ird_table = &irde_descs[node].ird_table;
+	unsigned int index, order;
+
+	if (nr_irqs > 1) {
+		nr_irqs = __roundup_pow_of_two(nr_irqs);
+		order = ilog2(nr_irqs);
+	}
+
+	guard(raw_spinlock_irqsave)(&ird_table->lock);
+
+	index = bitmap_find_free_region(ird_table->bitmap,
+					IRD_ENTRIES, order);
+	if (index < 0) {
+		pr_err("No redirect entry to use\n");
+		return -ENOMEM;
+	}
+
+	return index;
+}
+
+static void redirect_table_free(struct redirect_item *item)
+{
+	struct redirect_table *ird_table = &item->irde->ird_table;
+	struct redirect_entry *entry = item_get_entry(item);
+
+	memset(entry, 0, sizeof(*entry));
+
+	scoped_guard(raw_spinlock_irq, &ird_table->lock)
+		clear_bit(item->index, ird_table->bitmap);
+
+	kfree(item->gpid);
+
+	irde_invalid_entry(item);
+}
+
+static inline void redirect_domain_prepare_entry(struct redirect_item *item,
+						 struct avecintc_data *adata)
+{
+	struct redirect_entry *entry = item_get_entry(item);
+
+	item->gpid->en = 1;
+	item->gpid->irqnum = adata->vec;
+	item->gpid->dst = adata->cpu;
+
+	entry->lo.valid = 1;
+	entry->lo.gpid = ((u64)item->gpid & GPID_ADDR_MASK) >> GPID_ADDR_SHIFT;
+	entry->lo.vector = 0xff;
+}
+
+static int redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+	struct redirect_item *item = data->chip_data;
+	int ret;
+
+	ret = irq_chip_set_affinity_parent(data, dest, force);
+	if (ret == IRQ_SET_MASK_OK_DONE) {
+		return ret;
+	} else if (ret) {
+		pr_err("IRDE:set_affinity error %d\n", ret);
+		return ret;
+	}
+	struct avecintc_data *adata = irq_data_get_avec_data(data);
+
+	redirect_domain_prepare_entry(item, adata);
+	irde_invalid_entry(item);
+	avecintc_sync(adata);
+
+	return IRQ_SET_MASK_OK;
+}
+
+static void redirect_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
+{
+	struct redirect_item *item = irq_data_get_irq_chip_data(d);
+
+	msg->address_lo = (msi_base_addr | 1 << 2) ;
+	msg->address_hi = 0x0;
+	msg->data = item->index;
+}
+
+static struct irq_chip loongarch_redirect_chip = {
+	.name			= "REDIRECT",
+	.irq_ack		= irq_chip_ack_parent,
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_set_affinity	= redirect_set_affinity,
+	.irq_compose_msi_msg	= redirect_compose_msi_msg,
+};
+
+static void redirect_free_resources(struct irq_domain *domain, unsigned int virq,
+				    unsigned int nr_irqs)
+{
+	for (int i = 0; i < nr_irqs; i++) {
+		struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq + i);
+
+		if (irq_data && irq_data->chip_data) {
+			struct redirect_item *item = irq_data->chip_data;
+
+			redirect_table_free(item);
+			kfree(item);
+		}
+	}
+}
+
+static int redirect_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				 unsigned int nr_irqs, void *arg)
+{
+	msi_alloc_info_t *info = arg;
+	int ret, i, node, index;
+
+	node = dev_to_node(info->desc->dev);
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+	if (ret < 0)
+		return ret;
+
+	index = redirect_table_alloc(node, nr_irqs);
+	if (index < 0) {
+		pr_err("Alloc redirect table entry failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nr_irqs; i++) {
+		struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq + i);
+		struct redirect_item *item;
+
+		item = kzalloc(sizeof(*item), GFP_KERNEL);
+		if (!item) {
+			pr_err("Alloc redirect descriptor failed\n");
+			goto out_free_resources;
+		}
+		item->irde = &irde_descs[node];
+
+		/*
+		 * Only bits 47:6 of the GPID are passed to the controller,
+		 * 64-byte alignment must be guarantee and make kzalloc can
+		 * align to the respective size.
+		 */
+		static_assert(sizeof(*item->gpid) == 64);
+		item->gpid = kzalloc_node(sizeof(*item->gpid), GFP_KERNEL, node);
+		if (!item->gpid) {
+			pr_err("Alloc redirect GPID failed\n");
+			goto out_free_resources;
+		}
+		item->index = index + i;
+
+		irq_data->chip_data = item;
+		irq_data->chip = &loongarch_redirect_chip;
+
+		redirect_domain_prepare_entry(item, irq_data_get_avec_data(irq_data));
+	}
+	return 0;
+
+out_free_resources:
+	redirect_free_resources(domain, virq, nr_irqs);
+	irq_domain_free_irqs_common(domain, virq, nr_irqs);
+
+	return -ENOMEM;
+}
+
+static void redirect_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
+{
+	redirect_free_resources(domain, virq, nr_irqs);
+	return irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+static const struct irq_domain_ops redirect_domain_ops = {
+	.alloc		= redirect_domain_alloc,
+	.free		= redirect_domain_free,
+	.select		= msi_lib_irq_domain_select,
+};
+
+static int redirect_queue_init(struct irde_desc *irde)
+{
+	struct redirect_queue *inv_queue = &irde->inv_queue;
+	struct folio *folio;
+
+	folio = __folio_alloc_node(GFP_KERNEL | __GFP_ZERO, INV_QUEUE_PAGE_ORDER, irde->node);
+	if (!folio) {
+		pr_err("Node [%d] invalid queue alloc pages failed!\n", irde->node);
+		return -ENOMEM;
+	}
+
+	inv_queue->cmd_base = folio_address(folio);
+	inv_queue->head = 0;
+	inv_queue->tail = 0;
+	raw_spin_lock_init(&inv_queue->lock);
+
+	return 0;
+}
+
+static int redirect_table_init(struct irde_desc *irde)
+{
+	struct redirect_table *ird_table = &irde->ird_table;
+	unsigned long *bitmap;
+	struct folio *folio;
+	int node = irde->node;
+
+	folio = __folio_alloc_node(GFP_KERNEL | __GFP_ZERO, IRD_TABLE_PAGE_ORDER, node);
+	if (!folio) {
+		pr_err("Node [%d] redirect table alloc pages failed!\n", node);
+		return -ENOMEM;
+	}
+	ird_table->table = folio_address(folio);
+
+	bitmap = bitmap_zalloc(IRD_ENTRIES, GFP_KERNEL);
+	if (!bitmap) {
+		pr_err("Node [%d] redirect table bitmap alloc pages failed!\n", node);
+		folio_put(folio);
+		ird_table->table = NULL;
+		return -ENOMEM;
+	}
+
+	ird_table->bitmap = bitmap;
+
+	raw_spin_lock_init(&ird_table->lock);
+
+	return 0;
+}
+
+static void redirect_irde_cfg(struct irde_desc *irde)
+{
+	redirect_write_reg64(irde->node, CFG_DISABLE_IDLE, LOONGARCH_IOCSR_REDIRECT_CFG);
+	redirect_write_reg64(irde->node, __pa(irde->ird_table.table), LOONGARCH_IOCSR_REDIRECT_TBR);
+	redirect_write_reg32(irde->node, 0, LOONGARCH_IOCSR_REDIRECT_CQH);
+	redirect_write_reg32(irde->node, 0, LOONGARCH_IOCSR_REDIRECT_CQT);
+	redirect_write_reg64(irde->node, ((u64)irde->inv_queue.cmd_base & CQB_ADDR_MASK) |
+			     CQB_SIZE_MASK, LOONGARCH_IOCSR_REDIRECT_CQB);
+}
+
+static void __redirect_irde_fini(struct irde_desc *irde)
+{
+	struct redirect_table *ird_table = &irde_descs->ird_table;
+	struct redirect_queue *inv_queue = &irde_descs->inv_queue;
+
+	if (ird_table->table) {
+		folio_put(virt_to_folio(ird_table->table));
+		ird_table->table = NULL;
+	}
+
+	if (ird_table->bitmap) {
+		bitmap_free(ird_table->bitmap);
+		ird_table->bitmap = NULL;
+	}
+
+	if (inv_queue->cmd_base) {
+		folio_put(virt_to_folio(inv_queue->cmd_base));
+		inv_queue->cmd_base = NULL;
+	}
+}
+
+static inline void redirect_irde_fini(int node)
+{
+	__redirect_irde_fini(&irde_descs[node]);
+}
+
+static int redirect_irde_init(int node)
+{
+	struct irde_desc *irde = &irde_descs[node];
+	int ret;
+
+	irde->node = node;
+
+	ret = redirect_table_init(irde);
+	if (ret)
+		return ret;
+
+	ret = redirect_queue_init(irde);
+	if (ret) {
+		__redirect_irde_fini(irde);
+		return ret;
+	}
+
+	redirect_irde_cfg(irde);
+
+	return 0;
+}
+
+static int __init pch_msi_parse_madt(union acpi_subtable_headers *header, const unsigned long end)
+{
+	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
+
+	msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
+
+	return pch_msi_acpi_init_avec(redirect_domain);
+}
+
+static int __init acpi_cascade_irqdomain_init(void)
+{
+	return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
+}
+
+int __init redirect_acpi_init(struct irq_domain *parent)
+{
+	struct fwnode_handle *fwnode;
+	int ret = -EINVAL, node;
+
+	fwnode = irq_domain_alloc_named_fwnode("redirect");
+	if (!fwnode) {
+		pr_err("Unable to alloc redirect domain handle\n");
+		goto fail;
+	}
+
+	redirect_domain = irq_domain_create_hierarchy(parent, 0, IRD_ENTRIES, fwnode,
+						      &redirect_domain_ops, irde_descs);
+	if (!redirect_domain) {
+		pr_err("Unable to alloc redirect domain\n");
+		goto out_free_fwnode;
+	}
+
+
+	for_each_node_mask(node, node_possible_map) {
+		ret = redirect_irde_init(node);
+		if (ret)
+			goto out_clear_irde;
+	}
+
+	ret = acpi_cascade_irqdomain_init();
+	if (ret < 0) {
+		pr_err("Failed to cascade IRQ domain, ret=%d\n", ret);
+		goto out_clear_irde;
+	}
+
+	pr_info("loongarch irq redirect modules init succeeded\n");
+	return 0;
+
+out_clear_irde:
+	for_each_node_mask(node, node_possible_map)
+		redirect_irde_fini(node);
+
+	irq_domain_remove(redirect_domain);
+out_free_fwnode:
+	irq_domain_free_fwnode(fwnode);
+fail:
+	return ret;
+}
-- 
2.41.0
Re: [PATCH v9 4/4] irqchip/irq-loongarch-ir:Add Redirect irqchip support
Posted by Thomas Gleixner 1 week, 4 days ago
On Fri, Jan 30 2026 at 10:59, Tianyang Zhang wrote:
> +// SPDX-License-Identifier: GPL-2.0

GPL-2.0-only please

> +/*
> + * Copyright (C) 2020 Loongson Technologies, Inc.

This was written 6 years ago already?

> + */
> +
> +#include <linux/cpuhotplug.h>
> +#include <linux/init.h>
> +#include <linux/interrupt.h>
> +#include <linux/kernel.h>
> +#include <linux/irq.h>
> +#include <linux/irqchip.h>
> +#include <linux/irqchip/irq-msi-lib.h>
> +#include <linux/irqdomain.h>
> +#include <linux/spinlock.h>
> +#include <linux/msi.h>

Includes should be alphabetically ordered.

> +#define REDIRECT_REG(reg, node) \
> +	((void __iomem *)(IO_BASE | redirect_reg_base | (u64)(node) << NODE_ADDRSPACE_SHIFT | (reg)))
> +
> +static inline void redirect_write_reg64(u32 node, u64 val, u32 reg)
> +{
> +	void __iomem *reg_addr = REDIRECT_REG(reg, node);
> +
> +	return writeq(val, reg_addr);

Bogus return and you can simplify this to

      writeq(val, REDIRECT_REG(reg, node));

No?

> +}
> +
> +static inline void redirect_write_reg32(u32 node, u32 val, u32 reg)
> +{
> +	void __iomem *reg_addr = REDIRECT_REG(reg, node);
> +
> +	return writel(val, reg_addr);

Ditto

> +}
> +
> +static inline u32 redirect_read_reg32(u32 node, u32 reg)
> +{
> +	void __iomem *reg_addr = REDIRECT_REG(reg, node);
> +
> +	return readl(reg_addr);

Condense to single line as well
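
For reference, all three helpers condensed that way would look roughly like
this (a sketch only, keeping the existing REDIRECT_REG() macro):

static inline void redirect_write_reg64(u32 node, u64 val, u32 reg)
{
	writeq(val, REDIRECT_REG(reg, node));
}

static inline void redirect_write_reg32(u32 node, u32 val, u32 reg)
{
	writel(val, REDIRECT_REG(reg, node));
}

static inline u32 redirect_read_reg32(u32 node, u32 reg)
{
	return readl(REDIRECT_REG(reg, node));
}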

> +static void irde_invalid_entry(struct redirect_item *item)

This should be named irde_invalidate_entry() as that's what the function
is about. irq_invalid_entry() reads more like a function which check for
an invalid entry.

> +{
> +	struct irde_inv_cmd cmd;
> +	u64 raddr = 0;
> +
> +	cmd.cmd_info = 0;
> +	cmd.index.type = INVALID_INDEX;
> +	cmd.index.need_notice = 1;
> +	cmd.index.index = item->index;
> +	cmd.notice_addr = (u64)(__pa(&raddr));
> +
> +	invalid_enqueue(item, &cmd);
> +
> +	/*
> +	 * CPU needs to wait here for cmd to complete, and it determines this

The CPU 

> +	 * by checking whether invalid queue has already written a valid value

whether the invalidation queue

> +	 * to cmd.notice_addr.
> +	 */
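
With the rename and the reworded comment applied, the function would read
roughly as follows (sketch only, the command setup stays exactly as posted):

static void irde_invalidate_entry(struct redirect_item *item)
{
	struct irde_inv_cmd cmd;
	u64 raddr = 0;

	/* ... fill in cmd exactly as in the posted version ... */

	invalid_enqueue(item, &cmd);

	/*
	 * The CPU needs to wait here for the command to complete, and it
	 * determines this by checking whether the invalidation queue has
	 * already written a valid value to cmd.notice_addr.
	 */
	while (!raddr)
		cpu_relax();
}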

> +static int redirect_table_alloc(int node, u32 nr_irqs)
> +{
> +	struct redirect_table *ird_table = &irde_descs[node].ird_table;
> +	unsigned int index, order;
> +
> +	if (nr_irqs > 1) {
> +		nr_irqs = __roundup_pow_of_two(nr_irqs);
> +		order = ilog2(nr_irqs);
> +	}
> +
> +	guard(raw_spinlock_irqsave)(&ird_table->lock);
> +
> +	index = bitmap_find_free_region(ird_table->bitmap,
> +					IRD_ENTRIES, order);

Get rid of this pointless line break. You have 100 characters and the
above even fits into 80 

> +	if (index < 0) {
> +		pr_err("No redirect entry to use\n");
> +		return -ENOMEM;
> +	}

> +static int redirect_domain_alloc(struct irq_domain *domain, unsigned int virq,
> +				 unsigned int nr_irqs, void *arg)
> +{
> +	msi_alloc_info_t *info = arg;
> +	int ret, i, node, index;
> +
> +	node = dev_to_node(info->desc->dev);
> +
> +	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
> +	if (ret < 0)
> +		return ret;
> +
> +	index = redirect_table_alloc(node, nr_irqs);
> +	if (index < 0) {
> +		pr_err("Alloc redirect table entry failed\n");
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < nr_irqs; i++) {
> +		struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq + i);
> +		struct redirect_item *item;
> +
> +		item = kzalloc(sizeof(*item), GFP_KERNEL);
> +		if (!item) {
> +			pr_err("Alloc redirect descriptor failed\n");
> +			goto out_free_resources;
> +		}
> +		item->irde = &irde_descs[node];
> +
> +		/*
> +		 * Only bits 47:6 of the GPID are passed to the controller,
> +		 * 64-byte alignment must be guarantee and make kzalloc can
> +		 * align to the respective size.

-ENOPARSE

> +		 */
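
A possible rewording of that comment, assuming the intent is simply that the
controller only sees bits 47:6 of the GPID address and the structure therefore
must be 64-byte aligned:

		/*
		 * Only bits 47:6 of the GPID address are passed to the
		 * controller, so the GPID structure must be 64-byte aligned.
		 * kzalloc() of a 64-byte object (enforced by the
		 * static_assert() above) already guarantees that alignment.
		 */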

> +static const struct irq_domain_ops redirect_domain_ops = {
> +	.alloc		= redirect_domain_alloc,
> +	.free		= redirect_domain_free,
> +	.select		= msi_lib_irq_domain_select,
> +};

> +static void __redirect_irde_fini(struct irde_desc *irde)

This schoolbook _fini() naming is just lame and nondescriptive. Please
use descriptive function names which make it clear what this is about,
e.g. redirect_free_irde() or something like that.

Also this should be __init, no?

> +{
> +	struct redirect_table *ird_table = &irde_descs->ird_table;
> +	struct redirect_queue *inv_queue = &irde_descs->inv_queue;

> +static inline void redirect_irde_fini(int node)
> +{
> +	__redirect_irde_fini(&irde_descs[node]);

This indirection is really pointless. Just move the '&irde_descs[node]' to
the only caller.
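
Folding both remarks together (and the __init question above), a possible
shape for this cleanup, sketched here; note it also dereferences the irde
argument, whereas the posted version always used the global irde_descs:

static void __init redirect_free_irde(struct irde_desc *irde)
{
	struct redirect_table *ird_table = &irde->ird_table;
	struct redirect_queue *inv_queue = &irde->inv_queue;

	if (ird_table->table) {
		folio_put(virt_to_folio(ird_table->table));
		ird_table->table = NULL;
	}

	if (ird_table->bitmap) {
		bitmap_free(ird_table->bitmap);
		ird_table->bitmap = NULL;
	}

	if (inv_queue->cmd_base) {
		folio_put(virt_to_folio(inv_queue->cmd_base));
		inv_queue->cmd_base = NULL;
	}
}

/* and in redirect_acpi_init()'s error path, called directly: */
out_clear_irde:
	for_each_node_mask(node, node_possible_map)
		redirect_free_irde(&irde_descs[node]);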

> +int __init redirect_acpi_init(struct irq_domain *parent)
> +{
> +	struct fwnode_handle *fwnode;
> +	int ret = -EINVAL, node;
> +
> +	fwnode = irq_domain_alloc_named_fwnode("redirect");
> +	if (!fwnode) {
> +		pr_err("Unable to alloc redirect domain handle\n");
> +		goto fail;
> +	}
> +
> +	redirect_domain = irq_domain_create_hierarchy(parent, 0, IRD_ENTRIES, fwnode,
> +						      &redirect_domain_ops, irde_descs);
> +	if (!redirect_domain) {
> +		pr_err("Unable to alloc redirect domain\n");
> +		goto out_free_fwnode;
> +	}
> +
> +

stray newline

> +	for_each_node_mask(node, node_possible_map) {
> +		ret = redirect_irde_init(node);
> +		if (ret)
> +			goto out_clear_irde;
> +	}
> +
> +	ret = acpi_cascade_irqdomain_init();
> +	if (ret < 0) {
> +		pr_err("Failed to cascade IRQ domain, ret=%d\n", ret);
> +		goto out_clear_irde;
> +	}
> +
> +	pr_info("loongarch irq redirect modules init succeeded\n");

You really want to have:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

at the top of the file, so that all printk()s in this file are properly
prefixed.
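
That is, right after the SPDX/copyright comment and before any #include, so
every pr_err()/pr_info() in the file gets the object name as prefix
(presumably "irq_loongarch_ir: "):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/cpuhotplug.h>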

Thanks,

        tglx
Re: [PATCH v9 4/4] irqchip/irq-loongarch-ir:Add Redirect irqchip support
Posted by Tianyang Zhang 1 week, 1 day ago
Hi, Thomas

On 2026/1/30 at 4:54 PM, Thomas Gleixner wrote:
> On Fri, Jan 30 2026 at 10:59, Tianyang Zhang wrote:
>> +// SPDX-License-Identifier: GPL-2.0
> GPL-2.0-only please
Ok, I got it
>
>> +/*
>> + * Copyright (C) 2020 Loongson Technologies, Inc.
> This was written 6 years ago already?
This was copied from other files, and I will rework it, thanks
>
>> + */
>> +
>> +#include <linux/cpuhotplug.h>
>> +#include <linux/init.h>
>> +#include <linux/interrupt.h>
>> +#include <linux/kernel.h>
>> +#include <linux/irq.h>
>> +#include <linux/irqchip.h>
>> +#include <linux/irqchip/irq-msi-lib.h>
>> +#include <linux/irqdomain.h>
>> +#include <linux/spinlock.h>
>> +#include <linux/msi.h>
> Includes should be alphabetically ordered.
Ok, I got it
>
>> +#define REDIRECT_REG(reg, node) \
>> +	((void __iomem *)(IO_BASE | redirect_reg_base | (u64)(node) << NODE_ADDRSPACE_SHIFT | (reg)))
>> +
>> +static inline void redirect_write_reg64(u32 node, u64 val, u32 reg)
>> +{
>> +	void __iomem *reg_addr = REDIRECT_REG(reg, node);
>> +
>> +	return writeq(val, reg_addr);
> Bogus return and you can simplify this to
>
>        writeq(val, REDIRECT_REG(reg, node));
>
> No?
Ok, this is more reasonable.
>
>> +}
>> +
>> +static inline u32 redirect_read_reg32(u32 node, u32 reg)
>> +{
>> +	void __iomem *reg_addr = REDIRECT_REG(reg, node);
>> +
>> +	return readl(reg_addr);
> Condense to single line as well
Ok, I got it
>
>> +static void irde_invalid_entry(struct redirect_item *item)
> This should be named irde_invalidate_entry() as that's what the function
> is about. irq_invalid_entry() reads more like a function which check for
> an invalid entry.
Ok, I got it
>
>> +{
>> +	struct irde_inv_cmd cmd;
>> +	u64 raddr = 0;
>> +
>> +	cmd.cmd_info = 0;
>> +	cmd.index.type = INVALID_INDEX;
>> +	cmd.index.need_notice = 1;
>> +	cmd.index.index = item->index;
>> +	cmd.notice_addr = (u64)(__pa(&raddr));
>> +
>> +	invalid_enqueue(item, &cmd);
>> +
>> +	/*
>> +	 * CPU needs to wait here for cmd to complete, and it determines this
> The CPU
Ok, I got it, thanks
>
>> +	 * by checking whether invalid queue has already written a valid value
> whether the invalidation queue
Ok, I got it, thanks
>
>> +static int redirect_table_alloc(int node, u32 nr_irqs)
>> +{
>> +	struct redirect_table *ird_table = &irde_descs[node].ird_table;
>> +	unsigned int index, order;
>> +
>> +	if (nr_irqs > 1) {
>> +		nr_irqs = __roundup_pow_of_two(nr_irqs);
>> +		order = ilog2(nr_irqs);
>> +	}
>> +
>> +	guard(raw_spinlock_irqsave)(&ird_table->lock);
>> +
>> +	index = bitmap_find_free_region(ird_table->bitmap,
>> +					IRD_ENTRIES, order);
> Get rid of this pointless line break. You have 100 characters and the
> above even fits into 80
Ok, I got it
>
>> +	if (index < 0) {
>> +		pr_err("No redirect entry to use\n");
>> +		return -ENOMEM;
>> +	}
>> +static int redirect_domain_alloc(struct irq_domain *domain, unsigned int virq,
>> +				 unsigned int nr_irqs, void *arg)
>> +{
>> +	msi_alloc_info_t *info = arg;
>> +	int ret, i, node, index;
>> +
>> +	node = dev_to_node(info->desc->dev);
>> +
>> +	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	index = redirect_table_alloc(node, nr_irqs);
>> +	if (index < 0) {
>> +		pr_err("Alloc redirect table entry failed\n");
>> +		return -ENOMEM;
>> +	}
>> +
>> +	for (i = 0; i < nr_irqs; i++) {
>> +		struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq + i);
>> +		struct redirect_item *item;
>> +
>> +		item = kzalloc(sizeof(*item), GFP_KERNEL);
>> +		if (!item) {
>> +			pr_err("Alloc redirect descriptor failed\n");
>> +			goto out_free_resources;
>> +		}
>> +		item->irde = &irde_descs[node];
>> +
>> +		/*
>> +		 * Only bits 47:6 of the GPID are passed to the controller,
>> +		 * 64-byte alignment must be guarantee and make kzalloc can
>> +		 * align to the respective size.
> -ENOPARSE
Ok, I got it
>> +		 */
>> +static const struct irq_domain_ops redirect_domain_ops = {
>> +	.alloc		= redirect_domain_alloc,
>> +	.free		= redirect_domain_free,
>> +	.select		= msi_lib_irq_domain_select,
>> +};
>> +static void __redirect_irde_fini(struct irde_desc *irde)
> This schoolbook _fini() naming is just lame and nondescriptive. Please
> use descriptive function names which make it clear what this is about,
> e.g. redirect_free_irde() or something like that.
>
> Also this should be __init, no?
Yes, this naming style is indeed not very appropriate; I will adjust the
function name
>
>> +{
>> +	struct redirect_table *ird_table = &irde_descs->ird_table;
>> +	struct redirect_queue *inv_queue = &irde_descs->inv_queue;
>> +static inline void redirect_irde_fini(int node)
>> +{
>> +	__redirect_irde_fini(&irde_descs[node]);
> This indirection is really pointless. Just move the '&irde_descs[node]' to
> the only caller.
Ok, I got it
>
>> +int __init redirect_acpi_init(struct irq_domain *parent)
>> +{
>> +	struct fwnode_handle *fwnode;
>> +	int ret = -EINVAL, node;
>> +
>> +	fwnode = irq_domain_alloc_named_fwnode("redirect");
>> +	if (!fwnode) {
>> +		pr_err("Unable to alloc redirect domain handle\n");
>> +		goto fail;
>> +	}
>> +
>> +	redirect_domain = irq_domain_create_hierarchy(parent, 0, IRD_ENTRIES, fwnode,
>> +						      &redirect_domain_ops, irde_descs);
>> +	if (!redirect_domain) {
>> +		pr_err("Unable to alloc redirect domain\n");
>> +		goto out_free_fwnode;
>> +	}
>> +
>> +
> stray newline
Ok, I got it
>
>> +	for_each_node_mask(node, node_possible_map) {
>> +		ret = redirect_irde_init(node);
>> +		if (ret)
>> +			goto out_clear_irde;
>> +	}
>> +
>> +	ret = acpi_cascade_irqdomain_init();
>> +	if (ret < 0) {
>> +		pr_err("Failed to cascade IRQ domain, ret=%d\n", ret);
>> +		goto out_clear_irde;
>> +	}
>> +
>> +	pr_info("loongarch irq redirect modules init succeeded\n");
> You really want to have:
>
> #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
>
> at the top of the file, so that all printk()s in this file are properly
> prefixed.
Ok, I got it, thanks
>
> Thanks,
>
>          tglx

Tianyang

Re: [PATCH v9 4/4] irqchip/irq-loongarch-ir:Add Redirect irqchip support
Posted by kernel test robot 1 week, 4 days ago
Hi Tianyang,

kernel test robot noticed the following build warnings:

[auto build test WARNING on tip/irq/core]
[also build test WARNING on linus/master v6.19-rc7 next-20260129]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Tianyang-Zhang/Docs-LoongArch-Add-Advanced-Extended-Redirect-IRQ-model-description/20260130-110249
base:   tip/irq/core
patch link:    https://lore.kernel.org/r/20260130025941.2140582-5-zhangtianyang%40loongson.cn
patch subject: [PATCH v9 4/4] irqchip/irq-loongarch-ir:Add Redirect irqchip support
config: loongarch-allnoconfig (https://download.01.org/0day-ci/archive/20260130/202601301601.EonMu52D-lkp@intel.com/config)
compiler: clang version 22.0.0git (https://github.com/llvm/llvm-project 9b8addffa70cee5b2acc5454712d9cf78ce45710)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260130/202601301601.EonMu52D-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202601301601.EonMu52D-lkp@intel.com/

All warnings (new ones prefixed by >>):

>> drivers/irqchip/irq-loongarch-ir.c:203:6: warning: variable 'order' is used uninitialized whenever 'if' condition is false [-Wsometimes-uninitialized]
     203 |         if (nr_irqs > 1) {
         |             ^~~~~~~~~~~
   drivers/irqchip/irq-loongarch-ir.c:211:19: note: uninitialized use occurs here
     211 |                                         IRD_ENTRIES, order);
         |                                                      ^~~~~
   drivers/irqchip/irq-loongarch-ir.c:203:2: note: remove the 'if' if its condition is always true
     203 |         if (nr_irqs > 1) {
         |         ^~~~~~~~~~~~~~~~
   drivers/irqchip/irq-loongarch-ir.c:201:27: note: initialize the variable 'order' to silence this warning
     201 |         unsigned int index, order;
         |                                  ^
         |                                   = 0
   1 warning generated.


vim +203 drivers/irqchip/irq-loongarch-ir.c

   197	
   198	static int redirect_table_alloc(int node, u32 nr_irqs)
   199	{
   200		struct redirect_table *ird_table = &irde_descs[node].ird_table;
   201		unsigned int index, order;
   202	
 > 203		if (nr_irqs > 1) {
   204			nr_irqs = __roundup_pow_of_two(nr_irqs);
   205			order = ilog2(nr_irqs);
   206		}
   207	
   208		guard(raw_spinlock_irqsave)(&ird_table->lock);
   209	
   210		index = bitmap_find_free_region(ird_table->bitmap,
   211						IRD_ENTRIES, order);
   212		if (index < 0) {
   213			pr_err("No redirect entry to use\n");
   214			return -ENOMEM;
   215		}
   216	
   217		return index;
   218	}
   219	
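
A minimal way to address this, sketched here rather than taken from a posted
fix, is to give order a defined value for the nr_irqs == 1 case; making index
a plain int at the same time also lets the existing 'index < 0' check actually
fire, since bitmap_find_free_region() returns a negative value on failure:

static int redirect_table_alloc(int node, u32 nr_irqs)
{
	struct redirect_table *ird_table = &irde_descs[node].ird_table;
	int index, order = 0;

	if (nr_irqs > 1)
		order = ilog2(__roundup_pow_of_two(nr_irqs));

	guard(raw_spinlock_irqsave)(&ird_table->lock);

	index = bitmap_find_free_region(ird_table->bitmap, IRD_ENTRIES, order);
	if (index < 0) {
		pr_err("No redirect entry to use\n");
		return -ENOMEM;
	}

	return index;
}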

-- 
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki