[GIT pull] irq/cleanups for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest irq/cleanups branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-cleanups-2026-02-09

up to:  ef92b98f5f67: media: pci: mg4b: Use IRQF_NO_THREAD


A series of treewide cleanups to ensure interrupt request consistency.

  - Add the missing IRQF_COND_ONESHOT flag to devm_request_irq()

    This is inconsistent vs. request_irq() and causes the same issues which
    were addressed with the introduction of this flag.

  - Cleanup IRQF_ONESHOT and IRQF_NO_THREAD usage

    Quite a few drivers have inconsistent interrupt request flags related to
    interrupt threading, namely IRQF_ONESHOT and IRQF_NO_THREAD. This leads to
    warnings and/or malfunction when forced interrupt threading is enabled.
    
  - Remove stub primary (hard interrupt) handlers      

    A bunch of drivers implement a stub primary (hard interrupt) handler which
    just returns IRQ_WAKE_THREAD. The same functionality is provided by the
    core code when the primary handler argument of request_threaded_irq() is
    set to NULL (a short sketch of the pattern follows below).
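
Illustrative sketch only (hypothetical driver "foo", not part of this pull):
the stub-handler pattern described above and its replacement with the core's
default primary handler:

#include <linux/interrupt.h>

static irqreturn_t foo_thread_fn(int irq, void *data)
{
	/* Handle the interrupt in task context. */
	return IRQ_HANDLED;
}

/* The pattern being removed: a primary handler which only wakes the thread. */
static irqreturn_t foo_stub_hardirq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

static int foo_request_old(int irq, void *data)
{
	return request_threaded_irq(irq, foo_stub_hardirq, foo_thread_fn,
				    0, "foo", data);
}

static int foo_request_new(int irq, void *data)
{
	/*
	 * NULL primary handler: the core wakes the thread itself. IRQF_ONESHOT
	 * keeps the line masked until the thread handler has completed, which
	 * the core generally requires when no primary handler is supplied.
	 */
	return request_threaded_irq(irq, NULL, foo_thread_fn,
				    IRQF_ONESHOT, "foo", data);
}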

Thanks,

	tglx

------------------>
Sebastian Andrzej Siewior (15):
      genirq: Set IRQF_COND_ONESHOT in devm_request_irq().
      platform/x86: int0002: Remove IRQF_ONESHOT from request_irq()
      iommu/amd: Use core's primary handler and set IRQF_ONESHOT
      mailbox: bcm-ferxrm-mailbox: Use default primary handler
      bus: fsl-mc: Use default primary handler
      Bluetooth: btintel_pcie: Use IRQF_ONESHOT and default primary handler
      scsi: efct: Use IRQF_ONESHOT and default primary handler
      ARM: versatile: Remove IRQF_ONESHOT
      char: tpm: cr50: Remove IRQF_ONESHOT
      EDAC/altera: Remove IRQF_ONESHOT
      usb: typec: fusb302: Remove IRQF_ONESHOT
      rtc: amlogic-a4: Remove IRQF_ONESHOT
      thermal/qcom/lmh: Replace IRQF_ONESHOT with IRQF_NO_THREAD
      mfd: wm8350-core: Use IRQF_ONESHOT
      media: pci: mg4b: Use IRQF_NO_THREAD


 arch/arm/mach-versatile/spc.c              |  4 ++--
 drivers/bluetooth/btintel_pcie.c           |  9 ++-------
 drivers/bus/fsl-mc/dprc-driver.c           | 13 +------------
 drivers/char/tpm/tpm_tis_i2c_cr50.c        |  3 +--
 drivers/char/tpm/tpm_tis_spi_cr50.c        |  2 +-
 drivers/edac/altera_edac.c                 | 11 ++++-------
 drivers/iommu/amd/amd_iommu.h              |  1 -
 drivers/iommu/amd/init.c                   | 12 ++++--------
 drivers/iommu/amd/iommu.c                  |  5 -----
 drivers/mailbox/bcm-flexrm-mailbox.c       | 14 ++------------
 drivers/media/pci/mgb4/mgb4_trigger.c      |  2 +-
 drivers/platform/x86/intel/int0002_vgpio.c |  4 ++--
 drivers/rtc/rtc-amlogic-a4.c               |  2 +-
 drivers/scsi/elx/efct/efct_driver.c        |  8 +-------
 drivers/thermal/qcom/lmh.c                 |  2 +-
 drivers/usb/typec/tcpm/fusb302.c           |  3 +--
 include/linux/interrupt.h                  |  2 +-
 include/linux/mfd/wm8350/core.h            |  2 +-
 18 files changed, 26 insertions(+), 73 deletions(-)

diff --git a/arch/arm/mach-versatile/spc.c b/arch/arm/mach-versatile/spc.c
index 812db32448fc..2d27777a00d3 100644
--- a/arch/arm/mach-versatile/spc.c
+++ b/arch/arm/mach-versatile/spc.c
@@ -459,8 +459,8 @@ int __init ve_spc_init(void __iomem *baseaddr, u32 a15_clusid, int irq)
 
 	readl_relaxed(info->baseaddr + PWC_STATUS);
 
-	ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH
-				| IRQF_ONESHOT, "vexpress-spc", info);
+	ret = request_irq(irq, ve_spc_irq_handler, IRQF_TRIGGER_HIGH,
+			  "vexpress-spc", info);
 	if (ret) {
 		pr_err(SPCLOG "IRQ %d request failed\n", irq);
 		kfree(info);
diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c
index 2936b535479f..704767b334b9 100644
--- a/drivers/bluetooth/btintel_pcie.c
+++ b/drivers/bluetooth/btintel_pcie.c
@@ -1431,11 +1431,6 @@ static void btintel_pcie_msix_rx_handle(struct btintel_pcie_data *data)
 	}
 }
 
-static irqreturn_t btintel_pcie_msix_isr(int irq, void *data)
-{
-	return IRQ_WAKE_THREAD;
-}
-
 static inline bool btintel_pcie_is_rxq_empty(struct btintel_pcie_data *data)
 {
 	return data->ia.cr_hia[BTINTEL_PCIE_RXQ_NUM] == data->ia.cr_tia[BTINTEL_PCIE_RXQ_NUM];
@@ -1537,9 +1532,9 @@ static int btintel_pcie_setup_irq(struct btintel_pcie_data *data)
 
 		err = devm_request_threaded_irq(&data->pdev->dev,
 						msix_entry->vector,
-						btintel_pcie_msix_isr,
+						NULL,
 						btintel_pcie_irq_msix_handler,
-						IRQF_SHARED,
+						IRQF_ONESHOT | IRQF_SHARED,
 						KBUILD_MODNAME,
 						msix_entry);
 		if (err) {
diff --git a/drivers/bus/fsl-mc/dprc-driver.c b/drivers/bus/fsl-mc/dprc-driver.c
index c63a7e688db6..db67442addad 100644
--- a/drivers/bus/fsl-mc/dprc-driver.c
+++ b/drivers/bus/fsl-mc/dprc-driver.c
@@ -380,17 +380,6 @@ int dprc_scan_container(struct fsl_mc_device *mc_bus_dev,
 }
 EXPORT_SYMBOL_GPL(dprc_scan_container);
 
-/**
- * dprc_irq0_handler - Regular ISR for DPRC interrupt 0
- *
- * @irq_num: IRQ number of the interrupt being handled
- * @arg: Pointer to device structure
- */
-static irqreturn_t dprc_irq0_handler(int irq_num, void *arg)
-{
-	return IRQ_WAKE_THREAD;
-}
-
 /**
  * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0
  *
@@ -527,7 +516,7 @@ static int register_dprc_irq_handler(struct fsl_mc_device *mc_dev)
 	 */
 	error = devm_request_threaded_irq(&mc_dev->dev,
 					  irq->virq,
-					  dprc_irq0_handler,
+					  NULL,
 					  dprc_irq0_handler_thread,
 					  IRQF_NO_SUSPEND | IRQF_ONESHOT,
 					  dev_name(&mc_dev->dev),
diff --git a/drivers/char/tpm/tpm_tis_i2c_cr50.c b/drivers/char/tpm/tpm_tis_i2c_cr50.c
index fc6891a0b693..b48cacacc066 100644
--- a/drivers/char/tpm/tpm_tis_i2c_cr50.c
+++ b/drivers/char/tpm/tpm_tis_i2c_cr50.c
@@ -749,8 +749,7 @@ static int tpm_cr50_i2c_probe(struct i2c_client *client)
 
 	if (client->irq > 0) {
 		rc = devm_request_irq(dev, client->irq, tpm_cr50_i2c_int_handler,
-				      IRQF_TRIGGER_FALLING | IRQF_ONESHOT |
-				      IRQF_NO_AUTOEN,
+				      IRQF_TRIGGER_FALLING | IRQF_NO_AUTOEN,
 				      dev->driver->name, chip);
 		if (rc < 0) {
 			dev_err(dev, "Failed to probe IRQ %d\n", client->irq);
diff --git a/drivers/char/tpm/tpm_tis_spi_cr50.c b/drivers/char/tpm/tpm_tis_spi_cr50.c
index f4937280e940..32920b4cecfb 100644
--- a/drivers/char/tpm/tpm_tis_spi_cr50.c
+++ b/drivers/char/tpm/tpm_tis_spi_cr50.c
@@ -287,7 +287,7 @@ int cr50_spi_probe(struct spi_device *spi)
 	if (spi->irq > 0) {
 		ret = devm_request_irq(&spi->dev, spi->irq,
 				       cr50_spi_irq_handler,
-				       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+				       IRQF_TRIGGER_RISING,
 				       "cr50_spi", cr50_phy);
 		if (ret < 0) {
 			if (ret == -EPROBE_DEFER)
diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
index 0c5b94e64ea1..4edd2088c2db 100644
--- a/drivers/edac/altera_edac.c
+++ b/drivers/edac/altera_edac.c
@@ -1563,8 +1563,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
 		goto err_release_group_1;
 	}
 	rc = devm_request_irq(&altdev->ddev, altdev->sb_irq,
-			      prv->ecc_irq_handler,
-			      IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+			      prv->ecc_irq_handler, IRQF_TRIGGER_HIGH,
 			      ecc_name, altdev);
 	if (rc) {
 		edac_printk(KERN_ERR, EDAC_DEVICE, "PortB SBERR IRQ error\n");
@@ -1587,8 +1586,7 @@ static int altr_portb_setup(struct altr_edac_device_dev *device)
 		goto err_release_group_1;
 	}
 	rc = devm_request_irq(&altdev->ddev, altdev->db_irq,
-			      prv->ecc_irq_handler,
-			      IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+			      prv->ecc_irq_handler, IRQF_TRIGGER_HIGH,
 			      ecc_name, altdev);
 	if (rc) {
 		edac_printk(KERN_ERR, EDAC_DEVICE, "PortB DBERR IRQ error\n");
@@ -1970,8 +1968,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
 		goto err_release_group1;
 	}
 	rc = devm_request_irq(edac->dev, altdev->sb_irq, prv->ecc_irq_handler,
-			      IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
-			      ecc_name, altdev);
+			      IRQF_TRIGGER_HIGH, ecc_name, altdev);
 	if (rc) {
 		edac_printk(KERN_ERR, EDAC_DEVICE, "No SBERR IRQ resource\n");
 		goto err_release_group1;
@@ -1993,7 +1990,7 @@ static int altr_edac_a10_device_add(struct altr_arria10_edac *edac,
 		goto err_release_group1;
 	}
 	rc = devm_request_irq(edac->dev, altdev->db_irq, prv->ecc_irq_handler,
-			      IRQF_ONESHOT | IRQF_TRIGGER_HIGH,
+			      IRQF_TRIGGER_HIGH,
 			      ecc_name, altdev);
 	if (rc) {
 		edac_printk(KERN_ERR, EDAC_DEVICE, "No DBERR IRQ resource\n");
diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h
index b742ef1adb35..df1c238dc888 100644
--- a/drivers/iommu/amd/amd_iommu.h
+++ b/drivers/iommu/amd/amd_iommu.h
@@ -15,7 +15,6 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_evtlog(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_pprlog(int irq, void *data);
 irqreturn_t amd_iommu_int_thread_galog(int irq, void *data);
-irqreturn_t amd_iommu_int_handler(int irq, void *data);
 void amd_iommu_restart_log(struct amd_iommu *iommu, const char *evt_type,
 			   u8 cntrl_intr, u8 cntrl_log,
 			   u32 status_run_mask, u32 status_overflow_mask);
diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c
index 384c90b4f90a..62a7a718acf8 100644
--- a/drivers/iommu/amd/init.c
+++ b/drivers/iommu/amd/init.c
@@ -2356,12 +2356,8 @@ static int iommu_setup_msi(struct amd_iommu *iommu)
 	if (r)
 		return r;
 
-	r = request_threaded_irq(iommu->dev->irq,
-				 amd_iommu_int_handler,
-				 amd_iommu_int_thread,
-				 0, "AMD-Vi",
-				 iommu);
-
+	r = request_threaded_irq(iommu->dev->irq, NULL, amd_iommu_int_thread,
+				 IRQF_ONESHOT, "AMD-Vi", iommu);
 	if (r) {
 		pci_disable_msi(iommu->dev);
 		return r;
@@ -2535,8 +2531,8 @@ static int __iommu_setup_intcapxt(struct amd_iommu *iommu, const char *devname,
 		return irq;
 	}
 
-	ret = request_threaded_irq(irq, amd_iommu_int_handler,
-				   thread_fn, 0, devname, iommu);
+	ret = request_threaded_irq(irq, NULL, thread_fn, IRQF_ONESHOT, devname,
+				   iommu);
 	if (ret) {
 		irq_domain_free_irqs(irq, 1);
 		irq_domain_remove(domain);
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 5d45795c367a..bd577852618b 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -1151,11 +1151,6 @@ irqreturn_t amd_iommu_int_thread(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-irqreturn_t amd_iommu_int_handler(int irq, void *data)
-{
-	return IRQ_WAKE_THREAD;
-}
-
 /****************************************************************************
  *
  * IOMMU command queuing functions
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index 41f79e51d9e5..4255fefc3a5a 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1173,14 +1173,6 @@ static int flexrm_debugfs_stats_show(struct seq_file *file, void *offset)
 
 /* ====== FlexRM interrupt handler ===== */
 
-static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
-{
-	/* We only have MSI for completions so just wakeup IRQ thread */
-	/* Ring related errors will be informed via completion descriptors */
-
-	return IRQ_WAKE_THREAD;
-}
-
 static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
 {
 	flexrm_process_completions(dev_id);
@@ -1271,10 +1263,8 @@ static int flexrm_startup(struct mbox_chan *chan)
 		ret = -ENODEV;
 		goto fail_free_cmpl_memory;
 	}
-	ret = request_threaded_irq(ring->irq,
-				   flexrm_irq_event,
-				   flexrm_irq_thread,
-				   0, dev_name(ring->mbox->dev), ring);
+	ret = request_threaded_irq(ring->irq, NULL, flexrm_irq_thread,
+				   IRQF_ONESHOT, dev_name(ring->mbox->dev), ring);
 	if (ret) {
 		dev_err(ring->mbox->dev,
 			"failed to request ring%d IRQ\n", ring->num);
diff --git a/drivers/media/pci/mgb4/mgb4_trigger.c b/drivers/media/pci/mgb4/mgb4_trigger.c
index 4f9a35904b41..70cad324df60 100644
--- a/drivers/media/pci/mgb4/mgb4_trigger.c
+++ b/drivers/media/pci/mgb4/mgb4_trigger.c
@@ -115,7 +115,7 @@ static int probe_trigger(struct iio_dev *indio_dev, int irq)
 	if (!st->trig)
 		return -ENOMEM;
 
-	ret = request_irq(irq, &iio_trigger_generic_data_rdy_poll, 0,
+	ret = request_irq(irq, &iio_trigger_generic_data_rdy_poll, IRQF_NO_THREAD,
 			  "mgb4-trigger", st->trig);
 	if (ret)
 		goto error_free_trig;
diff --git a/drivers/platform/x86/intel/int0002_vgpio.c b/drivers/platform/x86/intel/int0002_vgpio.c
index 6f5629dc3f8d..562e88025643 100644
--- a/drivers/platform/x86/intel/int0002_vgpio.c
+++ b/drivers/platform/x86/intel/int0002_vgpio.c
@@ -206,8 +206,8 @@ static int int0002_probe(struct platform_device *pdev)
 	 * FIXME: augment this if we managed to pull handling of shared
 	 * IRQs into gpiolib.
 	 */
-	ret = devm_request_irq(dev, irq, int0002_irq,
-			       IRQF_ONESHOT | IRQF_SHARED, "INT0002", chip);
+	ret = devm_request_irq(dev, irq, int0002_irq, IRQF_SHARED, "INT0002",
+			       chip);
 	if (ret) {
 		dev_err(dev, "Error requesting IRQ %d: %d\n", irq, ret);
 		return ret;
diff --git a/drivers/rtc/rtc-amlogic-a4.c b/drivers/rtc/rtc-amlogic-a4.c
index 123fb372fc9f..50938c35af36 100644
--- a/drivers/rtc/rtc-amlogic-a4.c
+++ b/drivers/rtc/rtc-amlogic-a4.c
@@ -369,7 +369,7 @@ static int aml_rtc_probe(struct platform_device *pdev)
 		return PTR_ERR(rtc->rtc_dev);
 
 	ret = devm_request_irq(dev, rtc->irq, aml_rtc_handler,
-			       IRQF_ONESHOT, "aml-rtc alarm", rtc);
+			       0, "aml-rtc alarm", rtc);
 	if (ret) {
 		dev_err_probe(dev, ret, "IRQ%d request failed, ret = %d\n",
 			      rtc->irq, ret);
diff --git a/drivers/scsi/elx/efct/efct_driver.c b/drivers/scsi/elx/efct/efct_driver.c
index 1bd42f7db177..528399f725d4 100644
--- a/drivers/scsi/elx/efct/efct_driver.c
+++ b/drivers/scsi/elx/efct/efct_driver.c
@@ -415,12 +415,6 @@ efct_intr_thread(int irq, void *handle)
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t
-efct_intr_msix(int irq, void *handle)
-{
-	return IRQ_WAKE_THREAD;
-}
-
 static int
 efct_setup_msix(struct efct *efct, u32 num_intrs)
 {
@@ -450,7 +444,7 @@ efct_setup_msix(struct efct *efct, u32 num_intrs)
 		intr_ctx->index = i;
 
 		rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
-					  efct_intr_msix, efct_intr_thread, 0,
+					  NULL, efct_intr_thread, IRQF_ONESHOT,
 					  EFCT_DRIVER_NAME, intr_ctx);
 		if (rc) {
 			dev_err(&efct->pci->dev,
diff --git a/drivers/thermal/qcom/lmh.c b/drivers/thermal/qcom/lmh.c
index ddadcfada513..3d072b7a4a6d 100644
--- a/drivers/thermal/qcom/lmh.c
+++ b/drivers/thermal/qcom/lmh.c
@@ -220,7 +220,7 @@ static int lmh_probe(struct platform_device *pdev)
 	/* Disable the irq and let cpufreq enable it when ready to handle the interrupt */
 	irq_set_status_flags(lmh_data->irq, IRQ_NOAUTOEN);
 	ret = devm_request_irq(dev, lmh_data->irq, lmh_handle_irq,
-			       IRQF_ONESHOT | IRQF_NO_SUSPEND,
+			       IRQF_NO_THREAD | IRQF_NO_SUSPEND,
 			       "lmh-irq", lmh_data);
 	if (ret) {
 		dev_err(dev, "Error %d registering irq %x\n", ret, lmh_data->irq);
diff --git a/drivers/usb/typec/tcpm/fusb302.c b/drivers/usb/typec/tcpm/fusb302.c
index 870a71f953f6..19ff8217818e 100644
--- a/drivers/usb/typec/tcpm/fusb302.c
+++ b/drivers/usb/typec/tcpm/fusb302.c
@@ -1756,8 +1756,7 @@ static int fusb302_probe(struct i2c_client *client)
 	}
 
 	ret = request_irq(chip->gpio_int_n_irq, fusb302_irq_intn,
-			  IRQF_ONESHOT | IRQF_TRIGGER_LOW,
-			  "fsc_interrupt_int_n", chip);
+			  IRQF_TRIGGER_LOW, "fsc_interrupt_int_n", chip);
 	if (ret < 0) {
 		dev_err(dev, "cannot request IRQ for GPIO Int_N, ret=%d", ret);
 		goto tcpm_unregister_port;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 266f2b39213a..b2bb878abd11 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -228,7 +228,7 @@ static inline int __must_check
 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
 		 unsigned long irqflags, const char *devname, void *dev_id)
 {
-	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
+	return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags | IRQF_COND_ONESHOT,
 					 devname, dev_id);
 }
 
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 5f70d3b5d1b1..097ef4dfcdac 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -667,7 +667,7 @@ static inline int wm8350_register_irq(struct wm8350 *wm8350, int irq,
 		return -ENODEV;
 
 	return request_threaded_irq(irq + wm8350->irq_base, NULL,
-				    handler, flags, name, data);
+				    handler, flags | IRQF_ONESHOT, name, data);
 }
 
 static inline void wm8350_free_irq(struct wm8350 *wm8350, int irq, void *data)
[GIT pull] x86/irq for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest x86/irq branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86-irq-2026-02-09

up to:  d441e38a2c87: x86/irq_remapping: Sanitize posted_msi_supported()

Trivial cleanups for the posted MSI interrupt handling

Thanks,

	tglx

------------------>
Thomas Gleixner (2):
      x86/irq: Cleanup posted MSI code
      x86/irq_remapping: Sanitize posted_msi_supported()


 arch/x86/include/asm/irq_remapping.h |  5 +++--
 arch/x86/kernel/irq.c                | 31 +++++++++++++------------------
 drivers/iommu/intel/irq_remapping.c  |  4 ++--
 3 files changed, 18 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/irq_remapping.h b/arch/x86/include/asm/irq_remapping.h
index 4e55d1755846..37b94f484ef3 100644
--- a/arch/x86/include/asm/irq_remapping.h
+++ b/arch/x86/include/asm/irq_remapping.h
@@ -67,9 +67,10 @@ static inline struct irq_domain *arch_get_ir_parent_domain(void)
 
 extern bool enable_posted_msi;
 
-static inline bool posted_msi_supported(void)
+static inline bool posted_msi_enabled(void)
 {
-	return enable_posted_msi && irq_remapping_cap(IRQ_POSTING_CAP);
+	return IS_ENABLED(CONFIG_X86_POSTED_MSI) &&
+		enable_posted_msi && irq_remapping_cap(IRQ_POSTING_CAP);
 }
 
 #else  /* CONFIG_IRQ_REMAP */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index b2fe6181960c..d817febfd4bc 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -401,11 +401,9 @@ static DEFINE_PER_CPU_CACHE_HOT(bool, posted_msi_handler_active);
 
 void intel_posted_msi_init(void)
 {
-	u32 destination;
-	u32 apic_id;
+	u32 destination, apic_id;
 
 	this_cpu_write(posted_msi_pi_desc.nv, POSTED_MSI_NOTIFICATION_VECTOR);
-
 	/*
 	 * APIC destination ID is stored in bit 8:15 while in XAPIC mode.
 	 * VT-d spec. CH 9.11
@@ -449,8 +447,8 @@ static __always_inline bool handle_pending_pir(unsigned long *pir, struct pt_reg
 }
 
 /*
- * Performance data shows that 3 is good enough to harvest 90+% of the benefit
- * on high IRQ rate workload.
+ * Performance data shows that 3 is good enough to harvest 90+% of the
+ * benefit on high interrupt rate workloads.
  */
 #define MAX_POSTED_MSI_COALESCING_LOOP 3
 
@@ -460,11 +458,8 @@ static __always_inline bool handle_pending_pir(unsigned long *pir, struct pt_reg
  */
 DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
 {
+	struct pi_desc *pid = this_cpu_ptr(&posted_msi_pi_desc);
 	struct pt_regs *old_regs = set_irq_regs(regs);
-	struct pi_desc *pid;
-	int i = 0;
-
-	pid = this_cpu_ptr(&posted_msi_pi_desc);
 
 	/* Mark the handler active for intel_ack_posted_msi_irq() */
 	__this_cpu_write(posted_msi_handler_active, true);
@@ -472,25 +467,25 @@ DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
 	irq_enter();
 
 	/*
-	 * Max coalescing count includes the extra round of handle_pending_pir
-	 * after clearing the outstanding notification bit. Hence, at most
-	 * MAX_POSTED_MSI_COALESCING_LOOP - 1 loops are executed here.
+	 * Loop only MAX_POSTED_MSI_COALESCING_LOOP - 1 times here to take
+	 * the final handle_pending_pir() invocation after clearing the
+	 * outstanding notification bit into account.
 	 */
-	while (++i < MAX_POSTED_MSI_COALESCING_LOOP) {
+	for (int i = 1; i < MAX_POSTED_MSI_COALESCING_LOOP; i++) {
 		if (!handle_pending_pir(pid->pir, regs))
 			break;
 	}
 
 	/*
-	 * Clear outstanding notification bit to allow new IRQ notifications,
-	 * do this last to maximize the window of interrupt coalescing.
+	 * Clear the outstanding notification bit to rearm the notification
+	 * mechanism.
 	 */
 	pi_clear_on(pid);
 
 	/*
-	 * There could be a race of PI notification and the clearing of ON bit,
-	 * process PIR bits one last time such that handling the new interrupts
-	 * are not delayed until the next IRQ.
+	 * Clearing the ON bit can race with a notification. Process the
+	 * PIR bits one last time so that handling the new interrupts is
+	 * not delayed until the next notification happens.
 	 */
 	handle_pending_pir(pid->pir, regs);
 
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index 8bcbfe3d9c72..ecb591e98565 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -1368,7 +1368,7 @@ static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
 		break;
 	case X86_IRQ_ALLOC_TYPE_PCI_MSI:
 	case X86_IRQ_ALLOC_TYPE_PCI_MSIX:
-		if (posted_msi_supported()) {
+		if (posted_msi_enabled()) {
 			prepare_irte_posted(irte);
 			data->irq_2_iommu.posted_msi = 1;
 		}
@@ -1460,7 +1460,7 @@ static int intel_irq_remapping_alloc(struct irq_domain *domain,
 
 		irq_data->hwirq = (index << 16) + i;
 		irq_data->chip_data = ird;
-		if (posted_msi_supported() &&
+		if (posted_msi_enabled() &&
 		    ((info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) ||
 		     (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSIX)))
 			irq_data->chip = &intel_ir_chip_post_msi;
[GIT pull] timers/vdso for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest timers/vdso branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-vdso-2026-02-09

up to:  546e9289c74f: vdso/gettimeofday: Force inlining of __cvdso_clock_getres_common()


Updates for the VDSO subsystem:

  - Provide the missing 64-bit variant of clock_getres()

    This allows the extension of CONFIG_COMPAT_32BIT_TIME to the vDSO and
    finally the removal of 32-bit time types from the kernel and UAPI.

  - Remove the useless and broken getcpu_cache from the VDSO

    The intention was to provide a trivial way to retrieve the CPU number from
    the VDSO, but as the VDSO data is per process there is no way to make it
    work.

  - Switch get/put_unaligned() from packed struct to memcpy()

    The packed struct violates strict aliasing rules, which requires passing
    -fno-strict-aliasing to the compiler. As these are scalar values,
    __builtin_memcpy() turns them into simple loads and stores (a simplified
    sketch follows after this list).

  - Use __typeof_unqual__() for __unqual_scalar_typeof()

    The get/put_unaligned() changes triggered a new sparse warning when __beNN
    types are used with get/put_unaligned(), as sparse builds add a special
    'bitwise' attribute to them which prevents sparse from evaluating the
    _Generic() in __unqual_scalar_typeof().

    Newer sparse versions support __typeof_unqual__() which avoids the problem,
    but requires a recent sparse install. So this adds a sanity check to sparse
    builds, which validates that sparse is available and capable of handling it.

  - Force inline __cvdso_clock_getres_common()

    Compilers sometimes un-inline aggressively, which results in function call
    overhead and problems with automatic stack variable initialization.

    Interestingly enough the force inlining results in smaller code than the
    un-inlined variant produced by GCC when optimizing for size.
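
A simplified, userspace-style sketch of the idea behind the memcpy() based
get_unaligned() (not the kernel macro itself, names are illustrative):

#include <stdint.h>
#include <string.h>

/*
 * Unaligned load via memcpy(): for a scalar type the compiler lowers this to
 * an ordinary load, and unlike dereferencing a __packed struct it does not
 * depend on -fno-strict-aliasing.
 */
static inline uint32_t get_unaligned_u32_sketch(const void *p)
{
	uint32_t val;

	memcpy(&val, p, sizeof(val));
	return val;
}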

Thanks,

	tglx

------------------>
Ian Rogers (4):
      parisc: Inline a type punning version of get_unaligned_le32()
      vdso: Switch get/put_unaligned() from packed struct to memcpy()
      tools headers: Update the linux/unaligned.h copy with the kernel sources
      tools headers: Remove unneeded ignoring of warnings in unaligned.h

Peter Zijlstra (1):
      compiler: Use __typeof_unqual__() for __unqual_scalar_typeof()

Thomas Gleixner (1):
      x86/percpu: Make CONFIG_USE_X86_SEG_SUPPORT work with sparse

Thomas Weißschuh (12):
      vdso: Add prototype for __vdso_clock_getres_time64()
      selftests: vDSO: vdso_config: Add configurations for clock_getres_time64()
      selftests: vDSO: vdso_test_abi: Use UAPI system call numbers
      selftests: vDSO: vdso_test_abi: Add test for clock_getres_time64()
      x86/vdso: Provide clock_getres_time64() for x86-32
      ARM: VDSO: Patch out __vdso_clock_getres() if unavailable
      ARM: VDSO: Provide clock_getres_time64()
      arm64: vdso32: Provide clock_getres_time64()
      MIPS: vdso: Provide getres_time64() for 32-bit ABIs
      vdso: Remove struct getcpu_cache
      powerpc/vdso: Provide clock_getres_time64()
      vdso/gettimeofday: Force inlining of __cvdso_clock_getres_common()


 Makefile                                        |  8 ++++
 arch/arm/kernel/vdso.c                          |  2 +
 arch/arm/vdso/vdso.lds.S                        |  1 +
 arch/arm/vdso/vgettimeofday.c                   |  5 +++
 arch/arm64/kernel/vdso32/vdso.lds.S             |  1 +
 arch/arm64/kernel/vdso32/vgettimeofday.c        |  5 +++
 arch/loongarch/vdso/vgetcpu.c                   |  5 +--
 arch/mips/vdso/vdso.lds.S                       |  1 +
 arch/mips/vdso/vgettimeofday.c                  |  5 +++
 arch/parisc/boot/compressed/misc.c              | 15 ++++++-
 arch/powerpc/include/asm/vdso/gettimeofday.h    |  2 +
 arch/powerpc/kernel/vdso/gettimeofday.S         | 12 ++++++
 arch/powerpc/kernel/vdso/vdso32.lds.S           |  1 +
 arch/powerpc/kernel/vdso/vgettimeofday.c        |  6 +++
 arch/s390/kernel/vdso/getcpu.c                  |  3 +-
 arch/s390/kernel/vdso/vdso.h                    |  4 +-
 arch/x86/entry/vdso/vclock_gettime.c            |  8 ++++
 arch/x86/entry/vdso/vdso32/vdso32.lds.S         |  1 +
 arch/x86/entry/vdso/vgetcpu.c                   |  5 +--
 arch/x86/include/asm/percpu.h                   |  8 ++--
 arch/x86/include/asm/vdso/processor.h           |  4 +-
 include/linux/compiler.h                        | 10 -----
 include/linux/compiler_types.h                  | 13 ++++++
 include/linux/getcpu.h                          | 19 ---------
 include/linux/syscalls.h                        |  3 +-
 include/vdso/gettime.h                          |  1 +
 include/vdso/unaligned.h                        | 41 +++++++++++++++---
 kernel/sys.c                                    |  4 +-
 lib/vdso/gettimeofday.c                         |  2 +-
 scripts/checker-valid.sh                        | 19 +++++++++
 tools/include/linux/compiler_types.h            | 22 ++++++++++
 tools/include/linux/unaligned.h                 |  4 --
 tools/include/vdso/unaligned.h                  | 41 +++++++++++++++---
 tools/testing/selftests/vDSO/vdso_config.h      |  4 +-
 tools/testing/selftests/vDSO/vdso_test_abi.c    | 55 ++++++++++++++++++++++++-
 tools/testing/selftests/vDSO/vdso_test_getcpu.c |  4 +-
 36 files changed, 268 insertions(+), 76 deletions(-)
 delete mode 100644 include/linux/getcpu.h
 create mode 100755 scripts/checker-valid.sh

diff --git a/Makefile b/Makefile
index 9d38125263fb..179c9d9a56dd 100644
--- a/Makefile
+++ b/Makefile
@@ -1187,6 +1187,14 @@ CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian)
 # the checker needs the correct machine size
 CHECKFLAGS += $(if $(CONFIG_64BIT),-m64,-m32)
 
+# Validate the checker is available and functional
+ifneq ($(KBUILD_CHECKSRC), 0)
+  ifneq ($(shell $(srctree)/scripts/checker-valid.sh $(CHECK) $(CHECKFLAGS)), 1)
+    $(warning C=$(KBUILD_CHECKSRC) specified, but $(CHECK) is not available or not up to date)
+    KBUILD_CHECKSRC = 0
+  endif
+endif
+
 # Default kernel image to build when no specific target is given.
 # KBUILD_IMAGE may be overruled on the command line or
 # set in the environment
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index e38a30477f3d..0108f33d6bed 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -161,6 +161,8 @@ static void __init patch_vdso(void *ehdr)
 		vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
 		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
 		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
+		vdso_nullpatch_one(&einfo, "__vdso_clock_getres");
+		vdso_nullpatch_one(&einfo, "__vdso_clock_getres_time64");
 	}
 }
 
diff --git a/arch/arm/vdso/vdso.lds.S b/arch/arm/vdso/vdso.lds.S
index 7c08371f4400..74d8d8bc8a40 100644
--- a/arch/arm/vdso/vdso.lds.S
+++ b/arch/arm/vdso/vdso.lds.S
@@ -74,6 +74,7 @@ VERSION
 		__vdso_gettimeofday;
 		__vdso_clock_getres;
 		__vdso_clock_gettime64;
+		__vdso_clock_getres_time64;
 	local: *;
 	};
 }
diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
index 3554aa35f1ba..f7a2f5dc2fdc 100644
--- a/arch/arm/vdso/vgettimeofday.c
+++ b/arch/arm/vdso/vgettimeofday.c
@@ -34,6 +34,11 @@ int __vdso_clock_getres(clockid_t clock_id,
 	return __cvdso_clock_getres_time32(clock_id, res);
 }
 
+int __vdso_clock_getres_time64(clockid_t clock_id, struct __kernel_timespec *res)
+{
+	return __cvdso_clock_getres(clock_id, res);
+}
+
 /* Avoid unresolved references emitted by GCC */
 
 void __aeabi_unwind_cpp_pr0(void)
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
index e02b27487ce8..c374fb0146f3 100644
--- a/arch/arm64/kernel/vdso32/vdso.lds.S
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -86,6 +86,7 @@ VERSION
 		__vdso_gettimeofday;
 		__vdso_clock_getres;
 		__vdso_clock_gettime64;
+		__vdso_clock_getres_time64;
 	local: *;
 	};
 }
diff --git a/arch/arm64/kernel/vdso32/vgettimeofday.c b/arch/arm64/kernel/vdso32/vgettimeofday.c
index 29b4d8f61e39..0c6998ebe491 100644
--- a/arch/arm64/kernel/vdso32/vgettimeofday.c
+++ b/arch/arm64/kernel/vdso32/vgettimeofday.c
@@ -32,6 +32,11 @@ int __vdso_clock_getres(clockid_t clock_id,
 	return __cvdso_clock_getres_time32(clock_id, res);
 }
 
+int __vdso_clock_getres_time64(clockid_t clock_id, struct __kernel_timespec *res)
+{
+	return __cvdso_clock_getres(clock_id, res);
+}
+
 /* Avoid unresolved references emitted by GCC */
 
 void __aeabi_unwind_cpp_pr0(void)
diff --git a/arch/loongarch/vdso/vgetcpu.c b/arch/loongarch/vdso/vgetcpu.c
index 73af49242ecd..6f054ec898c7 100644
--- a/arch/loongarch/vdso/vgetcpu.c
+++ b/arch/loongarch/vdso/vgetcpu.c
@@ -4,7 +4,6 @@
  */
 
 #include <asm/vdso.h>
-#include <linux/getcpu.h>
 
 static __always_inline int read_cpu_id(void)
 {
@@ -28,8 +27,8 @@ static __always_inline int read_cpu_id(void)
 }
 
 extern
-int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused);
-int __vdso_getcpu(unsigned int *cpu, unsigned int *node, struct getcpu_cache *unused)
+int __vdso_getcpu(unsigned int *cpu, unsigned int *node, void *unused);
+int __vdso_getcpu(unsigned int *cpu, unsigned int *node, void *unused)
 {
 	int cpu_id;
 
diff --git a/arch/mips/vdso/vdso.lds.S b/arch/mips/vdso/vdso.lds.S
index c8bbe56d89cb..5d08be3a6b85 100644
--- a/arch/mips/vdso/vdso.lds.S
+++ b/arch/mips/vdso/vdso.lds.S
@@ -103,6 +103,7 @@ VERSION
 		__vdso_clock_getres;
 #if _MIPS_SIM != _MIPS_SIM_ABI64
 		__vdso_clock_gettime64;
+		__vdso_clock_getres_time64;
 #endif
 #endif
 	local: *;
diff --git a/arch/mips/vdso/vgettimeofday.c b/arch/mips/vdso/vgettimeofday.c
index 604afea3f336..1d236215e8f6 100644
--- a/arch/mips/vdso/vgettimeofday.c
+++ b/arch/mips/vdso/vgettimeofday.c
@@ -46,6 +46,11 @@ int __vdso_clock_gettime64(clockid_t clock,
 	return __cvdso_clock_gettime(clock, ts);
 }
 
+int __vdso_clock_getres_time64(clockid_t clock, struct __kernel_timespec *ts)
+{
+	return __cvdso_clock_getres(clock, ts);
+}
+
 #else
 
 int __vdso_clock_gettime(clockid_t clock,
diff --git a/arch/parisc/boot/compressed/misc.c b/arch/parisc/boot/compressed/misc.c
index 9c83bd06ef15..111f267230a1 100644
--- a/arch/parisc/boot/compressed/misc.c
+++ b/arch/parisc/boot/compressed/misc.c
@@ -278,6 +278,19 @@ static void parse_elf(void *output)
 	free(phdrs);
 }
 
+/*
+ * The regular get_unaligned_le32 uses __builtin_memcpy which can trigger
+ * warnings when reading a byte/char output_len as an integer, as the size of a
+ * char is less than that of an integer. Use type punning and the packed
+ * attribute, which requires -fno-strict-aliasing, to work around the problem.
+ */
+static u32 punned_get_unaligned_le32(const void *p)
+{
+	const struct { __le32 x; } __packed * __get_pptr = p;
+
+	return le32_to_cpu(__get_pptr->x);
+}
+
 asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide,
 		unsigned int command_line,
 		const unsigned int rd_start,
@@ -309,7 +322,7 @@ asmlinkage unsigned long __visible decompress_kernel(unsigned int started_wide,
 	 * leave 2 MB for the stack.
 	 */
 	vmlinux_addr = (unsigned long) &_ebss + 2*1024*1024;
-	vmlinux_len = get_unaligned_le32(&output_len);
+	vmlinux_len = punned_get_unaligned_le32(&output_len);
 	output = (char *) vmlinux_addr;
 
 	/*
diff --git a/arch/powerpc/include/asm/vdso/gettimeofday.h b/arch/powerpc/include/asm/vdso/gettimeofday.h
index ab3df12c8d94..8ea397e26ad0 100644
--- a/arch/powerpc/include/asm/vdso/gettimeofday.h
+++ b/arch/powerpc/include/asm/vdso/gettimeofday.h
@@ -135,6 +135,8 @@ int __c_kernel_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts,
 			       const struct vdso_time_data *vd);
 int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
 			    const struct vdso_time_data *vd);
+int __c_kernel_clock_getres_time64(clockid_t clock_id, struct __kernel_timespec *res,
+				   const struct vdso_time_data *vd);
 #endif
 int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
 			    const struct vdso_time_data *vd);
diff --git a/arch/powerpc/kernel/vdso/gettimeofday.S b/arch/powerpc/kernel/vdso/gettimeofday.S
index 79c967212444..1c8e51691bf8 100644
--- a/arch/powerpc/kernel/vdso/gettimeofday.S
+++ b/arch/powerpc/kernel/vdso/gettimeofday.S
@@ -103,6 +103,18 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
 	cvdso_call __c_kernel_clock_getres
 V_FUNCTION_END(__kernel_clock_getres)
 
+/*
+ * Exact prototype of clock_getres_time64()
+ *
+ * int __kernel_clock_getres(clockid_t clock_id, struct __timespec64 *res);
+ *
+ */
+#ifndef __powerpc64__
+V_FUNCTION_BEGIN(__kernel_clock_getres_time64)
+	cvdso_call __c_kernel_clock_getres_time64
+V_FUNCTION_END(__kernel_clock_getres_time64)
+#endif
+
 
 /*
  * Exact prototype of time()
diff --git a/arch/powerpc/kernel/vdso/vdso32.lds.S b/arch/powerpc/kernel/vdso/vdso32.lds.S
index 72a1012b8a20..3f384a2526ae 100644
--- a/arch/powerpc/kernel/vdso/vdso32.lds.S
+++ b/arch/powerpc/kernel/vdso/vdso32.lds.S
@@ -124,6 +124,7 @@ VERSION
 		__kernel_clock_gettime;
 		__kernel_clock_gettime64;
 		__kernel_clock_getres;
+		__kernel_clock_getres_time64;
 		__kernel_time;
 		__kernel_get_tbfreq;
 		__kernel_sync_dicache;
diff --git a/arch/powerpc/kernel/vdso/vgettimeofday.c b/arch/powerpc/kernel/vdso/vgettimeofday.c
index 6f5167d81af5..3c194e1ab562 100644
--- a/arch/powerpc/kernel/vdso/vgettimeofday.c
+++ b/arch/powerpc/kernel/vdso/vgettimeofday.c
@@ -35,6 +35,12 @@ int __c_kernel_clock_getres(clockid_t clock_id, struct old_timespec32 *res,
 {
 	return __cvdso_clock_getres_time32_data(vd, clock_id, res);
 }
+
+int __c_kernel_clock_getres_time64(clockid_t clock_id, struct __kernel_timespec *res,
+				   const struct vdso_time_data *vd)
+{
+	return __cvdso_clock_getres_data(vd, clock_id, res);
+}
 #endif
 
 int __c_kernel_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz,
diff --git a/arch/s390/kernel/vdso/getcpu.c b/arch/s390/kernel/vdso/getcpu.c
index 5c5d4a848b76..1e17665616c5 100644
--- a/arch/s390/kernel/vdso/getcpu.c
+++ b/arch/s390/kernel/vdso/getcpu.c
@@ -2,11 +2,10 @@
 /* Copyright IBM Corp. 2020 */
 
 #include <linux/compiler.h>
-#include <linux/getcpu.h>
 #include <asm/timex.h>
 #include "vdso.h"
 
-int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, void *unused)
 {
 	union tod_clock clk;
 
diff --git a/arch/s390/kernel/vdso/vdso.h b/arch/s390/kernel/vdso/vdso.h
index 8cff033dd854..1fe52a6f5a56 100644
--- a/arch/s390/kernel/vdso/vdso.h
+++ b/arch/s390/kernel/vdso/vdso.h
@@ -4,9 +4,7 @@
 
 #include <vdso/datapage.h>
 
-struct getcpu_cache;
-
-int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused);
+int __s390_vdso_getcpu(unsigned *cpu, unsigned *node, void *unused);
 int __s390_vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
 int __s390_vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
 int __s390_vdso_clock_getres(clockid_t clock, struct __kernel_timespec *ts);
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 0debc194bd78..027b7e88d753 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -74,4 +74,12 @@ int __vdso_clock_getres(clockid_t clock, struct old_timespec32 *res)
 
 int clock_getres(clockid_t, struct old_timespec32 *)
 	__attribute__((weak, alias("__vdso_clock_getres")));
+
+int __vdso_clock_getres_time64(clockid_t clock, struct __kernel_timespec *ts)
+{
+	return __cvdso_clock_getres(clock, ts);
+}
+
+int clock_getres_time64(clockid_t, struct __kernel_timespec *)
+	__attribute__((weak, alias("__vdso_clock_getres_time64")));
 #endif
diff --git a/arch/x86/entry/vdso/vdso32/vdso32.lds.S b/arch/x86/entry/vdso/vdso32/vdso32.lds.S
index 8a3be07006bb..6f977c103584 100644
--- a/arch/x86/entry/vdso/vdso32/vdso32.lds.S
+++ b/arch/x86/entry/vdso/vdso32/vdso32.lds.S
@@ -28,6 +28,7 @@ VERSION
 		__vdso_time;
 		__vdso_clock_getres;
 		__vdso_clock_gettime64;
+		__vdso_clock_getres_time64;
 		__vdso_getcpu;
 	};
 
diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c
index e4640306b2e3..6381b472b7c5 100644
--- a/arch/x86/entry/vdso/vgetcpu.c
+++ b/arch/x86/entry/vdso/vgetcpu.c
@@ -6,17 +6,16 @@
  */
 
 #include <linux/kernel.h>
-#include <linux/getcpu.h>
 #include <asm/segment.h>
 #include <vdso/processor.h>
 
 notrace long
-__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+__vdso_getcpu(unsigned *cpu, unsigned *node, void *unused)
 {
 	vdso_read_cpunode(cpu, node);
 
 	return 0;
 }
 
-long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long getcpu(unsigned *cpu, unsigned *node, void *tcache)
 	__attribute__((weak, alias("__vdso_getcpu")));
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
index 725d0eff7acd..c55058f3d75e 100644
--- a/arch/x86/include/asm/percpu.h
+++ b/arch/x86/include/asm/percpu.h
@@ -137,12 +137,12 @@
 
 #define __raw_cpu_read(size, qual, pcp)					\
 ({									\
-	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp));		\
+	*(qual __my_cpu_type(pcp) * __force)__my_cpu_ptr(&(pcp));	\
 })
 
-#define __raw_cpu_write(size, qual, pcp, val)				\
-do {									\
-	*(qual __my_cpu_type(pcp) *)__my_cpu_ptr(&(pcp)) = (val);	\
+#define __raw_cpu_write(size, qual, pcp, val)					\
+do {										\
+	*(qual __my_cpu_type(pcp) * __force)__my_cpu_ptr(&(pcp)) = (val);	\
 } while (0)
 
 #define __raw_cpu_read_const(pcp)	__raw_cpu_read(, , pcp)
diff --git a/arch/x86/include/asm/vdso/processor.h b/arch/x86/include/asm/vdso/processor.h
index 7000aeb59aa2..93e0e24e5cb4 100644
--- a/arch/x86/include/asm/vdso/processor.h
+++ b/arch/x86/include/asm/vdso/processor.h
@@ -18,9 +18,7 @@ static __always_inline void cpu_relax(void)
 	native_pause();
 }
 
-struct getcpu_cache;
-
-notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused);
+notrace long __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused);
 
 #endif /* __ASSEMBLER__ */
 
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 04487c9bd751..c601222b495a 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -230,16 +230,6 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
 	__BUILD_BUG_ON_ZERO_MSG(!__is_noncstr(p), \
 				"must be non-C-string (not NUL-terminated)")
 
-/*
- * Use __typeof_unqual__() when available.
- *
- * XXX: Remove test for __CHECKER__ once
- * sparse learns about __typeof_unqual__().
- */
-#if CC_HAS_TYPEOF_UNQUAL && !defined(__CHECKER__)
-# define USE_TYPEOF_UNQUAL 1
-#endif
-
 /*
  * Define TYPEOF_UNQUAL() to use __typeof_unqual__() as typeof
  * operator when available, to return an unqualified type of the exp.
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index d3318a3c2577..377df1e64096 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -562,6 +562,14 @@ struct ftrace_likely_data {
 #define asm_inline asm
 #endif
 
+#ifndef __ASSEMBLY__
+/*
+ * Use __typeof_unqual__() when available.
+ */
+#if CC_HAS_TYPEOF_UNQUAL || defined(__CHECKER__)
+# define USE_TYPEOF_UNQUAL 1
+#endif
+
 /* Are two types/vars the same type (ignoring qualifiers)? */
 #define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
 
@@ -569,6 +577,7 @@ struct ftrace_likely_data {
  * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
  *			       non-scalar types unchanged.
  */
+#ifndef USE_TYPEOF_UNQUAL
 /*
  * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
  * is not type-compatible with 'signed char', and we define a separate case.
@@ -586,6 +595,10 @@ struct ftrace_likely_data {
 			 __scalar_type_to_expr_cases(long),		\
 			 __scalar_type_to_expr_cases(long long),	\
 			 default: (x)))
+#else
+#define __unqual_scalar_typeof(x) __typeof_unqual__(x)
+#endif
+#endif /* !__ASSEMBLY__ */
 
 /* Is this type a native word size -- useful for atomic operations */
 #define __native_word(t) \
diff --git a/include/linux/getcpu.h b/include/linux/getcpu.h
deleted file mode 100644
index c304dcdb4eac..000000000000
--- a/include/linux/getcpu.h
+++ /dev/null
@@ -1,19 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_GETCPU_H
-#define _LINUX_GETCPU_H 1
-
-/* Cache for getcpu() to speed it up. Results might be a short time
-   out of date, but will be faster.
-
-   User programs should not refer to the contents of this structure.
-   I repeat they should not refer to it. If they do they will break
-   in future kernels.
-
-   It is only a private cache for vgetcpu(). It will change in future kernels.
-   The user program must store this information per thread (__thread)
-   If you want 100% accurate information pass NULL instead. */
-struct getcpu_cache {
-	unsigned long blob[128 / sizeof(long)];
-};
-
-#endif
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index cf84d98964b2..23704e006afd 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -59,7 +59,6 @@ struct compat_stat;
 struct old_timeval32;
 struct robust_list_head;
 struct futex_waitv;
-struct getcpu_cache;
 struct old_linux_dirent;
 struct perf_event_attr;
 struct file_handle;
@@ -718,7 +717,7 @@ asmlinkage long sys_getrusage(int who, struct rusage __user *ru);
 asmlinkage long sys_umask(int mask);
 asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 			unsigned long arg4, unsigned long arg5);
-asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
+asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, void __user *cache);
 asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
 				struct timezone __user *tz);
 asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
diff --git a/include/vdso/gettime.h b/include/vdso/gettime.h
index 9ac161866653..16a0a0556b86 100644
--- a/include/vdso/gettime.h
+++ b/include/vdso/gettime.h
@@ -20,5 +20,6 @@ int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
 __kernel_old_time_t __vdso_time(__kernel_old_time_t *t);
 int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
 int __vdso_clock_gettime64(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_getres_time64(clockid_t clock, struct __kernel_timespec *ts);
 
 #endif
diff --git a/include/vdso/unaligned.h b/include/vdso/unaligned.h
index ff0c06b6513e..9076483c9fbb 100644
--- a/include/vdso/unaligned.h
+++ b/include/vdso/unaligned.h
@@ -2,14 +2,43 @@
 #ifndef __VDSO_UNALIGNED_H
 #define __VDSO_UNALIGNED_H
 
-#define __get_unaligned_t(type, ptr) ({							\
-	const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr);	\
-	__get_pptr->x;									\
+#include <linux/compiler_types.h>
+
+/**
+ * __get_unaligned_t - read an unaligned value from memory.
+ * @type:	the type to load from the pointer.
+ * @ptr:	the pointer to load from.
+ *
+ * Use memcpy to affect an unaligned type sized load avoiding undefined behavior
+ * from approaches like type punning that require -fno-strict-aliasing in order
+ * to be correct. As type may be const, use __unqual_scalar_typeof to map to a
+ * non-const type - you can't memcpy into a const type. The
+ * __get_unaligned_ctrl_type gives __unqual_scalar_typeof its required
+ * expression rather than type, a pointer is used to avoid warnings about mixing
+ * the use of 0 and NULL. The void* cast silences ubsan warnings.
+ */
+#define __get_unaligned_t(type, ptr) ({					\
+	type *__get_unaligned_ctrl_type __always_unused = NULL;		\
+	__unqual_scalar_typeof(*__get_unaligned_ctrl_type) __get_unaligned_val; \
+	__builtin_memcpy(&__get_unaligned_val, (void *)(ptr),		\
+			 sizeof(__get_unaligned_val));			\
+	__get_unaligned_val;						\
 })
 
-#define __put_unaligned_t(type, val, ptr) do {						\
-	struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr);		\
-	__put_pptr->x = (val);								\
+/**
+ * __put_unaligned_t - write an unaligned value to memory.
+ * @type:	the type of the value to store.
+ * @val:	the value to store.
+ * @ptr:	the pointer to store to.
+ *
+ * Use memcpy to affect an unaligned type sized store avoiding undefined
+ * behavior from approaches like type punning that require -fno-strict-aliasing
+ * in order to be correct. The void* cast silences ubsan warnings.
+ */
+#define __put_unaligned_t(type, val, ptr) do {				\
+	type __put_unaligned_val = (val);				\
+	__builtin_memcpy((void *)(ptr), &__put_unaligned_val,		\
+			 sizeof(__put_unaligned_val));			\
 } while (0)
 
 #endif /* __VDSO_UNALIGNED_H */
diff --git a/kernel/sys.c b/kernel/sys.c
index 8b58eece4e58..f1780ab132a3 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -31,7 +31,6 @@
 #include <linux/tty.h>
 #include <linux/signal.h>
 #include <linux/cn_proc.h>
-#include <linux/getcpu.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/seccomp.h>
 #include <linux/cpu.h>
@@ -2876,8 +2875,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 	return error;
 }
 
-SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
-		struct getcpu_cache __user *, unused)
+SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, void __user *, unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index 95df0153f05a..4399e143d43a 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -421,7 +421,7 @@ static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time
 #endif /* VDSO_HAS_TIME */
 
 #ifdef VDSO_HAS_CLOCK_GETRES
-static __maybe_unused
+static __always_inline
 bool __cvdso_clock_getres_common(const struct vdso_time_data *vd, clockid_t clock,
 				 struct __kernel_timespec *res)
 {
diff --git a/scripts/checker-valid.sh b/scripts/checker-valid.sh
new file mode 100755
index 000000000000..625a789ed1c8
--- /dev/null
+++ b/scripts/checker-valid.sh
@@ -0,0 +1,19 @@
+#!/bin/sh -eu
+# SPDX-License-Identifier: GPL-2.0
+
+[ ! -x "$(command -v "$1")" ] && exit 1
+
+tmp_file=$(mktemp)
+trap "rm -f $tmp_file" EXIT
+
+cat << EOF >$tmp_file
+static inline int u(const int *q)
+{
+	__typeof_unqual__(*q) v = *q;
+	return v;
+}
+EOF
+
+# sparse happily exits with 0 on error so validate
+# there is none on stderr. Use awk as grep is a pain with sh -e
+$@ $tmp_file 2>&1 | awk -v c=1 '/error/{c=0}END{print c}'
diff --git a/tools/include/linux/compiler_types.h b/tools/include/linux/compiler_types.h
index d09f9dc172a4..890982283a5e 100644
--- a/tools/include/linux/compiler_types.h
+++ b/tools/include/linux/compiler_types.h
@@ -40,4 +40,26 @@
 #define asm_goto_output(x...) asm goto(x)
 #endif
 
+/*
+ * __unqual_scalar_typeof(x) - Declare an unqualified scalar type, leaving
+ *			       non-scalar types unchanged.
+ */
+/*
+ * Prefer C11 _Generic for better compile-times and simpler code. Note: 'char'
+ * is not type-compatible with 'signed char', and we define a separate case.
+ */
+#define __scalar_type_to_expr_cases(type)				\
+		unsigned type:	(unsigned type)0,			\
+		signed type:	(signed type)0
+
+#define __unqual_scalar_typeof(x) typeof(				\
+		_Generic((x),						\
+			 char:	(char)0,				\
+			 __scalar_type_to_expr_cases(char),		\
+			 __scalar_type_to_expr_cases(short),		\
+			 __scalar_type_to_expr_cases(int),		\
+			 __scalar_type_to_expr_cases(long),		\
+			 __scalar_type_to_expr_cases(long long),	\
+			 default: (x)))
+
 #endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/tools/include/linux/unaligned.h b/tools/include/linux/unaligned.h
index 395a4464fe73..d51ddafed138 100644
--- a/tools/include/linux/unaligned.h
+++ b/tools/include/linux/unaligned.h
@@ -6,9 +6,6 @@
  * This is the most generic implementation of unaligned accesses
  * and should work almost anywhere.
  */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpacked"
-#pragma GCC diagnostic ignored "-Wattributes"
 #include <vdso/unaligned.h>
 
 #define get_unaligned(ptr)	__get_unaligned_t(typeof(*(ptr)), (ptr))
@@ -143,6 +140,5 @@ static inline u64 get_unaligned_be48(const void *p)
 {
 	return __get_unaligned_be48(p);
 }
-#pragma GCC diagnostic pop
 
 #endif /* __LINUX_UNALIGNED_H */
diff --git a/tools/include/vdso/unaligned.h b/tools/include/vdso/unaligned.h
index ff0c06b6513e..9076483c9fbb 100644
--- a/tools/include/vdso/unaligned.h
+++ b/tools/include/vdso/unaligned.h
@@ -2,14 +2,43 @@
 #ifndef __VDSO_UNALIGNED_H
 #define __VDSO_UNALIGNED_H
 
-#define __get_unaligned_t(type, ptr) ({							\
-	const struct { type x; } __packed * __get_pptr = (typeof(__get_pptr))(ptr);	\
-	__get_pptr->x;									\
+#include <linux/compiler_types.h>
+
+/**
+ * __get_unaligned_t - read an unaligned value from memory.
+ * @type:	the type to load from the pointer.
+ * @ptr:	the pointer to load from.
+ *
+ * Use memcpy to affect an unaligned type sized load avoiding undefined behavior
+ * from approaches like type punning that require -fno-strict-aliasing in order
+ * to be correct. As type may be const, use __unqual_scalar_typeof to map to a
+ * non-const type - you can't memcpy into a const type. The
+ * __get_unaligned_ctrl_type gives __unqual_scalar_typeof its required
+ * expression rather than type, a pointer is used to avoid warnings about mixing
+ * the use of 0 and NULL. The void* cast silences ubsan warnings.
+ */
+#define __get_unaligned_t(type, ptr) ({					\
+	type *__get_unaligned_ctrl_type __always_unused = NULL;		\
+	__unqual_scalar_typeof(*__get_unaligned_ctrl_type) __get_unaligned_val; \
+	__builtin_memcpy(&__get_unaligned_val, (void *)(ptr),		\
+			 sizeof(__get_unaligned_val));			\
+	__get_unaligned_val;						\
 })
 
-#define __put_unaligned_t(type, val, ptr) do {						\
-	struct { type x; } __packed * __put_pptr = (typeof(__put_pptr))(ptr);		\
-	__put_pptr->x = (val);								\
+/**
+ * __put_unaligned_t - write an unaligned value to memory.
+ * @type:	the type of the value to store.
+ * @val:	the value to store.
+ * @ptr:	the pointer to store to.
+ *
+ * Use memcpy to affect an unaligned type sized store avoiding undefined
+ * behavior from approaches like type punning that require -fno-strict-aliasing
+ * in order to be correct. The void* cast silences ubsan warnings.
+ */
+#define __put_unaligned_t(type, val, ptr) do {				\
+	type __put_unaligned_val = (val);				\
+	__builtin_memcpy((void *)(ptr), &__put_unaligned_val,		\
+			 sizeof(__put_unaligned_val));			\
 } while (0)
 
 #endif /* __VDSO_UNALIGNED_H */
diff --git a/tools/testing/selftests/vDSO/vdso_config.h b/tools/testing/selftests/vDSO/vdso_config.h
index 50c261005111..5da223731b81 100644
--- a/tools/testing/selftests/vDSO/vdso_config.h
+++ b/tools/testing/selftests/vDSO/vdso_config.h
@@ -66,7 +66,7 @@ static const char *versions[7] = {
 };
 
 __attribute__((unused))
-static const char *names[2][7] = {
+static const char *names[2][8] = {
 	{
 		"__kernel_gettimeofday",
 		"__kernel_clock_gettime",
@@ -75,6 +75,7 @@ static const char *names[2][7] = {
 		"__kernel_getcpu",
 		"__kernel_clock_gettime64",
 		"__kernel_getrandom",
+		"__kernel_clock_getres_time64",
 	},
 	{
 		"__vdso_gettimeofday",
@@ -84,6 +85,7 @@ static const char *names[2][7] = {
 		"__vdso_getcpu",
 		"__vdso_clock_gettime64",
 		"__vdso_getrandom",
+		"__vdso_clock_getres_time64",
 	},
 };
 
diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c
index c620317eaeea..b162a4ba9c4f 100644
--- a/tools/testing/selftests/vDSO/vdso_test_abi.c
+++ b/tools/testing/selftests/vDSO/vdso_test_abi.c
@@ -36,6 +36,7 @@ typedef long (*vdso_gettimeofday_t)(struct timeval *tv, struct timezone *tz);
 typedef long (*vdso_clock_gettime_t)(clockid_t clk_id, struct timespec *ts);
 typedef long (*vdso_clock_gettime64_t)(clockid_t clk_id, struct vdso_timespec64 *ts);
 typedef long (*vdso_clock_getres_t)(clockid_t clk_id, struct timespec *ts);
+typedef long (*vdso_clock_getres_time64_t)(clockid_t clk_id, struct vdso_timespec64 *ts);
 typedef time_t (*vdso_time_t)(time_t *t);
 
 static const char * const vdso_clock_name[] = {
@@ -179,7 +180,7 @@ static void vdso_test_clock_getres(clockid_t clk_id)
 		clock_getres_fail++;
 	}
 
-	ret = syscall(SYS_clock_getres, clk_id, &sys_ts);
+	ret = syscall(__NR_clock_getres, clk_id, &sys_ts);
 
 	ksft_print_msg("The syscall resolution is %lld %lld\n",
 			(long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec);
@@ -196,6 +197,55 @@ static void vdso_test_clock_getres(clockid_t clk_id)
 	}
 }
 
+#ifdef __NR_clock_getres_time64
+static void vdso_test_clock_getres_time64(clockid_t clk_id)
+{
+	int clock_getres_fail = 0;
+
+	/* Find clock_getres. */
+	vdso_clock_getres_time64_t vdso_clock_getres_time64 =
+		(vdso_clock_getres_time64_t)vdso_sym(version, name[7]);
+
+	if (!vdso_clock_getres_time64) {
+		ksft_print_msg("Couldn't find %s\n", name[7]);
+		ksft_test_result_skip("%s %s\n", name[7],
+				      vdso_clock_name[clk_id]);
+		return;
+	}
+
+	struct vdso_timespec64 ts, sys_ts;
+	long ret = VDSO_CALL(vdso_clock_getres_time64, 2, clk_id, &ts);
+
+	if (ret == 0) {
+		ksft_print_msg("The vdso resolution is %lld %lld\n",
+			       (long long)ts.tv_sec, (long long)ts.tv_nsec);
+	} else {
+		clock_getres_fail++;
+	}
+
+	ret = syscall(__NR_clock_getres_time64, clk_id, &sys_ts);
+
+	ksft_print_msg("The syscall resolution is %lld %lld\n",
+			(long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec);
+
+	if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec))
+		clock_getres_fail++;
+
+	if (clock_getres_fail > 0) {
+		ksft_test_result_fail("%s %s\n", name[7],
+				      vdso_clock_name[clk_id]);
+	} else {
+		ksft_test_result_pass("%s %s\n", name[7],
+				      vdso_clock_name[clk_id]);
+	}
+}
+#else /* !__NR_clock_getres_time64 */
+static void vdso_test_clock_getres_time64(clockid_t clk_id)
+{
+	ksft_test_result_skip("%s %s\n", name[7], vdso_clock_name[clk_id]);
+}
+#endif /* __NR_clock_getres_time64 */
+
 /*
  * This function calls vdso_test_clock_gettime and vdso_test_clock_getres
  * with different values for clock_id.
@@ -208,9 +258,10 @@ static inline void vdso_test_clock(clockid_t clock_id)
 	vdso_test_clock_gettime64(clock_id);
 
 	vdso_test_clock_getres(clock_id);
+	vdso_test_clock_getres_time64(clock_id);
 }
 
-#define VDSO_TEST_PLAN	29
+#define VDSO_TEST_PLAN	38
 
 int main(int argc, char **argv)
 {
diff --git a/tools/testing/selftests/vDSO/vdso_test_getcpu.c b/tools/testing/selftests/vDSO/vdso_test_getcpu.c
index bea8ad54da11..3fe49cbdae98 100644
--- a/tools/testing/selftests/vDSO/vdso_test_getcpu.c
+++ b/tools/testing/selftests/vDSO/vdso_test_getcpu.c
@@ -16,9 +16,7 @@
 #include "vdso_config.h"
 #include "vdso_call.h"
 
-struct getcpu_cache;
-typedef long (*getcpu_t)(unsigned int *, unsigned int *,
-			 struct getcpu_cache *);
+typedef long (*getcpu_t)(unsigned int *, unsigned int *, void *);
 
 int main(int argc, char **argv)
 {
[GIT pull] irq/core for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest irq/core branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-core-2026-02-09

up to:  2dfc417414c6: genirq/proc: Replace snprintf with strscpy in register_handler_proc


Updates for the interrupt core subsystem:

 - Remove the interrupt timing infrastructure

   This was added seven years ago to be used for power management purposes, but
   that integration never happened.

 - Clean up the remaining setup_percpu_irq() users

   The memory allocator is available by the time interrupts can be requested,
   so there is no need for static irqaction structures. Move the remaining
   users to request_percpu_irq() and delete the historical cruft (see the
   conversion sketch after this list).

 - Warn when interrupt flag inconsistencies are detected in request*_irq().

   Inconsistent flags can lead to hard-to-diagnose malfunctions. The fallout
   of this new warning has been addressed in linux-next and the fixes are
   coming in via the maintainer trees and the tip irq/cleanups pull request.
   An example of such an inconsistency is sketched after this list.

 - Invoke affinity notifier when CPU hotplug breaks affinity

   Otherwise the code using the notifier misses the affinity change and
   operates on stale information. A registration sketch for the notifier is
   included after this list.

 - The usual cleanups and improvements
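
 A few illustrative sketches for the points above. These are not patches from
 this series, just hedged examples: all driver names, variables and "foo"
 helpers are hypothetical.

 1) setup_percpu_irq() conversion. The old pattern kept a static struct
    irqaction around only because no allocator was available; the replacement
    requests the interrupt directly:

	/* Old: statically allocated irqaction wired up at early boot */
	static struct irqaction foo_timer_irqaction = {
		.handler	= foo_timer_interrupt,
		.percpu_dev_id	= &foo_clockevent,
		.flags		= IRQF_PERCPU | IRQF_TIMER,
		.name		= "foo-timer",
	};

	setup_percpu_irq(irq, &foo_timer_irqaction);

	/* New: the core allocates the irqaction */
	ret = request_percpu_irq(irq, foo_timer_interrupt, "foo-timer",
				 &foo_clockevent);

 2) Flag inconsistency caught by the new warning. IRQF_ONESHOT keeps the
    interrupt line masked until a threaded handler has run, so it is
    meaningless without one:

	/* Warns: IRQF_ONESHOT, but no threaded handler */
	ret = request_irq(irq, foo_hardirq, IRQF_ONESHOT, "foo", dev);

	/* Consistent: the oneshot masking protects the threaded handler */
	ret = request_threaded_irq(irq, NULL, foo_thread_fn, IRQF_ONESHOT,
				   "foo", dev);

 3) Affinity notifier registration. With this change the notify() callback is
    also invoked when a CPU hotplug operation forces a migration:

	static void foo_affinity_notify(struct irq_affinity_notify *notify,
					const cpumask_t *mask)
	{
		/* Rebuild the per-queue CPU mapping from the new mask */
	}

	static void foo_affinity_release(struct kref *ref) { }

	static struct irq_affinity_notify foo_notify = {
		.notify		= foo_affinity_notify,
		.release	= foo_affinity_release,
	};

	ret = irq_set_affinity_notifier(irq, &foo_notify);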

Thanks,

	tglx

------------------>
Greg Kroah-Hartman (1):
      irqdomain: Fix up const problem in irq_domain_set_name()

Imran Khan (1):
      genirq/cpuhotplug: Notify about affinity changes breaking the affinity mask

Luigi Rizzo (1):
      genirq: Move clear of kstat_irqs to free_desc()

Marc Zyngier (6):
      genirq: Remove IRQ timing tracking infrastructure
      genirq: Remove __request_percpu_irq() helper
      MIPS: Move IP30 timer to request_percpu_irq()
      MIPS: Move IP27 timer to request_percpu_irq()
      clocksource/drivers/mips-gic-timer: Move GIC timer to request_percpu_irq()
      genirq: Remove setup_percpu_irq()

Sebastian Andrzej Siewior (1):
      genirq: Warn about using IRQF_ONESHOT without a threaded handler

Thorsten Blum (1):
      genirq/proc: Replace snprintf with strscpy in register_handler_proc


 arch/mips/include/asm/cevt-r4k.h     |   1 -
 arch/mips/kernel/cevt-r4k.c          |  11 -
 arch/mips/sgi-ip27/ip27-timer.c      |  10 +-
 arch/mips/sgi-ip30/ip30-timer.c      |   5 +-
 drivers/clocksource/mips-gic-timer.c |  10 +-
 include/linux/interrupt.h            |  24 +-
 include/linux/irq.h                  |   3 -
 kernel/irq/Kconfig                   |   3 -
 kernel/irq/Makefile                  |   4 -
 kernel/irq/cpuhotplug.c              |   6 +-
 kernel/irq/handle.c                  |   2 -
 kernel/irq/internals.h               | 112 +---
 kernel/irq/irqdesc.c                 |   9 +-
 kernel/irq/irqdomain.c               |   2 +-
 kernel/irq/manage.c                  |  81 ++-
 kernel/irq/proc.c                    |   3 +-
 kernel/irq/timings.c                 | 959 -----------------------------------
 lib/Kconfig.debug                    |   8 -
 18 files changed, 53 insertions(+), 1200 deletions(-)
 delete mode 100644 kernel/irq/timings.c

diff --git a/arch/mips/include/asm/cevt-r4k.h b/arch/mips/include/asm/cevt-r4k.h
index 2e13a038d260..5229eb34f28a 100644
--- a/arch/mips/include/asm/cevt-r4k.h
+++ b/arch/mips/include/asm/cevt-r4k.h
@@ -23,7 +23,6 @@ void mips_event_handler(struct clock_event_device *dev);
 int c0_compare_int_usable(void);
 irqreturn_t c0_compare_interrupt(int, void *);
 
-extern struct irqaction c0_compare_irqaction;
 extern int cp0_timer_irq_installed;
 
 #endif /* __ASM_CEVT_R4K_H */
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 5f6e9e2ebbdb..f58325f9bd2b 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -159,17 +159,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-struct irqaction c0_compare_irqaction = {
-	.handler = c0_compare_interrupt,
-	/*
-	 * IRQF_SHARED: The timer interrupt may be shared with other interrupts
-	 * such as perf counter and FDC interrupts.
-	 */
-	.flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
-	.name = "timer",
-};
-
-
 void mips_event_handler(struct clock_event_device *dev)
 {
 }
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 444b5e0e935f..5f4da05cb2c9 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -58,13 +58,6 @@ static irqreturn_t hub_rt_counter_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-struct irqaction hub_rt_irqaction = {
-	.handler	= hub_rt_counter_handler,
-	.percpu_dev_id	= &hub_rt_clockevent,
-	.flags		= IRQF_PERCPU | IRQF_TIMER,
-	.name		= "hub-rt",
-};
-
 /*
  * This is a hack; we really need to figure these values out dynamically
  *
@@ -103,7 +96,8 @@ static void __init hub_rt_clock_event_global_init(void)
 {
 	irq_set_handler(IP27_RT_TIMER_IRQ, handle_percpu_devid_irq);
 	irq_set_percpu_devid(IP27_RT_TIMER_IRQ);
-	setup_percpu_irq(IP27_RT_TIMER_IRQ, &hub_rt_irqaction);
+	WARN_ON(request_percpu_irq(IP27_RT_TIMER_IRQ, hub_rt_counter_handler,
+				   "hub-rt", &hub_rt_clockevent));
 }
 
 static u64 hub_rt_read(struct clocksource *cs)
diff --git a/arch/mips/sgi-ip30/ip30-timer.c b/arch/mips/sgi-ip30/ip30-timer.c
index 7652f72f0daf..294e1f7e6d8a 100644
--- a/arch/mips/sgi-ip30/ip30-timer.c
+++ b/arch/mips/sgi-ip30/ip30-timer.c
@@ -52,11 +52,10 @@ void __init plat_time_init(void)
 	int irq = get_c0_compare_int();
 
 	cp0_timer_irq_installed = 1;
-	c0_compare_irqaction.percpu_dev_id = &mips_clockevent_device;
-	c0_compare_irqaction.flags &= ~IRQF_SHARED;
 	irq_set_handler(irq, handle_percpu_devid_irq);
 	irq_set_percpu_devid(irq);
-	setup_percpu_irq(irq, &c0_compare_irqaction);
+	WARN_ON(request_percpu_irq(irq, c0_compare_interrupt,
+				   "timer", &mips_clockevent_device));
 	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 
 	ip30_heart_clocksource_init();
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index abb685a080a5..1501c7db9a8e 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -77,13 +77,6 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction gic_compare_irqaction = {
-	.handler = gic_compare_interrupt,
-	.percpu_dev_id = &gic_clockevent_device,
-	.flags = IRQF_PERCPU | IRQF_TIMER,
-	.name = "timer",
-};
-
 static void gic_clockevent_cpu_init(unsigned int cpu,
 				    struct clock_event_device *cd)
 {
@@ -152,7 +145,8 @@ static int gic_clockevent_init(void)
 	if (!gic_frequency)
 		return -ENXIO;
 
-	ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	ret = request_percpu_irq(gic_timer_irq, gic_compare_interrupt,
+				 "timer", &gic_clockevent_device);
 	if (ret < 0) {
 		pr_err("IRQ %d setup failed (%d)\n", gic_timer_irq, ret);
 		return ret;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 266f2b39213a..00c01b0a43be 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -181,9 +181,8 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
 			unsigned long flags, const char *name, void *dev_id);
 
 extern int __must_check
-__request_percpu_irq(unsigned int irq, irq_handler_t handler,
-		     unsigned long flags, const char *devname,
-		     const cpumask_t *affinity, void __percpu *percpu_dev_id);
+request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler, const char *devname,
+			    const cpumask_t *affinity, void __percpu *percpu_dev_id);
 
 extern int __must_check
 request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
@@ -193,17 +192,8 @@ static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
 		   const char *devname, void __percpu *percpu_dev_id)
 {
-	return __request_percpu_irq(irq, handler, 0,
-				    devname, NULL, percpu_dev_id);
-}
-
-static inline int __must_check
-request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler,
-			    const char *devname, const cpumask_t *affinity,
-			    void __percpu *percpu_dev_id)
-{
-	return __request_percpu_irq(irq, handler, 0,
-				    devname, affinity, percpu_dev_id);
+	return request_percpu_irq_affinity(irq, handler, devname,
+					   NULL, percpu_dev_id);
 }
 
 extern int __must_check
@@ -871,12 +861,6 @@ static inline void init_irq_proc(void)
 }
 #endif
 
-#ifdef CONFIG_IRQ_TIMINGS
-void irq_timings_enable(void);
-void irq_timings_disable(void);
-u64 irq_timings_next_event(u64 now);
-#endif
-
 struct seq_file;
 int show_interrupts(struct seq_file *p, void *v);
 int arch_show_interrupts(struct seq_file *p, int prec);
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4a9f1d7b08c3..67ea759749be 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -595,9 +595,6 @@ enum {
 
 #define IRQ_DEFAULT_INIT_FLAGS	ARCH_IRQ_INIT_FLAGS
 
-struct irqaction;
-extern int setup_percpu_irq(unsigned int irq, struct irqaction *new);
-
 #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
 extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 1b4254d19a73..05cba4e16dad 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -92,9 +92,6 @@ config GENERIC_MSI_IRQ
 config IRQ_MSI_IOMMU
 	bool
 
-config IRQ_TIMINGS
-	bool
-
 config GENERIC_IRQ_MATRIX_ALLOCATOR
 	bool
 
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 6ab3a4055667..86a2e5ae08f9 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,10 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
 
 obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devres.o kexec.o
-obj-$(CONFIG_IRQ_TIMINGS) += timings.o
-ifeq ($(CONFIG_TEST_IRQ_TIMINGS),y)
-	CFLAGS_timings.o += -DDEBUG
-endif
 obj-$(CONFIG_GENERIC_IRQ_CHIP) += generic-chip.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 755346ea9819..cd5689e383b0 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -177,9 +177,11 @@ void irq_migrate_all_off_this_cpu(void)
 		bool affinity_broken;
 
 		desc = irq_to_desc(irq);
-		scoped_guard(raw_spinlock, &desc->lock)
+		scoped_guard(raw_spinlock, &desc->lock) {
 			affinity_broken = migrate_one_irq(desc);
-
+			if (affinity_broken && desc->affinity_notify)
+				irq_affinity_schedule_notify_work(desc);
+		}
 		if (affinity_broken) {
 			pr_debug_ratelimited("IRQ %u: no longer affine to CPU%u\n",
 					    irq, smp_processor_id());
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 786f5570a640..b7d52821837b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -188,8 +188,6 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc)
 	unsigned int irq = desc->irq_data.irq;
 	struct irqaction *action;
 
-	record_irq_time(desc);
-
 	for_each_action_of_desc(desc, action) {
 		irqreturn_t res;
 
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 0164ca48da59..9412e57056f5 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -135,6 +135,7 @@ extern bool irq_can_set_affinity_usr(unsigned int irq);
 
 extern int irq_do_set_affinity(struct irq_data *data,
 			       const struct cpumask *dest, bool force);
+extern void irq_affinity_schedule_notify_work(struct irq_desc *desc);
 
 #ifdef CONFIG_SMP
 extern int irq_setup_affinity(struct irq_desc *desc);
@@ -142,7 +143,6 @@ extern int irq_setup_affinity(struct irq_desc *desc);
 static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
 #endif
 
-
 #define for_each_action_of_desc(desc, act)			\
 	for (act = desc->action; act; act = act->next)
 
@@ -288,116 +288,6 @@ static inline void
 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
-#ifdef CONFIG_IRQ_TIMINGS
-
-#define IRQ_TIMINGS_SHIFT	5
-#define IRQ_TIMINGS_SIZE	(1 << IRQ_TIMINGS_SHIFT)
-#define IRQ_TIMINGS_MASK	(IRQ_TIMINGS_SIZE - 1)
-
-/**
- * struct irq_timings - irq timings storing structure
- * @values: a circular buffer of u64 encoded <timestamp,irq> values
- * @count: the number of elements in the array
- */
-struct irq_timings {
-	u64	values[IRQ_TIMINGS_SIZE];
-	int	count;
-};
-
-DECLARE_PER_CPU(struct irq_timings, irq_timings);
-
-extern void irq_timings_free(int irq);
-extern int irq_timings_alloc(int irq);
-
-static inline void irq_remove_timings(struct irq_desc *desc)
-{
-	desc->istate &= ~IRQS_TIMINGS;
-
-	irq_timings_free(irq_desc_get_irq(desc));
-}
-
-static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
-{
-	int irq = irq_desc_get_irq(desc);
-	int ret;
-
-	/*
-	 * We don't need the measurement because the idle code already
-	 * knows the next expiry event.
-	 */
-	if (act->flags & __IRQF_TIMER)
-		return;
-
-	/*
-	 * In case the timing allocation fails, we just want to warn,
-	 * not fail, so letting the system boot anyway.
-	 */
-	ret = irq_timings_alloc(irq);
-	if (ret) {
-		pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
-			irq, ret);
-		return;
-	}
-
-	desc->istate |= IRQS_TIMINGS;
-}
-
-extern void irq_timings_enable(void);
-extern void irq_timings_disable(void);
-
-DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);
-
-/*
- * The interrupt number and the timestamp are encoded into a single
- * u64 variable to optimize the size.
- * 48 bit time stamp and 16 bit IRQ number is way sufficient.
- *  Who cares an IRQ after 78 hours of idle time?
- */
-static inline u64 irq_timing_encode(u64 timestamp, int irq)
-{
-	return (timestamp << 16) | irq;
-}
-
-static inline int irq_timing_decode(u64 value, u64 *timestamp)
-{
-	*timestamp = value >> 16;
-	return value & U16_MAX;
-}
-
-static __always_inline void irq_timings_push(u64 ts, int irq)
-{
-	struct irq_timings *timings = this_cpu_ptr(&irq_timings);
-
-	timings->values[timings->count & IRQ_TIMINGS_MASK] =
-		irq_timing_encode(ts, irq);
-
-	timings->count++;
-}
-
-/*
- * The function record_irq_time is only called in one place in the
- * interrupts handler. We want this function always inline so the code
- * inside is embedded in the function and the static key branching
- * code can act at the higher level. Without the explicit
- * __always_inline we can end up with a function call and a small
- * overhead in the hotpath for nothing.
- */
-static __always_inline void record_irq_time(struct irq_desc *desc)
-{
-	if (!static_branch_likely(&irq_timing_enabled))
-		return;
-
-	if (desc->istate & IRQS_TIMINGS)
-		irq_timings_push(local_clock(), irq_desc_get_irq(desc));
-}
-#else
-static inline void irq_remove_timings(struct irq_desc *desc) {}
-static inline void irq_setup_timings(struct irq_desc *desc,
-				     struct irqaction *act) {};
-static inline void record_irq_time(struct irq_desc *desc) {}
-#endif /* CONFIG_IRQ_TIMINGS */
-
-
 #ifdef CONFIG_GENERIC_IRQ_CHIP
 void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
 			   int num_ct, unsigned int irq_base,
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index f8e4e13dbe33..c3bc00e08c58 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -115,8 +115,6 @@ static inline void free_masks(struct irq_desc *desc) { }
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 			      const struct cpumask *affinity, struct module *owner)
 {
-	int cpu;
-
 	desc->irq_common_data.handler_data = NULL;
 	desc->irq_common_data.msi_desc = NULL;
 
@@ -134,8 +132,6 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
 	desc->tot_count = 0;
 	desc->name = NULL;
 	desc->owner = owner;
-	for_each_possible_cpu(cpu)
-		*per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
 	desc_smp_init(desc, node, affinity);
 }
 
@@ -621,9 +617,14 @@ EXPORT_SYMBOL(irq_to_desc);
 static void free_desc(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
+	int cpu;
 
 	scoped_guard(raw_spinlock_irqsave, &desc->lock)
 		desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
+
+	for_each_possible_cpu(cpu)
+		*per_cpu_ptr(desc->kstat_irqs, cpu) = (struct irqstat) { };
+
 	delete_irq_desc(irq);
 }
 
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 2652c4cfd877..094e8916bb66 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -187,7 +187,7 @@ static int irq_domain_set_name(struct irq_domain *domain, const struct irq_domai
 	const struct fwnode_handle *fwnode = info->fwnode;
 
 	if (is_fwnode_irqchip(fwnode)) {
-		struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
+		const struct irqchip_fwid *fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
 
 		/*
 		 * The name_suffix is only intended to be used to avoid a name
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8b1b4c8a4f54..9927e0893be6 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -347,6 +347,21 @@ static bool irq_set_affinity_deactivated(struct irq_data *data,
 	return true;
 }
 
+/**
+ * irq_affinity_schedule_notify_work - Schedule work to notify about affinity change
+ * @desc:  Interrupt descriptor whose affinity changed
+ */
+void irq_affinity_schedule_notify_work(struct irq_desc *desc)
+{
+	lockdep_assert_held(&desc->lock);
+
+	kref_get(&desc->affinity_notify->kref);
+	if (!schedule_work(&desc->affinity_notify->work)) {
+		/* Work was already scheduled, drop our extra ref */
+		kref_put(&desc->affinity_notify->kref, desc->affinity_notify->release);
+	}
+}
+
 int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 			    bool force)
 {
@@ -367,14 +382,9 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 		irq_copy_pending(desc, mask);
 	}
 
-	if (desc->affinity_notify) {
-		kref_get(&desc->affinity_notify->kref);
-		if (!schedule_work(&desc->affinity_notify->work)) {
-			/* Work was already scheduled, drop our extra ref */
-			kref_put(&desc->affinity_notify->kref,
-				 desc->affinity_notify->release);
-		}
-	}
+	if (desc->affinity_notify)
+		irq_affinity_schedule_notify_work(desc);
+
 	irqd_set(data, IRQD_AFFINITY_SET);
 
 	return ret;
@@ -1473,6 +1483,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	if (!(new->flags & IRQF_TRIGGER_MASK))
 		new->flags |= irqd_get_trigger_type(&desc->irq_data);
 
+	/*
+	 * IRQF_ONESHOT means the interrupt source in the IRQ chip will be
+	 * masked until the threaded handler is done. If there is no threaded
+	 * handler then it makes no sense to have IRQF_ONESHOT.
+	 */
+	WARN_ON_ONCE(new->flags & IRQF_ONESHOT && !new->thread_fn);
+
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
@@ -1778,8 +1795,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	chip_bus_sync_unlock(desc);
 	mutex_unlock(&desc->request_mutex);
 
-	irq_setup_timings(desc, new);
-
 	wake_up_and_wait_for_irq_thread_ready(desc, new);
 	wake_up_and_wait_for_irq_thread_ready(desc, new->secondary);
 
@@ -1950,7 +1965,6 @@ static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
 
 		irq_release_resources(desc);
 		chip_bus_sync_unlock(desc);
-		irq_remove_timings(desc);
 	}
 
 	mutex_unlock(&desc->request_mutex);
@@ -2451,36 +2465,6 @@ void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
 	kfree(__free_percpu_irq(irq, dev_id));
 }
 
-/**
- * setup_percpu_irq - setup a per-cpu interrupt
- * @irq:	Interrupt line to setup
- * @act:	irqaction for the interrupt
- *
- * Used to statically setup per-cpu interrupts in the early boot process.
- */
-int setup_percpu_irq(unsigned int irq, struct irqaction *act)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	int retval;
-
-	if (!desc || !irq_settings_is_per_cpu_devid(desc))
-		return -EINVAL;
-
-	retval = irq_chip_pm_get(&desc->irq_data);
-	if (retval < 0)
-		return retval;
-
-	if (!act->affinity)
-		act->affinity = cpu_online_mask;
-
-	retval = __setup_irq(irq, desc, act);
-
-	if (retval)
-		irq_chip_pm_put(&desc->irq_data);
-
-	return retval;
-}
-
 static
 struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long flags,
 					  const char *devname, const cpumask_t *affinity,
@@ -2513,10 +2497,9 @@ struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long f
 }
 
 /**
- * __request_percpu_irq - allocate a percpu interrupt line
+ * request_percpu_irq_affinity - allocate a percpu interrupt line
  * @irq:	Interrupt line to allocate
  * @handler:	Function to be called when the IRQ occurs.
- * @flags:	Interrupt type flags (IRQF_TIMER only)
  * @devname:	An ascii name for the claiming device
  * @affinity:	A cpumask describing the target CPUs for this interrupt
  * @dev_id:	A percpu cookie passed back to the handler function
@@ -2529,9 +2512,8 @@ struct irqaction *create_percpu_irqaction(irq_handler_t handler, unsigned long f
  * the handler gets called with the interrupted CPU's instance of
  * that variable.
  */
-int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
-			 unsigned long flags, const char *devname,
-			 const cpumask_t *affinity, void __percpu *dev_id)
+int request_percpu_irq_affinity(unsigned int irq, irq_handler_t handler, const char *devname,
+				const cpumask_t *affinity, void __percpu *dev_id)
 {
 	struct irqaction *action;
 	struct irq_desc *desc;
@@ -2545,10 +2527,7 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
 	    !irq_settings_is_per_cpu_devid(desc))
 		return -EINVAL;
 
-	if (flags && flags != IRQF_TIMER)
-		return -EINVAL;
-
-	action = create_percpu_irqaction(handler, flags, devname, affinity, dev_id);
+	action = create_percpu_irqaction(handler, 0, devname, affinity, dev_id);
 	if (!action)
 		return -ENOMEM;
 
@@ -2567,7 +2546,7 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
 
 	return retval;
 }
-EXPORT_SYMBOL_GPL(__request_percpu_irq);
+EXPORT_SYMBOL_GPL(request_percpu_irq_affinity);
 
 /**
  * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 77258eafbf63..b0999a4f1f68 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -12,6 +12,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
+#include <linux/string.h>
 
 #include "internals.h"
 
@@ -317,7 +318,7 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
 	if (!desc->dir || action->dir || !action->name || !name_unique(irq, action))
 		return;
 
-	snprintf(name, MAX_NAMELEN, "%s", action->name);
+	strscpy(name, action->name);
 
 	/* create /proc/irq/1234/handler/ */
 	action->dir = proc_mkdir(name, desc->dir);
diff --git a/kernel/irq/timings.c b/kernel/irq/timings.c
deleted file mode 100644
index 4b7315e99bd6..000000000000
--- a/kernel/irq/timings.c
+++ /dev/null
@@ -1,959 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2016, Linaro Ltd - Daniel Lezcano <daniel.lezcano@linaro.org>
-#define pr_fmt(fmt) "irq_timings: " fmt
-
-#include <linux/kernel.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/static_key.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/idr.h>
-#include <linux/irq.h>
-#include <linux/math64.h>
-#include <linux/log2.h>
-
-#include <trace/events/irq.h>
-
-#include "internals.h"
-
-DEFINE_STATIC_KEY_FALSE(irq_timing_enabled);
-
-DEFINE_PER_CPU(struct irq_timings, irq_timings);
-
-static DEFINE_IDR(irqt_stats);
-
-void irq_timings_enable(void)
-{
-	static_branch_enable(&irq_timing_enabled);
-}
-
-void irq_timings_disable(void)
-{
-	static_branch_disable(&irq_timing_enabled);
-}
-
-/*
- * The main goal of this algorithm is to predict the next interrupt
- * occurrence on the current CPU.
- *
- * Currently, the interrupt timings are stored in a circular array
- * buffer every time there is an interrupt, as a tuple: the interrupt
- * number and the associated timestamp when the event occurred <irq,
- * timestamp>.
- *
- * For every interrupt occurring in a short period of time, we can
- * measure the elapsed time between the occurrences for the same
- * interrupt and we end up with a suite of intervals. The experience
- * showed the interrupts are often coming following a periodic
- * pattern.
- *
- * The objective of the algorithm is to find out this periodic pattern
- * in a fastest way and use its period to predict the next irq event.
- *
- * When the next interrupt event is requested, we are in the situation
- * where the interrupts are disabled and the circular buffer
- * containing the timings is filled with the events which happened
- * after the previous next-interrupt-event request.
- *
- * At this point, we read the circular buffer and we fill the irq
- * related statistics structure. After this step, the circular array
- * containing the timings is empty because all the values are
- * dispatched in their corresponding buffers.
- *
- * Now for each interrupt, we can predict the next event by using the
- * suffix array, log interval and exponential moving average
- *
- * 1. Suffix array
- *
- * Suffix array is an array of all the suffixes of a string. It is
- * widely used as a data structure for compression, text search, ...
- * For instance for the word 'banana', the suffixes will be: 'banana'
- * 'anana' 'nana' 'ana' 'na' 'a'
- *
- * Usually, the suffix array is sorted but for our purpose it is
- * not necessary and won't provide any improvement in the context of
- * the solved problem where we clearly define the boundaries of the
- * search by a max period and min period.
- *
- * The suffix array will build a suite of intervals of different
- * length and will look for the repetition of each suite. If the suite
- * is repeating then we have the period because it is the length of
- * the suite whatever its position in the buffer.
- *
- * 2. Log interval
- *
- * We saw the irq timings allow to compute the interval of the
- * occurrences for a specific interrupt. We can reasonably assume the
- * longer is the interval, the higher is the error for the next event
- * and we can consider storing those interval values into an array
- * where each slot in the array correspond to an interval at the power
- * of 2 of the index. For example, index 12 will contain values
- * between 2^11 and 2^12.
- *
- * At the end we have an array of values where at each index defines a
- * [2^index - 1, 2 ^ index] interval values allowing to store a large
- * number of values inside a small array.
- *
- * For example, if we have the value 1123, then we store it at
- * ilog2(1123) = 10 index value.
- *
- * Storing those value at the specific index is done by computing an
- * exponential moving average for this specific slot. For instance,
- * for values 1800, 1123, 1453, ... fall under the same slot (10) and
- * the exponential moving average is computed every time a new value
- * is stored at this slot.
- *
- * 3. Exponential Moving Average
- *
- * The EMA is largely used to track a signal for stocks or as a low
- * pass filter. The magic of the formula, is it is very simple and the
- * reactivity of the average can be tuned with the factors called
- * alpha.
- *
- * The higher the alphas are, the faster the average respond to the
- * signal change. In our case, if a slot in the array is a big
- * interval, we can have numbers with a big difference between
- * them. The impact of those differences in the average computation
- * can be tuned by changing the alpha value.
- *
- *
- *  -- The algorithm --
- *
- * We saw the different processing above, now let's see how they are
- * used together.
- *
- * For each interrupt:
- *	For each interval:
- *		Compute the index = ilog2(interval)
- *		Compute a new_ema(buffer[index], interval)
- *		Store the index in a circular buffer
- *
- *	Compute the suffix array of the indexes
- *
- *	For each suffix:
- *		If the suffix is reverse-found 3 times
- *			Return suffix
- *
- *	Return Not found
- *
- * However we can not have endless suffix array to be build, it won't
- * make sense and it will add an extra overhead, so we can restrict
- * this to a maximum suffix length of 5 and a minimum suffix length of
- * 2. The experience showed 5 is the majority of the maximum pattern
- * period found for different devices.
- *
- * The result is a pattern finding less than 1us for an interrupt.
- *
- * Example based on real values:
- *
- * Example 1 : MMC write/read interrupt interval:
- *
- *	223947, 1240, 1384, 1386, 1386,
- *	217416, 1236, 1384, 1386, 1387,
- *	214719, 1241, 1386, 1387, 1384,
- *	213696, 1234, 1384, 1386, 1388,
- *	219904, 1240, 1385, 1389, 1385,
- *	212240, 1240, 1386, 1386, 1386,
- *	214415, 1236, 1384, 1386, 1387,
- *	214276, 1234, 1384, 1388, ?
- *
- * For each element, apply ilog2(value)
- *
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, ?
- *
- * Max period of 5, we take the last (max_period * 3) 15 elements as
- * we can be confident if the pattern repeats itself three times it is
- * a repeating pattern.
- *
- *	             8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, 8,
- *	15, 8, 8, 8, ?
- *
- * Suffixes are:
- *
- *  1) 8, 15, 8, 8, 8  <- max period
- *  2) 8, 15, 8, 8
- *  3) 8, 15, 8
- *  4) 8, 15           <- min period
- *
- * From there we search the repeating pattern for each suffix.
- *
- * buffer: 8, 15, 8, 8, 8, 8, 15, 8, 8, 8, 8, 15, 8, 8, 8
- *         |   |  |  |  |  |   |  |  |  |  |   |  |  |  |
- *         8, 15, 8, 8, 8  |   |  |  |  |  |   |  |  |  |
- *                         8, 15, 8, 8, 8  |   |  |  |  |
- *                                         8, 15, 8, 8, 8
- *
- * When moving the suffix, we found exactly 3 matches.
- *
- * The first suffix with period 5 is repeating.
- *
- * The next event is (3 * max_period) % suffix_period
- *
- * In this example, the result 0, so the next event is suffix[0] => 8
- *
- * However, 8 is the index in the array of exponential moving average
- * which was calculated on the fly when storing the values, so the
- * interval is ema[8] = 1366
- *
- *
- * Example 2:
- *
- *	4, 3, 5, 100,
- *	3, 3, 5, 117,
- *	4, 4, 5, 112,
- *	4, 3, 4, 110,
- *	3, 5, 3, 117,
- *	4, 4, 5, 112,
- *	4, 3, 4, 110,
- *	3, 4, 5, 112,
- *	4, 3, 4, 110
- *
- * ilog2
- *
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4
- *
- * Max period 5:
- *	   0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4,
- *	0, 0, 0, 4
- *
- * Suffixes:
- *
- *  1) 0, 0, 4, 0, 0
- *  2) 0, 0, 4, 0
- *  3) 0, 0, 4
- *  4) 0, 0
- *
- * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
- *         |  |  |  |  |  |  X
- *         0, 0, 4, 0, 0, |  X
- *                        0, 0
- *
- * buffer: 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4, 0, 0, 0, 4
- *         |  |  |  |  |  |  |  |  |  |  |  |  |  |  |
- *         0, 0, 4, 0, |  |  |  |  |  |  |  |  |  |  |
- *                     0, 0, 4, 0, |  |  |  |  |  |  |
- *                                 0, 0, 4, 0, |  |  |
- *                                             0  0  4
- *
- * Pattern is found 3 times, the remaining is 1 which results from
- * (max_period * 3) % suffix_period. This value is the index in the
- * suffix arrays. The suffix array for a period 4 has the value 4
- * at index 1.
- */
-#define EMA_ALPHA_VAL		64
-#define EMA_ALPHA_SHIFT		7
-
-#define PREDICTION_PERIOD_MIN	3
-#define PREDICTION_PERIOD_MAX	5
-#define PREDICTION_FACTOR	4
-#define PREDICTION_MAX		10 /* 2 ^ PREDICTION_MAX useconds */
-#define PREDICTION_BUFFER_SIZE	16 /* slots for EMAs, hardly more than 16 */
-
-/*
- * Number of elements in the circular buffer: If it happens it was
- * flushed before, then the number of elements could be smaller than
- * IRQ_TIMINGS_SIZE, so the count is used, otherwise the array size is
- * used as we wrapped. The index begins from zero when we did not
- * wrap. That could be done in a nicer way with the proper circular
- * array structure type but with the cost of extra computation in the
- * interrupt handler hot path. We choose efficiency.
- */
-#define for_each_irqts(i, irqts)					\
-	for (i = irqts->count < IRQ_TIMINGS_SIZE ?			\
-		     0 : irqts->count & IRQ_TIMINGS_MASK,		\
-		     irqts->count = min(IRQ_TIMINGS_SIZE,		\
-					irqts->count);			\
-	     irqts->count > 0; irqts->count--,				\
-		     i = (i + 1) & IRQ_TIMINGS_MASK)
-
-struct irqt_stat {
-	u64	last_ts;
-	u64	ema_time[PREDICTION_BUFFER_SIZE];
-	int	timings[IRQ_TIMINGS_SIZE];
-	int	circ_timings[IRQ_TIMINGS_SIZE];
-	int	count;
-};
-
-/*
- * Exponential moving average computation
- */
-static u64 irq_timings_ema_new(u64 value, u64 ema_old)
-{
-	s64 diff;
-
-	if (unlikely(!ema_old))
-		return value;
-
-	diff = (value - ema_old) * EMA_ALPHA_VAL;
-	/*
-	 * We can use a s64 type variable to be added with the u64
-	 * ema_old variable as this one will never have its topmost
-	 * bit set, it will be always smaller than 2^63 nanosec
-	 * interrupt interval (292 years).
-	 */
-	return ema_old + (diff >> EMA_ALPHA_SHIFT);
-}
-
-static int irq_timings_next_event_index(int *buffer, size_t len, int period_max)
-{
-	int period;
-
-	/*
-	 * Move the beginning pointer to the end minus the max period x 3.
-	 * We are at the point we can begin searching the pattern
-	 */
-	buffer = &buffer[len - (period_max * 3)];
-
-	/* Adjust the length to the maximum allowed period x 3 */
-	len = period_max * 3;
-
-	/*
-	 * The buffer contains the suite of intervals, in a ilog2
-	 * basis, we are looking for a repetition. We point the
-	 * beginning of the search three times the length of the
-	 * period beginning at the end of the buffer. We do that for
-	 * each suffix.
-	 */
-	for (period = period_max; period >= PREDICTION_PERIOD_MIN; period--) {
-
-		/*
-		 * The first comparison always succeed because the
-		 * suffix is deduced from the first n-period bytes of
-		 * the buffer and we compare the initial suffix with
-		 * itself, so we can skip the first iteration.
-		 */
-		int idx = period;
-		size_t size = period;
-
-		/*
-		 * We look if the suite with period 'i' repeat
-		 * itself. If it is truncated at the end, as it
-		 * repeats we can use the period to find out the next
-		 * element with the modulo.
-		 */
-		while (!memcmp(buffer, &buffer[idx], size * sizeof(int))) {
-
-			/*
-			 * Move the index in a period basis
-			 */
-			idx += size;
-
-			/*
-			 * If this condition is reached, all previous
-			 * memcmp were successful, so the period is
-			 * found.
-			 */
-			if (idx == len)
-				return buffer[len % period];
-
-			/*
-			 * If the remaining elements to compare are
-			 * smaller than the period, readjust the size
-			 * of the comparison for the last iteration.
-			 */
-			if (len - idx < period)
-				size = len - idx;
-		}
-	}
-
-	return -1;
-}
-
-static u64 __irq_timings_next_event(struct irqt_stat *irqs, int irq, u64 now)
-{
-	int index, i, period_max, count, start, min = INT_MAX;
-
-	if ((now - irqs->last_ts) >= NSEC_PER_SEC) {
-		irqs->count = irqs->last_ts = 0;
-		return U64_MAX;
-	}
-
-	/*
-	 * As we want to find three times the repetition, we need a
-	 * number of intervals greater or equal to three times the
-	 * maximum period, otherwise we truncate the max period.
-	 */
-	period_max = irqs->count > (3 * PREDICTION_PERIOD_MAX) ?
-		PREDICTION_PERIOD_MAX : irqs->count / 3;
-
-	/*
-	 * If we don't have enough irq timings for this prediction,
-	 * just bail out.
-	 */
-	if (period_max <= PREDICTION_PERIOD_MIN)
-		return U64_MAX;
-
-	/*
-	 * 'count' will depends if the circular buffer wrapped or not
-	 */
-	count = irqs->count < IRQ_TIMINGS_SIZE ?
-		irqs->count : IRQ_TIMINGS_SIZE;
-
-	start = irqs->count < IRQ_TIMINGS_SIZE ?
-		0 : (irqs->count & IRQ_TIMINGS_MASK);
-
-	/*
-	 * Copy the content of the circular buffer into another buffer
-	 * in order to linearize the buffer instead of dealing with
-	 * wrapping indexes and shifted array which will be prone to
-	 * error and extremely difficult to debug.
-	 */
-	for (i = 0; i < count; i++) {
-		int index = (start + i) & IRQ_TIMINGS_MASK;
-
-		irqs->timings[i] = irqs->circ_timings[index];
-		min = min_t(int, irqs->timings[i], min);
-	}
-
-	index = irq_timings_next_event_index(irqs->timings, count, period_max);
-	if (index < 0)
-		return irqs->last_ts + irqs->ema_time[min];
-
-	return irqs->last_ts + irqs->ema_time[index];
-}
-
-static __always_inline int irq_timings_interval_index(u64 interval)
-{
-	/*
-	 * The PREDICTION_FACTOR increase the interval size for the
-	 * array of exponential average.
-	 */
-	u64 interval_us = (interval >> 10) / PREDICTION_FACTOR;
-
-	return likely(interval_us) ? ilog2(interval_us) : 0;
-}
-
-static __always_inline void __irq_timings_store(int irq, struct irqt_stat *irqs,
-						u64 interval)
-{
-	int index;
-
-	/*
-	 * Get the index in the ema table for this interrupt.
-	 */
-	index = irq_timings_interval_index(interval);
-
-	if (index > PREDICTION_BUFFER_SIZE - 1) {
-		irqs->count = 0;
-		return;
-	}
-
-	/*
-	 * Store the index as an element of the pattern in another
-	 * circular array.
-	 */
-	irqs->circ_timings[irqs->count & IRQ_TIMINGS_MASK] = index;
-
-	irqs->ema_time[index] = irq_timings_ema_new(interval,
-						    irqs->ema_time[index]);
-
-	irqs->count++;
-}
-
-static inline void irq_timings_store(int irq, struct irqt_stat *irqs, u64 ts)
-{
-	u64 old_ts = irqs->last_ts;
-	u64 interval;
-
-	/*
-	 * The timestamps are absolute time values, we need to compute
-	 * the timing interval between two interrupts.
-	 */
-	irqs->last_ts = ts;
-
-	/*
-	 * The interval type is u64 in order to deal with the same
-	 * type in our computation, that prevent mindfuck issues with
-	 * overflow, sign and division.
-	 */
-	interval = ts - old_ts;
-
-	/*
-	 * The interrupt triggered more than one second apart, that
-	 * ends the sequence as predictable for our purpose. In this
-	 * case, assume we have the beginning of a sequence and the
-	 * timestamp is the first value. As it is impossible to
-	 * predict anything at this point, return.
-	 *
-	 * Note the first timestamp of the sequence will always fall
-	 * in this test because the old_ts is zero. That is what we
-	 * want as we need another timestamp to compute an interval.
-	 */
-	if (interval >= NSEC_PER_SEC) {
-		irqs->count = 0;
-		return;
-	}
-
-	__irq_timings_store(irq, irqs, interval);
-}
-
-/**
- * irq_timings_next_event - Return when the next event is supposed to arrive
- * @now: current time
- *
- * During the last busy cycle, the number of interrupts is incremented
- * and stored in the irq_timings structure. This information is
- * necessary to:
- *
- * - know if the index in the table wrapped up:
- *
- *      If more than the array size interrupts happened during the
- *      last busy/idle cycle, the index wrapped up and we have to
- *      begin with the next element in the array which is the last one
- *      in the sequence, otherwise it is at the index 0.
- *
- * - have an indication of the interrupts activity on this CPU
- *   (eg. irq/sec)
- *
- * The values are 'consumed' after inserting in the statistical model,
- * thus the count is reinitialized.
- *
- * The array of values **must** be browsed in the time direction, the
- * timestamp must increase between an element and the next one.
- *
- * Returns a nanosec time based estimation of the earliest interrupt,
- * U64_MAX otherwise.
- */
-u64 irq_timings_next_event(u64 now)
-{
-	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
-	struct irqt_stat *irqs;
-	struct irqt_stat __percpu *s;
-	u64 ts, next_evt = U64_MAX;
-	int i, irq = 0;
-
-	/*
-	 * This function must be called with the local irq disabled in
-	 * order to prevent the timings circular buffer to be updated
-	 * while we are reading it.
-	 */
-	lockdep_assert_irqs_disabled();
-
-	if (!irqts->count)
-		return next_evt;
-
-	/*
-	 * Number of elements in the circular buffer: If it happens it
-	 * was flushed before, then the number of elements could be
-	 * smaller than IRQ_TIMINGS_SIZE, so the count is used,
-	 * otherwise the array size is used as we wrapped. The index
-	 * begins from zero when we did not wrap. That could be done
-	 * in a nicer way with the proper circular array structure
-	 * type but with the cost of extra computation in the
-	 * interrupt handler hot path. We choose efficiency.
-	 *
-	 * Inject measured irq/timestamp to the pattern prediction
-	 * model while decrementing the counter because we consume the
-	 * data from our circular buffer.
-	 */
-	for_each_irqts(i, irqts) {
-		irq = irq_timing_decode(irqts->values[i], &ts);
-		s = idr_find(&irqt_stats, irq);
-		if (s)
-			irq_timings_store(irq, this_cpu_ptr(s), ts);
-	}
-
-	/*
-	 * Look in the list of interrupts' statistics, the earliest
-	 * next event.
-	 */
-	idr_for_each_entry(&irqt_stats, s, i) {
-
-		irqs = this_cpu_ptr(s);
-
-		ts = __irq_timings_next_event(irqs, i, now);
-		if (ts <= now)
-			return now;
-
-		if (ts < next_evt)
-			next_evt = ts;
-	}
-
-	return next_evt;
-}
-
-void irq_timings_free(int irq)
-{
-	struct irqt_stat __percpu *s;
-
-	s = idr_find(&irqt_stats, irq);
-	if (s) {
-		free_percpu(s);
-		idr_remove(&irqt_stats, irq);
-	}
-}
-
-int irq_timings_alloc(int irq)
-{
-	struct irqt_stat __percpu *s;
-	int id;
-
-	/*
-	 * Some platforms can have the same private interrupt per cpu,
-	 * so this function may be called several times with the
-	 * same interrupt number. Just bail out in case the per cpu
-	 * stat structure is already allocated.
-	 */
-	s = idr_find(&irqt_stats, irq);
-	if (s)
-		return 0;
-
-	s = alloc_percpu(*s);
-	if (!s)
-		return -ENOMEM;
-
-	idr_preload(GFP_KERNEL);
-	id = idr_alloc(&irqt_stats, s, irq, irq + 1, GFP_NOWAIT);
-	idr_preload_end();
-
-	if (id < 0) {
-		free_percpu(s);
-		return id;
-	}
-
-	return 0;
-}
-
-#ifdef CONFIG_TEST_IRQ_TIMINGS
-struct timings_intervals {
-	u64 *intervals;
-	size_t count;
-};
-
-/*
- * Intervals are given in nanosecond base
- */
-static u64 intervals0[] __initdata = {
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000, 500000,
-	10000, 50000, 200000,
-};
-
-static u64 intervals1[] __initdata = {
-	223947000, 1240000, 1384000, 1386000, 1386000,
-	217416000, 1236000, 1384000, 1386000, 1387000,
-	214719000, 1241000, 1386000, 1387000, 1384000,
-	213696000, 1234000, 1384000, 1386000, 1388000,
-	219904000, 1240000, 1385000, 1389000, 1385000,
-	212240000, 1240000, 1386000, 1386000, 1386000,
-	214415000, 1236000, 1384000, 1386000, 1387000,
-	214276000, 1234000,
-};
-
-static u64 intervals2[] __initdata = {
-	4000, 3000, 5000, 100000,
-	3000, 3000, 5000, 117000,
-	4000, 4000, 5000, 112000,
-	4000, 3000, 4000, 110000,
-	3000, 5000, 3000, 117000,
-	4000, 4000, 5000, 112000,
-	4000, 3000, 4000, 110000,
-	3000, 4000, 5000, 112000,
-	4000,
-};
-
-static u64 intervals3[] __initdata = {
-	1385000, 212240000, 1240000,
-	1386000, 214415000, 1236000,
-	1384000, 214276000, 1234000,
-	1386000, 214415000, 1236000,
-	1385000, 212240000, 1240000,
-	1386000, 214415000, 1236000,
-	1384000, 214276000, 1234000,
-	1386000, 214415000, 1236000,
-	1385000, 212240000, 1240000,
-};
-
-static u64 intervals4[] __initdata = {
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000, 50000, 10000, 50000,
-	10000,
-};
-
-static struct timings_intervals tis[] __initdata = {
-	{ intervals0, ARRAY_SIZE(intervals0) },
-	{ intervals1, ARRAY_SIZE(intervals1) },
-	{ intervals2, ARRAY_SIZE(intervals2) },
-	{ intervals3, ARRAY_SIZE(intervals3) },
-	{ intervals4, ARRAY_SIZE(intervals4) },
-};
-
-static int __init irq_timings_test_next_index(struct timings_intervals *ti)
-{
-	int _buffer[IRQ_TIMINGS_SIZE];
-	int buffer[IRQ_TIMINGS_SIZE];
-	int index, start, i, count, period_max;
-
-	count = ti->count - 1;
-
-	period_max = count > (3 * PREDICTION_PERIOD_MAX) ?
-		PREDICTION_PERIOD_MAX : count / 3;
-
-	/*
-	 * Inject all values except the last one which will be used
-	 * to compare with the next index result.
-	 */
-	pr_debug("index suite: ");
-
-	for (i = 0; i < count; i++) {
-		index = irq_timings_interval_index(ti->intervals[i]);
-		_buffer[i & IRQ_TIMINGS_MASK] = index;
-		pr_cont("%d ", index);
-	}
-
-	start = count < IRQ_TIMINGS_SIZE ? 0 :
-		count & IRQ_TIMINGS_MASK;
-
-	count = min_t(int, count, IRQ_TIMINGS_SIZE);
-
-	for (i = 0; i < count; i++) {
-		int index = (start + i) & IRQ_TIMINGS_MASK;
-		buffer[i] = _buffer[index];
-	}
-
-	index = irq_timings_next_event_index(buffer, count, period_max);
-	i = irq_timings_interval_index(ti->intervals[ti->count - 1]);
-
-	if (index != i) {
-		pr_err("Expected (%d) and computed (%d) next indexes differ\n",
-		       i, index);
-		return -EINVAL;
-	}
-
-	return 0;
-}
-
-static int __init irq_timings_next_index_selftest(void)
-{
-	int i, ret;
-
-	for (i = 0; i < ARRAY_SIZE(tis); i++) {
-
-		pr_info("---> Injecting intervals number #%d (count=%zd)\n",
-			i, tis[i].count);
-
-		ret = irq_timings_test_next_index(&tis[i]);
-		if (ret)
-			break;
-	}
-
-	return ret;
-}
-
-static int __init irq_timings_test_irqs(struct timings_intervals *ti)
-{
-	struct irqt_stat __percpu *s;
-	struct irqt_stat *irqs;
-	int i, index, ret, irq = 0xACE5;
-
-	ret = irq_timings_alloc(irq);
-	if (ret) {
-		pr_err("Failed to allocate irq timings\n");
-		return ret;
-	}
-
-	s = idr_find(&irqt_stats, irq);
-	if (!s) {
-		ret = -EIDRM;
-		goto out;
-	}
-
-	irqs = this_cpu_ptr(s);
-
-	for (i = 0; i < ti->count; i++) {
-
-		index = irq_timings_interval_index(ti->intervals[i]);
-		pr_debug("%d: interval=%llu ema_index=%d\n",
-			 i, ti->intervals[i], index);
-
-		__irq_timings_store(irq, irqs, ti->intervals[i]);
-		if (irqs->circ_timings[i & IRQ_TIMINGS_MASK] != index) {
-			ret = -EBADSLT;
-			pr_err("Failed to store in the circular buffer\n");
-			goto out;
-		}
-	}
-
-	if (irqs->count != ti->count) {
-		ret = -ERANGE;
-		pr_err("Count differs\n");
-		goto out;
-	}
-
-	ret = 0;
-out:
-	irq_timings_free(irq);
-
-	return ret;
-}
-
-static int __init irq_timings_irqs_selftest(void)
-{
-	int i, ret;
-
-	for (i = 0; i < ARRAY_SIZE(tis); i++) {
-		pr_info("---> Injecting intervals number #%d (count=%zd)\n",
-			i, tis[i].count);
-		ret = irq_timings_test_irqs(&tis[i]);
-		if (ret)
-			break;
-	}
-
-	return ret;
-}
-
-static int __init irq_timings_test_irqts(struct irq_timings *irqts,
-					 unsigned count)
-{
-	int start = count >= IRQ_TIMINGS_SIZE ? count - IRQ_TIMINGS_SIZE : 0;
-	int i, irq, oirq = 0xBEEF;
-	u64 ots = 0xDEAD, ts;
-
-	/*
-	 * Fill the circular buffer by using the dedicated function.
-	 */
-	for (i = 0; i < count; i++) {
-		pr_debug("%d: index=%d, ts=%llX irq=%X\n",
-			 i, i & IRQ_TIMINGS_MASK, ots + i, oirq + i);
-
-		irq_timings_push(ots + i, oirq + i);
-	}
-
-	/*
-	 * Compute the first elements values after the index wrapped
-	 * up or not.
-	 */
-	ots += start;
-	oirq += start;
-
-	/*
-	 * Test the circular buffer count is correct.
-	 */
-	pr_debug("---> Checking timings array count (%d) is right\n", count);
-	if (WARN_ON(irqts->count != count))
-		return -EINVAL;
-
-	/*
-	 * Test the macro allowing to browse all the irqts.
-	 */
-	pr_debug("---> Checking the for_each_irqts() macro\n");
-	for_each_irqts(i, irqts) {
-
-		irq = irq_timing_decode(irqts->values[i], &ts);
-
-		pr_debug("index=%d, ts=%llX / %llX, irq=%X / %X\n",
-			 i, ts, ots, irq, oirq);
-
-		if (WARN_ON(ts != ots || irq != oirq))
-			return -EINVAL;
-
-		ots++; oirq++;
-	}
-
-	/*
-	 * The circular buffer should have be flushed when browsed
-	 * with for_each_irqts
-	 */
-	pr_debug("---> Checking timings array is empty after browsing it\n");
-	if (WARN_ON(irqts->count))
-		return -EINVAL;
-
-	return 0;
-}
-
-static int __init irq_timings_irqts_selftest(void)
-{
-	struct irq_timings *irqts = this_cpu_ptr(&irq_timings);
-	int i, ret;
-
-	/*
-	 * Test the circular buffer with different number of
-	 * elements. The purpose is to test at the limits (empty, half
-	 * full, full, wrapped with the cursor at the boundaries,
-	 * wrapped several times, etc ...
-	 */
-	int count[] = { 0,
-			IRQ_TIMINGS_SIZE >> 1,
-			IRQ_TIMINGS_SIZE,
-			IRQ_TIMINGS_SIZE + (IRQ_TIMINGS_SIZE >> 1),
-			2 * IRQ_TIMINGS_SIZE,
-			(2 * IRQ_TIMINGS_SIZE) + 3,
-	};
-
-	for (i = 0; i < ARRAY_SIZE(count); i++) {
-
-		pr_info("---> Checking the timings with %d/%d values\n",
-			count[i], IRQ_TIMINGS_SIZE);
-
-		ret = irq_timings_test_irqts(irqts, count[i]);
-		if (ret)
-			break;
-	}
-
-	return ret;
-}
-
-static int __init irq_timings_selftest(void)
-{
-	int ret;
-
-	pr_info("------------------- selftest start -----------------\n");
-
-	/*
-	 * At this point, we don't except any subsystem to use the irq
-	 * timings but us, so it should not be enabled.
-	 */
-	if (static_branch_unlikely(&irq_timing_enabled)) {
-		pr_warn("irq timings already initialized, skipping selftest\n");
-		return 0;
-	}
-
-	ret = irq_timings_irqts_selftest();
-	if (ret)
-		goto out;
-
-	ret = irq_timings_irqs_selftest();
-	if (ret)
-		goto out;
-
-	ret = irq_timings_next_index_selftest();
-out:
-	pr_info("---------- selftest end with %s -----------\n",
-		ret ? "failure" : "success");
-
-	return ret;
-}
-early_initcall(irq_timings_selftest);
-#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ba36939fda79..78854756d416 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -2551,14 +2551,6 @@ config TEST_PARMAN
 
 	  If unsure, say N.
 
-config TEST_IRQ_TIMINGS
-	bool "IRQ timings selftest"
-	depends on IRQ_TIMINGS
-	help
-	  Enable this option to test the irq timings code on boot.
-
-	  If unsure, say N.
-
 config TEST_LKM
 	tristate "Test module loading with 'hello world' module"
 	depends on m
[GIT pull] timers/clocksource for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest timers/clocksource branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-clocksource-2026-02-09

up to:  7eaf8e32de5f: Merge tag 'timers-v7.0' of git://git.kernel.org/pub/scm/linux/kernel/git/daniel.lezcano/linux into timers/clocksource


Updates for clockevent/clocksource drivers:

A rather small set of boring cleanups, fixes and improvements.

Thanks,

	tglx

------------------>
Bartosz Golaszewski (1):
      clocksource/drivers/timer-integrator-ap: Add missing Kconfig dependency on OF

Marc Zyngier (1):
      clocksource/drivers/mips-gic-timer: Move GIC timer to request_percpu_irq()

Niklas Söderlund (1):
      clocksource/drivers/sh_tmu: Always leave device running after probe

Soham Metha (1):
      clocksource/drivers/armada-370-xp: Fix dead link to timer binding

Stephen Eta Zhou (1):
      clocksource/drivers/timer-sp804: Fix an Oops when read_current_timer is called on ARM32 platforms where the SP804 is not registered as the sched_clock.


 drivers/clocksource/Kconfig               |  1 +
 drivers/clocksource/mips-gic-timer.c      | 10 ++--------
 drivers/clocksource/sh_tmu.c              | 18 ------------------
 drivers/clocksource/timer-armada-370-xp.c |  2 +-
 drivers/clocksource/timer-sp804.c         | 14 +++++++++-----
 5 files changed, 13 insertions(+), 32 deletions(-)

diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index aa59e5b13351..fd9112706545 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -254,6 +254,7 @@ config KEYSTONE_TIMER
 
 config INTEGRATOR_AP_TIMER
 	bool "Integrator-AP timer driver" if COMPILE_TEST
+	depends on OF
 	select CLKSRC_MMIO
 	help
 	  Enables support for the Integrator-AP timer.
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
index abb685a080a5..1501c7db9a8e 100644
--- a/drivers/clocksource/mips-gic-timer.c
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -77,13 +77,6 @@ static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-static struct irqaction gic_compare_irqaction = {
-	.handler = gic_compare_interrupt,
-	.percpu_dev_id = &gic_clockevent_device,
-	.flags = IRQF_PERCPU | IRQF_TIMER,
-	.name = "timer",
-};
-
 static void gic_clockevent_cpu_init(unsigned int cpu,
 				    struct clock_event_device *cd)
 {
@@ -152,7 +145,8 @@ static int gic_clockevent_init(void)
 	if (!gic_frequency)
 		return -ENXIO;
 
-	ret = setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
+	ret = request_percpu_irq(gic_timer_irq, gic_compare_interrupt,
+				 "timer", &gic_clockevent_device);
 	if (ret < 0) {
 		pr_err("IRQ %d setup failed (%d)\n", gic_timer_irq, ret);
 		return ret;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index beffff81c00f..3fc6ed9b5630 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -143,16 +143,6 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
 
 static int __sh_tmu_enable(struct sh_tmu_channel *ch)
 {
-	int ret;
-
-	/* enable clock */
-	ret = clk_enable(ch->tmu->clk);
-	if (ret) {
-		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
-			ch->index);
-		return ret;
-	}
-
 	/* make sure channel is disabled */
 	sh_tmu_start_stop_ch(ch, 0);
 
@@ -174,7 +164,6 @@ static int sh_tmu_enable(struct sh_tmu_channel *ch)
 	if (ch->enable_count++ > 0)
 		return 0;
 
-	pm_runtime_get_sync(&ch->tmu->pdev->dev);
 	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);
 
 	return __sh_tmu_enable(ch);
@@ -187,9 +176,6 @@ static void __sh_tmu_disable(struct sh_tmu_channel *ch)
 
 	/* disable interrupts in TMU block */
 	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
-
-	/* stop clock */
-	clk_disable(ch->tmu->clk);
 }
 
 static void sh_tmu_disable(struct sh_tmu_channel *ch)
@@ -203,7 +189,6 @@ static void sh_tmu_disable(struct sh_tmu_channel *ch)
 	__sh_tmu_disable(ch);
 
 	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
-	pm_runtime_put(&ch->tmu->pdev->dev);
 }
 
 static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
@@ -552,7 +537,6 @@ static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
 		goto err_clk_unprepare;
 
 	tmu->rate = clk_get_rate(tmu->clk) / 4;
-	clk_disable(tmu->clk);
 
 	/* Map the memory resource. */
 	ret = sh_tmu_map_memory(tmu);
@@ -626,8 +610,6 @@ static int sh_tmu_probe(struct platform_device *pdev)
  out:
 	if (tmu->has_clockevent || tmu->has_clocksource)
 		pm_runtime_irq_safe(&pdev->dev);
-	else
-		pm_runtime_idle(&pdev->dev);
 
 	return 0;
 }
diff --git a/drivers/clocksource/timer-armada-370-xp.c b/drivers/clocksource/timer-armada-370-xp.c
index f2b4cc40db93..a405a084cf72 100644
--- a/drivers/clocksource/timer-armada-370-xp.c
+++ b/drivers/clocksource/timer-armada-370-xp.c
@@ -22,7 +22,7 @@
  *     doing otherwise leads to using a clocksource whose frequency varies
  *     when doing cpufreq frequency changes.
  *
- * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
+ * See Documentation/devicetree/bindings/timer/marvell,armada-370-timer.yaml
  */
 
 #include <linux/init.h>
diff --git a/drivers/clocksource/timer-sp804.c b/drivers/clocksource/timer-sp804.c
index e82a95ea4724..d69858427359 100644
--- a/drivers/clocksource/timer-sp804.c
+++ b/drivers/clocksource/timer-sp804.c
@@ -106,21 +106,25 @@ static u64 notrace sp804_read(void)
 	return ~readl_relaxed(sched_clkevt->value);
 }
 
+/* Register delay timer backed by the hardware counter */
 #ifdef CONFIG_ARM
 static struct delay_timer delay;
+static struct sp804_clkevt *delay_clkevt;
+
 static unsigned long sp804_read_delay_timer_read(void)
 {
-	return sp804_read();
+	return ~readl_relaxed(delay_clkevt->value);
 }
 
-static void sp804_register_delay_timer(int freq)
+static void sp804_register_delay_timer(struct sp804_clkevt *clk, int freq)
 {
+	delay_clkevt = clk;
 	delay.freq = freq;
 	delay.read_current_timer = sp804_read_delay_timer_read;
 	register_current_timer_delay(&delay);
 }
 #else
-static inline void sp804_register_delay_timer(int freq) {}
+static inline void sp804_register_delay_timer(struct sp804_clkevt *clk, int freq) {}
 #endif
 
 static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
@@ -135,8 +139,6 @@ static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
 	if (rate < 0)
 		return -EINVAL;
 
-	sp804_register_delay_timer(rate);
-
 	clkevt = sp804_clkevt_get(base);
 
 	writel(0, clkevt->ctrl);
@@ -152,6 +154,8 @@ static int __init sp804_clocksource_and_sched_clock_init(void __iomem *base,
 	clocksource_mmio_init(clkevt->value, name,
 		rate, 200, 32, clocksource_mmio_readl_down);
 
+	sp804_register_delay_timer(clkevt, rate);
+
 	if (use_sched_clock) {
 		sched_clkevt = clkevt;
 		sched_clock_register(sp804_read, 32, rate);
[GIT pull] irq/msi for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest irq/msi branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-msi-2026-02-09

up to:  cb9b6f9d2be6: ALSA: hda/intel: Make MSI address limit based on the device DMA limit


Updates for the [PCI] MSI subsystem:

  - Add interrupt redirection infrastructure

    Some PCI controllers use a single demultiplexing interrupt for the MSI
    interrupts of subordinate devices.

    This prevents setting the interrupt affinity of device interrupts, which
    causes device interrupts to be delivered to a single CPU. That obviously is
    counterproductive for multi-queue devices and interrupt balancing.

    To work around this limitation the new infrastructure installs a dummy
    irq_set_affinity() callback which captures the affinity mask and picks a
    redirection target CPU out of the mask.

    When the PCI controller demultiplexes the interrupts it invokes a new
    handling function in the core, which either runs the interrupt handler
    directly when the demultiplexing CPU is part of the affinity mask, or
    delegates it via irq_work to the redirection target CPU. A sketch of the
    resulting driver pattern follows after this list.

  - Utilize the interrupt redirection mechanism in the PCI DWC host controller
    driver.

    This allows affinity control for the subordinate device MSI interrupts,
    instead of having them execute on whichever CPU happens to run the
    demultiplex handler.

  - Replace the binary 64-bit MSI flag with a DMA mask

    Some PCI devices have PCI_MSI_FLAGS_64BIT in the MSI capability, but
    implement fewer than 64 address bits. This breaks on platforms where such a
    device is assigned an MSI address higher than what it actually supports.

    With the binary 64-bit flag there is no other choice than disabling 64-bit
    MSI support entirely, which leaves the device dysfunctional.

    By using a DMA mask, the address limit of a device can be described
    correctly, which covers the above scenario (a sketch follows after this
    list).

  - Make use of the DMA mask based address limit in the hda/intel and radeon
    drivers to enable them on affected platforms.

  - The usual small cleanups and improvements
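
As an illustration of the intended driver pattern, here is a minimal sketch of
how a demultiplexing MSI controller hooks into the redirection infrastructure.
The foo_* names, the register offsets and the struct are made up for this
example; the real conversion in this pull is the pcie-designware-host.c change
below.

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

#define FOO_MSI_STATUS		0x00	/* hypothetical status register */
#define FOO_MSI_VECTORS		32

struct foo_port {
	void __iomem		*base;
	struct irq_domain	*irq_domain;
};

static void foo_msi_mask(struct irq_data *d) { /* register poke omitted */ }
static void foo_msi_unmask(struct irq_data *d) { /* register poke omitted */ }
static void foo_msi_ack(struct irq_data *d) { /* status clear omitted */ }

#ifdef CONFIG_SMP
static void foo_irq_noop(struct irq_data *d) { }
#endif

/* Demultiplex handler invoked from the single parent interrupt */
static void foo_msi_demux(struct foo_port *port)
{
	unsigned long status = readl(port->base + FOO_MSI_STATUS);
	unsigned long pos;

	for_each_set_bit(pos, &status, FOO_MSI_VECTORS) {
		/*
		 * Runs the handler locally when the current CPU is in the
		 * affinity mask, otherwise queues irq_work on the target CPU
		 * picked by irq_chip_redirect_set_affinity().
		 */
		generic_handle_demux_domain_irq(port->irq_domain, pos);
	}
}

static struct irq_chip foo_msi_bottom_chip = {
	.name			= "FOO-MSI",
	.irq_mask		= foo_msi_mask,
	.irq_unmask		= foo_msi_unmask,
#ifdef CONFIG_SMP
	/* The ack must happen before the handler is moved to another CPU */
	.irq_ack		= foo_irq_noop,
	.irq_pre_redirect	= foo_msi_ack,
	.irq_set_affinity	= irq_chip_redirect_set_affinity,
#else
	.irq_ack		= foo_msi_ack,
#endif
};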

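A hedged sketch of the driver side of the new DMA mask. The foo_pci_probe()
function and the 40-bit limit are example values, not taken from any driver in
this pull; the real conversions are in the radeon, ionic and hda/intel hunks
below.

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/*
	 * The device advertises PCI_MSI_FLAGS_64BIT but only wires up 40
	 * address bits, so describe the real limit instead of disabling
	 * 64-bit MSI altogether.
	 */
	pdev->msi_addr_mask = DMA_BIT_MASK(40);

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret < 0)
		return ret;

	return 0;
}

msi_verify_entries() then checks the assigned MSI address against that mask
instead of the previous hard-coded 32-bit limit.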

Thanks,

	tglx

------------------>
Haoxiang Li (1):
      PCI/MSI: Unmap MSI-X region on error

Radu Rendec (4):
      genirq: Add interrupt redirection infrastructure
      PCI: dwc: Code cleanup
      PCI: dwc: Enable MSI affinity support
      genirq: Update effective affinity for redirected interrupts

Randy Dunlap (1):
      genirq/msi: Correct kernel-doc in <linux/msi.h>

Thomas Gleixner (1):
      genirq/redirect: Prevent writing MSI message on affinity change

Vivian Wang (4):
      PCI/MSI: Convert the boolean no_64bit_msi flag to a DMA address mask
      PCI/MSI: Check the device specific address mask in msi_verify_entries()
      drm/radeon: Make MSI address limit based on the device DMA limit
      ALSA: hda/intel: Make MSI address limit based on the device DMA limit


 arch/powerpc/platforms/powernv/pci-ioda.c          |   2 +-
 arch/powerpc/platforms/pseries/msi.c               |   4 +-
 drivers/gpu/drm/radeon/radeon_device.c             |   1 +
 drivers/gpu/drm/radeon/radeon_irq_kms.c            |  10 --
 .../net/ethernet/pensando/ionic/ionic_bus_pci.c    |   2 +-
 drivers/pci/controller/dwc/pcie-designware-host.c  | 127 ++++++++++-----------
 drivers/pci/controller/dwc/pcie-designware.h       |   7 +-
 drivers/pci/msi/msi.c                              |  14 ++-
 drivers/pci/msi/pcidev_msi.c                       |   2 +-
 drivers/pci/probe.c                                |   7 ++
 include/linux/irq.h                                |  10 ++
 include/linux/irqdesc.h                            |  17 ++-
 include/linux/msi.h                                |  13 ++-
 include/linux/pci.h                                |   8 +-
 kernel/irq/chip.c                                  |  24 +++-
 kernel/irq/irqdesc.c                               |  86 +++++++++++++-
 kernel/irq/manage.c                                |  15 ++-
 sound/hda/controllers/intel.c                      |  10 +-
 18 files changed, 247 insertions(+), 112 deletions(-)

diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index b0c1d9d16fb5..1c78fdfb7b03 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1666,7 +1666,7 @@ static int __pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
 		return -ENXIO;
 
 	/* Force 32-bit MSI on some broken devices */
-	if (dev->no_64bit_msi)
+	if (dev->msi_addr_mask < DMA_BIT_MASK(64))
 		is_64 = 0;
 
 	/* Assign XIVE to PE */
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index a82aaa786e9e..7473c7ca1db0 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -383,7 +383,7 @@ static int rtas_prepare_msi_irqs(struct pci_dev *pdev, int nvec_in, int type,
 	 */
 again:
 	if (type == PCI_CAP_ID_MSI) {
-		if (pdev->no_64bit_msi) {
+		if (pdev->msi_addr_mask < DMA_BIT_MASK(64)) {
 			rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
 			if (rc < 0) {
 				/*
@@ -409,7 +409,7 @@ static int rtas_prepare_msi_irqs(struct pci_dev *pdev, int nvec_in, int type,
 		if (use_32bit_msi_hack && rc > 0)
 			rtas_hack_32bit_msi_gen2(pdev);
 	} else {
-		if (pdev->no_64bit_msi)
+		if (pdev->msi_addr_mask < DMA_BIT_MASK(64))
 			rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSIX_FN, nvec);
 		else
 			rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 60afaa8e56b4..5faae0361361 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1374,6 +1374,7 @@ int radeon_device_init(struct radeon_device *rdev,
 		pr_warn("radeon: No suitable DMA available\n");
 		return r;
 	}
+	rdev->pdev->msi_addr_mask = DMA_BIT_MASK(dma_bits);
 	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
 
 	/* Registers mapping */
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 9961251b44ba..839d619e5602 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -245,16 +245,6 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
 	if (rdev->flags & RADEON_IS_AGP)
 		return false;
 
-	/*
-	 * Older chips have a HW limitation, they can only generate 40 bits
-	 * of address for "64-bit" MSIs which breaks on some platforms, notably
-	 * IBM POWER servers, so we limit them
-	 */
-	if (rdev->family < CHIP_BONAIRE) {
-		dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
-		rdev->pdev->no_64bit_msi = 1;
-	}
-
 	/* force MSI on */
 	if (radeon_msi == 1)
 		return true;
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
index 70d86c5f52fb..0671deae9a28 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c
@@ -331,7 +331,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
 #ifdef CONFIG_PPC64
 	/* Ensure MSI/MSI-X interrupts lie within addressable physical memory */
-	pdev->no_64bit_msi = 1;
+	pdev->msi_addr_mask = DMA_BIT_MASK(32);
 #endif
 
 	err = ionic_setup_one(ionic);
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 372207c33a85..f116591975ff 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -26,9 +26,27 @@ static struct pci_ops dw_pcie_ops;
 static struct pci_ops dw_pcie_ecam_ops;
 static struct pci_ops dw_child_pcie_ops;
 
+#ifdef CONFIG_SMP
+static void dw_irq_noop(struct irq_data *d) { }
+#endif
+
+static bool dw_pcie_init_dev_msi_info(struct device *dev, struct irq_domain *domain,
+				      struct irq_domain *real_parent, struct msi_domain_info *info)
+{
+	if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info))
+		return false;
+
+#ifdef CONFIG_SMP
+	info->chip->irq_ack = dw_irq_noop;
+	info->chip->irq_pre_redirect = irq_chip_pre_redirect_parent;
+#else
+	info->chip->irq_ack = irq_chip_ack_parent;
+#endif
+	return true;
+}
+
 #define DW_PCIE_MSI_FLAGS_REQUIRED (MSI_FLAG_USE_DEF_DOM_OPS		| \
 				    MSI_FLAG_USE_DEF_CHIP_OPS		| \
-				    MSI_FLAG_NO_AFFINITY		| \
 				    MSI_FLAG_PCI_MSI_MASK_PARENT)
 #define DW_PCIE_MSI_FLAGS_SUPPORTED (MSI_FLAG_MULTI_PCI_MSI		| \
 				     MSI_FLAG_PCI_MSIX			| \
@@ -40,41 +58,30 @@ static const struct msi_parent_ops dw_pcie_msi_parent_ops = {
 	.required_flags		= DW_PCIE_MSI_FLAGS_REQUIRED,
 	.supported_flags	= DW_PCIE_MSI_FLAGS_SUPPORTED,
 	.bus_select_token	= DOMAIN_BUS_PCI_MSI,
-	.chip_flags		= MSI_CHIP_FLAG_SET_ACK,
 	.prefix			= "DW-",
-	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
+	.init_dev_msi_info	= dw_pcie_init_dev_msi_info,
 };
 
 /* MSI int handler */
-irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
+void dw_handle_msi_irq(struct dw_pcie_rp *pp)
 {
-	int i, pos;
-	unsigned long val;
-	u32 status, num_ctrls;
-	irqreturn_t ret = IRQ_NONE;
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
+	unsigned int i, num_ctrls;
 
 	num_ctrls = pp->num_vectors / MAX_MSI_IRQS_PER_CTRL;
 
 	for (i = 0; i < num_ctrls; i++) {
-		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS +
-					   (i * MSI_REG_CTRL_BLOCK_SIZE));
+		unsigned int reg_off = i * MSI_REG_CTRL_BLOCK_SIZE;
+		unsigned int irq_off = i * MAX_MSI_IRQS_PER_CTRL;
+		unsigned long status, pos;
+
+		status = dw_pcie_readl_dbi(pci, PCIE_MSI_INTR0_STATUS + reg_off);
 		if (!status)
 			continue;
 
-		ret = IRQ_HANDLED;
-		val = status;
-		pos = 0;
-		while ((pos = find_next_bit(&val, MAX_MSI_IRQS_PER_CTRL,
-					    pos)) != MAX_MSI_IRQS_PER_CTRL) {
-			generic_handle_domain_irq(pp->irq_domain,
-						  (i * MAX_MSI_IRQS_PER_CTRL) +
-						  pos);
-			pos++;
-		}
+		for_each_set_bit(pos, &status, MAX_MSI_IRQS_PER_CTRL)
+			generic_handle_demux_domain_irq(pp->irq_domain, irq_off + pos);
 	}
-
-	return ret;
 }
 
 /* Chained MSI interrupt service routine */
@@ -95,13 +102,10 @@ static void dw_pci_setup_msi_msg(struct irq_data *d, struct msi_msg *msg)
 {
 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
-	u64 msi_target;
-
-	msi_target = (u64)pp->msi_data;
+	u64 msi_target = (u64)pp->msi_data;
 
 	msg->address_lo = lower_32_bits(msi_target);
 	msg->address_hi = upper_32_bits(msi_target);
-
 	msg->data = d->hwirq;
 
 	dev_dbg(pci->dev, "msi#%d address_hi %#x address_lo %#x\n",
@@ -113,18 +117,14 @@ static void dw_pci_bottom_mask(struct irq_data *d)
 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pp->lock, flags);
 
+	guard(raw_spinlock)(&pp->lock);
 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 	pp->irq_mask[ctrl] |= BIT(bit);
 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
 static void dw_pci_bottom_unmask(struct irq_data *d)
@@ -132,18 +132,14 @@ static void dw_pci_bottom_unmask(struct irq_data *d)
 	struct dw_pcie_rp *pp = irq_data_get_irq_chip_data(d);
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
 	unsigned int res, bit, ctrl;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&pp->lock, flags);
 
+	guard(raw_spinlock)(&pp->lock);
 	ctrl = d->hwirq / MAX_MSI_IRQS_PER_CTRL;
 	res = ctrl * MSI_REG_CTRL_BLOCK_SIZE;
 	bit = d->hwirq % MAX_MSI_IRQS_PER_CTRL;
 
 	pp->irq_mask[ctrl] &= ~BIT(bit);
 	dw_pcie_writel_dbi(pci, PCIE_MSI_INTR0_MASK + res, pp->irq_mask[ctrl]);
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
 }
 
 static void dw_pci_bottom_ack(struct irq_data *d)
@@ -160,54 +156,48 @@ static void dw_pci_bottom_ack(struct irq_data *d)
 }
 
 static struct irq_chip dw_pci_msi_bottom_irq_chip = {
-	.name = "DWPCI-MSI",
-	.irq_ack = dw_pci_bottom_ack,
-	.irq_compose_msi_msg = dw_pci_setup_msi_msg,
-	.irq_mask = dw_pci_bottom_mask,
-	.irq_unmask = dw_pci_bottom_unmask,
+	.name			= "DWPCI-MSI",
+	.irq_compose_msi_msg	= dw_pci_setup_msi_msg,
+	.irq_mask		= dw_pci_bottom_mask,
+	.irq_unmask		= dw_pci_bottom_unmask,
+#ifdef CONFIG_SMP
+	.irq_ack		= dw_irq_noop,
+	.irq_pre_redirect	= dw_pci_bottom_ack,
+	.irq_set_affinity	= irq_chip_redirect_set_affinity,
+#else
+	.irq_ack		= dw_pci_bottom_ack,
+#endif
 };
 
-static int dw_pcie_irq_domain_alloc(struct irq_domain *domain,
-				    unsigned int virq, unsigned int nr_irqs,
-				    void *args)
+static int dw_pcie_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				    unsigned int nr_irqs, void *args)
 {
 	struct dw_pcie_rp *pp = domain->host_data;
-	unsigned long flags;
-	u32 i;
 	int bit;
 
-	raw_spin_lock_irqsave(&pp->lock, flags);
-
-	bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
-				      order_base_2(nr_irqs));
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
+	scoped_guard (raw_spinlock_irq, &pp->lock) {
+		bit = bitmap_find_free_region(pp->msi_irq_in_use, pp->num_vectors,
+					      order_base_2(nr_irqs));
+	}
 
 	if (bit < 0)
 		return -ENOSPC;
 
-	for (i = 0; i < nr_irqs; i++)
-		irq_domain_set_info(domain, virq + i, bit + i,
-				    pp->msi_irq_chip,
-				    pp, handle_edge_irq,
-				    NULL, NULL);
-
+	for (unsigned int i = 0; i < nr_irqs; i++) {
+		irq_domain_set_info(domain, virq + i, bit + i, pp->msi_irq_chip,
+				    pp, handle_edge_irq, NULL, NULL);
+	}
 	return 0;
 }
 
-static void dw_pcie_irq_domain_free(struct irq_domain *domain,
-				    unsigned int virq, unsigned int nr_irqs)
+static void dw_pcie_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+				    unsigned int nr_irqs)
 {
 	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
 	struct dw_pcie_rp *pp = domain->host_data;
-	unsigned long flags;
 
-	raw_spin_lock_irqsave(&pp->lock, flags);
-
-	bitmap_release_region(pp->msi_irq_in_use, d->hwirq,
-			      order_base_2(nr_irqs));
-
-	raw_spin_unlock_irqrestore(&pp->lock, flags);
+	guard(raw_spinlock_irq)(&pp->lock);
+	bitmap_release_region(pp->msi_irq_in_use, d->hwirq, order_base_2(nr_irqs));
 }
 
 static const struct irq_domain_ops dw_pcie_msi_domain_ops = {
@@ -241,8 +231,7 @@ void dw_pcie_free_msi(struct dw_pcie_rp *pp)
 
 	for (ctrl = 0; ctrl < MAX_MSI_CTRLS; ctrl++) {
 		if (pp->msi_irq[ctrl] > 0)
-			irq_set_chained_handler_and_data(pp->msi_irq[ctrl],
-							 NULL, NULL);
+			irq_set_chained_handler_and_data(pp->msi_irq[ctrl], NULL, NULL);
 	}
 
 	irq_domain_remove(pp->irq_domain);
diff --git a/drivers/pci/controller/dwc/pcie-designware.h b/drivers/pci/controller/dwc/pcie-designware.h
index 31685951a080..403f6cfe8191 100644
--- a/drivers/pci/controller/dwc/pcie-designware.h
+++ b/drivers/pci/controller/dwc/pcie-designware.h
@@ -821,7 +821,7 @@ static inline enum dw_pcie_ltssm dw_pcie_get_ltssm(struct dw_pcie *pci)
 #ifdef CONFIG_PCIE_DW_HOST
 int dw_pcie_suspend_noirq(struct dw_pcie *pci);
 int dw_pcie_resume_noirq(struct dw_pcie *pci);
-irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp);
+void dw_handle_msi_irq(struct dw_pcie_rp *pp);
 void dw_pcie_msi_init(struct dw_pcie_rp *pp);
 int dw_pcie_msi_host_init(struct dw_pcie_rp *pp);
 void dw_pcie_free_msi(struct dw_pcie_rp *pp);
@@ -842,10 +842,7 @@ static inline int dw_pcie_resume_noirq(struct dw_pcie *pci)
 	return 0;
 }
 
-static inline irqreturn_t dw_handle_msi_irq(struct dw_pcie_rp *pp)
-{
-	return IRQ_NONE;
-}
+static inline void dw_handle_msi_irq(struct dw_pcie_rp *pp) { }
 
 static inline void dw_pcie_msi_init(struct dw_pcie_rp *pp)
 { }
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
index 34d664139f48..e2412175d7af 100644
--- a/drivers/pci/msi/msi.c
+++ b/drivers/pci/msi/msi.c
@@ -321,14 +321,16 @@ static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
 static int msi_verify_entries(struct pci_dev *dev)
 {
 	struct msi_desc *entry;
+	u64 address;
 
-	if (!dev->no_64bit_msi)
+	if (dev->msi_addr_mask == DMA_BIT_MASK(64))
 		return 0;
 
 	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
-		if (entry->msg.address_hi) {
-			pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
-				entry->msg.address_hi, entry->msg.address_lo);
+		address = (u64)entry->msg.address_hi << 32 | entry->msg.address_lo;
+		if (address & ~dev->msi_addr_mask) {
+			pci_err(dev, "arch assigned 64-bit MSI address %#llx above device MSI address mask %#llx\n",
+				address, dev->msi_addr_mask);
 			break;
 		}
 	}
@@ -737,7 +739,7 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 
 	ret = msix_setup_interrupts(dev, entries, nvec, affd);
 	if (ret)
-		goto out_disable;
+		goto out_unmap;
 
 	/* Disable INTX */
 	pci_intx_for_msi(dev, 0);
@@ -758,6 +760,8 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
 	pcibios_free_irq(dev);
 	return 0;
 
+out_unmap:
+	iounmap(dev->msix_base);
 out_disable:
 	dev->msix_enabled = 0;
 	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
diff --git a/drivers/pci/msi/pcidev_msi.c b/drivers/pci/msi/pcidev_msi.c
index 5520aff53b56..0b0346813092 100644
--- a/drivers/pci/msi/pcidev_msi.c
+++ b/drivers/pci/msi/pcidev_msi.c
@@ -24,7 +24,7 @@ void pci_msi_init(struct pci_dev *dev)
 	}
 
 	if (!(ctrl & PCI_MSI_FLAGS_64BIT))
-		dev->no_64bit_msi = 1;
+		dev->msi_addr_mask = DMA_BIT_MASK(32);
 }
 
 void pci_msix_init(struct pci_dev *dev)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 41183aed8f5d..a2bff57176a3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2047,6 +2047,13 @@ int pci_setup_device(struct pci_dev *dev)
 	 */
 	dev->dma_mask = 0xffffffff;
 
+	/*
+	 * Assume 64-bit addresses for MSI initially. Will be changed to 32-bit
+	 * if MSI (rather than MSI-X) capability does not have
+	 * PCI_MSI_FLAGS_64BIT. Can also be overridden by driver.
+	 */
+	dev->msi_addr_mask = DMA_BIT_MASK(64);
+
 	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
 		     dev->bus->number, PCI_SLOT(dev->devfn),
 		     PCI_FUNC(dev->devfn));
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4a9f1d7b08c3..41d5bc53eefc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -459,6 +459,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  *			checks against the supplied affinity mask are not
  *			required. This is used for CPU hotplug where the
  *			target CPU is not yet set in the cpu_online_mask.
+ * @irq_pre_redirect:	Optional function to be invoked before redirecting
+ *			an interrupt via irq_work. Called only on CONFIG_SMP.
  * @irq_retrigger:	resend an IRQ to the CPU
  * @irq_set_type:	set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ
  * @irq_set_wake:	enable/disable power-management wake-on of an IRQ
@@ -503,6 +505,7 @@ struct irq_chip {
 	void		(*irq_eoi)(struct irq_data *data);
 
 	int		(*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force);
+	void		(*irq_pre_redirect)(struct irq_data *data);
 	int		(*irq_retrigger)(struct irq_data *data);
 	int		(*irq_set_type)(struct irq_data *data, unsigned int flow_type);
 	int		(*irq_set_wake)(struct irq_data *data, unsigned int on);
@@ -687,6 +690,13 @@ extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
 extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type);
 extern int irq_chip_request_resources_parent(struct irq_data *data);
 extern void irq_chip_release_resources_parent(struct irq_data *data);
+#ifdef CONFIG_SMP
+void irq_chip_pre_redirect_parent(struct irq_data *data);
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+int irq_chip_redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force);
 #endif
 
 /* Disable or mask interrupts during a kernel kexec */
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 17902861de76..dae9a9b93665 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -2,9 +2,10 @@
 #ifndef _LINUX_IRQDESC_H
 #define _LINUX_IRQDESC_H
 
-#include <linux/rcupdate.h>
+#include <linux/irq_work.h>
 #include <linux/kobject.h>
 #include <linux/mutex.h>
+#include <linux/rcupdate.h>
 
 /*
  * Core internal functions to deal with irq descriptors
@@ -29,6 +30,17 @@ struct irqstat {
 #endif
 };
 
+/**
+ * struct irq_redirect - interrupt redirection metadata
+ * @work:	Hard irq_work item for handler execution on a different CPU
+ * @target_cpu:	CPU to run irq handler on in case the current CPU is not part
+ *		of the irq affinity mask
+ */
+struct irq_redirect {
+	struct irq_work	work;
+	unsigned int	target_cpu;
+};
+
 /**
  * struct irq_desc - interrupt descriptor
  * @irq_common_data:	per irq and chip data passed down to chip functions
@@ -46,6 +58,7 @@ struct irqstat {
  * @threads_handled:	stats field for deferred spurious detection of threaded handlers
  * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
  * @lock:		locking for SMP
+ * @redirect:		Facility for redirecting interrupts via irq_work
  * @affinity_hint:	hint to user space for preferred irq affinity
  * @affinity_notify:	context for notification of affinity changes
  * @pending_mask:	pending rebalanced interrupts
@@ -83,6 +96,7 @@ struct irq_desc {
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
 #ifdef CONFIG_SMP
+	struct irq_redirect	redirect;
 	const struct cpumask	*affinity_hint;
 	struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -185,6 +199,7 @@ int generic_handle_irq_safe(unsigned int irq);
 int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
 int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq);
 int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq);
+bool generic_handle_demux_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq);
 #endif
 
 /* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 8003e3218c46..94cfc3719077 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -49,12 +49,12 @@ typedef struct arch_msi_msg_data {
 #endif
 
 /**
- * msi_msg - Representation of a MSI message
+ * struct msi_msg - Representation of a MSI message
  * @address_lo:		Low 32 bits of msi message address
- * @arch_addrlo:	Architecture specific shadow of @address_lo
+ * @arch_addr_lo:	Architecture specific shadow of @address_lo
  * @address_hi:		High 32 bits of msi message address
  *			(only used when device supports it)
- * @arch_addrhi:	Architecture specific shadow of @address_hi
+ * @arch_addr_hi:	Architecture specific shadow of @address_hi
  * @data:		MSI message data (usually 16 bits)
  * @arch_data:		Architecture specific shadow of @data
  */
@@ -91,7 +91,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
 				    struct msi_msg *msg);
 
 /**
- * pci_msi_desc - PCI/MSI specific MSI descriptor data
+ * struct pci_msi_desc - PCI/MSI specific MSI descriptor data
  *
  * @msi_mask:	[PCI MSI]   MSI cached mask bits
  * @msix_ctrl:	[PCI MSI-X] MSI-X cached per vector control bits
@@ -101,6 +101,7 @@ typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
  * @can_mask:	[PCI MSI/X] Masking supported?
  * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
  * @default_irq:[PCI MSI/X] The default pre-assigned non-MSI irq
+ * @msi_attrib:	[PCI MSI/X] Compound struct of MSI/X attributes
  * @mask_pos:	[PCI MSI]   Mask register position
  * @mask_base:	[PCI MSI-X] Mask register base address
  */
@@ -169,7 +170,7 @@ struct msi_desc_data {
  *                  Only used if iommu_msi_shift != 0
  * @iommu_msi_shift: Indicates how many bits of the original address should be
  *                   preserved when using iommu_msi_iova.
- * @sysfs_attr:	Pointer to sysfs device attribute
+ * @sysfs_attrs:	Pointer to sysfs device attribute
  *
  * @write_msi_msg:	Callback that may be called when the MSI message
  *			address or data changes
@@ -220,7 +221,7 @@ enum msi_desc_filter {
 /**
  * struct msi_dev_domain - The internals of MSI domain info per device
  * @store:		Xarray for storing MSI descriptor pointers
- * @irqdomain:		Pointer to a per device interrupt domain
+ * @domain:		Pointer to a per device interrupt domain
  */
 struct msi_dev_domain {
 	struct xarray		store;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 864775651c6f..0fe32fef0331 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -377,6 +377,13 @@ struct pci_dev {
 					   0xffffffff.  You only need to change
 					   this if your device has broken DMA
 					   or supports 64-bit transfers.  */
+	u64		msi_addr_mask;	/* Mask of the bits of bus address for
+					   MSI that this device implements.
+					   Normally set based on device
+					   capabilities. You only need to
+					   change this if your device claims
+					   to support 64-bit MSI but implements
+					   fewer than 64 address bits. */
 
 	struct device_dma_parameters dma_parms;
 
@@ -441,7 +448,6 @@ struct pci_dev {
 
 	unsigned int	is_busmaster:1;		/* Is busmaster */
 	unsigned int	no_msi:1;		/* May not use MSI */
-	unsigned int	no_64bit_msi:1;		/* May only use 32-bit MSIs */
 	unsigned int	block_cfg_access:1;	/* Config space access blocked */
 	unsigned int	broken_parity_status:1;	/* Generates false positive parity */
 	unsigned int	irq_reroute_variant:2;	/* Needs IRQ rerouting variant */
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 678f094d261a..ccdc47a7069d 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1122,7 +1122,7 @@ void irq_cpu_offline(void)
 }
 #endif
 
-#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 
 #ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
 /**
@@ -1194,6 +1194,15 @@ EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
 
 #endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
 
+#ifdef CONFIG_SMP
+void irq_chip_pre_redirect_parent(struct irq_data *data)
+{
+	data = data->parent_data;
+	data->chip->irq_pre_redirect(data);
+}
+EXPORT_SYMBOL_GPL(irq_chip_pre_redirect_parent);
+#endif
+
 /**
  * irq_chip_set_parent_state - set the state of a parent interrupt.
  *
@@ -1476,6 +1485,19 @@ void irq_chip_release_resources_parent(struct irq_data *data)
 		data->chip->irq_release_resources(data);
 }
 EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
+
+#ifdef CONFIG_SMP
+int irq_chip_redirect_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
+{
+	struct irq_redirect *redir = &irq_data_to_desc(data)->redirect;
+
+	WRITE_ONCE(redir->target_cpu, cpumask_first(dest));
+	irq_data_update_effective_affinity(data, dest);
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+EXPORT_SYMBOL_GPL(irq_chip_redirect_set_affinity);
 #endif
 
 /**
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index f8e4e13dbe33..501a653d4153 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -78,8 +78,12 @@ static int alloc_masks(struct irq_desc *desc, int node)
 	return 0;
 }
 
-static void desc_smp_init(struct irq_desc *desc, int node,
-			  const struct cpumask *affinity)
+static void irq_redirect_work(struct irq_work *work)
+{
+	handle_irq_desc(container_of(work, struct irq_desc, redirect.work));
+}
+
+static void desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity)
 {
 	if (!affinity)
 		affinity = irq_default_affinity;
@@ -91,6 +95,7 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 #ifdef CONFIG_NUMA
 	desc->irq_common_data.node = node;
 #endif
+	desc->redirect.work = IRQ_WORK_INIT_HARD(irq_redirect_work);
 }
 
 static void free_masks(struct irq_desc *desc)
@@ -766,6 +771,83 @@ int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq)
 	WARN_ON_ONCE(!in_nmi());
 	return handle_irq_desc(irq_resolve_mapping(domain, hwirq));
 }
+
+#ifdef CONFIG_SMP
+static bool demux_redirect_remote(struct irq_desc *desc)
+{
+	guard(raw_spinlock)(&desc->lock);
+	const struct cpumask *m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+	unsigned int target_cpu = READ_ONCE(desc->redirect.target_cpu);
+
+	if (desc->irq_data.chip->irq_pre_redirect)
+		desc->irq_data.chip->irq_pre_redirect(&desc->irq_data);
+
+	/*
+	 * If the interrupt handler is already running on a CPU that's included
+	 * in the interrupt's affinity mask, redirection is not necessary.
+	 */
+	if (cpumask_test_cpu(smp_processor_id(), m))
+		return false;
+
+	/*
+	 * The desc->action check protects against IRQ shutdown: __free_irq() sets
+	 * desc->action to NULL while holding desc->lock, which we also hold.
+	 *
+	 * Calling irq_work_queue_on() here is safe w.r.t. CPU unplugging:
+	 *   - takedown_cpu() schedules multi_cpu_stop() on all active CPUs,
+	 *     including the one that's taken down.
+	 *   - multi_cpu_stop() acts like a barrier, which means all active
+	 *     CPUs go through MULTI_STOP_DISABLE_IRQ and disable hard IRQs
+	 *     *before* the dying CPU runs take_cpu_down() in MULTI_STOP_RUN.
+	 *   - Hard IRQs are re-enabled at the end of multi_cpu_stop(), *after*
+	 *     the dying CPU has run take_cpu_down() in MULTI_STOP_RUN.
+	 *   - Since we run in hard IRQ context, we run either before or after
+	 *     take_cpu_down() but never concurrently.
+	 *   - If we run before take_cpu_down(), the dying CPU hasn't been marked
+	 *     offline yet (it's marked via take_cpu_down() -> __cpu_disable()),
+	 *     so the WARN in irq_work_queue_on() can't occur.
+	 *   - Furthermore, the work item we queue will be flushed later via
+	 *     take_cpu_down() -> cpuhp_invoke_callback_range_nofail() ->
+	 *     smpcfd_dying_cpu() -> irq_work_run().
+	 *   - If we run after take_cpu_down(), target_cpu has been already
+	 *     updated via take_cpu_down() -> __cpu_disable(), which eventually
+	 *     calls irq_do_set_affinity() during IRQ migration. So, target_cpu
+	 *     no longer points to the dying CPU in this case.
+	 */
+	if (desc->action)
+		irq_work_queue_on(&desc->redirect.work, target_cpu);
+
+	return true;
+}
+#else /* CONFIG_SMP */
+static bool demux_redirect_remote(struct irq_desc *desc)
+{
+	return false;
+}
+#endif
+
+/**
+ * generic_handle_demux_domain_irq - Invoke the handler for a hardware interrupt
+ *				     of a demultiplexing domain.
+ * @domain:	The domain where to perform the lookup
+ * @hwirq:	The hardware interrupt number to convert to a logical one
+ *
+ * Returns:	True on success, or false if lookup has failed
+ */
+bool generic_handle_demux_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq)
+{
+	struct irq_desc *desc = irq_resolve_mapping(domain, hwirq);
+
+	if (unlikely(!desc))
+		return false;
+
+	if (demux_redirect_remote(desc))
+		return true;
+
+	return !handle_irq_desc(desc);
+}
+EXPORT_SYMBOL_GPL(generic_handle_demux_domain_irq);
+
 #endif
 
 /* Dynamic interrupt handling */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8b1b4c8a4f54..acb4c3de69c6 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -35,6 +35,16 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
+#ifdef CONFIG_SMP
+static inline void synchronize_irqwork(struct irq_desc *desc)
+{
+	/* Synchronize pending or on the fly redirect work */
+	irq_work_sync(&desc->redirect.work);
+}
+#else
+static inline void synchronize_irqwork(struct irq_desc *desc) { }
+#endif
+
 static int __irq_get_irqchip_state(struct irq_data *d, enum irqchip_irq_state which, bool *state);
 
 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
@@ -107,7 +117,9 @@ EXPORT_SYMBOL(synchronize_hardirq);
 
 static void __synchronize_irq(struct irq_desc *desc)
 {
+	synchronize_irqwork(desc);
 	__synchronize_hardirq(desc, true);
+
 	/*
 	 * We made sure that no hardirq handler is running. Now verify that no
 	 * threaded handlers are active.
@@ -217,8 +229,7 @@ static inline void irq_validate_effective_affinity(struct irq_data *data) { }
 
 static DEFINE_PER_CPU(struct cpumask, __tmp_mask);
 
-int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			bool force)
+int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 {
 	struct cpumask *tmp_mask = this_cpu_ptr(&__tmp_mask);
 	struct irq_desc *desc = irq_data_to_desc(data);
diff --git a/sound/hda/controllers/intel.c b/sound/hda/controllers/intel.c
index 1e8e3d61291a..a44de2306a2b 100644
--- a/sound/hda/controllers/intel.c
+++ b/sound/hda/controllers/intel.c
@@ -1903,11 +1903,6 @@ static int azx_first_init(struct azx *chip)
 		chip->gts_present = true;
 #endif
 
-	if (chip->msi && chip->driver_caps & AZX_DCAPS_NO_MSI64) {
-		dev_dbg(card->dev, "Disabling 64bit MSI\n");
-		pci->no_64bit_msi = true;
-	}
-
 	pci_set_master(pci);
 
 	gcap = azx_readw(chip, GCAP);
@@ -1958,6 +1953,11 @@ static int azx_first_init(struct azx *chip)
 		dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(32));
 	dma_set_max_seg_size(&pci->dev, UINT_MAX);
 
+	if (chip->msi && chip->driver_caps & AZX_DCAPS_NO_MSI64) {
+		dev_dbg(card->dev, "Restricting MSI to %u-bit\n", dma_bits);
+		pci->msi_addr_mask = DMA_BIT_MASK(dma_bits);
+	}
+
 	/* read number of streams from GCAP register instead of using
 	 * hardcoded value
 	 */
[GIT pull] timers/core for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest timers/core branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers-core-2026-02-09

up to:  24989330fb99: time/kunit: Document handling of negative years of is_leap()


Updates for the core time subsystem:

      time/timecounter: Inline timecounter_cyc2time() (a usage sketch follows
      after the summary list below)

  - Optimize the tick dependency check in case that the tracepoint is disabled,
    which improves the hotpath performance in the tick management code, which
    is a hotpath on transitions in and out of idle.

  - The usual cleanups and improvements


Thanks,

	tglx

------------------>
Eric Dumazet (1):
      time/timecounter: Inline timecounter_cyc2time()

Ionut Nechita (Sunlight Linux) (1):
      tick/nohz: Optimize check_tick_dependency() with early return

Mark Brown (1):
      time/kunit: Document handling of negative years of is_leap()

Thomas Gleixner (1):
      time/sched_clock: Use ACCESS_PRIVATE() to evaluate hrtimer::function

Thomas Weißschuh (3):
      hrtimer: Remove unused resolution constants
      hrtimer: Remove public definition of HIGH_RES_NSEC
      hrtimer: Drop _tv64() helpers


 include/linux/hrtimer.h      | 15 ---------------
 include/linux/hrtimer_defs.h | 20 --------------------
 include/linux/timecounter.h  | 31 +++++++++++++++++++++++++++++--
 kernel/time/hrtimer.c        | 14 +++++++++++---
 kernel/time/sched_clock.c    |  2 +-
 kernel/time/tick-sched.c     |  3 +++
 kernel/time/time_test.c      |  4 +++-
 kernel/time/timecounter.c    | 35 -----------------------------------
 8 files changed, 47 insertions(+), 77 deletions(-)

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 2cf1bf65b225..eb2d3d9dd064 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -112,12 +112,6 @@ static inline void hrtimer_set_expires_range_ns(struct hrtimer *timer, ktime_t t
 	timer->node.expires = ktime_add_safe(time, ns_to_ktime(delta));
 }
 
-static inline void hrtimer_set_expires_tv64(struct hrtimer *timer, s64 tv64)
-{
-	timer->node.expires = tv64;
-	timer->_softexpires = tv64;
-}
-
 static inline void hrtimer_add_expires(struct hrtimer *timer, ktime_t time)
 {
 	timer->node.expires = ktime_add_safe(timer->node.expires, time);
@@ -140,15 +134,6 @@ static inline ktime_t hrtimer_get_softexpires(const struct hrtimer *timer)
 	return timer->_softexpires;
 }
 
-static inline s64 hrtimer_get_expires_tv64(const struct hrtimer *timer)
-{
-	return timer->node.expires;
-}
-static inline s64 hrtimer_get_softexpires_tv64(const struct hrtimer *timer)
-{
-	return timer->_softexpires;
-}
-
 static inline s64 hrtimer_get_expires_ns(const struct hrtimer *timer)
 {
 	return ktime_to_ns(timer->node.expires);
diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index aa49ffa130e5..02b010df6570 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -6,26 +6,6 @@
 #include <linux/timerqueue.h>
 #include <linux/seqlock.h>
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-# define HIGH_RES_NSEC		1
-# define KTIME_HIGH_RES		(HIGH_RES_NSEC)
-# define MONOTONIC_RES_NSEC	HIGH_RES_NSEC
-# define KTIME_MONOTONIC_RES	KTIME_HIGH_RES
-
-#else
-
-# define MONOTONIC_RES_NSEC	LOW_RES_NSEC
-# define KTIME_MONOTONIC_RES	KTIME_LOW_RES
-
-#endif
-
 #ifdef CONFIG_64BIT
 # define __hrtimer_clock_base_align	____cacheline_aligned
 #else
diff --git a/include/linux/timecounter.h b/include/linux/timecounter.h
index dce03a5cafb7..7de6b350e559 100644
--- a/include/linux/timecounter.h
+++ b/include/linux/timecounter.h
@@ -115,6 +115,15 @@ extern void timecounter_init(struct timecounter *tc,
  */
 extern u64 timecounter_read(struct timecounter *tc);
 
+/*
+ * This is like cyclecounter_cyc2ns(), but it is used for computing a
+ * time previous to the time stored in the cycle counter.
+ */
+static inline u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, u64 cycles, u64 frac)
+{
+	return ((cycles * cc->mult) - frac) >> cc->shift;
+}
+
 /**
  * timecounter_cyc2time - convert a cycle counter to same
  *                        time base as values returned by
@@ -131,7 +140,25 @@ extern u64 timecounter_read(struct timecounter *tc);
  *
  * Returns: cycle counter converted to nanoseconds since the initial time stamp
  */
-extern u64 timecounter_cyc2time(const struct timecounter *tc,
-				u64 cycle_tstamp);
+static inline u64 timecounter_cyc2time(const struct timecounter *tc, u64 cycle_tstamp)
+{
+	const struct cyclecounter *cc = tc->cc;
+	u64 delta = (cycle_tstamp - tc->cycle_last) & cc->mask;
+	u64 nsec = tc->nsec, frac = tc->frac;
+
+	/*
+	 * Instead of always treating cycle_tstamp as more recent than
+	 * tc->cycle_last, detect when it is too far in the future and
+	 * treat it as old time stamp instead.
+	 */
+	if (unlikely(delta > cc->mask / 2)) {
+		delta = (tc->cycle_last - cycle_tstamp) & cc->mask;
+		nsec -= cc_cyc2ns_backwards(cc, delta, frac);
+	} else {
+		nsec += cyclecounter_cyc2ns(cc, delta, tc->mask, &frac);
+	}
+
+	return nsec;
+}
 
 #endif
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index f8ea8c8fc895..d0ab2e9e3f30 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -49,6 +49,14 @@
 
 #include "tick-internal.h"
 
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution values.
+ */
+#define HIGH_RES_NSEC		1
+
 /*
  * Masks for selecting the soft and hard context timers from
  * cpu_base->active
@@ -806,7 +814,7 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
 	struct hrtimer_clock_base *base = timer->base;
 	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 
-	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+	WARN_ON_ONCE(hrtimer_get_expires(timer) < 0);
 
 	/*
 	 * CLOCK_REALTIME timer might be requested with an absolute
@@ -1053,7 +1061,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 
 		orun = ktime_divns(delta, incr);
 		hrtimer_add_expires_ns(timer, incr * orun);
-		if (hrtimer_get_expires_tv64(timer) > now)
+		if (hrtimer_get_expires(timer) > now)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -1835,7 +1843,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
 			 * are right-of a not yet expired timer, because that
 			 * timer will have to trigger a wakeup anyway.
 			 */
-			if (basenow < hrtimer_get_softexpires_tv64(timer))
+			if (basenow < hrtimer_get_softexpires(timer))
 				break;
 
 			__run_hrtimer(cpu_base, base, timer, &basenow, flags);
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index f39111830ca3..f3aaef695b8c 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -215,7 +215,7 @@ void sched_clock_register(u64 (*read)(void), int bits, unsigned long rate)
 
 	update_clock_read_data(&rd);
 
-	if (sched_clock_timer.function != NULL) {
+	if (ACCESS_PRIVATE(&sched_clock_timer, function) != NULL) {
 		/* update timeout for clock wrap */
 		hrtimer_start(&sched_clock_timer, cd.wrap_kt,
 			      HRTIMER_MODE_REL_HARD);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 8ddf74e705d3..fd928d374cfc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -344,6 +344,9 @@ static bool check_tick_dependency(atomic_t *dep)
 {
 	int val = atomic_read(dep);
 
+	if (likely(!tracepoint_enabled(tick_stop)))
+		return !val;
+
 	if (val & TICK_DEP_MASK_POSIX_TIMER) {
 		trace_tick_stop(0, TICK_DEP_MASK_POSIX_TIMER);
 		return true;
diff --git a/kernel/time/time_test.c b/kernel/time/time_test.c
index 2889763165e5..1b99180da288 100644
--- a/kernel/time/time_test.c
+++ b/kernel/time/time_test.c
@@ -4,7 +4,9 @@
 #include <linux/time.h>
 
 /*
- * Traditional implementation of leap year evaluation.
+ * Traditional implementation of leap year evaluation, but note that long
+ * is a signed type and the tests do cover negative year values. So this
+ * can't use the is_leap_year() helper from rtc.h.
  */
 static bool is_leap(long year)
 {
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c
index 3d2a354cfe1c..2e64dbb6302d 100644
--- a/kernel/time/timecounter.c
+++ b/kernel/time/timecounter.c
@@ -62,38 +62,3 @@ u64 timecounter_read(struct timecounter *tc)
 }
 EXPORT_SYMBOL_GPL(timecounter_read);
 
-/*
- * This is like cyclecounter_cyc2ns(), but it is used for computing a
- * time previous to the time stored in the cycle counter.
- */
-static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc,
-			       u64 cycles, u64 mask, u64 frac)
-{
-	u64 ns = (u64) cycles;
-
-	ns = ((ns * cc->mult) - frac) >> cc->shift;
-
-	return ns;
-}
-
-u64 timecounter_cyc2time(const struct timecounter *tc,
-			 u64 cycle_tstamp)
-{
-	u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask;
-	u64 nsec = tc->nsec, frac = tc->frac;
-
-	/*
-	 * Instead of always treating cycle_tstamp as more recent
-	 * than tc->cycle_last, detect when it is too far in the
-	 * future and treat it as old time stamp instead.
-	 */
-	if (delta > tc->cc->mask / 2) {
-		delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask;
-		nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac);
-	} else {
-		nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac);
-	}
-
-	return nsec;
-}
-EXPORT_SYMBOL_GPL(timecounter_cyc2time);
[GIT pull] irq/drivers for v7.0-rc1
Posted by Thomas Gleixner 4 hours ago
Linus,

please pull the latest irq/drivers branch from:

   git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq-drivers-2026-02-09

up to:  6054b10c3288: irqchip/gic-v5: Fix spelling mistake "ouside" -> "outside"

Updates for interrupt chip drivers:

  - Add support for the Renesas RZ/V2N SoC

  - Add a new driver for the Renesas RZ/[TN]2H SoCs

  - Preserve the register state of the RISCV APLIC interrupt controller across
    suspend/resume

  - Reinitialize the RISCV IMSIC registers after suspend/resume

  - Make the various Loongson interrupt chip drivers 32/64-bit aware

  - Handle the number of hardware interrupts in the SIFIVE PLIC driver
    correctly.

    Hardware interrupt 0 is reserved, which resulted in inconsistent
    accounting. That went unnoticed as the off-by-one is only noticeable when
    the number of device interrupts is a multiple of 32 (see the sketch after
    this list).

  - The usual device tree updates, cleanups and improvements all over the place.
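
To illustrate the PLIC accounting issue mentioned above (sketch only, not the
actual irq-sifive-plic.c change):

#include <linux/kernel.h>

/*
 * PLIC source 0 is reserved, so "riscv,ndev" device interrupts correspond to
 * hardware sources 1..ndev, i.e. ndev + 1 hardware sources in total.
 */
static unsigned int plic_nr_enable_words(u32 ndev)
{
	unsigned int nr_sources = ndev + 1;	/* include reserved source 0 */

	/*
	 * Sizing registers and bitmaps with ndev alone only goes wrong when
	 * ndev is a multiple of 32: ndev = 32 covers sources 0..32 and needs
	 * two 32-bit enable words, not one.
	 */
	return DIV_ROUND_UP(nr_sources, 32);
}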

Thanks,

	tglx

------------------>
Aniket Limaye (2):
      dt-bindings: interrupt-controller: ti,sci-intr: Per-line interrupt-types
      irqchip/ti-sci-intr: Allow parsing interrupt-types per-line

Biju Das (1):
      irqchip/renesas-rzv2h: Add suspend/resume support

Colin Ian King (1):
      irqchip/gic-v5: Fix spelling mistake "ouside" -> "outside"

Cosmin Tanislav (4):
      dt-bindings: interrupt-controller: Document RZ/{T2H,N2H} ICU
      irqchip: Add RZ/{T2H,N2H} Interrupt Controller (ICU) driver
      arm64: dts: renesas: r9a09g077: Add ICU support
      arm64: dts: renesas: r9a09g087: Add ICU support

Huacai Chen (7):
      irqchip/loongarch-avec: Adjust irqchip driver for 32BIT/64BIT
      irqchip/loongson-liointc: Adjust irqchip driver for 32BIT/64BIT
      irqchip/loongson-eiointc: Adjust irqchip driver for 32BIT/64BIT
      irqchip/loongson-htvec: Adjust irqchip driver for 32BIT/64BIT
      irqchip/loongson-pch-msi: Adjust irqchip driver for 32BIT/64BIT
      irqchip/loongson-pch-pic: Adjust irqchip driver for 32BIT/64BIT
      irqchip: Allow LoongArch irqchip drivers on both 32BIT/64BIT

Lad Prabhakar (2):
      dt-bindings: interrupt-controller: renesas,rzv2h-icu: Document RZ/V2N SoC
      irqchip/renesas-rzv2h: Add support for RZ/V2N SoC

Nick Hu (2):
      irqchip/riscv-imsic: Add a CPU pm notifier to restore the IMSIC on exit
      irqchip/riscv-aplic: Preserve APLIC states across suspend/resume

Thomas Gleixner (2):
      irqchip/aspeed-scu-ic: Remove unused variable mask
      irqchip/sifive-plic: Handle number of hardware interrupts correctly

Vladimir Kondratiev (2):
      irqchip/aslint-sswi: Request IO memory resource
      irqchip/aslint-sswi: Fix error check of of_io_request_and_map() result

Yangyu Chen (1):
      dt-bindings: interrupt-controller: sifive,plic: Clarify the riscv,ndev meaning in PLIC


 .../renesas,r9a09g077-icu.yaml                     | 236 +++++++++++++++++
 .../interrupt-controller/renesas,rzv2h-icu.yaml    |   1 +
 .../interrupt-controller/sifive,plic-1.0.0.yaml    |   4 +-
 .../bindings/interrupt-controller/ti,sci-intr.yaml |  38 ++-
 arch/arm64/boot/dts/renesas/r9a09g077.dtsi         |  73 ++++++
 arch/arm64/boot/dts/renesas/r9a09g087.dtsi         |  73 ++++++
 drivers/irqchip/Kconfig                            |  19 +-
 drivers/irqchip/Makefile                           |   1 +
 drivers/irqchip/irq-aclint-sswi.c                  |   8 +-
 drivers/irqchip/irq-aspeed-scu-ic.c                |   3 +-
 drivers/irqchip/irq-gic-v5-its.c                   |   2 +-
 drivers/irqchip/irq-loongarch-avec.c               |  14 +-
 drivers/irqchip/irq-loongson-eiointc.c             |  36 ++-
 drivers/irqchip/irq-loongson-htvec.c               |  14 +-
 drivers/irqchip/irq-loongson-liointc.c             |   8 +-
 drivers/irqchip/irq-loongson-pch-msi.c             |   9 +-
 drivers/irqchip/irq-loongson-pch-pic.c             |  14 +-
 drivers/irqchip/irq-renesas-rzt2h.c                | 280 +++++++++++++++++++++
 drivers/irqchip/irq-renesas-rzv2h.c                |  61 ++++-
 drivers/irqchip/irq-riscv-aplic-direct.c           |  10 +
 drivers/irqchip/irq-riscv-aplic-main.c             | 170 ++++++++++++-
 drivers/irqchip/irq-riscv-aplic-main.h             |  19 ++
 drivers/irqchip/irq-riscv-imsic-early.c            |  39 ++-
 drivers/irqchip/irq-sifive-plic.c                  |  82 +++---
 drivers/irqchip/irq-ti-sci-intr.c                  |  54 ++--
 drivers/soc/renesas/Kconfig                        |   1 +
 include/linux/irqchip/irq-renesas-rzt2h.h          |  23 ++
 27 files changed, 1172 insertions(+), 120 deletions(-)
 create mode 100644 Documentation/devicetree/bindings/interrupt-controller/renesas,r9a09g077-icu.yaml
 create mode 100644 drivers/irqchip/irq-renesas-rzt2h.c
 create mode 100644 include/linux/irqchip/irq-renesas-rzt2h.h

diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,r9a09g077-icu.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,r9a09g077-icu.yaml
new file mode 100644
index 000000000000..78c01d14e765
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,r9a09g077-icu.yaml
@@ -0,0 +1,236 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interrupt-controller/renesas,r9a09g077-icu.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Renesas RZ/{T2H,N2H} Interrupt Controller
+
+maintainers:
+  - Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>
+
+allOf:
+  - $ref: /schemas/interrupt-controller.yaml#
+
+description:
+  The Interrupt Controller (ICU) handles software-triggered interrupts
+  (INTCPU), external interrupts (IRQ and SEI), error interrupts and DMAC
+  requests.
+
+properties:
+  compatible:
+    oneOf:
+      - const: renesas,r9a09g077-icu # RZ/T2H
+
+      - items:
+          - enum:
+              - renesas,r9a09g087-icu # RZ/N2H
+          - const: renesas,r9a09g077-icu
+
+  reg:
+    items:
+      - description: Non-safety registers (INTCPU0-13, IRQ0-13)
+      - description: Safety registers (INTCPU14-15, IRQ14-15, SEI)
+
+  '#interrupt-cells':
+    description: The first cell is the SPI number of the interrupt, as per user
+      manual. The second cell is used to specify the flag.
+    const: 2
+
+  '#address-cells':
+    const: 0
+
+  interrupt-controller: true
+
+  interrupts:
+    items:
+      - description: Software interrupt 0
+      - description: Software interrupt 1
+      - description: Software interrupt 2
+      - description: Software interrupt 3
+      - description: Software interrupt 4
+      - description: Software interrupt 5
+      - description: Software interrupt 6
+      - description: Software interrupt 7
+      - description: Software interrupt 8
+      - description: Software interrupt 9
+      - description: Software interrupt 10
+      - description: Software interrupt 11
+      - description: Software interrupt 12
+      - description: Software interrupt 13
+      - description: Software interrupt 14
+      - description: Software interrupt 15
+      - description: External pin interrupt 0
+      - description: External pin interrupt 1
+      - description: External pin interrupt 2
+      - description: External pin interrupt 3
+      - description: External pin interrupt 4
+      - description: External pin interrupt 5
+      - description: External pin interrupt 6
+      - description: External pin interrupt 7
+      - description: External pin interrupt 8
+      - description: External pin interrupt 9
+      - description: External pin interrupt 10
+      - description: External pin interrupt 11
+      - description: External pin interrupt 12
+      - description: External pin interrupt 13
+      - description: External pin interrupt 14
+      - description: External pin interrupt 15
+      - description: System error interrupt
+      - description: Cortex-A55 error event 0
+      - description: Cortex-A55 error event 1
+      - description: Cortex-R52 CPU 0 error event 0
+      - description: Cortex-R52 CPU 0 error event 1
+      - description: Cortex-R52 CPU 1 error event 0
+      - description: Cortex-R52 CPU 1 error event 1
+      - description: Peripherals error event 0
+      - description: Peripherals error event 1
+      - description: DSMIF error event 0
+      - description: DSMIF error event 1
+      - description: ENCIF error event 0
+      - description: ENCIF error event 1
+
+  interrupt-names:
+    items:
+      - const: intcpu0
+      - const: intcpu1
+      - const: intcpu2
+      - const: intcpu3
+      - const: intcpu4
+      - const: intcpu5
+      - const: intcpu6
+      - const: intcpu7
+      - const: intcpu8
+      - const: intcpu9
+      - const: intcpu10
+      - const: intcpu11
+      - const: intcpu12
+      - const: intcpu13
+      - const: intcpu14
+      - const: intcpu15
+      - const: irq0
+      - const: irq1
+      - const: irq2
+      - const: irq3
+      - const: irq4
+      - const: irq5
+      - const: irq6
+      - const: irq7
+      - const: irq8
+      - const: irq9
+      - const: irq10
+      - const: irq11
+      - const: irq12
+      - const: irq13
+      - const: irq14
+      - const: irq15
+      - const: sei
+      - const: ca55-err0
+      - const: ca55-err1
+      - const: cr520-err0
+      - const: cr520-err1
+      - const: cr521-err0
+      - const: cr521-err1
+      - const: peri-err0
+      - const: peri-err1
+      - const: dsmif-err0
+      - const: dsmif-err1
+      - const: encif-err0
+      - const: encif-err1
+
+  clocks:
+    maxItems: 1
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+  - '#interrupt-cells'
+  - '#address-cells'
+  - interrupt-controller
+  - interrupts
+  - interrupt-names
+  - clocks
+  - power-domains
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h>
+
+    icu: interrupt-controller@802a0000 {
+      compatible = "renesas,r9a09g077-icu";
+      reg = <0x802a0000 0x10000>,
+            <0x812a0000 0x50>;
+      #interrupt-cells = <2>;
+      #address-cells = <0>;
+      interrupt-controller;
+      interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 1 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 5 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 6 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 7 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 9 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 10 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 11 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 12 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 13 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 14 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 15 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 16 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 17 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 18 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 20 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 24 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 406 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 407 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 408 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 409 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 410 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 411 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 412 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 413 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 415 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 416 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 417 IRQ_TYPE_EDGE_RISING>,
+                   <GIC_SPI 418 IRQ_TYPE_EDGE_RISING>;
+      interrupt-names = "intcpu0", "intcpu1", "intcpu2",
+                        "intcpu3", "intcpu4", "intcpu5",
+                        "intcpu6", "intcpu7", "intcpu8",
+                        "intcpu9", "intcpu10", "intcpu11",
+                        "intcpu12", "intcpu13", "intcpu14",
+                        "intcpu15",
+                        "irq0", "irq1", "irq2", "irq3",
+                        "irq4", "irq5", "irq6", "irq7",
+                        "irq8", "irq9", "irq10", "irq11",
+                        "irq12", "irq13", "irq14", "irq15",
+                        "sei",
+                        "ca55-err0", "ca55-err1",
+                        "cr520-err0", "cr520-err1",
+                        "cr521-err0", "cr521-err1",
+                        "peri-err0", "peri-err1",
+                        "dsmif-err0", "dsmif-err1",
+                        "encif-err0", "encif-err1";
+      clocks = <&cpg CPG_CORE R9A09G077_CLK_PCLKM>;
+      power-domains = <&cpg>;
+    };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
index 3f99c8645767..cb244b8f5e1c 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/renesas,rzv2h-icu.yaml
@@ -22,6 +22,7 @@ properties:
   compatible:
     enum:
       - renesas,r9a09g047-icu # RZ/G3E
+      - renesas,r9a09g056-icu # RZ/V2N
       - renesas,r9a09g057-icu # RZ/V2H(P)
 
   '#interrupt-cells':
diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
index 388fc2c620c0..e0267223887e 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml
@@ -108,7 +108,9 @@ properties:
   riscv,ndev:
     $ref: /schemas/types.yaml#/definitions/uint32
     description:
-      Specifies how many external interrupts are supported by this controller.
+      Specifies how many external (device) interrupts are supported by this
+      controller. Note that source 0 is reserved in the PLIC, so the valid
+      interrupt sources are 1 to riscv,ndev inclusive.
 
   clocks: true
 
diff --git a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml
index c99cc7323c71..de45f0c4b1d1 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml
+++ b/Documentation/devicetree/bindings/interrupt-controller/ti,sci-intr.yaml
@@ -15,8 +15,7 @@ allOf:
 description: |
   The Interrupt Router (INTR) module provides a mechanism to mux M
   interrupt inputs to N interrupt outputs, where all M inputs are selectable
-  to be driven per N output. An Interrupt Router can either handle edge
-  triggered or level triggered interrupts and that is fixed in hardware.
+  to be driven per N output.
 
                                    Interrupt Router
                                +----------------------+
@@ -64,9 +63,14 @@ properties:
   interrupt-controller: true
 
   '#interrupt-cells':
-    const: 1
+    enum: [1, 2]
     description: |
-      The 1st cell should contain interrupt router input hw number.
+      Number of cells in the interrupt specifier. Depends on ti,intr-trigger-type:
+      - If ti,intr-trigger-type is present: must be 1.
+        The 1st cell should contain the interrupt router input hw number.
+      - If ti,intr-trigger-type is absent: must be 2.
+        The 1st cell should contain the interrupt router input hw number.
+        The 2nd cell should contain the interrupt trigger type (preserved by the router).
 
   ti,interrupt-ranges:
     $ref: /schemas/types.yaml#/definitions/uint32-matrix
@@ -82,9 +86,22 @@ properties:
         - description: |
             "limit" specifies the limit for translation
 
+if:
+  required:
+    - ti,intr-trigger-type
+then:
+  properties:
+    '#interrupt-cells':
+      const: 1
+      description: Interrupt ID only. Interrupt type is specified globally
+else:
+  properties:
+    '#interrupt-cells':
+      const: 2
+      description: Interrupt ID and corresponding interrupt type
+
 required:
   - compatible
-  - ti,intr-trigger-type
   - interrupt-controller
   - '#interrupt-cells'
   - ti,sci
@@ -105,3 +122,14 @@ examples:
         ti,sci-dev-id = <131>;
         ti,interrupt-ranges = <0 360 32>;
     };
+
+  - |
+    interrupt-controller {
+        compatible = "ti,sci-intr";
+        interrupt-controller;
+        interrupt-parent = <&gic500>;
+        #interrupt-cells = <2>;
+        ti,sci = <&dmsc>;
+        ti,sci-dev-id = <131>;
+        ti,interrupt-ranges = <0 360 32>;
+    };
diff --git a/arch/arm64/boot/dts/renesas/r9a09g077.dtsi b/arch/arm64/boot/dts/renesas/r9a09g077.dtsi
index f5fa6ca06409..e5c98388023f 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g077.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g077.dtsi
@@ -756,6 +756,79 @@ cpg: clock-controller@80280000 {
 			#power-domain-cells = <0>;
 		};
 
+		icu: interrupt-controller@802a0000 {
+			compatible = "renesas,r9a09g077-icu";
+			reg = <0 0x802a0000 0 0x10000>,
+			      <0 0x812a0000 0 0x50>;
+			#interrupt-cells = <2>;
+			#address-cells = <0>;
+			interrupt-controller;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 1 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 5 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 6 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 7 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 9 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 10 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 11 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 12 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 13 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 14 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 15 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 16 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 17 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 18 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 20 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 24 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 406 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 407 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 408 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 409 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 410 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 411 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 412 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 413 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 415 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 416 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 417 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 418 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "intcpu0", "intcpu1", "intcpu2",
+					  "intcpu3", "intcpu4", "intcpu5",
+					  "intcpu6", "intcpu7", "intcpu8",
+					  "intcpu9", "intcpu10", "intcpu11",
+					  "intcpu12", "intcpu13", "intcpu14",
+					  "intcpu15",
+					  "irq0", "irq1", "irq2", "irq3",
+					  "irq4", "irq5", "irq6", "irq7",
+					  "irq8", "irq9", "irq10", "irq11",
+					  "irq12", "irq13", "irq14", "irq15",
+					  "sei",
+					  "ca55-err0", "ca55-err1",
+					  "cr520-err0", "cr520-err1",
+					  "cr521-err0", "cr521-err1",
+					  "peri-err0", "peri-err1",
+					  "dsmif-err0", "dsmif-err1",
+					  "encif-err0", "encif-err1";
+			clocks = <&cpg CPG_CORE R9A09G077_CLK_PCLKM>;
+			power-domains = <&cpg>;
+		};
+
 		pinctrl: pinctrl@802c0000 {
 			compatible = "renesas,r9a09g077-pinctrl";
 			reg = <0 0x802c0000 0 0x10000>,
diff --git a/arch/arm64/boot/dts/renesas/r9a09g087.dtsi b/arch/arm64/boot/dts/renesas/r9a09g087.dtsi
index 361a9235f00d..0e0a9c1bc885 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g087.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g087.dtsi
@@ -759,6 +759,79 @@ cpg: clock-controller@80280000 {
 			#power-domain-cells = <0>;
 		};
 
+		icu: interrupt-controller@802a0000 {
+			compatible = "renesas,r9a09g087-icu", "renesas,r9a09g077-icu";
+			reg = <0 0x802a0000 0 0x10000>,
+			      <0 0x812a0000 0 0x50>;
+			#interrupt-cells = <2>;
+			#address-cells = <0>;
+			interrupt-controller;
+			interrupts = <GIC_SPI 0 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 1 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 3 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 5 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 6 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 7 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 9 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 10 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 11 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 12 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 13 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 14 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 15 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 16 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 17 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 18 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 19 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 20 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 21 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 22 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 23 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 24 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 29 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 406 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 407 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 408 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 409 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 410 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 411 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 412 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 413 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 414 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 415 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 416 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 417 IRQ_TYPE_EDGE_RISING>,
+				     <GIC_SPI 418 IRQ_TYPE_EDGE_RISING>;
+			interrupt-names = "intcpu0", "intcpu1", "intcpu2",
+					  "intcpu3", "intcpu4", "intcpu5",
+					  "intcpu6", "intcpu7", "intcpu8",
+					  "intcpu9", "intcpu10", "intcpu11",
+					  "intcpu12", "intcpu13", "intcpu14",
+					  "intcpu15",
+					  "irq0", "irq1", "irq2", "irq3",
+					  "irq4", "irq5", "irq6", "irq7",
+					  "irq8", "irq9", "irq10", "irq11",
+					  "irq12", "irq13", "irq14", "irq15",
+					  "sei",
+					  "ca55-err0", "ca55-err1",
+					  "cr520-err0", "cr520-err1",
+					  "cr521-err0", "cr521-err1",
+					  "peri-err0", "peri-err1",
+					  "dsmif-err0", "dsmif-err1",
+					  "encif-err0", "encif-err1";
+			clocks = <&cpg CPG_CORE R9A09G087_CLK_PCLKM>;
+			power-domains = <&cpg>;
+		};
+
 		pinctrl: pinctrl@802c0000 {
 			compatible = "renesas,r9a09g087-pinctrl";
 			reg = <0 0x802c0000 0 0x10000>,
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index f334f49c9791..f07b00d7fef9 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -297,6 +297,14 @@ config RENESAS_RZG2L_IRQC
 	  Enable support for the Renesas RZ/G2L (and alike SoC) Interrupt Controller
 	  for external devices.
 
+config RENESAS_RZT2H_ICU
+	bool "Renesas RZ/{T2H,N2H} ICU support" if COMPILE_TEST
+	select GENERIC_IRQ_CHIP
+	select IRQ_DOMAIN_HIERARCHY
+	help
+	  Enable support for the Renesas RZ/{T2H,N2H} Interrupt Controller
+	  (ICU).
+
 config RENESAS_RZV2H_ICU
 	bool "Renesas RZ/V2H(P) ICU support" if COMPILE_TEST
 	select GENERIC_IRQ_CHIP
@@ -698,7 +706,7 @@ config IRQ_LOONGARCH_CPU
 
 config LOONGSON_LIOINTC
 	bool "Loongson Local I/O Interrupt Controller"
-	depends on MACH_LOONGSON64
+	depends on MACH_LOONGSON64 || LOONGARCH
 	default y
 	select IRQ_DOMAIN
 	select GENERIC_IRQ_CHIP
@@ -708,7 +716,6 @@ config LOONGSON_LIOINTC
 config LOONGSON_EIOINTC
 	bool "Loongson Extend I/O Interrupt Controller"
 	depends on LOONGARCH
-	depends on MACH_LOONGSON64
 	default MACH_LOONGSON64
 	select IRQ_DOMAIN_HIERARCHY
 	select GENERIC_IRQ_CHIP
@@ -726,7 +733,7 @@ config LOONGSON_HTPIC
 
 config LOONGSON_HTVEC
 	bool "Loongson HyperTransport Interrupt Vector Controller"
-	depends on MACH_LOONGSON64
+	depends on MACH_LOONGSON64 || LOONGARCH
 	default MACH_LOONGSON64
 	select IRQ_DOMAIN_HIERARCHY
 	help
@@ -734,7 +741,7 @@ config LOONGSON_HTVEC
 
 config LOONGSON_PCH_PIC
 	bool "Loongson PCH PIC Controller"
-	depends on MACH_LOONGSON64
+	depends on MACH_LOONGSON64 || LOONGARCH
 	default MACH_LOONGSON64
 	select IRQ_DOMAIN_HIERARCHY
 	select IRQ_FASTEOI_HIERARCHY_HANDLERS
@@ -743,7 +750,7 @@ config LOONGSON_PCH_PIC
 
 config LOONGSON_PCH_MSI
 	bool "Loongson PCH MSI Controller"
-	depends on MACH_LOONGSON64
+	depends on MACH_LOONGSON64 || LOONGARCH
 	depends on PCI
 	default MACH_LOONGSON64
 	select IRQ_DOMAIN_HIERARCHY
@@ -755,7 +762,7 @@ config LOONGSON_PCH_MSI
 config LOONGSON_PCH_LPC
 	bool "Loongson PCH LPC Controller"
 	depends on LOONGARCH
-	depends on MACH_LOONGSON64
+	depends on MACH_LOONGSON64 || LOONGARCH
 	default MACH_LOONGSON64
 	select IRQ_DOMAIN_HIERARCHY
 	help
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 6a229443efe0..26aa3b6ec99f 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -54,6 +54,7 @@ obj-$(CONFIG_RENESAS_INTC_IRQPIN)	+= irq-renesas-intc-irqpin.o
 obj-$(CONFIG_RENESAS_IRQC)		+= irq-renesas-irqc.o
 obj-$(CONFIG_RENESAS_RZA1_IRQC)		+= irq-renesas-rza1.o
 obj-$(CONFIG_RENESAS_RZG2L_IRQC)	+= irq-renesas-rzg2l.o
+obj-$(CONFIG_RENESAS_RZT2H_ICU)		+= irq-renesas-rzt2h.o
 obj-$(CONFIG_RENESAS_RZV2H_ICU)		+= irq-renesas-rzv2h.o
 obj-$(CONFIG_VERSATILE_FPGA_IRQ)	+= irq-versatile-fpga.o
 obj-$(CONFIG_ARCH_NSPIRE)		+= irq-zevio.o
diff --git a/drivers/irqchip/irq-aclint-sswi.c b/drivers/irqchip/irq-aclint-sswi.c
index fee30f3bc5ac..ca06efd86fa1 100644
--- a/drivers/irqchip/irq-aclint-sswi.c
+++ b/drivers/irqchip/irq-aclint-sswi.c
@@ -109,9 +109,11 @@ static int __init aclint_sswi_probe(struct fwnode_handle *fwnode)
 	if (!is_of_node(fwnode))
 		return -EINVAL;
 
-	reg = of_iomap(to_of_node(fwnode), 0);
-	if (!reg)
-		return -ENOMEM;
+	reg = of_io_request_and_map(to_of_node(fwnode), 0, NULL);
+	if (IS_ERR(reg)) {
+		pr_err("%pfwP: Failed to map MMIO region\n", fwnode);
+		return PTR_ERR(reg);
+	}
 
 	/* Parse SSWI setting */
 	rc = aclint_sswi_parse_irq(fwnode, reg);
diff --git a/drivers/irqchip/irq-aspeed-scu-ic.c b/drivers/irqchip/irq-aspeed-scu-ic.c
index bee59c8c4c93..7398c3b9eace 100644
--- a/drivers/irqchip/irq-aspeed-scu-ic.c
+++ b/drivers/irqchip/irq-aspeed-scu-ic.c
@@ -104,11 +104,10 @@ static void aspeed_scu_ic_irq_handler_split(struct irq_desc *desc)
 	struct aspeed_scu_ic *scu_ic = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
 	unsigned long bit, enabled, max, status;
-	unsigned int sts, mask;
+	unsigned int sts;
 
 	chained_irq_enter(chip, desc);
 
-	mask = scu_ic->irq_enable;
 	sts = readl(scu_ic->base + scu_ic->isr);
 	enabled = sts & scu_ic->irq_enable;
 	sts = readl(scu_ic->base + scu_ic->isr);
diff --git a/drivers/irqchip/irq-gic-v5-its.c b/drivers/irqchip/irq-gic-v5-its.c
index 554485f0be1f..f410e178c841 100644
--- a/drivers/irqchip/irq-gic-v5-its.c
+++ b/drivers/irqchip/irq-gic-v5-its.c
@@ -900,7 +900,7 @@ static int gicv5_its_alloc_eventid(struct gicv5_its_dev *its_dev, msi_alloc_info
 		event_id_base = info->hwirq;
 
 		if (event_id_base >= its_dev->num_events) {
-			pr_err("EventID ouside of ITT range; cannot allocate an ITT entry!\n");
+			pr_err("EventID outside of ITT range; cannot allocate an ITT entry!\n");
 
 			return -EINVAL;
 		}
diff --git a/drivers/irqchip/irq-loongarch-avec.c b/drivers/irqchip/irq-loongarch-avec.c
index ba556c008cf3..fb8efde95393 100644
--- a/drivers/irqchip/irq-loongarch-avec.c
+++ b/drivers/irqchip/irq-loongarch-avec.c
@@ -58,11 +58,13 @@ struct avecintc_data {
 
 static inline void avecintc_enable(void)
 {
+#ifdef CONFIG_MACH_LOONGSON64
 	u64 value;
 
 	value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
 	value |= IOCSR_MISC_FUNC_AVEC_EN;
 	iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
+#endif
 }
 
 static inline void avecintc_ack_irq(struct irq_data *d)
@@ -167,7 +169,7 @@ void complete_irq_moving(void)
 	struct pending_list *plist = this_cpu_ptr(&pending_list);
 	struct avecintc_data *adata, *tdata;
 	int cpu, vector, bias;
-	uint64_t isr;
+	unsigned long isr;
 
 	guard(raw_spinlock)(&loongarch_avec.lock);
 
@@ -177,16 +179,16 @@ void complete_irq_moving(void)
 		bias = vector / VECTORS_PER_REG;
 		switch (bias) {
 		case 0:
-			isr = csr_read64(LOONGARCH_CSR_ISR0);
+			isr = csr_read(LOONGARCH_CSR_ISR0);
 			break;
 		case 1:
-			isr = csr_read64(LOONGARCH_CSR_ISR1);
+			isr = csr_read(LOONGARCH_CSR_ISR1);
 			break;
 		case 2:
-			isr = csr_read64(LOONGARCH_CSR_ISR2);
+			isr = csr_read(LOONGARCH_CSR_ISR2);
 			break;
 		case 3:
-			isr = csr_read64(LOONGARCH_CSR_ISR3);
+			isr = csr_read(LOONGARCH_CSR_ISR3);
 			break;
 		}
 
@@ -234,7 +236,7 @@ static void avecintc_irq_dispatch(struct irq_desc *desc)
 	chained_irq_enter(chip, desc);
 
 	while (true) {
-		unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
+		unsigned long vector = csr_read(LOONGARCH_CSR_IRR);
 		if (vector & IRR_INVALID_MASK)
 			break;
 
diff --git a/drivers/irqchip/irq-loongson-eiointc.c b/drivers/irqchip/irq-loongson-eiointc.c
index ad2105685b48..37e7e1f9bbe3 100644
--- a/drivers/irqchip/irq-loongson-eiointc.c
+++ b/drivers/irqchip/irq-loongson-eiointc.c
@@ -37,9 +37,9 @@
 #define  EXTIOI_ENABLE_INT_ENCODE      BIT(2)
 #define  EXTIOI_ENABLE_CPU_ENCODE      BIT(3)
 
-#define VEC_REG_COUNT		4
-#define VEC_COUNT_PER_REG	64
-#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
+#define VEC_COUNT		256
+#define VEC_COUNT_PER_REG	BITS_PER_LONG
+#define VEC_REG_COUNT		(VEC_COUNT / BITS_PER_LONG)
 #define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
 #define VEC_REG_BIT(irq_id)     ((irq_id) % VEC_COUNT_PER_REG)
 #define EIOINTC_ALL_ENABLE	0xffffffff
@@ -85,11 +85,13 @@ static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];
 
 static void eiointc_enable(void)
 {
+#ifdef CONFIG_MACH_LOONGSON64
 	uint64_t misc;
 
 	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
 	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
 	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
+#endif
 }
 
 static int cpu_to_eio_node(int cpu)
@@ -281,12 +283,34 @@ static int eiointc_router_init(unsigned int cpu)
 	return 0;
 }
 
+#if VEC_COUNT_PER_REG == 32
+static inline unsigned long read_isr(int i)
+{
+	return iocsr_read32(EIOINTC_REG_ISR + (i << 2));
+}
+
+static inline void write_isr(int i, unsigned long val)
+{
+	iocsr_write32(val, EIOINTC_REG_ISR + (i << 2));
+}
+#else
+static inline unsigned long read_isr(int i)
+{
+	return iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+}
+
+static inline void write_isr(int i, unsigned long val)
+{
+	iocsr_write64(val, EIOINTC_REG_ISR + (i << 3));
+}
+#endif
+
 static void eiointc_irq_dispatch(struct irq_desc *desc)
 {
 	struct eiointc_ip_route *info = irq_desc_get_handler_data(desc);
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	unsigned long pending;
 	bool handled = false;
-	u64 pending;
 	int i;
 
 	chained_irq_enter(chip, desc);
@@ -299,14 +323,14 @@ static void eiointc_irq_dispatch(struct irq_desc *desc)
 	 * read ISR for these 64 interrupt vectors rather than all vectors
 	 */
 	for (i = info->start; i < info->end; i++) {
-		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));
+		pending = read_isr(i);
 
 		/* Skip handling if pending bitmap is zero */
 		if (!pending)
 			continue;
 
 		/* Clear the IRQs */
-		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
+		write_isr(i, pending);
 		while (pending) {
 			int bit = __ffs(pending);
 			int irq = bit + VEC_COUNT_PER_REG * i;
diff --git a/drivers/irqchip/irq-loongson-htvec.c b/drivers/irqchip/irq-loongson-htvec.c
index d2be8e954e92..5a3339da97ad 100644
--- a/drivers/irqchip/irq-loongson-htvec.c
+++ b/drivers/irqchip/irq-loongson-htvec.c
@@ -295,19 +295,19 @@ static int __init acpi_cascade_irqdomain_init(void)
 	return 0;
 }
 
-int __init htvec_acpi_init(struct irq_domain *parent,
-				   struct acpi_madt_ht_pic *acpi_htvec)
+int __init htvec_acpi_init(struct irq_domain *parent, struct acpi_madt_ht_pic *acpi_htvec)
 {
-	int i, ret;
-	int num_parents, parent_irq[8];
+	int i, ret, num_parents, parent_irq[8];
 	struct fwnode_handle *domain_handle;
+	phys_addr_t addr;
 
 	if (!acpi_htvec)
 		return -EINVAL;
 
 	num_parents = HTVEC_MAX_PARENT_IRQ;
+	addr = (phys_addr_t)acpi_htvec->address;
 
-	domain_handle = irq_domain_alloc_fwnode(&acpi_htvec->address);
+	domain_handle = irq_domain_alloc_fwnode(&addr);
 	if (!domain_handle) {
 		pr_err("Unable to allocate domain handle\n");
 		return -ENOMEM;
@@ -317,9 +317,7 @@ int __init htvec_acpi_init(struct irq_domain *parent,
 	for (i = 0; i < HTVEC_MAX_PARENT_IRQ; i++)
 		parent_irq[i] = irq_create_mapping(parent, acpi_htvec->cascade[i]);
 
-	ret = htvec_init(acpi_htvec->address, acpi_htvec->size,
-			num_parents, parent_irq, domain_handle);
-
+	ret = htvec_init(addr, acpi_htvec->size, num_parents, parent_irq, domain_handle);
 	if (ret == 0)
 		ret = acpi_cascade_irqdomain_init();
 	else
diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c
index 0033c2188abc..551597e2c428 100644
--- a/drivers/irqchip/irq-loongson-liointc.c
+++ b/drivers/irqchip/irq-loongson-liointc.c
@@ -394,8 +394,9 @@ static int __init acpi_cascade_irqdomain_init(void)
 
 int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic *acpi_liointc)
 {
-	int ret;
+	phys_addr_t addr = (phys_addr_t)acpi_liointc->address;
 	struct fwnode_handle *domain_handle;
+	int ret;
 
 	parent_int_map[0] = acpi_liointc->cascade_map[0];
 	parent_int_map[1] = acpi_liointc->cascade_map[1];
@@ -403,14 +404,13 @@ int __init liointc_acpi_init(struct irq_domain *parent, struct acpi_madt_lio_pic
 	parent_irq[0] = irq_create_mapping(parent, acpi_liointc->cascade[0]);
 	parent_irq[1] = irq_create_mapping(parent, acpi_liointc->cascade[1]);
 
-	domain_handle = irq_domain_alloc_fwnode(&acpi_liointc->address);
+	domain_handle = irq_domain_alloc_fwnode(&addr);
 	if (!domain_handle) {
 		pr_err("Unable to allocate domain handle\n");
 		return -ENOMEM;
 	}
 
-	ret = liointc_init(acpi_liointc->address, acpi_liointc->size,
-			   1, domain_handle, NULL);
+	ret = liointc_init(addr, acpi_liointc->size, 1, domain_handle, NULL);
 	if (ret == 0)
 		ret = acpi_cascade_irqdomain_init();
 	else
diff --git a/drivers/irqchip/irq-loongson-pch-msi.c b/drivers/irqchip/irq-loongson-pch-msi.c
index 4aedc9b90ff7..91c856c65d9d 100644
--- a/drivers/irqchip/irq-loongson-pch-msi.c
+++ b/drivers/irqchip/irq-loongson-pch-msi.c
@@ -263,12 +263,13 @@ struct fwnode_handle *get_pch_msi_handle(int pci_segment)
 
 int __init pch_msi_acpi_init(struct irq_domain *parent, struct acpi_madt_msi_pic *acpi_pchmsi)
 {
-	int ret;
+	phys_addr_t msg_address = (phys_addr_t)acpi_pchmsi->msg_address;
 	struct fwnode_handle *domain_handle;
+	int ret;
 
-	domain_handle = irq_domain_alloc_fwnode(&acpi_pchmsi->msg_address);
-	ret = pch_msi_init(acpi_pchmsi->msg_address, acpi_pchmsi->start,
-				acpi_pchmsi->count, parent, domain_handle);
+	domain_handle = irq_domain_alloc_fwnode(&msg_address);
+	ret = pch_msi_init(msg_address, acpi_pchmsi->start, acpi_pchmsi->count,
+			   parent, domain_handle);
 	if (ret < 0)
 		irq_domain_free_fwnode(domain_handle);
 
diff --git a/drivers/irqchip/irq-loongson-pch-pic.c b/drivers/irqchip/irq-loongson-pch-pic.c
index c6b369a974a7..f2acaf93f552 100644
--- a/drivers/irqchip/irq-loongson-pch-pic.c
+++ b/drivers/irqchip/irq-loongson-pch-pic.c
@@ -343,7 +343,7 @@ static int pch_pic_init(phys_addr_t addr, unsigned long size, int vec_base,
 		priv->table[i] = PIC_UNDEF_VECTOR;
 
 	priv->ht_vec_base = vec_base;
-	priv->vec_count = ((readq(priv->base) >> 48) & 0xff) + 1;
+	priv->vec_count = ((readl(priv->base + 4) >> 16) & 0xff) + 1;
 	priv->gsi_base = gsi_base;
 
 	priv->pic_domain = irq_domain_create_hierarchy(parent_domain, 0,
@@ -446,23 +446,23 @@ static int __init acpi_cascade_irqdomain_init(void)
 	return 0;
 }
 
-int __init pch_pic_acpi_init(struct irq_domain *parent,
-					struct acpi_madt_bio_pic *acpi_pchpic)
+int __init pch_pic_acpi_init(struct irq_domain *parent, struct acpi_madt_bio_pic *acpi_pchpic)
 {
-	int ret;
+	phys_addr_t addr = (phys_addr_t)acpi_pchpic->address;
 	struct fwnode_handle *domain_handle;
+	int ret;
 
 	if (find_pch_pic(acpi_pchpic->gsi_base) >= 0)
 		return 0;
 
-	domain_handle = irq_domain_alloc_fwnode(&acpi_pchpic->address);
+	domain_handle = irq_domain_alloc_fwnode(&addr);
 	if (!domain_handle) {
 		pr_err("Unable to allocate domain handle\n");
 		return -ENOMEM;
 	}
 
-	ret = pch_pic_init(acpi_pchpic->address, acpi_pchpic->size,
-				0, parent, domain_handle, acpi_pchpic->gsi_base);
+	ret = pch_pic_init(addr, acpi_pchpic->size, 0, parent,
+			   domain_handle, acpi_pchpic->gsi_base);
 
 	if (ret < 0) {
 		irq_domain_free_fwnode(domain_handle);
diff --git a/drivers/irqchip/irq-renesas-rzt2h.c b/drivers/irqchip/irq-renesas-rzt2h.c
new file mode 100644
index 000000000000..53cf80e1155a
--- /dev/null
+++ b/drivers/irqchip/irq-renesas-rzt2h.c
@@ -0,0 +1,280 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/bitfield.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/irq-renesas-rzt2h.h>
+#include <linux/irqdomain.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+#include <linux/spinlock.h>
+
+#define RZT2H_ICU_INTCPU_NS_START		0
+#define RZT2H_ICU_INTCPU_NS_COUNT		14
+
+#define RZT2H_ICU_INTCPU_S_START		(RZT2H_ICU_INTCPU_NS_START +	\
+						 RZT2H_ICU_INTCPU_NS_COUNT)
+#define RZT2H_ICU_INTCPU_S_COUNT		2
+
+#define RZT2H_ICU_IRQ_NS_START			(RZT2H_ICU_INTCPU_S_START +	\
+						 RZT2H_ICU_INTCPU_S_COUNT)
+#define RZT2H_ICU_IRQ_NS_COUNT			14
+
+#define RZT2H_ICU_IRQ_S_START			(RZT2H_ICU_IRQ_NS_START +	\
+						 RZT2H_ICU_IRQ_NS_COUNT)
+#define RZT2H_ICU_IRQ_S_COUNT			2
+
+#define RZT2H_ICU_SEI_START			(RZT2H_ICU_IRQ_S_START +	\
+						 RZT2H_ICU_IRQ_S_COUNT)
+#define RZT2H_ICU_SEI_COUNT			1
+
+#define RZT2H_ICU_NUM_IRQ			(RZT2H_ICU_INTCPU_NS_COUNT +	\
+						 RZT2H_ICU_INTCPU_S_COUNT +	\
+						 RZT2H_ICU_IRQ_NS_COUNT +	\
+						 RZT2H_ICU_IRQ_S_COUNT +	\
+						 RZT2H_ICU_SEI_COUNT)
+
+#define RZT2H_ICU_IRQ_IN_RANGE(n, type)						\
+	((n) >= RZT2H_ICU_##type##_START &&					\
+	 (n) <  RZT2H_ICU_##type##_START + RZT2H_ICU_##type##_COUNT)
+
+#define RZT2H_ICU_PORTNF_MD			0xc
+#define RZT2H_ICU_PORTNF_MDi_MASK(i)		(GENMASK(1, 0) << ((i) * 2))
+#define RZT2H_ICU_PORTNF_MDi_PREP(i, val)	(FIELD_PREP(GENMASK(1, 0), val) << ((i) * 2))
+
+#define RZT2H_ICU_MD_LOW_LEVEL			0b00
+#define RZT2H_ICU_MD_FALLING_EDGE		0b01
+#define RZT2H_ICU_MD_RISING_EDGE		0b10
+#define RZT2H_ICU_MD_BOTH_EDGES			0b11
+
+#define RZT2H_ICU_DMACn_RSSELi(n, i)		(0x7d0 + 0x18 * (n) + 0x4 * (i))
+#define RZT2H_ICU_DMAC_REQ_SELx_MASK(x)		(GENMASK(9, 0) << ((x) * 10))
+#define RZT2H_ICU_DMAC_REQ_SELx_PREP(x, val)	(FIELD_PREP(GENMASK(9, 0), val) << ((x) * 10))
+
+struct rzt2h_icu_priv {
+	void __iomem		*base_ns;
+	void __iomem		*base_s;
+	struct irq_fwspec	fwspec[RZT2H_ICU_NUM_IRQ];
+	raw_spinlock_t		lock;
+};
+
+void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+				u16 req_no)
+{
+	struct rzt2h_icu_priv *priv = platform_get_drvdata(icu_dev);
+	u8 y, upper;
+	u32 val;
+
+	y = dmac_channel / 3;
+	upper = dmac_channel % 3;
+
+	guard(raw_spinlock_irqsave)(&priv->lock);
+	val = readl(priv->base_ns + RZT2H_ICU_DMACn_RSSELi(dmac_index, y));
+	val &= ~RZT2H_ICU_DMAC_REQ_SELx_MASK(upper);
+	val |= RZT2H_ICU_DMAC_REQ_SELx_PREP(upper, req_no);
+	writel(val, priv->base_ns + RZT2H_ICU_DMACn_RSSELi(dmac_index, y));
+}
+EXPORT_SYMBOL_GPL(rzt2h_icu_register_dma_req);
+
+static inline struct rzt2h_icu_priv *irq_data_to_priv(struct irq_data *data)
+{
+	return data->domain->host_data;
+}
+
+static inline int rzt2h_icu_irq_to_offset(struct irq_data *d, void __iomem **base,
+					  unsigned int *offset)
+{
+	struct rzt2h_icu_priv *priv = irq_data_to_priv(d);
+	unsigned int hwirq = irqd_to_hwirq(d);
+
+	/*
+	 * Safety IRQs and SEI use a separate register space from the non-safety IRQs.
+	 * The SEI interrupt number follows immediately after the safety IRQs.
+	 */
+	if (RZT2H_ICU_IRQ_IN_RANGE(hwirq, IRQ_NS)) {
+		*offset = hwirq - RZT2H_ICU_IRQ_NS_START;
+		*base = priv->base_ns;
+	} else if (RZT2H_ICU_IRQ_IN_RANGE(hwirq, IRQ_S) || RZT2H_ICU_IRQ_IN_RANGE(hwirq, SEI)) {
+		*offset = hwirq - RZT2H_ICU_IRQ_S_START;
+		*base = priv->base_s;
+	} else {
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rzt2h_icu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct rzt2h_icu_priv *priv = irq_data_to_priv(d);
+	unsigned int offset, parent_type;
+	void __iomem *base;
+	u32 val, md;
+	int ret;
+
+	ret = rzt2h_icu_irq_to_offset(d, &base, &offset);
+	if (ret)
+		return ret;
+
+	switch (type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_LEVEL_LOW:
+		md = RZT2H_ICU_MD_LOW_LEVEL;
+		parent_type = IRQ_TYPE_LEVEL_HIGH;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		md = RZT2H_ICU_MD_FALLING_EDGE;
+		parent_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		md = RZT2H_ICU_MD_RISING_EDGE;
+		parent_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		md = RZT2H_ICU_MD_BOTH_EDGES;
+		parent_type = IRQ_TYPE_EDGE_RISING;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	scoped_guard(raw_spinlock, &priv->lock) {
+		val = readl_relaxed(base + RZT2H_ICU_PORTNF_MD);
+		val &= ~RZT2H_ICU_PORTNF_MDi_MASK(offset);
+		val |= RZT2H_ICU_PORTNF_MDi_PREP(offset, md);
+		writel_relaxed(val, base + RZT2H_ICU_PORTNF_MD);
+	}
+
+	return irq_chip_set_type_parent(d, parent_type);
+}
+
+static int rzt2h_icu_set_type(struct irq_data *d, unsigned int type)
+{
+	unsigned int hw_irq = irqd_to_hwirq(d);
+
+	/* IRQn and SEI are selectable, others are edge-only. */
+	if (RZT2H_ICU_IRQ_IN_RANGE(hw_irq, IRQ_NS) ||
+	    RZT2H_ICU_IRQ_IN_RANGE(hw_irq, IRQ_S) ||
+	    RZT2H_ICU_IRQ_IN_RANGE(hw_irq, SEI))
+		return rzt2h_icu_irq_set_type(d, type);
+
+	if ((type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)
+		return -EINVAL;
+
+	return irq_chip_set_type_parent(d, IRQ_TYPE_EDGE_RISING);
+}
+
+static const struct irq_chip rzt2h_icu_chip = {
+	.name			= "rzt2h-icu",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_type		= rzt2h_icu_set_type,
+	.irq_set_wake		= irq_chip_set_wake_parent,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_get_irqchip_state	= irq_chip_get_parent_state,
+	.irq_set_irqchip_state	= irq_chip_set_parent_state,
+	.flags			= IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_SET_TYPE_MASKED |
+				  IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int rzt2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
+			   void *arg)
+{
+	struct rzt2h_icu_priv *priv = domain->host_data;
+	irq_hw_number_t hwirq;
+	unsigned int type;
+	int ret;
+
+	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzt2h_icu_chip, NULL);
+	if (ret)
+		return ret;
+
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
+}
+
+static const struct irq_domain_ops rzt2h_icu_domain_ops = {
+	.alloc		= rzt2h_icu_alloc,
+	.free		= irq_domain_free_irqs_common,
+	.translate	= irq_domain_translate_twocell,
+};
+
+static int rzt2h_icu_parse_interrupts(struct rzt2h_icu_priv *priv, struct device_node *np)
+{
+	struct of_phandle_args map;
+	unsigned int i;
+	int ret;
+
+	for (i = 0; i < RZT2H_ICU_NUM_IRQ; i++) {
+		ret = of_irq_parse_one(np, i, &map);
+		if (ret)
+			return ret;
+
+		of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]);
+	}
+
+	return 0;
+}
+
+static int rzt2h_icu_init(struct platform_device *pdev, struct device_node *parent)
+{
+	struct irq_domain *irq_domain, *parent_domain;
+	struct device_node *node = pdev->dev.of_node;
+	struct device *dev = &pdev->dev;
+	struct rzt2h_icu_priv *priv;
+	int ret;
+
+	parent_domain = irq_find_host(parent);
+	if (!parent_domain)
+		return dev_err_probe(dev, -ENODEV, "cannot find parent domain\n");
+
+	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	raw_spin_lock_init(&priv->lock);
+
+	platform_set_drvdata(pdev, priv);
+
+	priv->base_ns = devm_of_iomap(dev, dev->of_node, 0, NULL);
+	if (IS_ERR(priv->base_ns))
+		return PTR_ERR(priv->base_ns);
+
+	priv->base_s = devm_of_iomap(dev, dev->of_node, 1, NULL);
+	if (IS_ERR(priv->base_s))
+		return PTR_ERR(priv->base_s);
+
+	ret = rzt2h_icu_parse_interrupts(priv, node);
+	if (ret)
+		return dev_err_probe(dev, ret, "cannot parse interrupts: %d\n", ret);
+
+	ret = devm_pm_runtime_enable(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "devm_pm_runtime_enable failed: %d\n", ret);
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret)
+		return dev_err_probe(dev, ret, "pm_runtime_resume_and_get failed: %d\n", ret);
+
+	irq_domain = irq_domain_create_hierarchy(parent_domain, 0, RZT2H_ICU_NUM_IRQ,
+						 dev_fwnode(dev), &rzt2h_icu_domain_ops, priv);
+	if (!irq_domain) {
+		pm_runtime_put(dev);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+IRQCHIP_PLATFORM_DRIVER_BEGIN(rzt2h_icu)
+IRQCHIP_MATCH("renesas,r9a09g077-icu", rzt2h_icu_init)
+IRQCHIP_PLATFORM_DRIVER_END(rzt2h_icu)
+MODULE_AUTHOR("Cosmin Tanislav <cosmin-gabriel.tanislav.xa@renesas.com>");
+MODULE_DESCRIPTION("Renesas RZ/T2H ICU Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/irqchip/irq-renesas-rzv2h.c b/drivers/irqchip/irq-renesas-rzv2h.c
index 899a423b5da8..69980a8ecccc 100644
--- a/drivers/irqchip/irq-renesas-rzv2h.c
+++ b/drivers/irqchip/irq-renesas-rzv2h.c
@@ -20,6 +20,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/reset.h>
 #include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
 
 /* DT "interrupts" indexes */
 #define ICU_IRQ_START				1
@@ -89,6 +90,18 @@
 #define ICU_RZG3E_TSSEL_MAX_VAL			0x8c
 #define ICU_RZV2H_TSSEL_MAX_VAL			0x55
 
+/**
+ * struct rzv2h_irqc_reg_cache - registers cache (necessary for suspend/resume)
+ * @nitsr: ICU_NITSR register
+ * @iitsr: ICU_IITSR register
+ * @titsr: ICU_TITSR registers
+ */
+struct rzv2h_irqc_reg_cache {
+	u32	nitsr;
+	u32	iitsr;
+	u32	titsr[2];
+};
+
 /**
  * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
  * @tssel_lut:		TINT lookup table
@@ -118,13 +131,15 @@ struct rzv2h_hw_info {
  * @fwspec:	IRQ firmware specific data
  * @lock:	Lock to serialize access to hardware registers
  * @info:	Pointer to struct rzv2h_hw_info
+ * @cache:	Registers cache for suspend/resume
  */
-struct rzv2h_icu_priv {
+static struct rzv2h_icu_priv {
 	void __iomem			*base;
 	struct irq_fwspec		fwspec[ICU_NUM_IRQ];
 	raw_spinlock_t			lock;
 	const struct rzv2h_hw_info	*info;
-};
+	struct rzv2h_irqc_reg_cache	cache;
+} *rzv2h_icu_data;
 
 void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
 				u16 req_no)
@@ -412,6 +427,44 @@ static int rzv2h_icu_set_type(struct irq_data *d, unsigned int type)
 	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
 }
 
+static int rzv2h_irqc_irq_suspend(void *data)
+{
+	struct rzv2h_irqc_reg_cache *cache = &rzv2h_icu_data->cache;
+	void __iomem *base = rzv2h_icu_data->base;
+
+	cache->nitsr = readl_relaxed(base + ICU_NITSR);
+	cache->iitsr = readl_relaxed(base + ICU_IITSR);
+	for (unsigned int i = 0; i < 2; i++)
+		cache->titsr[i] = readl_relaxed(base + rzv2h_icu_data->info->t_offs + ICU_TITSR(i));
+
+	return 0;
+}
+
+static void rzv2h_irqc_irq_resume(void *data)
+{
+	struct rzv2h_irqc_reg_cache *cache = &rzv2h_icu_data->cache;
+	void __iomem *base = rzv2h_icu_data->base;
+
+	/*
+	 * Restore only the interrupt type. TSSRx will be restored at the
+	 * request of the pin controller to avoid spurious interrupts due
+	 * to invalid pin states.
+	 */
+	for (unsigned int i = 0; i < 2; i++)
+		writel_relaxed(cache->titsr[i], base + rzv2h_icu_data->info->t_offs + ICU_TITSR(i));
+	writel_relaxed(cache->iitsr, base + ICU_IITSR);
+	writel_relaxed(cache->nitsr, base + ICU_NITSR);
+}
+
+static const struct syscore_ops rzv2h_irqc_syscore_ops = {
+	.suspend	= rzv2h_irqc_irq_suspend,
+	.resume		= rzv2h_irqc_irq_resume,
+};
+
+static struct syscore rzv2h_irqc_syscore = {
+	.ops = &rzv2h_irqc_syscore_ops,
+};
+
 static const struct irq_chip rzv2h_icu_chip = {
 	.name			= "rzv2h-icu",
 	.irq_eoi		= rzv2h_icu_eoi,
@@ -495,7 +548,6 @@ static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_no
 {
 	struct irq_domain *irq_domain, *parent_domain;
 	struct device_node *node = pdev->dev.of_node;
-	struct rzv2h_icu_priv *rzv2h_icu_data;
 	struct reset_control *resetn;
 	int ret;
 
@@ -553,6 +605,8 @@ static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_no
 
 	rzv2h_icu_data->info = hw_info;
 
+	register_syscore(&rzv2h_irqc_syscore);
+
 	/*
 	 * coccicheck complains about a missing put_device call before returning, but it's a false
 	 * positive. We still need &pdev->dev after successfully returning from this function.
@@ -616,6 +670,7 @@ static int rzv2h_icu_probe(struct platform_device *pdev, struct device_node *par
 
 IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
 IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_probe)
+IRQCHIP_MATCH("renesas,r9a09g056-icu", rzv2h_icu_probe)
 IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_probe)
 IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
 MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
diff --git a/drivers/irqchip/irq-riscv-aplic-direct.c b/drivers/irqchip/irq-riscv-aplic-direct.c
index c2a75bf3d20c..5a9650225dd8 100644
--- a/drivers/irqchip/irq-riscv-aplic-direct.c
+++ b/drivers/irqchip/irq-riscv-aplic-direct.c
@@ -8,6 +8,7 @@
 #include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <linux/interrupt.h>
 #include <linux/irqchip.h>
 #include <linux/irqchip/chained_irq.h>
@@ -171,6 +172,15 @@ static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
 	writel(de, idc->regs + APLIC_IDC_IDELIVERY);
 }
 
+void aplic_direct_restore_states(struct aplic_priv *priv)
+{
+	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
+	int cpu;
+
+	for_each_cpu(cpu, &direct->lmask)
+		aplic_idc_set_delivery(per_cpu_ptr(&aplic_idcs, cpu), true);
+}
+
 static int aplic_direct_dying_cpu(unsigned int cpu)
 {
 	if (aplic_direct_parent_irq)
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
index 93e7c51f944a..4495ca26abf5 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.c
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -12,10 +12,169 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
 #include <linux/printk.h>
+#include <linux/syscore_ops.h>
 
 #include "irq-riscv-aplic-main.h"
 
+static LIST_HEAD(aplics);
+
+static void aplic_restore_states(struct aplic_priv *priv)
+{
+	struct aplic_saved_regs *saved_regs = &priv->saved_hw_regs;
+	struct aplic_src_ctrl *srcs;
+	void __iomem *regs;
+	u32 nr_irqs, i;
+
+	regs = priv->regs;
+	writel(saved_regs->domaincfg, regs + APLIC_DOMAINCFG);
+#ifdef CONFIG_RISCV_M_MODE
+	writel(saved_regs->msiaddr, regs + APLIC_xMSICFGADDR);
+	writel(saved_regs->msiaddrh, regs + APLIC_xMSICFGADDRH);
+#endif
+	/*
+	 * The sourcecfg[i] has to be restored prior to the target[i], interrupt-pending and
+	 * interrupt-enable bits. The AIA specification states that "Whenever interrupt source i is
+	 * inactive in an interrupt domain, the corresponding interrupt-pending and interrupt-enable
+	 * bits within the domain are read-only zeros, and register target[i] is also read-only
+	 * zero."
+	 */
+	nr_irqs = priv->nr_irqs;
+	for (i = 0; i < nr_irqs; i++) {
+		srcs = &priv->saved_hw_regs.srcs[i];
+		writel(srcs->sourcecfg, regs + APLIC_SOURCECFG_BASE + i * sizeof(u32));
+		writel(srcs->target, regs + APLIC_TARGET_BASE + i * sizeof(u32));
+	}
+
+	for (i = 0; i <= nr_irqs; i += 32) {
+		srcs = &priv->saved_hw_regs.srcs[i];
+		writel(-1U, regs + APLIC_CLRIE_BASE + (i / 32) * sizeof(u32));
+		writel(srcs->ie, regs + APLIC_SETIE_BASE + (i / 32) * sizeof(u32));
+
+		/* Re-trigger pending interrupts if the domain forwards them to target harts via MSIs */
+		if (!priv->nr_idcs)
+			writel(readl(regs + APLIC_CLRIP_BASE + (i / 32) * sizeof(u32)),
+			       regs + APLIC_SETIP_BASE + (i / 32) * sizeof(u32));
+	}
+
+	if (priv->nr_idcs)
+		aplic_direct_restore_states(priv);
+}
+
+static void aplic_save_states(struct aplic_priv *priv)
+{
+	struct aplic_src_ctrl *srcs;
+	void __iomem *regs;
+	u32 i, nr_irqs;
+
+	regs = priv->regs;
+	nr_irqs = priv->nr_irqs;
+	/* The valid interrupt source IDs range from 1 to N, where N is priv->nr_irqs */
+	for (i = 0; i < nr_irqs; i++) {
+		srcs = &priv->saved_hw_regs.srcs[i];
+		srcs->target = readl(regs + APLIC_TARGET_BASE + i * sizeof(u32));
+
+		if (i % 32)
+			continue;
+
+		srcs->ie = readl(regs + APLIC_SETIE_BASE + (i / 32) * sizeof(u32));
+	}
+
+	/* Save the nr_irqs bit if needed */
+	if (!(nr_irqs % 32)) {
+		srcs = &priv->saved_hw_regs.srcs[nr_irqs];
+		srcs->ie = readl(regs + APLIC_SETIE_BASE + (nr_irqs / 32) * sizeof(u32));
+	}
+}
+
+static int aplic_syscore_suspend(void *data)
+{
+	struct aplic_priv *priv;
+
+	list_for_each_entry(priv, &aplics, head)
+		aplic_save_states(priv);
+
+	return 0;
+}
+
+static void aplic_syscore_resume(void *data)
+{
+	struct aplic_priv *priv;
+
+	list_for_each_entry(priv, &aplics, head)
+		aplic_restore_states(priv);
+}
+
+static struct syscore_ops aplic_syscore_ops = {
+	.suspend = aplic_syscore_suspend,
+	.resume = aplic_syscore_resume,
+};
+
+static struct syscore aplic_syscore = {
+	.ops = &aplic_syscore_ops,
+};
+
+static int aplic_pm_notifier(struct notifier_block *nb, unsigned long action, void *data)
+{
+	struct aplic_priv *priv = container_of(nb, struct aplic_priv, genpd_nb);
+
+	switch (action) {
+	case GENPD_NOTIFY_PRE_OFF:
+		aplic_save_states(priv);
+		break;
+	case GENPD_NOTIFY_ON:
+		aplic_restore_states(priv);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+static void aplic_pm_remove(void *data)
+{
+	struct aplic_priv *priv = data;
+	struct device *dev = priv->dev;
+
+	list_del(&priv->head);
+	if (dev->pm_domain)
+		dev_pm_genpd_remove_notifier(dev);
+}
+
+static int aplic_pm_add(struct device *dev, struct aplic_priv *priv)
+{
+	struct aplic_src_ctrl *srcs;
+	int ret;
+
+	srcs = devm_kzalloc(dev, (priv->nr_irqs + 1) * sizeof(*srcs), GFP_KERNEL);
+	if (!srcs)
+		return -ENOMEM;
+
+	priv->saved_hw_regs.srcs = srcs;
+	list_add(&priv->head, &aplics);
+	if (dev->pm_domain) {
+		priv->genpd_nb.notifier_call = aplic_pm_notifier;
+		ret = dev_pm_genpd_add_notifier(dev, &priv->genpd_nb);
+		if (ret)
+			goto remove_head;
+
+		ret = devm_pm_runtime_enable(dev);
+		if (ret)
+			goto remove_notifier;
+	}
+
+	return devm_add_action_or_reset(dev, aplic_pm_remove, priv);
+
+remove_notifier:
+	dev_pm_genpd_remove_notifier(dev);
+remove_head:
+	list_del(&priv->head);
+	return ret;
+}
+
 void aplic_irq_unmask(struct irq_data *d)
 {
 	struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
@@ -60,6 +219,8 @@ int aplic_irq_set_type(struct irq_data *d, unsigned int type)
 	sourcecfg += (d->hwirq - 1) * sizeof(u32);
 	writel(val, sourcecfg);
 
+	priv->saved_hw_regs.srcs[d->hwirq - 1].sourcecfg = val;
+
 	return 0;
 }
 
@@ -82,6 +243,7 @@ int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
 
 void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
 {
+	struct aplic_saved_regs *saved_regs = &priv->saved_hw_regs;
 	u32 val;
 #ifdef CONFIG_RISCV_M_MODE
 	u32 valh;
@@ -95,6 +257,8 @@ void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
 		valh |= FIELD_PREP(APLIC_xMSICFGADDRH_HHXS, priv->msicfg.hhxs);
 		writel(val, priv->regs + APLIC_xMSICFGADDR);
 		writel(valh, priv->regs + APLIC_xMSICFGADDRH);
+		saved_regs->msiaddr = val;
+		saved_regs->msiaddrh = valh;
 	}
 #endif
 
@@ -106,6 +270,8 @@ void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode)
 	writel(val, priv->regs + APLIC_DOMAINCFG);
 	if (readl(priv->regs + APLIC_DOMAINCFG) != val)
 		dev_warn(priv->dev, "unable to write 0x%x in domaincfg\n", val);
+
+	saved_regs->domaincfg = val;
 }
 
 static void aplic_init_hw_irqs(struct aplic_priv *priv)
@@ -176,7 +342,7 @@ int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *
 	/* Setup initial state APLIC interrupts */
 	aplic_init_hw_irqs(priv);
 
-	return 0;
+	return aplic_pm_add(dev, priv);
 }
 
 static int aplic_probe(struct platform_device *pdev)
@@ -209,6 +375,8 @@ static int aplic_probe(struct platform_device *pdev)
 	if (rc)
 		dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n",
 			      msi_mode ? "MSI" : "direct");
+	else
+		register_syscore(&aplic_syscore);
 
 #ifdef CONFIG_ACPI
 	if (!acpi_disabled)
diff --git a/drivers/irqchip/irq-riscv-aplic-main.h b/drivers/irqchip/irq-riscv-aplic-main.h
index b0ad8cde69b1..2d8ad7138541 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.h
+++ b/drivers/irqchip/irq-riscv-aplic-main.h
@@ -23,7 +23,25 @@ struct aplic_msicfg {
 	u32			lhxw;
 };
 
+struct aplic_src_ctrl {
+	u32			sourcecfg;
+	u32			target;
+	u32			ie;
+};
+
+struct aplic_saved_regs {
+	u32			domaincfg;
+#ifdef CONFIG_RISCV_M_MODE
+	u32			msiaddr;
+	u32			msiaddrh;
+#endif
+	struct aplic_src_ctrl	*srcs;
+};
+
 struct aplic_priv {
+	struct list_head	head;
+	struct notifier_block	genpd_nb;
+	struct aplic_saved_regs	saved_hw_regs;
 	struct device		*dev;
 	u32			gsi_base;
 	u32			nr_irqs;
@@ -40,6 +58,7 @@ int aplic_irqdomain_translate(struct irq_fwspec *fwspec, u32 gsi_base,
 			      unsigned long *hwirq, unsigned int *type);
 void aplic_init_hw_global(struct aplic_priv *priv, bool msi_mode);
 int aplic_setup_priv(struct aplic_priv *priv, struct device *dev, void __iomem *regs);
+void aplic_direct_restore_states(struct aplic_priv *priv);
 int aplic_direct_setup(struct device *dev, void __iomem *regs);
 #ifdef CONFIG_RISCV_APLIC_MSI
 int aplic_msi_setup(struct device *dev, void __iomem *regs);
diff --git a/drivers/irqchip/irq-riscv-imsic-early.c b/drivers/irqchip/irq-riscv-imsic-early.c
index 6bac67cc0b6d..ba903fa689bd 100644
--- a/drivers/irqchip/irq-riscv-imsic-early.c
+++ b/drivers/irqchip/irq-riscv-imsic-early.c
@@ -7,6 +7,7 @@
 #define pr_fmt(fmt) "riscv-imsic: " fmt
 #include <linux/acpi.h>
 #include <linux/cpu.h>
+#include <linux/cpu_pm.h>
 #include <linux/export.h>
 #include <linux/interrupt.h>
 #include <linux/init.h>
@@ -123,14 +124,8 @@ static void imsic_handle_irq(struct irq_desc *desc)
 	chained_irq_exit(chip, desc);
 }
 
-static int imsic_starting_cpu(unsigned int cpu)
+static void imsic_hw_states_init(void)
 {
-	/* Mark per-CPU IMSIC state as online */
-	imsic_state_online();
-
-	/* Enable per-CPU parent interrupt */
-	enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));
-
 	/* Setup IPIs */
 	imsic_ipi_starting_cpu();
 
@@ -142,6 +137,18 @@ static int imsic_starting_cpu(unsigned int cpu)
 
 	/* Enable local interrupt delivery */
 	imsic_local_delivery(true);
+}
+
+static int imsic_starting_cpu(unsigned int cpu)
+{
+	/* Mark per-CPU IMSIC state as online */
+	imsic_state_online();
+
+	/* Enable per-CPU parent interrupt */
+	enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));
+
+	/* Initialize the IMSIC registers to enable the interrupt delivery */
+	imsic_hw_states_init();
 
 	return 0;
 }
@@ -157,6 +164,22 @@ static int imsic_dying_cpu(unsigned int cpu)
 	return 0;
 }
 
+static int imsic_pm_notifier(struct notifier_block *self, unsigned long cmd, void *v)
+{
+	switch (cmd) {
+	case CPU_PM_EXIT:
+		/* Initialize the IMSIC registers to enable the interrupt delivery */
+		imsic_hw_states_init();
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block imsic_pm_notifier_block = {
+	.notifier_call = imsic_pm_notifier,
+};
+
 static int __init imsic_early_probe(struct fwnode_handle *fwnode)
 {
 	struct irq_domain *domain;
@@ -194,7 +217,7 @@ static int __init imsic_early_probe(struct fwnode_handle *fwnode)
 	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
 			  imsic_starting_cpu, imsic_dying_cpu);
 
-	return 0;
+	return cpu_pm_register_notifier(&imsic_pm_notifier_block);
 }
 
 static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 210a57959637..60fd8f91762b 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -68,15 +68,17 @@
 #define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM	1
 
 struct plic_priv {
-	struct fwnode_handle *fwnode;
-	struct cpumask lmask;
-	struct irq_domain *irqdomain;
-	void __iomem *regs;
-	unsigned long plic_quirks;
-	unsigned int nr_irqs;
-	unsigned long *prio_save;
-	u32 gsi_base;
-	int acpi_plic_id;
+	struct fwnode_handle	*fwnode;
+	struct cpumask		lmask;
+	struct irq_domain	*irqdomain;
+	void __iomem		*regs;
+	unsigned long		plic_quirks;
+	/* device interrupts + 1 to compensate for the reserved hwirq 0 */
+	unsigned int __private	total_irqs;
+	unsigned int		irq_groups;
+	unsigned long		*prio_save;
+	u32			gsi_base;
+	int			acpi_plic_id;
 };
 
 struct plic_handler {
@@ -91,6 +93,12 @@ struct plic_handler {
 	u32			*enable_save;
 	struct plic_priv	*priv;
 };
+
+/*
+ * Macro to deal with the insanity of hardware interrupt 0 being reserved */
+#define for_each_device_irq(iter, priv)	\
+	for (unsigned int iter = 1; iter < ACCESS_PRIVATE(priv, total_irqs); iter++)
+
 static int plic_parent_irq __ro_after_init;
 static bool plic_global_setup_done __ro_after_init;
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
@@ -257,14 +265,11 @@ static int plic_irq_set_type(struct irq_data *d, unsigned int type)
 
 static int plic_irq_suspend(void *data)
 {
-	struct plic_priv *priv;
-
-	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
+	struct plic_priv *priv = this_cpu_ptr(&plic_handlers)->priv;
 
-	/* irq ID 0 is reserved */
-	for (unsigned int i = 1; i < priv->nr_irqs; i++) {
-		__assign_bit(i, priv->prio_save,
-			     readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
+	for_each_device_irq(irq, priv) {
+		__assign_bit(irq, priv->prio_save,
+			     readl(priv->regs + PRIORITY_BASE + irq * PRIORITY_PER_ID));
 	}
 
 	return 0;
@@ -272,18 +277,15 @@ static int plic_irq_suspend(void *data)
 
 static void plic_irq_resume(void *data)
 {
-	unsigned int i, index, cpu;
+	struct plic_priv *priv = this_cpu_ptr(&plic_handlers)->priv;
+	unsigned int index, cpu;
 	unsigned long flags;
 	u32 __iomem *reg;
-	struct plic_priv *priv;
-
-	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;
 
-	/* irq ID 0 is reserved */
-	for (i = 1; i < priv->nr_irqs; i++) {
-		index = BIT_WORD(i);
-		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
-		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
+	for_each_device_irq(irq, priv) {
+		index = BIT_WORD(irq);
+		writel((priv->prio_save[index] & BIT_MASK(irq)) ? 1 : 0,
+		       priv->regs + PRIORITY_BASE + irq * PRIORITY_PER_ID);
 	}
 
 	for_each_present_cpu(cpu) {
@@ -293,7 +295,7 @@ static void plic_irq_resume(void *data)
 			continue;
 
 		raw_spin_lock_irqsave(&handler->enable_lock, flags);
-		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
+		for (unsigned int i = 0; i < priv->irq_groups; i++) {
 			reg = handler->enable_base + i * sizeof(u32);
 			writel(handler->enable_save[i], reg);
 		}
@@ -431,7 +433,7 @@ static u32 cp100_isolate_pending_irq(int nr_irq_groups, struct plic_handler *han
 
 static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler, void __iomem *claim)
 {
-	int nr_irq_groups = DIV_ROUND_UP(handler->priv->nr_irqs, 32);
+	int nr_irq_groups = handler->priv->irq_groups;
 	u32 __iomem *enable = handler->enable_base;
 	irq_hw_number_t hwirq = 0;
 	u32 iso_mask;
@@ -614,7 +616,6 @@ static int plic_probe(struct fwnode_handle *fwnode)
 	struct plic_handler *handler;
 	u32 nr_irqs, parent_hwirq;
 	struct plic_priv *priv;
-	irq_hw_number_t hwirq;
 	void __iomem *regs;
 	int id, context_id;
 	u32 gsi_base;
@@ -647,7 +648,16 @@ static int plic_probe(struct fwnode_handle *fwnode)
 
 	priv->fwnode = fwnode;
 	priv->plic_quirks = plic_quirks;
-	priv->nr_irqs = nr_irqs;
+	/*
+	 * The firmware provides the number of device interrupts. As
+	 * hardware interrupt 0 is reserved, the total number of interrupts
+	 * is nr_irqs + 1.
+	 */
+	nr_irqs++;
+	ACCESS_PRIVATE(priv, total_irqs) = nr_irqs;
+	/* Precalculate the number of register groups */
+	priv->irq_groups = DIV_ROUND_UP(nr_irqs, 32);
+
 	priv->regs = regs;
 	priv->gsi_base = gsi_base;
 	priv->acpi_plic_id = id;
@@ -686,7 +696,7 @@ static int plic_probe(struct fwnode_handle *fwnode)
 				u32 __iomem *enable_base = priv->regs +	CONTEXT_ENABLE_BASE +
 							   i * CONTEXT_ENABLE_SIZE;
 
-				for (int j = 0; j <= nr_irqs / 32; j++)
+				for (int j = 0; j < priv->irq_groups; j++)
 					writel(0, enable_base + j);
 			}
 			continue;
@@ -718,23 +728,21 @@ static int plic_probe(struct fwnode_handle *fwnode)
 			context_id * CONTEXT_ENABLE_SIZE;
 		handler->priv = priv;
 
-		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
-					       sizeof(*handler->enable_save), GFP_KERNEL);
+		handler->enable_save = kcalloc(priv->irq_groups, sizeof(*handler->enable_save),
+					       GFP_KERNEL);
 		if (!handler->enable_save) {
 			error = -ENOMEM;
 			goto fail_cleanup_contexts;
 		}
 done:
-		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
+		for_each_device_irq(hwirq, priv) {
 			plic_toggle(handler, hwirq, 0);
-			writel(1, priv->regs + PRIORITY_BASE +
-				  hwirq * PRIORITY_PER_ID);
+			writel(1, priv->regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
 		}
 		nr_handlers++;
 	}
 
-	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
-						   &plic_irqdomain_ops, priv);
+	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs, &plic_irqdomain_ops, priv);
 	if (WARN_ON(!priv->irqdomain)) {
 		error = -ENOMEM;
 		goto fail_cleanup_contexts;
diff --git a/drivers/irqchip/irq-ti-sci-intr.c b/drivers/irqchip/irq-ti-sci-intr.c
index 354613e74ad0..0ea17040e934 100644
--- a/drivers/irqchip/irq-ti-sci-intr.c
+++ b/drivers/irqchip/irq-ti-sci-intr.c
@@ -61,12 +61,21 @@ static int ti_sci_intr_irq_domain_translate(struct irq_domain *domain,
 {
 	struct ti_sci_intr_irq_domain *intr = domain->host_data;
 
-	if (fwspec->param_count != 1)
-		return -EINVAL;
+	if (intr->type) {
+		/* Global interrupt-type */
+		if (fwspec->param_count != 1)
+			return -EINVAL;
 
-	*hwirq = fwspec->param[0];
-	*type = intr->type;
+		*hwirq = fwspec->param[0];
+		*type = intr->type;
+	} else {
+		/* Per-Line interrupt-type */
+		if (fwspec->param_count != 2)
+			return -EINVAL;
 
+		*hwirq = fwspec->param[0];
+		*type = fwspec->param[1];
+	}
 	return 0;
 }
 
@@ -128,11 +137,12 @@ static void ti_sci_intr_irq_domain_free(struct irq_domain *domain,
  * @domain:	Pointer to the interrupt router IRQ domain
  * @virq:	Corresponding Linux virtual IRQ number
  * @hwirq:	Corresponding hwirq for the IRQ within this IRQ domain
+ * @hwirq_type:	Corresponding hwirq trigger type for the IRQ within this IRQ domain
  *
  * Returns intr output irq if all went well else appropriate error pointer.
  */
-static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
-					unsigned int virq, u32 hwirq)
+static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain, unsigned int virq,
+					u32 hwirq, u32 hwirq_type)
 {
 	struct ti_sci_intr_irq_domain *intr = domain->host_data;
 	struct device_node *parent_node;
@@ -156,11 +166,22 @@ static int ti_sci_intr_alloc_parent_irq(struct irq_domain *domain,
 		fwspec.param_count = 3;
 		fwspec.param[0] = 0;	/* SPI */
 		fwspec.param[1] = p_hwirq - 32; /* SPI offset */
-		fwspec.param[2] = intr->type;
+		fwspec.param[2] = hwirq_type;
 	} else {
 		/* Parent is Interrupt Router */
-		fwspec.param_count = 1;
-		fwspec.param[0] = p_hwirq;
+		u32 parent_trigger_type;
+
+		if (!of_property_read_u32(parent_node, "ti,intr-trigger-type",
+					  &parent_trigger_type)) {
+			/* Parent has global trigger type */
+			fwspec.param_count = 1;
+			fwspec.param[0] = p_hwirq;
+		} else {
+			/* Parent supports per-line trigger types */
+			fwspec.param_count = 2;
+			fwspec.param[0] = p_hwirq;
+			fwspec.param[1] = hwirq_type;
+		}
 	}
 
 	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
@@ -196,15 +217,15 @@ static int ti_sci_intr_irq_domain_alloc(struct irq_domain *domain,
 					void *data)
 {
 	struct irq_fwspec *fwspec = data;
+	unsigned int hwirq_type;
 	unsigned long hwirq;
-	unsigned int flags;
 	int err, out_irq;
 
-	err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &flags);
+	err = ti_sci_intr_irq_domain_translate(domain, fwspec, &hwirq, &hwirq_type);
 	if (err)
 		return err;
 
-	out_irq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq);
+	out_irq = ti_sci_intr_alloc_parent_irq(domain, virq, hwirq, hwirq_type);
 	if (out_irq < 0)
 		return out_irq;
 
@@ -247,12 +268,9 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	intr->dev = dev;
-	ret = of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type",
-				   &intr->type);
-	if (ret) {
-		dev_err(dev, "missing ti,intr-trigger-type property\n");
-		return -EINVAL;
-	}
+
+	if (of_property_read_u32(dev_of_node(dev), "ti,intr-trigger-type", &intr->type))
+		intr->type = IRQ_TYPE_NONE;
 
 	intr->sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
 	if (IS_ERR(intr->sci))
diff --git a/drivers/soc/renesas/Kconfig b/drivers/soc/renesas/Kconfig
index 340a1ff7e92b..198baf890b14 100644
--- a/drivers/soc/renesas/Kconfig
+++ b/drivers/soc/renesas/Kconfig
@@ -423,6 +423,7 @@ config ARCH_R9A09G057
 config ARCH_R9A09G077
 	bool "ARM64 Platform support for R9A09G077 (RZ/T2H)"
 	default y if ARCH_RENESAS
+	select RENESAS_RZT2H_ICU
 	help
 	  This enables support for the Renesas RZ/T2H SoC variants.
 
diff --git a/include/linux/irqchip/irq-renesas-rzt2h.h b/include/linux/irqchip/irq-renesas-rzt2h.h
new file mode 100644
index 000000000000..853fd5ee0b22
--- /dev/null
+++ b/include/linux/irqchip/irq-renesas-rzt2h.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Renesas RZ/T2H Interrupt Control Unit (ICU)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corporation.
+ */
+
+#ifndef __LINUX_IRQ_RENESAS_RZT2H
+#define __LINUX_IRQ_RENESAS_RZT2H
+
+#include <linux/platform_device.h>
+
+#define RZT2H_ICU_DMAC_REQ_NO_DEFAULT		0x3ff
+
+#ifdef CONFIG_RENESAS_RZT2H_ICU
+void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
+				u16 req_no);
+#else
+static inline void rzt2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
+					      u8 dmac_channel, u16 req_no) { }
+#endif
+
+#endif /* __LINUX_IRQ_RENESAS_RZT2H */
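
For reference, a minimal sketch of how a DMA controller driver might
consume the new rzt2h_icu_register_dma_req() helper. This is only
illustrative: the "renesas,icu" phandle property, the example function
name and the way the ICU platform device is looked up are assumptions
made for the sake of the example, not something defined by this series.

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/irqchip/irq-renesas-rzt2h.h>
    #include <linux/of.h>
    #include <linux/of_platform.h>

    /*
     * Hypothetical consumer: route DMAC request line @req_no to channel
     * @dmac_channel of DMAC @dmac_index through the RZ/T2H ICU. Assumes
     * the consumer references the ICU via a "renesas,icu" phandle and
     * that the ICU driver has already probed (drvdata is set).
     */
    static int example_dmac_route_req(struct device *dev, u8 dmac_index,
                                      u8 dmac_channel, u16 req_no)
    {
            struct device_node *icu_np;
            struct platform_device *icu_pdev;

            icu_np = of_parse_phandle(dev->of_node, "renesas,icu", 0);
            if (!icu_np)
                    return -ENODEV;

            icu_pdev = of_find_device_by_node(icu_np);
            of_node_put(icu_np);
            if (!icu_pdev)
                    return -EPROBE_DEFER;

            rzt2h_icu_register_dma_req(icu_pdev, dmac_index, dmac_channel, req_no);
            return 0;
    }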