It is possible that the remote processor is already running before Linux
boots or before the remoteproc platform driver probes. Implement the
required remoteproc framework ops to provide the resource table address
and to connect to or disconnect from the remote processor in that case.
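For reference, here is a minimal sketch of the handshake block the firmware
is expected to place at the base of the first "memory-region" entry. It
mirrors the struct rsc_tbl_data added below; the field comments are
illustrative:

  struct rsc_tbl_data {
  	const int version;        /* layout version of this handshake block */
  	const u32 magic_num;      /* RSC_TBL_XLNX_MAGIC, 'x' 'a' 'm' 'p' packed into a u32 */
  	const u32 comp_magic_num; /* ~RSC_TBL_XLNX_MAGIC, guards against stale memory */
  	const u32 rsc_tbl_size;   /* size of the resource table in bytes */
  	const uintptr_t rsc_tbl;  /* address of the resource table itself */
  } __packed;

The driver attaches only when both magic values match; otherwise the core
stays OFFLINE and firmware is loaded through the normal boot path.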
Signed-off-by: Tanmay Shah <tanmay.shah@amd.com>
---
drivers/remoteproc/xlnx_r5_remoteproc.c | 164 +++++++++++++++++++++++-
1 file changed, 160 insertions(+), 4 deletions(-)
diff --git a/drivers/remoteproc/xlnx_r5_remoteproc.c b/drivers/remoteproc/xlnx_r5_remoteproc.c
index 84243d1dff9f..af7aff5e9098 100644
--- a/drivers/remoteproc/xlnx_r5_remoteproc.c
+++ b/drivers/remoteproc/xlnx_r5_remoteproc.c
@@ -25,6 +25,10 @@
/* RX mailbox client buffer max length */
#define MBOX_CLIENT_BUF_MAX (IPI_BUF_LEN_MAX + \
sizeof(struct zynqmp_ipi_message))
+
+#define RSC_TBL_XLNX_MAGIC ((uint32_t)'x' << 24 | (uint32_t)'a' << 16 | \
+ (uint32_t)'m' << 8 | (uint32_t)'p')
+
/*
* settings for RPU cluster mode which
* reflects possible values of xlnx,cluster-mode dt-property
@@ -73,6 +77,15 @@ struct mbox_info {
struct mbox_chan *rx_chan;
};
+/* Xilinx-specific resource table metadata, populated by the remote firmware */
+struct rsc_tbl_data {
+ const int version;
+ const u32 magic_num;
+ const u32 comp_magic_num;
+ const u32 rsc_tbl_size;
+ const uintptr_t rsc_tbl;
+} __packed;
+
/*
* Hardcoded TCM bank values. This will stay in driver to maintain backward
* compatibility with device-tree that does not have TCM information.
@@ -95,20 +108,24 @@ static const struct mem_bank_data zynqmp_tcm_banks_lockstep[] = {
/**
* struct zynqmp_r5_core
*
+ * @rsc_tbl_va: resource table virtual address
* @dev: device of RPU instance
* @np: device node of RPU instance
* @tcm_bank_count: number TCM banks accessible to this RPU
* @tcm_banks: array of each TCM bank data
* @rproc: rproc handle
+ * @rsc_tbl_size: resource table size retrieved from remote
* @pm_domain_id: RPU CPU power domain id
* @ipi: pointer to mailbox information
*/
struct zynqmp_r5_core {
+ struct resource_table *rsc_tbl_va;
struct device *dev;
struct device_node *np;
int tcm_bank_count;
struct mem_bank_data **tcm_banks;
struct rproc *rproc;
+ u32 rsc_tbl_size;
u32 pm_domain_id;
struct mbox_info *ipi;
};
@@ -621,10 +638,19 @@ static int zynqmp_r5_rproc_prepare(struct rproc *rproc)
{
int ret;
- ret = add_tcm_banks(rproc);
- if (ret) {
- dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
- return ret;
+	/*
+	 * For the attach/detach use case, firmware is already loaded so
+	 * TCM isn't really needed at all. Also, for security, TCM can be
+	 * locked in such a case and Linux may not have access to it at all.
+	 * So avoid adding TCM banks here; TCM power domains are requested
+	 * during the attach callback instead.
+	 */
+ if (rproc->state != RPROC_DETACHED) {
+ ret = add_tcm_banks(rproc);
+ if (ret) {
+ dev_err(&rproc->dev, "failed to get TCM banks, err %d\n", ret);
+ return ret;
+ }
}
ret = add_mem_regions_carveout(rproc);
@@ -662,6 +688,123 @@ static int zynqmp_r5_rproc_unprepare(struct rproc *rproc)
return 0;
}
+static struct resource_table *zynqmp_r5_get_loaded_rsc_table(struct rproc *rproc,
+ size_t *size)
+{
+ struct zynqmp_r5_core *r5_core;
+
+ r5_core = rproc->priv;
+
+ *size = r5_core->rsc_tbl_size;
+
+ return r5_core->rsc_tbl_va;
+}
+
+static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
+{
+ struct device *dev = r5_core->dev;
+ struct rsc_tbl_data *rsc_data_va;
+ struct resource_table *rsc_addr;
+ struct resource res_mem;
+ struct device_node *np;
+ int ret;
+
+	/*
+	 * The remote processor firmware is expected to provide the resource
+	 * table address via a struct rsc_tbl_data data structure. The start
+	 * address of the first entry under the "memory-region" property holds
+	 * that structure, which contains the resource table address, its size
+	 * and a magic number used to validate the resource table entry.
+	 */
+ np = of_parse_phandle(r5_core->np, "memory-region", 0);
+ if (!np) {
+ dev_err(dev, "failed to get memory region dev node\n");
+ return -EINVAL;
+ }
+
+ ret = of_address_to_resource(np, 0, &res_mem);
+ if (ret) {
+ dev_err(dev, "failed to get memory-region resource addr\n");
+ return -EINVAL;
+ }
+
+ rsc_data_va = devm_ioremap_wc(dev, res_mem.start,
+ sizeof(struct rsc_tbl_data));
+ if (!rsc_data_va) {
+ dev_err(dev, "failed to map resource table data address\n");
+ return -EIO;
+ }
+
+	/*
+	 * If the RSC_TBL_XLNX_MAGIC number and its complement aren't found,
+	 * don't consider the resource table address valid and don't attach.
+	 */
+ if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
+ rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
+ dev_dbg(dev, "invalid magic number, won't attach\n");
+ return -EINVAL;
+ }
+
+ rsc_addr = ioremap_wc(rsc_data_va->rsc_tbl,
+ rsc_data_va->rsc_tbl_size);
+ if (!rsc_addr) {
+ dev_err(dev, "failed to get rsc_addr\n");
+ return -EINVAL;
+ }
+
+	/*
+	 * As of now, resource table version 1 is expected. Don't fail the
+	 * attach, but warn users about it.
+	 */
+ if (rsc_addr->ver != 1)
+ dev_warn(dev, "unexpected resource table version %d\n",
+ rsc_addr->ver);
+
+ r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
+ r5_core->rsc_tbl_va = rsc_addr;
+
+ return 0;
+}
+
+static int zynqmp_r5_attach(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+ int i, pm_domain_id, ret;
+
+	/*
+	 * Firmware is loaded in TCM. Request the TCM power domains to notify
+	 * the platform management controller that TCM is in use. These will
+	 * be released during the unprepare callback.
+	 */
+ for (i = 0; i < r5_core->tcm_bank_count; i++) {
+ pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
+ ret = zynqmp_pm_request_node(pm_domain_id,
+ ZYNQMP_PM_CAPABILITY_ACCESS, 0,
+ ZYNQMP_PM_REQUEST_ACK_BLOCKING);
+ if (ret < 0)
+ pr_warn("TCM %d can't be requested\n", i);
+ }
+
+ return 0;
+}
+
+static int zynqmp_r5_detach(struct rproc *rproc)
+{
+ struct zynqmp_r5_core *r5_core = rproc->priv;
+
+	/*
+	 * Generate a last notification to the remote after clearing the virtio
+	 * flag. If the host kicks during detach, the remote can check the
+	 * virtio reset flag in its kick interrupt handler instead of polling.
+	 */
+ zynqmp_r5_rproc_kick(rproc, 0);
+
+ iounmap(r5_core->rsc_tbl_va);
+ r5_core->rsc_tbl_va = NULL;
+
+ return 0;
+}
+
static const struct rproc_ops zynqmp_r5_rproc_ops = {
.prepare = zynqmp_r5_rproc_prepare,
.unprepare = zynqmp_r5_rproc_unprepare,
@@ -673,6 +816,9 @@ static const struct rproc_ops zynqmp_r5_rproc_ops = {
.sanity_check = rproc_elf_sanity_check,
.get_boot_addr = rproc_elf_get_boot_addr,
.kick = zynqmp_r5_rproc_kick,
+ .get_loaded_rsc_table = zynqmp_r5_get_loaded_rsc_table,
+ .attach = zynqmp_r5_attach,
+ .detach = zynqmp_r5_detach,
};
/**
@@ -723,6 +869,16 @@ static struct zynqmp_r5_core *zynqmp_r5_add_rproc_core(struct device *cdev)
goto free_rproc;
}
+	/*
+	 * Move the rproc state to DETACHED to give a one-time opportunity to
+	 * attach if firmware is already available in memory. This can happen
+	 * if firmware was loaded via a debugger or by any other agent in the
+	 * system. If firmware isn't available in memory and the resource
+	 * table isn't found, the rproc state stays OFFLINE.
+	 */
+ if (!zynqmp_r5_get_rsc_table_va(r5_core))
+ r5_rproc->state = RPROC_DETACHED;
+
r5_core->rproc = r5_rproc;
return r5_core;
--
2.25.1
Hi Tanmay,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 0496190c4d42965acb31b9da1b6dac3509791062]
url: https://github.com/intel-lab-lkp/linux/commits/Tanmay-Shah/drivers-remoteproc-xlnx-add-attach-detach-support/20240503-071225
base: 0496190c4d42965acb31b9da1b6dac3509791062
patch link: https://lore.kernel.org/r/20240502231021.370047-2-tanmay.shah%40amd.com
patch subject: [PATCH 1/2] drivers: remoteproc: xlnx: add attach detach support
config: arm64-randconfig-r113-20240506 (https://download.01.org/0day-ci/archive/20240506/202405060611.jBQBF7iB-lkp@intel.com/config)
compiler: aarch64-linux-gcc (GCC) 13.2.0
reproduce: (https://download.01.org/0day-ci/archive/20240506/202405060611.jBQBF7iB-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202405060611.jBQBF7iB-lkp@intel.com/
sparse warnings: (new ones prefixed by >>)
drivers/remoteproc/xlnx_r5_remoteproc.c:404:20: sparse: sparse: cast removes address space '__iomem' of expression
drivers/remoteproc/xlnx_r5_remoteproc.c:522:20: sparse: sparse: cast removes address space '__iomem' of expression
>> drivers/remoteproc/xlnx_r5_remoteproc.c:731:21: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct rsc_tbl_data *rsc_data_va @@ got void [noderef] __iomem * @@
drivers/remoteproc/xlnx_r5_remoteproc.c:731:21: sparse: expected struct rsc_tbl_data *rsc_data_va
drivers/remoteproc/xlnx_r5_remoteproc.c:731:21: sparse: got void [noderef] __iomem *
>> drivers/remoteproc/xlnx_r5_remoteproc.c:748:18: sparse: sparse: incorrect type in assignment (different address spaces) @@ expected struct resource_table *rsc_addr @@ got void [noderef] __iomem * @@
drivers/remoteproc/xlnx_r5_remoteproc.c:748:18: sparse: expected struct resource_table *rsc_addr
drivers/remoteproc/xlnx_r5_remoteproc.c:748:18: sparse: got void [noderef] __iomem *
>> drivers/remoteproc/xlnx_r5_remoteproc.c:802:24: sparse: sparse: incorrect type in argument 1 (different address spaces) @@ expected void volatile [noderef] __iomem *addr @@ got struct resource_table *rsc_tbl_va @@
drivers/remoteproc/xlnx_r5_remoteproc.c:802:24: sparse: expected void volatile [noderef] __iomem *addr
drivers/remoteproc/xlnx_r5_remoteproc.c:802:24: sparse: got struct resource_table *rsc_tbl_va
vim +731 drivers/remoteproc/xlnx_r5_remoteproc.c
702
703 static int zynqmp_r5_get_rsc_table_va(struct zynqmp_r5_core *r5_core)
704 {
705 struct device *dev = r5_core->dev;
706 struct rsc_tbl_data *rsc_data_va;
707 struct resource_table *rsc_addr;
708 struct resource res_mem;
709 struct device_node *np;
710 int ret;
711
712 /**
713 * It is expected from remote processor firmware to provide resource
714 * table address via struct rsc_tbl_data data structure.
715 * Start address of first entry under "memory-region" property list
716 * contains that data structure which holds resource table address, size
717 * and some magic number to validate correct resource table entry.
718 */
719 np = of_parse_phandle(r5_core->np, "memory-region", 0);
720 if (!np) {
721 dev_err(dev, "failed to get memory region dev node\n");
722 return -EINVAL;
723 }
724
725 ret = of_address_to_resource(np, 0, &res_mem);
726 if (ret) {
727 dev_err(dev, "failed to get memory-region resource addr\n");
728 return -EINVAL;
729 }
730
> 731 rsc_data_va = devm_ioremap_wc(dev, res_mem.start,
732 sizeof(struct rsc_tbl_data));
733 if (!rsc_data_va) {
734 dev_err(dev, "failed to map resource table data address\n");
735 return -EIO;
736 }
737
738 /**
739 * If RSC_TBL_XLNX_MAGIC number and its complement isn't found then
740 * do not consider resource table address valid and don't attach
741 */
742 if (rsc_data_va->magic_num != RSC_TBL_XLNX_MAGIC ||
743 rsc_data_va->comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
744 dev_dbg(dev, "invalid magic number, won't attach\n");
745 return -EINVAL;
746 }
747
> 748 rsc_addr = ioremap_wc(rsc_data_va->rsc_tbl,
749 rsc_data_va->rsc_tbl_size);
750 if (!rsc_addr) {
751 dev_err(dev, "failed to get rsc_addr\n");
752 return -EINVAL;
753 }
754
755 /**
756 * As of now resource table version 1 is expected. Don't fail to attach
757 * but warn users about it.
758 */
759 if (rsc_addr->ver != 1)
760 dev_warn(dev, "unexpected resource table version %d\n",
761 rsc_addr->ver);
762
763 r5_core->rsc_tbl_size = rsc_data_va->rsc_tbl_size;
764 r5_core->rsc_tbl_va = rsc_addr;
765
766 return 0;
767 }
768
769 static int zynqmp_r5_attach(struct rproc *rproc)
770 {
771 struct zynqmp_r5_core *r5_core = rproc->priv;
772 int i, pm_domain_id, ret;
773
774 /*
775 * Firmware is loaded in TCM. Request TCM power domains to notify
776 * platform management controller that TCM is in use. This will be
777 * released during unprepare callback.
778 */
779 for (i = 0; i < r5_core->tcm_bank_count; i++) {
780 pm_domain_id = r5_core->tcm_banks[i]->pm_domain_id;
781 ret = zynqmp_pm_request_node(pm_domain_id,
782 ZYNQMP_PM_CAPABILITY_ACCESS, 0,
783 ZYNQMP_PM_REQUEST_ACK_BLOCKING);
784 if (ret < 0)
785 pr_warn("TCM %d can't be requested\n", i);
786 }
787
788 return 0;
789 }
790
791 static int zynqmp_r5_detach(struct rproc *rproc)
792 {
793 struct zynqmp_r5_core *r5_core = rproc->priv;
794
795 /*
796 * Generate last notification to remote after clearing virtio flag.
797 * Remote can avoid polling on virtio reset flag if kick is generated
798 * during detach by host and check virtio reset flag on kick interrupt.
799 */
800 zynqmp_r5_rproc_kick(rproc, 0);
801
> 802 iounmap(r5_core->rsc_tbl_va);
803 r5_core->rsc_tbl_va = NULL;
804
805 return 0;
806 }
807
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
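For context on the three new warnings: they all stem from assigning __iomem
mappings to plain pointers and later passing a plain pointer to iounmap().
A minimal sketch of one possible way to keep sparse happy (illustrative
only, not necessarily the fix the author will choose; it assumes the const
qualifiers are dropped from struct rsc_tbl_data so it can serve as a plain
copy target):

	void __iomem *rsc_data_va;
	struct rsc_tbl_data rsc_data;

	/* Keep the mapping __iomem-annotated... */
	rsc_data_va = devm_ioremap_wc(dev, res_mem.start, sizeof(rsc_data));
	if (!rsc_data_va) {
		dev_err(dev, "failed to map resource table data address\n");
		return -EIO;
	}

	/* ...and copy the handshake block out of it before dereferencing. */
	memcpy_fromio(&rsc_data, rsc_data_va, sizeof(rsc_data));

	if (rsc_data.magic_num != RSC_TBL_XLNX_MAGIC ||
	    rsc_data.comp_magic_num != ~RSC_TBL_XLNX_MAGIC) {
		dev_dbg(dev, "invalid magic number, won't attach\n");
		return -EINVAL;
	}

The warning at the iounmap() call would similarly need either a separate
__iomem-annotated pointer kept for unmapping or an explicit cast.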