Throughout the code we're accessing the board memmap, most of the time,
by accessing it statically via 'virt_memmap'. This static map is also
assigned in the machine state in s->memmap.
We're also passing it as a variable to some fdt functions, which is
unorthodox since we can spare a function argument by accessing it
statically or via the machine state.
All the current forms are valid but not all of them are scalable. In the
future we will version this board, and then all this code will need
rework because it should point to the updated memmap. In this case,
we'll want to assign the adequate versioned memmap once during init,
in s->memmap like it is being done today, and the rest of the code
will access the updated map via s->memmap.
We're also enforcing the pattern of using s->memmap instead of assigning
it to a temp variable 'memmap'. Code is copy/pasted around all the time
and being consistent is important.
We'll start these rather mechanical changes with virt_machine_init().
Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
---
hw/riscv/virt.c | 54 ++++++++++++++++++++++++-------------------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
index c9d255d8a8..6e3e34879f 100644
--- a/hw/riscv/virt.c
+++ b/hw/riscv/virt.c
@@ -1527,7 +1527,6 @@ static void virt_machine_done(Notifier *notifier, void *data)
static void virt_machine_init(MachineState *machine)
{
- const MemMapEntry *memmap = virt_memmap;
RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
MemoryRegion *system_memory = get_system_memory();
MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
@@ -1535,6 +1534,8 @@ static void virt_machine_init(MachineState *machine)
int i, base_hartid, hart_count;
int socket_count = riscv_socket_count(machine);
+ s->memmap = virt_memmap;
+
/* Check socket count limit */
if (VIRT_SOCKETS_MAX < socket_count) {
error_report("number of sockets/nodes should be less than %d",
@@ -1582,7 +1583,7 @@ static void virt_machine_init(MachineState *machine)
if (virt_aclint_allowed() && s->have_aclint) {
if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
/* Per-socket ACLINT MTIMER */
- riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
+ riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
@@ -1591,28 +1592,28 @@ static void virt_machine_init(MachineState *machine)
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
} else {
/* Per-socket ACLINT MSWI, MTIMER, and SSWI */
- riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
- i * memmap[VIRT_CLINT].size,
+ riscv_aclint_swi_create(s->memmap[VIRT_CLINT].base +
+ i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
- riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
- i * memmap[VIRT_CLINT].size +
+ riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
+ i * s->memmap[VIRT_CLINT].size +
RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP,
RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
- riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
- i * memmap[VIRT_ACLINT_SSWI].size,
+ riscv_aclint_swi_create(s->memmap[VIRT_ACLINT_SSWI].base +
+ i * s->memmap[VIRT_ACLINT_SSWI].size,
base_hartid, hart_count, true);
}
} else if (tcg_enabled()) {
/* Per-socket SiFive CLINT */
riscv_aclint_swi_create(
- memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
+ s->memmap[VIRT_CLINT].base + i * s->memmap[VIRT_CLINT].size,
base_hartid, hart_count, false);
- riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
- i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
+ riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
+ i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
@@ -1620,11 +1621,11 @@ static void virt_machine_init(MachineState *machine)
/* Per-socket interrupt controller */
if (s->aia_type == VIRT_AIA_TYPE_NONE) {
- s->irqchip[i] = virt_create_plic(memmap, i,
+ s->irqchip[i] = virt_create_plic(s->memmap, i,
base_hartid, hart_count);
} else {
s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
- memmap, i, base_hartid,
+ s->memmap, i, base_hartid,
hart_count);
}
@@ -1646,8 +1647,8 @@ static void virt_machine_init(MachineState *machine)
if (kvm_enabled() && virt_use_kvm_aia_aplic_imsic(s->aia_type)) {
kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
- memmap[VIRT_APLIC_S].base,
- memmap[VIRT_IMSIC_S].base,
+ s->memmap[VIRT_APLIC_S].base,
+ s->memmap[VIRT_IMSIC_S].base,
s->aia_guests);
}
@@ -1663,21 +1664,20 @@ static void virt_machine_init(MachineState *machine)
virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE;
} else {
virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE;
- virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size;
+ virt_high_pcie_memmap.base = s->memmap[VIRT_DRAM].base +
+ machine->ram_size;
virt_high_pcie_memmap.base =
ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size);
}
- s->memmap = virt_memmap;
-
/* register system main memory (actual RAM) */
- memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
- machine->ram);
+ memory_region_add_subregion(system_memory, s->memmap[VIRT_DRAM].base,
+ machine->ram);
/* boot rom */
memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
- memmap[VIRT_MROM].size, &error_fatal);
- memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
+ s->memmap[VIRT_MROM].size, &error_fatal);
+ memory_region_add_subregion(system_memory, s->memmap[VIRT_MROM].base,
mask_rom);
/*
@@ -1688,12 +1688,12 @@ static void virt_machine_init(MachineState *machine)
rom_set_fw(s->fw_cfg);
/* SiFive Test MMIO device */
- sifive_test_create(memmap[VIRT_TEST].base);
+ sifive_test_create(s->memmap[VIRT_TEST].base);
/* VirtIO MMIO devices */
for (i = 0; i < VIRTIO_COUNT; i++) {
sysbus_create_simple("virtio-mmio",
- memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
+ s->memmap[VIRT_VIRTIO].base + i * s->memmap[VIRT_VIRTIO].size,
qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
}
@@ -1701,11 +1701,11 @@ static void virt_machine_init(MachineState *machine)
create_platform_bus(s, mmio_irqchip);
- serial_mm_init(system_memory, memmap[VIRT_UART0].base,
+ serial_mm_init(system_memory, s->memmap[VIRT_UART0].base,
0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193,
serial_hd(0), DEVICE_LITTLE_ENDIAN);
- sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
+ sysbus_create_simple("goldfish_rtc", s->memmap[VIRT_RTC].base,
qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));
for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
@@ -1723,7 +1723,7 @@ static void virt_machine_init(MachineState *machine)
exit(1);
}
} else {
- create_fdt(s, memmap);
+ create_fdt(s, s->memmap);
}
if (virt_is_iommu_sys_enabled(s)) {
--
2.49.0
On Wed, Apr 23, 2025 at 9:11 PM Daniel Henrique Barboza
<dbarboza@ventanamicro.com> wrote:
>
> Throughout the code we're accessing the board memmap, most of the time,
> by accessing it statically via 'virt_memmap'. This static map is also
> assigned in the machine state in s->memmap.
>
> We're also passing it as a variable to some fdt functions, which is
> unorthodox since we can spare a function argument by accessing it
> statically or via the machine state.
>
> All the current forms are valid but not all of the are scalable. In the
> future we will version this board, and then all this code will need
> rework because it should point to the updated memmap. In this case,
> we'll want to assign the adequate versioned memmap once during init,
> in s->memmap like it is being done today, and the rest of the code
> will access the updated map via s->memmap.
>
> We're also enforcing the pattern of using s->memmap instead of assigning
> it to a temp variable 'memmap'. Code is copy/pasted around all the time
> and being consistent is important.
>
> We'll start these rather mechanical changes with virt_machine_init().
>
> Signed-off-by: Daniel Henrique Barboza <dbarboza@ventanamicro.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Alistair
> ---
> hw/riscv/virt.c | 54 ++++++++++++++++++++++++-------------------------
> 1 file changed, 27 insertions(+), 27 deletions(-)
>
> diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c
> index c9d255d8a8..6e3e34879f 100644
> --- a/hw/riscv/virt.c
> +++ b/hw/riscv/virt.c
> @@ -1527,7 +1527,6 @@ static void virt_machine_done(Notifier *notifier, void *data)
>
> static void virt_machine_init(MachineState *machine)
> {
> - const MemMapEntry *memmap = virt_memmap;
> RISCVVirtState *s = RISCV_VIRT_MACHINE(machine);
> MemoryRegion *system_memory = get_system_memory();
> MemoryRegion *mask_rom = g_new(MemoryRegion, 1);
> @@ -1535,6 +1534,8 @@ static void virt_machine_init(MachineState *machine)
> int i, base_hartid, hart_count;
> int socket_count = riscv_socket_count(machine);
>
> + s->memmap = virt_memmap;
> +
> /* Check socket count limit */
> if (VIRT_SOCKETS_MAX < socket_count) {
> error_report("number of sockets/nodes should be less than %d",
> @@ -1582,7 +1583,7 @@ static void virt_machine_init(MachineState *machine)
> if (virt_aclint_allowed() && s->have_aclint) {
> if (s->aia_type == VIRT_AIA_TYPE_APLIC_IMSIC) {
> /* Per-socket ACLINT MTIMER */
> - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
> + riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
> i * RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
> RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
> base_hartid, hart_count,
> @@ -1591,28 +1592,28 @@ static void virt_machine_init(MachineState *machine)
> RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
> } else {
> /* Per-socket ACLINT MSWI, MTIMER, and SSWI */
> - riscv_aclint_swi_create(memmap[VIRT_CLINT].base +
> - i * memmap[VIRT_CLINT].size,
> + riscv_aclint_swi_create(s->memmap[VIRT_CLINT].base +
> + i * s->memmap[VIRT_CLINT].size,
> base_hartid, hart_count, false);
> - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
> - i * memmap[VIRT_CLINT].size +
> + riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
> + i * s->memmap[VIRT_CLINT].size +
> RISCV_ACLINT_SWI_SIZE,
> RISCV_ACLINT_DEFAULT_MTIMER_SIZE,
> base_hartid, hart_count,
> RISCV_ACLINT_DEFAULT_MTIMECMP,
> RISCV_ACLINT_DEFAULT_MTIME,
> RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
> - riscv_aclint_swi_create(memmap[VIRT_ACLINT_SSWI].base +
> - i * memmap[VIRT_ACLINT_SSWI].size,
> + riscv_aclint_swi_create(s->memmap[VIRT_ACLINT_SSWI].base +
> + i * s->memmap[VIRT_ACLINT_SSWI].size,
> base_hartid, hart_count, true);
> }
> } else if (tcg_enabled()) {
> /* Per-socket SiFive CLINT */
> riscv_aclint_swi_create(
> - memmap[VIRT_CLINT].base + i * memmap[VIRT_CLINT].size,
> + s->memmap[VIRT_CLINT].base + i * s->memmap[VIRT_CLINT].size,
> base_hartid, hart_count, false);
> - riscv_aclint_mtimer_create(memmap[VIRT_CLINT].base +
> - i * memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
> + riscv_aclint_mtimer_create(s->memmap[VIRT_CLINT].base +
> + i * s->memmap[VIRT_CLINT].size + RISCV_ACLINT_SWI_SIZE,
> RISCV_ACLINT_DEFAULT_MTIMER_SIZE, base_hartid, hart_count,
> RISCV_ACLINT_DEFAULT_MTIMECMP, RISCV_ACLINT_DEFAULT_MTIME,
> RISCV_ACLINT_DEFAULT_TIMEBASE_FREQ, true);
> @@ -1620,11 +1621,11 @@ static void virt_machine_init(MachineState *machine)
>
> /* Per-socket interrupt controller */
> if (s->aia_type == VIRT_AIA_TYPE_NONE) {
> - s->irqchip[i] = virt_create_plic(memmap, i,
> + s->irqchip[i] = virt_create_plic(s->memmap, i,
> base_hartid, hart_count);
> } else {
> s->irqchip[i] = virt_create_aia(s->aia_type, s->aia_guests,
> - memmap, i, base_hartid,
> + s->memmap, i, base_hartid,
> hart_count);
> }
>
> @@ -1646,8 +1647,8 @@ static void virt_machine_init(MachineState *machine)
> if (kvm_enabled() && virt_use_kvm_aia_aplic_imsic(s->aia_type)) {
> kvm_riscv_aia_create(machine, IMSIC_MMIO_GROUP_MIN_SHIFT,
> VIRT_IRQCHIP_NUM_SOURCES, VIRT_IRQCHIP_NUM_MSIS,
> - memmap[VIRT_APLIC_S].base,
> - memmap[VIRT_IMSIC_S].base,
> + s->memmap[VIRT_APLIC_S].base,
> + s->memmap[VIRT_IMSIC_S].base,
> s->aia_guests);
> }
>
> @@ -1663,21 +1664,20 @@ static void virt_machine_init(MachineState *machine)
> virt_high_pcie_memmap.size = VIRT32_HIGH_PCIE_MMIO_SIZE;
> } else {
> virt_high_pcie_memmap.size = VIRT64_HIGH_PCIE_MMIO_SIZE;
> - virt_high_pcie_memmap.base = memmap[VIRT_DRAM].base + machine->ram_size;
> + virt_high_pcie_memmap.base = s->memmap[VIRT_DRAM].base +
> + machine->ram_size;
> virt_high_pcie_memmap.base =
> ROUND_UP(virt_high_pcie_memmap.base, virt_high_pcie_memmap.size);
> }
>
> - s->memmap = virt_memmap;
> -
> /* register system main memory (actual RAM) */
> - memory_region_add_subregion(system_memory, memmap[VIRT_DRAM].base,
> - machine->ram);
> + memory_region_add_subregion(system_memory, s->memmap[VIRT_DRAM].base,
> + machine->ram);
>
> /* boot rom */
> memory_region_init_rom(mask_rom, NULL, "riscv_virt_board.mrom",
> - memmap[VIRT_MROM].size, &error_fatal);
> - memory_region_add_subregion(system_memory, memmap[VIRT_MROM].base,
> + s->memmap[VIRT_MROM].size, &error_fatal);
> + memory_region_add_subregion(system_memory, s->memmap[VIRT_MROM].base,
> mask_rom);
>
> /*
> @@ -1688,12 +1688,12 @@ static void virt_machine_init(MachineState *machine)
> rom_set_fw(s->fw_cfg);
>
> /* SiFive Test MMIO device */
> - sifive_test_create(memmap[VIRT_TEST].base);
> + sifive_test_create(s->memmap[VIRT_TEST].base);
>
> /* VirtIO MMIO devices */
> for (i = 0; i < VIRTIO_COUNT; i++) {
> sysbus_create_simple("virtio-mmio",
> - memmap[VIRT_VIRTIO].base + i * memmap[VIRT_VIRTIO].size,
> + s->memmap[VIRT_VIRTIO].base + i * s->memmap[VIRT_VIRTIO].size,
> qdev_get_gpio_in(virtio_irqchip, VIRTIO_IRQ + i));
> }
>
> @@ -1701,11 +1701,11 @@ static void virt_machine_init(MachineState *machine)
>
> create_platform_bus(s, mmio_irqchip);
>
> - serial_mm_init(system_memory, memmap[VIRT_UART0].base,
> + serial_mm_init(system_memory, s->memmap[VIRT_UART0].base,
> 0, qdev_get_gpio_in(mmio_irqchip, UART0_IRQ), 399193,
> serial_hd(0), DEVICE_LITTLE_ENDIAN);
>
> - sysbus_create_simple("goldfish_rtc", memmap[VIRT_RTC].base,
> + sysbus_create_simple("goldfish_rtc", s->memmap[VIRT_RTC].base,
> qdev_get_gpio_in(mmio_irqchip, RTC_IRQ));
>
> for (i = 0; i < ARRAY_SIZE(s->flash); i++) {
> @@ -1723,7 +1723,7 @@ static void virt_machine_init(MachineState *machine)
> exit(1);
> }
> } else {
> - create_fdt(s, memmap);
> + create_fdt(s, s->memmap);
> }
>
> if (virt_is_iommu_sys_enabled(s)) {
> --
> 2.49.0
>
>
On Wed, 23 Apr 2025 at 20:37, Daniel Henrique Barboza <dbarboza@ventanamicro.com> wrote: > > Throughout the code we're accessing the board memmap, most of the time, > by accessing it statically via 'virt_memmap'. This static map is also > assigned in the machine state in s->memmap. > > We're also passing it as a variable to some fdt functions, which is > unorthodox since we can spare a function argument by accessing it > statically or via the machine state. > > All the current forms are valid but not all of the are scalable. In the > future we will version this board, and then all this code will need > rework because it should point to the updated memmap. In this case, > we'll want to assign the adequate versioned memmap once during init, > in s->memmap like it is being done today, and the rest of the code > will access the updated map via s->memmap. I was writing a patch for a machine and came across the same inconsistencies. Nice clean up. Some of the device initlisation code could be refactored out to be shared by other machines within the riscv directory. Related, parts of the device tree creation could belong to the model, instead of to the machine, as the properties are a property (!) of the device. With that in mind we should consider passing the eg. fdt pointer and the MemMap pointer instead of machine state, where practical. > We're also enforcing the pattern of using s->memmap instead of assigning > it to a temp variable 'memmap'. Code is copy/pasted around all the time > and being consistent is important. Reviewed-by: Joel Stanley <joel@jms.id.au> Cheers, Joel
On 4/24/25 6:51 AM, Joel Stanley wrote: > On Wed, 23 Apr 2025 at 20:37, Daniel Henrique Barboza > <dbarboza@ventanamicro.com> wrote: >> >> Throughout the code we're accessing the board memmap, most of the time, >> by accessing it statically via 'virt_memmap'. This static map is also >> assigned in the machine state in s->memmap. >> >> We're also passing it as a variable to some fdt functions, which is >> unorthodox since we can spare a function argument by accessing it >> statically or via the machine state. >> >> All the current forms are valid but not all of the are scalable. In the >> future we will version this board, and then all this code will need >> rework because it should point to the updated memmap. In this case, >> we'll want to assign the adequate versioned memmap once during init, >> in s->memmap like it is being done today, and the rest of the code >> will access the updated map via s->memmap. > > I was writing a patch for a machine and came across the same > inconsistencies. Nice clean up. > > Some of the device initlisation code could be refactored out to be > shared by other machines within the riscv directory. Related, parts of > the device tree creation could belong to the model, instead of to the > machine, as the properties are a property (!) of the device. Yes, delegating the FDT creation to the device, instead of having each machine to create the (mostly) same FDT code over and over again, is something that I've considering for awhile. I keep postponing it mainly because I would like to verify with the DT folks if there's a guarantee that a given device/CPU DT is always the same, i.e. a device DT is always the same regardless of the machine. I have a guess that that this is indeed the case but a confirmation would be nice .... Conor, care to comment? In this refactor we could then create FDTs by passing along a memmap pointer and a fdt pointer, as you've suggested. All this said, there's no need to do such FDT refactory all at once. 
I think I'll start with the most common devices between RISC-V boards and go from there. Thanks, Daniel > > With that in mind we should consider passing the eg. fdt pointer and > the MemMap pointer instead of machine state, where practical. > >> We're also enforcing the pattern of using s->memmap instead of assigning >> it to a temp variable 'memmap'. Code is copy/pasted around all the time >> and being consistent is important. > > Reviewed-by: Joel Stanley <joel@jms.id.au> > > Cheers, > > Joel
On Fri, 25 Apr 2025 at 21:23, Daniel Henrique Barboza <dbarboza@ventanamicro.com> wrote: > > > > On 4/24/25 6:51 AM, Joel Stanley wrote: > > On Wed, 23 Apr 2025 at 20:37, Daniel Henrique Barboza > > <dbarboza@ventanamicro.com> wrote: > >> > >> Throughout the code we're accessing the board memmap, most of the time, > >> by accessing it statically via 'virt_memmap'. This static map is also > >> assigned in the machine state in s->memmap. > >> > >> We're also passing it as a variable to some fdt functions, which is > >> unorthodox since we can spare a function argument by accessing it > >> statically or via the machine state. > >> > >> All the current forms are valid but not all of the are scalable. In the > >> future we will version this board, and then all this code will need > >> rework because it should point to the updated memmap. In this case, > >> we'll want to assign the adequate versioned memmap once during init, > >> in s->memmap like it is being done today, and the rest of the code > >> will access the updated map via s->memmap. > > > > I was writing a patch for a machine and came across the same > > inconsistencies. Nice clean up. > > > > Some of the device initlisation code could be refactored out to be > > shared by other machines within the riscv directory. Related, parts of > > the device tree creation could belong to the model, instead of to the > > machine, as the properties are a property (!) of the device. > > > Yes, delegating the FDT creation to the device, instead of having each machine > to create the (mostly) same FDT code over and over again, is something that > I've considering for awhile. > > I keep postponing it mainly because I would like to verify with the DT folks if > there's a guarantee that a given device/CPU DT is always the same, i.e. a device > DT is always the same regardless of the machine. I have a guess that that this is > indeed the case but a confirmation would be nice .... Conor, care to comment? 
I'd be interested in Coner's thoughts on this. My understanding is bindings strive to specify the hardware independent of the machine it's part of. We have bindings in the kernel tree, and associated drivers that use those bindings, that work fine on different machines. The litex peripherals are an extreme case of this; peripherals defined in python that are attached to soft cores often running on a FPGA. At a practical level generating the device tree for a given device does need to take into account specifics of the machine. Things like interrupt properties depend on the interrupt device you're delivering to (some have two cells to provide a 'flags' parameter alongside the irq number). In general anything that contains phandles could end up being machine specific. Another case is the number of cells in reg properties, which depend on the bus the device is described to be on. These are things that some common code would need to handle, not show stoppers for the idea. > In this refactor we could then create FDTs by passing along a memmap pointer and > a fdt pointer, as you've suggested. > > All this said, there's no need to do such FDT refactory all at once. I think I'll > start with the most common devices between RISC-V boards and go from there. Agreed! It was just something to keep in mind when deciding what pointers to pass around. Cheers, Joel
Yo, On Tue, Apr 29, 2025 at 02:55:44PM +0930, Joel Stanley wrote: > On Fri, 25 Apr 2025 at 21:23, Daniel Henrique Barboza > <dbarboza@ventanamicro.com> wrote: > > On 4/24/25 6:51 AM, Joel Stanley wrote: > > > On Wed, 23 Apr 2025 at 20:37, Daniel Henrique Barboza > > > <dbarboza@ventanamicro.com> wrote: > > >> > > >> Throughout the code we're accessing the board memmap, most of the time, > > >> by accessing it statically via 'virt_memmap'. This static map is also > > >> assigned in the machine state in s->memmap. > > >> > > >> We're also passing it as a variable to some fdt functions, which is > > >> unorthodox since we can spare a function argument by accessing it > > >> statically or via the machine state. > > >> > > >> All the current forms are valid but not all of the are scalable. In the > > >> future we will version this board, and then all this code will need > > >> rework because it should point to the updated memmap. In this case, > > >> we'll want to assign the adequate versioned memmap once during init, > > >> in s->memmap like it is being done today, and the rest of the code > > >> will access the updated map via s->memmap. > > > > > > I was writing a patch for a machine and came across the same > > > inconsistencies. Nice clean up. > > > > > > Some of the device initlisation code could be refactored out to be > > > shared by other machines within the riscv directory. Related, parts of > > > the device tree creation could belong to the model, instead of to the > > > machine, as the properties are a property (!) of the device. > > > > Yes, delegating the FDT creation to the device, instead of having each machine > > to create the (mostly) same FDT code over and over again, is something that > > I've considering for awhile. > > > > I keep postponing it mainly because I would like to verify with the DT folks if > > there's a guarantee that a given device/CPU DT is always the same, i.e. a device > > DT is always the same regardless of the machine. 
I have a guess that that this is > > indeed the case but a confirmation would be nice .... Conor, care to comment? Uhm, if the device node was always the same, regardless of "machine" then there'd be no need for any properties other than compatible/reg/links to provider nodes! But unfortunately that's not what properties are limited to, there's properties for a whole host of other elements of device configuration. Generally speaking, properties will be fixed for a given "machine"/board, but some could change depending on things like AMP affecting which mailbox channel you're permitted to use or using a different firmware (or version of said firmware) can change a node. For example, the extensions supported by a CPU can change based on what version of OpenSBI you're running. The latter might not affect QEMU, because you're providing a DT for the firmware to edit, but from the OS point of view there's different, but valid and complete, descriptions for the same physical hardware. Also, on the same machine you might have variation between two instances of the same IP, other than just the reg/provider links changing, for example if there's two different dwmac ethernet controllers configured to provide different maximum speeds. So yeah, I can't give you any assurance that even one specific IP/CPU on a specific SoC on a specific machine/board will have the same device node unfortunately. > My understanding is bindings strive to specify the hardware > independent of the machine it's part of. We have bindings in the > kernel tree, and associated drivers that use those bindings, that work > fine on different machines. The litex peripherals are an extreme case > of this; peripherals defined in python that are attached to soft cores > often running on a FPGA. Yeah, FPGA stuff is an extreme case, but a great thing to use as a test for the idea, since you could have properties that change from compilation to compilation. 
Take a look at something like snps,dwmac.yaml where there's about seven thousand different properties, many of which could be varied if it were in an FPGA design rather than an ASIC.. The other kinda extreme case in variability is the stuff that's on a board rather than an SoC, for example the same mmc/sd card controller on an SoC can have different device nodes depending on how a board has been designed. What Linux calls "IIO" devices are also impacted massively by how a board is configured or use-case, since they're often measurement devices with different analogue circuitry connected to them. I'm not 100% sure how all that translates to a machine in QEMU, dunno if running a different AMP context would require a different machine etc, but I hope that gives a general impression of what could potentially vary? LMK if not. Conor. > At a practical level generating the device tree for a given device > does need to take into account specifics of the machine. > > Things like interrupt properties depend on the interrupt device you're > delivering to (some have two cells to provide a 'flags' parameter > alongside the irq number). In general anything that contains phandles > could end up being machine specific. Another case is the number of > cells in reg properties, which depend on the bus the device is > described to be on. >
© 2016 - 2025 Red Hat, Inc.