sd_erase() currently zeroes the erase range one 512-byte block at a
time, which means one blk_pwrite_zeroes() call per block and is slow
for large ranges. Rework the logic to erase as much as possible in one
go: high-capacity cards zero the whole range with a single call, while
SDSC cards go write-protect group by write-protect group so that
protected groups can still be skipped.

Signed-off-by: Christian Speich <c.speich@avm.de>
---
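Note for reviewers: the range-splitting arithmetic is easiest to check
in isolation. Below is a minimal standalone sketch of the chunking the
SDSC path performs, not QEMU code: the helper name and the WPGROUP_SIZE
value are stand-ins, the macros are local reimplementations, and `end`
is the inclusive block-start address that sd_erase() uses.

    #include <stdint.h>
    #include <stdio.h>

    #define BLOCK_SIZE     512ULL           /* 1 << HWBLOCK_SHIFT */
    #define WPGROUP_SIZE   (32 * 1024ULL)   /* illustrative value only */
    #define MIN(a, b)      ((a) < (b) ? (a) : (b))
    #define ROUND_UP(n, d) (((n) + (d) - 1) / (d) * (d))

    /*
     * Emit one erase per write-protect group overlapped by the byte
     * range [start, end + BLOCK_SIZE), mirroring the loop in the patch.
     */
    static void erase_by_wp_group(uint64_t start, uint64_t end)
    {
        uint64_t addr;

        for (addr = start; addr <= end;
             addr = ROUND_UP(addr + 1, WPGROUP_SIZE)) {
            /* Exclusive end of the group that contains addr. */
            uint64_t group_end = ROUND_UP(addr + 1, WPGROUP_SIZE);
            uint64_t len = MIN(end + BLOCK_SIZE, group_end) - addr;

            printf("erase %8llu bytes at %8llu\n",
                   (unsigned long long)len, (unsigned long long)addr);
        }
    }

    int main(void)
    {
        /* A range that starts mid-group and ends mid-group. */
        erase_by_wp_group(512, 64 * 1024);
        return 0;
    }

This splits [512, 65536 + 512) into one call of 32256 bytes, one of
32768 bytes and a final one of 512 bytes, covering every block exactly
once; in sd_erase() the per-group WP check then lets a protected group
be skipped.
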
 hw/sd/sd.c | 21 ++++++++++++++-------
 1 file changed, 14 insertions(+), 7 deletions(-)

diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index 94ef3cc62582717ee044c4b114b7f22bd1b4a256..42870fa19414be61e43d2e07619ed193cc514319 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -1086,7 +1086,6 @@ static void sd_erase(SDState *sd)
     bool sdsc = true;
     uint64_t wpnum;
     uint64_t erase_addr;
-    int erase_len = 1 << HWBLOCK_SHIFT;
 
     trace_sdcard_erase(sd->erase_start, sd->erase_end);
     if (sd->erase_start == INVALID_ADDRESS
@@ -1115,19 +1114,27 @@ static void sd_erase(SDState *sd)
     sd->erase_end = INVALID_ADDRESS;
     sd->csd[14] |= 0x40;
 
-    for (erase_addr = erase_start; erase_addr <= erase_end;
-         erase_addr += erase_len) {
-        if (sdsc) {
-            /* Only SDSC cards support write protect groups */
+    /* Only SDSC cards support write protect groups */
+    if (sdsc) {
+        for (erase_addr = erase_start; erase_addr <= erase_end;
+             erase_addr = ROUND_UP(erase_addr + 1, WPGROUP_SIZE)) {
+            uint64_t wp_group_end = ROUND_UP(erase_addr + 1, WPGROUP_SIZE);
+            size_t to_erase = MIN(erase_end + (1 << HWBLOCK_SHIFT),
+                                  wp_group_end) - erase_addr;
+
             wpnum = sd_addr_to_wpnum(erase_addr);
             assert(wpnum < sd->wp_group_bits);
             if (test_bit(wpnum, sd->wp_group_bmap)) {
                 sd->card_status |= WP_ERASE_SKIP;
                 continue;
             }
+
+            blk_pwrite_zeroes(sd->blk, erase_addr + sd_part_offset(sd),
+                              to_erase, 0);
         }
-        blk_pwrite_zeroes(sd->blk, erase_addr + sd_part_offset(sd),
-                          erase_len, 0);
+    } else {
+        blk_pwrite_zeroes(sd->blk, erase_start + sd_part_offset(sd),
+                          erase_end - erase_start + (1 << HWBLOCK_SHIFT), 0);
     }
 }
 
--
2.43.0