From nobody Mon Feb 9 02:08:26 2026
From: Philippe Mathieu-Daudé
To: qemu-devel@nongnu.org
Cc: Peter Maydell, Richard Henderson, Mark Cave-Ayland, Alex Bennée, Paolo Bonzini, Philippe Mathieu-Daudé
Subject: [PATCH v2 1/2] accel/tcg/cputlb: Extract load_helper_unaligned() from load_helper()
Date: Wed, 9 Jun 2021 16:10:09 +0200
Message-Id: <20210609141010.1066750-2-f4bug@amsat.org>
In-Reply-To: <20210609141010.1066750-1-f4bug@amsat.org>
References: <20210609141010.1066750-1-f4bug@amsat.org>

Replace the goto statement with an inlined function, for easier review.
No logical change intended.

Inspired-by: Mark Cave-Ayland
Signed-off-by: Philippe Mathieu-Daudé
---
 accel/tcg/cputlb.c | 54 ++++++++++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 21 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f24348e9793..2b5d569412c 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1851,6 +1851,34 @@ load_memop(const void *haddr, MemOp op)
     }
 }
 
+static inline uint64_t QEMU_ALWAYS_INLINE
+load_helper_unaligned(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
+                      uintptr_t retaddr, MemOp op, bool code_read,
+                      FullLoadHelper *full_load)
+{
+    size_t size = memop_size(op);
+    target_ulong addr1, addr2;
+    uint64_t res;
+    uint64_t r1, r2;
+    unsigned shift;
+
+    addr1 = addr & ~((target_ulong)size - 1);
+    addr2 = addr1 + size;
+    r1 = full_load(env, addr1, oi, retaddr);
+    r2 = full_load(env, addr2, oi, retaddr);
+    shift = (addr & (size - 1)) * 8;
+
+    if (memop_big_endian(op)) {
+        /* Big-endian combine. */
+        res = (r1 << shift) | (r2 >> ((size * 8) - shift));
+    } else {
+        /* Little-endian combine. */
+        res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+    }
+
+    return res & MAKE_64BIT_MASK(0, size * 8);
+}
+
 static inline uint64_t QEMU_ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
@@ -1866,7 +1894,6 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
     unsigned a_bits = get_alignment_bits(get_memop(oi));
     void *haddr;
-    uint64_t res;
     size_t size = memop_size(op);
 
     /* Handle CPU specific unaligned behaviour */
@@ -1893,9 +1920,10 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
         CPUIOTLBEntry *iotlbentry;
         bool need_swap;
 
-        /* For anything that is unaligned, recurse through full_load. */
+        /* For anything that is unaligned, recurse through byte loads. */
         if ((addr & (size - 1)) != 0) {
-            goto do_unaligned_access;
+            return load_helper_unaligned(env, addr, oi, retaddr, op,
+                                         code_read, full_load);
         }
 
         iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
@@ -1932,24 +1960,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
     if (size > 1
         && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
                     >= TARGET_PAGE_SIZE)) {
-        target_ulong addr1, addr2;
-        uint64_t r1, r2;
-        unsigned shift;
-    do_unaligned_access:
-        addr1 = addr & ~((target_ulong)size - 1);
-        addr2 = addr1 + size;
-        r1 = full_load(env, addr1, oi, retaddr);
-        r2 = full_load(env, addr2, oi, retaddr);
-        shift = (addr & (size - 1)) * 8;
-
-        if (memop_big_endian(op)) {
-            /* Big-endian combine. */
-            res = (r1 << shift) | (r2 >> ((size * 8) - shift));
-        } else {
-            /* Little-endian combine. */
-            res = (r1 >> shift) | (r2 << ((size * 8) - shift));
-        }
-        return res & MAKE_64BIT_MASK(0, size * 8);
+        return load_helper_unaligned(env, addr, oi, retaddr, op,
+                                     code_read, full_load);
    }
 
    haddr = (void *)((uintptr_t)addr + entry->addend);
-- 
2.31.1
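
The shift-and-mask combine that this patch moves into load_helper_unaligned()
can be exercised on its own. The sketch below is illustrative only and not
part of the patch: the two full_load() calls are replaced by reads from an
ordinary byte buffer, the helper names and test values are made up, and only
the little-endian combine is shown.

/* Minimal sketch of the two-load combine used by load_helper_unaligned():
 * an unaligned load at `addr` is rebuilt from the two naturally aligned
 * `size`-byte chunks around it, shifted and masked as in the patch.
 * Only meaningful for genuinely unaligned addresses (shift != 0), which is
 * the only case the real helper is called for. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Stand-in for full_load(): read one aligned chunk, little-endian. */
static uint64_t load_chunk_le(const uint8_t *p, size_t size)
{
    uint64_t v = 0;
    for (size_t i = 0; i < size; i++) {
        v |= (uint64_t)p[i] << (i * 8);
    }
    return v;
}

static uint64_t load_unaligned_le(const uint8_t *buf, uintptr_t addr, size_t size)
{
    uintptr_t addr1 = addr & ~((uintptr_t)size - 1);   /* align down */
    uintptr_t addr2 = addr1 + size;                    /* next aligned chunk */
    uint64_t r1 = load_chunk_le(buf + addr1, size);
    uint64_t r2 = load_chunk_le(buf + addr2, size);
    unsigned shift = (addr & (size - 1)) * 8;

    /* Little-endian combine, as in the patch. */
    uint64_t res = (r1 >> shift) | (r2 << ((size * 8) - shift));
    /* Stand-in for MAKE_64BIT_MASK(0, size * 8); valid here as size < 8. */
    return res & ((UINT64_C(1) << (size * 8)) - 1);
}

int main(void)
{
    uint8_t buf[16];
    for (int i = 0; i < 16; i++) {
        buf[i] = i;                 /* buf[i] == i, for easy checking */
    }
    /* A 4-byte load at offset 6 spans the aligned chunks 4..7 and 8..11;
     * the expected little-endian result is bytes 6,7,8,9 -> 0x09080706. */
    printf("0x%08" PRIx64 "\n", load_unaligned_le(buf, 6, 4));
    return 0;
}

Running it prints 0x09080706, the value a native unaligned little-endian
32-bit load of buf + 6 would return.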
From nobody Mon Feb 9 02:08:26 2026
From: Philippe Mathieu-Daudé
To: qemu-devel@nongnu.org
Cc: Peter Maydell, Richard Henderson, Mark Cave-Ayland, Alex Bennée, Paolo Bonzini, Philippe Mathieu-Daudé
Subject: [RFC PATCH v2 2/2] cputlb: implement load_helper_unaligned() for unaligned loads
Date: Wed, 9 Jun 2021 16:10:10 +0200
Message-Id: <20210609141010.1066750-3-f4bug@amsat.org>
In-Reply-To: <20210609141010.1066750-1-f4bug@amsat.org>
References: <20210609141010.1066750-1-f4bug@amsat.org>

From: Mark Cave-Ayland

[RFC because this is currently only lightly tested and there have been some
discussions about whether this should be handled elsewhere in the memory API]

If an unaligned load is required then the load is split into two separate
accesses and combined together within load_helper(). This does not work
correctly with MMIO accesses, because the original access size is used for
both individual accesses, causing the little- and big-endian combine to
return the wrong result.
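
As a rough illustration of that difference (not part of the patch; the helper
names below are made up), the following sketch prints the accesses each
strategy issues for a 4-byte load at an address that is only 2-byte aligned.
Device behaviour itself is not modelled, only the access pattern.

/* Illustrative only: which accesses reach the device for an unaligned load.
 * "old" follows the former do_unaligned_access path (two reads of the
 * original size at aligned addresses); "new" follows this patch's byte
 * loop (one single-byte read per byte, done via helper_ret_ldub_mmu()
 * in the real code). */
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static void old_split(uint64_t addr, unsigned size)
{
    uint64_t addr1 = addr & ~(uint64_t)(size - 1);
    printf("old: %u-byte read @ 0x%" PRIx64 ", %u-byte read @ 0x%" PRIx64 "\n",
           size, addr1, size, addr1 + size);
}

static void new_split(uint64_t addr, unsigned size)
{
    printf("new:");
    for (unsigned i = 0; i < size; i++) {
        printf(" 1-byte read @ 0x%" PRIx64, addr + i);
    }
    printf("\n");
}

int main(void)
{
    old_split(0x1002, 4);   /* old: 4-byte read @ 0x1000, 4-byte read @ 0x1004 */
    new_split(0x1002, 4);   /* new: 1-byte reads @ 0x1002..0x1005 */
    return 0;
}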
There is already a similar solution in place for store_helper(), where an
unaligned access is handled by a separate store_helper_unaligned() function
which, instead of using the original access size, uses a single-byte access
size to shift and combine the result correctly regardless of the original
access size or endianness.

Implement a similar load_helper_unaligned() function which uses the same
approach for unaligned loads, so that the original test case returns the
correct result.

Signed-off-by: Mark Cave-Ayland
Resolves: https://gitlab.com/qemu-project/qemu/-/issues/360
Message-Id: <20210609093528.9616-1-mark.cave-ayland@ilande.co.uk>
[PMD: Extract load_helper_unaligned() in earlier patch]
Signed-off-by: Philippe Mathieu-Daudé
---
 accel/tcg/cputlb.c | 84 +++++++++++++++++++++++++++++++++++++---------
 1 file changed, 68 insertions(+), 16 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 2b5d569412c..f8a790d8b4a 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1856,27 +1856,79 @@ load_helper_unaligned(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
                       uintptr_t retaddr, MemOp op, bool code_read,
                       FullLoadHelper *full_load)
 {
+    uintptr_t mmu_idx = get_mmuidx(oi);
     size_t size = memop_size(op);
-    target_ulong addr1, addr2;
-    uint64_t res;
-    uint64_t r1, r2;
-    unsigned shift;
+    uintptr_t index, index2;
+    CPUTLBEntry *entry, *entry2;
+    const size_t tlb_off = code_read ?
+        offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
+    const MMUAccessType access_type =
+        code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
+    target_ulong page2, tlb_addr, tlb_addr2;
+    uint64_t val = 0;
+    size_t size2;
+    int i;
 
-    addr1 = addr & ~((target_ulong)size - 1);
-    addr2 = addr1 + size;
-    r1 = full_load(env, addr1, oi, retaddr);
-    r2 = full_load(env, addr2, oi, retaddr);
-    shift = (addr & (size - 1)) * 8;
+    /*
+     * Ensure the second page is in the TLB. Note that the first page
+     * is already guaranteed to be filled, and that the second page
+     * cannot evict the first.
+     */
+    page2 = (addr + size) & TARGET_PAGE_MASK;
+    size2 = (addr + size) & ~TARGET_PAGE_MASK;
+    index2 = tlb_index(env, mmu_idx, page2);
+    entry2 = tlb_entry(env, mmu_idx, page2);
 
-    if (memop_big_endian(op)) {
-        /* Big-endian combine. */
-        res = (r1 << shift) | (r2 >> ((size * 8) - shift));
-    } else {
-        /* Little-endian combine. */
-        res = (r1 >> shift) | (r2 << ((size * 8) - shift));
+    tlb_addr2 = code_read ? entry2->addr_code : entry2->addr_read;
+    if (!tlb_hit_page(tlb_addr2, page2)) {
+        if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
+            tlb_fill(env_cpu(env), page2, size2, access_type,
+                     mmu_idx, retaddr);
+            index2 = tlb_index(env, mmu_idx, page2);
+            entry2 = tlb_entry(env, mmu_idx, page2);
+        }
+        tlb_addr2 = code_read ? entry2->addr_code : entry2->addr_read;
     }
 
-    return res & MAKE_64BIT_MASK(0, size * 8);
+    index = tlb_index(env, mmu_idx, addr);
+    entry = tlb_entry(env, mmu_idx, addr);
+    tlb_addr = code_read ? entry->addr_code : entry->addr_read;
+
+    /*
+     * Handle watchpoints
+     */
+    if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
+        cpu_check_watchpoint(env_cpu(env), addr, size - size2,
+                             env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
+                             BP_MEM_READ, retaddr);
+    }
+    if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
+        cpu_check_watchpoint(env_cpu(env), page2, size2,
+                             env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
+                             BP_MEM_READ, retaddr);
+    }
+
+    /*
+     * XXX: not efficient, but simple.
+     * This loop must go in the forward direction to avoid issues
+     * with self-modifying code in Windows 64-bit.
+     */
+    oi = make_memop_idx(MO_UB, mmu_idx);
+    if (memop_big_endian(op)) {
+        for (i = 0; i < size; ++i) {
+            /* Big-endian load. */
+            uint8_t val8 = helper_ret_ldub_mmu(env, addr + i, oi, retaddr);
+            val |= val8 << (((size - 1) * 8) - (i * 8));
+        }
+    } else {
+        for (i = 0; i < size; ++i) {
+            /* Little-endian load. */
+            uint8_t val8 = helper_ret_ldub_mmu(env, addr + i, oi, retaddr);
+            val |= val8 << (i * 8);
+        }
+    }
+
+    return val & MAKE_64BIT_MASK(0, size * 8);
 }
 
 static inline uint64_t QEMU_ALWAYS_INLINE
-- 
2.31.1
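
For reference, the byte-by-byte combine used in the patch above can be
reproduced outside of QEMU. The sketch below is an illustration under
assumptions, not the patch itself: the guest memory or device is replaced by
a plain array, and the single-byte loads that the patch performs with
helper_ret_ldub_mmu() become direct array reads. The explicit uint64_t cast
before the shift keeps the shift well defined even for 8-byte accesses.

/* Illustration of the byte loop in the new load_helper_unaligned():
 * fetch one byte at a time and shift it into place according to the
 * requested endianness.  bytes[] stands in for the individual byte loads. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

static uint64_t combine_bytes(const uint8_t *bytes, size_t size, bool big_endian)
{
    uint64_t val = 0;

    if (big_endian) {
        for (size_t i = 0; i < size; ++i) {
            /* First byte lands in the most significant position. */
            val |= (uint64_t)bytes[i] << (((size - 1) * 8) - (i * 8));
        }
    } else {
        for (size_t i = 0; i < size; ++i) {
            /* First byte lands in the least significant position. */
            val |= (uint64_t)bytes[i] << (i * 8);
        }
    }
    return val;
}

int main(void)
{
    const uint8_t bytes[4] = { 0x12, 0x34, 0x56, 0x78 };

    printf("big-endian:    0x%08" PRIx64 "\n", combine_bytes(bytes, 4, true));   /* 0x12345678 */
    printf("little-endian: 0x%08" PRIx64 "\n", combine_bytes(bytes, 4, false));  /* 0x78563412 */
    return 0;
}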