Rename the API methods using the explicit 'unaligned'
description instead of 'he', which stands for 'host
endianness'.
Inspired-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
---
include/qemu/bswap.h | 32 ++++++++++++++--------------
include/qemu/memory_ldst_unaligned.h | 16 +++++++-------
accel/tcg/translator.c | 6 +++---
hw/display/ati_2d.c | 2 +-
hw/display/sm501.c | 19 ++++++++++-------
hw/remote/vfio-user-obj.c | 4 ++--
hw/vmapple/virtio-blk.c | 2 +-
net/checksum.c | 6 +++---
system/memory.c | 4 ++--
system/physmem.c | 8 +++----
ui/vnc-enc-tight.c | 2 +-
util/bufferiszero.c | 6 +++---
accel/tcg/ldst_atomicity.c.inc | 10 ++++-----
13 files changed, 60 insertions(+), 57 deletions(-)
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index e8b944988c3..f7c806085fb 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -237,82 +237,82 @@ static inline void stb_p(void *ptr, uint8_t v)
static inline int lduw_le_p(const void *ptr)
{
- return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
+ return (uint16_t)le_bswap(lduw_unaligned_p(ptr), 16);
}
static inline int ldsw_le_p(const void *ptr)
{
- return (int16_t)le_bswap(lduw_he_p(ptr), 16);
+ return (int16_t)le_bswap(lduw_unaligned_p(ptr), 16);
}
static inline int ldl_le_p(const void *ptr)
{
- return le_bswap(ldl_he_p(ptr), 32);
+ return le_bswap(ldl_unaligned_p(ptr), 32);
}
static inline uint64_t ldq_le_p(const void *ptr)
{
- return le_bswap(ldq_he_p(ptr), 64);
+ return le_bswap(ldq_unaligned_p(ptr), 64);
}
static inline void stw_le_p(void *ptr, uint16_t v)
{
- stw_he_p(ptr, le_bswap(v, 16));
+ stw_unaligned_p(ptr, le_bswap(v, 16));
}
static inline void st24_le_p(void *ptr, uint32_t v)
{
- st24_he_p(ptr, le_bswap24(v));
+ st24_unaligned_p(ptr, le_bswap24(v));
}
static inline void stl_le_p(void *ptr, uint32_t v)
{
- stl_he_p(ptr, le_bswap(v, 32));
+ stl_unaligned_p(ptr, le_bswap(v, 32));
}
static inline void stq_le_p(void *ptr, uint64_t v)
{
- stq_he_p(ptr, le_bswap(v, 64));
+ stq_unaligned_p(ptr, le_bswap(v, 64));
}
static inline int lduw_be_p(const void *ptr)
{
- return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
+ return (uint16_t)be_bswap(lduw_unaligned_p(ptr), 16);
}
static inline int ldsw_be_p(const void *ptr)
{
- return (int16_t)be_bswap(lduw_he_p(ptr), 16);
+ return (int16_t)be_bswap(lduw_unaligned_p(ptr), 16);
}
static inline int ldl_be_p(const void *ptr)
{
- return be_bswap(ldl_he_p(ptr), 32);
+ return be_bswap(ldl_unaligned_p(ptr), 32);
}
static inline uint64_t ldq_be_p(const void *ptr)
{
- return be_bswap(ldq_he_p(ptr), 64);
+ return be_bswap(ldq_unaligned_p(ptr), 64);
}
static inline void stw_be_p(void *ptr, uint16_t v)
{
- stw_he_p(ptr, be_bswap(v, 16));
+ stw_unaligned_p(ptr, be_bswap(v, 16));
}
static inline void st24_be_p(void *ptr, uint32_t v)
{
- st24_he_p(ptr, be_bswap24(v));
+ st24_unaligned_p(ptr, be_bswap24(v));
}
static inline void stl_be_p(void *ptr, uint32_t v)
{
- stl_he_p(ptr, be_bswap(v, 32));
+ stl_unaligned_p(ptr, be_bswap(v, 32));
}
static inline void stq_be_p(void *ptr, uint64_t v)
{
- stq_he_p(ptr, be_bswap(v, 64));
+ stq_unaligned_p(ptr, be_bswap(v, 64));
}
/* Store v to p as a sz byte value in host order */
diff --git a/include/qemu/memory_ldst_unaligned.h b/include/qemu/memory_ldst_unaligned.h
index f6b64e8fe9c..6b0cc3e2777 100644
--- a/include/qemu/memory_ldst_unaligned.h
+++ b/include/qemu/memory_ldst_unaligned.h
@@ -16,50 +16,50 @@
* of good performance.
*/
-static inline int lduw_he_p(const void *ptr)
+static inline int lduw_unaligned_p(const void *ptr)
{
uint16_t r;
__builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
-static inline int ldsw_he_p(const void *ptr)
+static inline int ldsw_unaligned_p(const void *ptr)
{
int16_t r;
__builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
-static inline void stw_he_p(void *ptr, uint16_t v)
+static inline void stw_unaligned_p(void *ptr, uint16_t v)
{
__builtin_memcpy(ptr, &v, sizeof(v));
}
-static inline void st24_he_p(void *ptr, uint32_t v)
+static inline void st24_unaligned_p(void *ptr, uint32_t v)
{
__builtin_memcpy(ptr, &v, 3);
}
-static inline int ldl_he_p(const void *ptr)
+static inline int ldl_unaligned_p(const void *ptr)
{
int32_t r;
__builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
-static inline void stl_he_p(void *ptr, uint32_t v)
+static inline void stl_unaligned_p(void *ptr, uint32_t v)
{
__builtin_memcpy(ptr, &v, sizeof(v));
}
-static inline uint64_t ldq_he_p(const void *ptr)
+static inline uint64_t ldq_unaligned_p(const void *ptr)
{
uint64_t r;
__builtin_memcpy(&r, ptr, sizeof(r));
return r;
}
-static inline void stq_he_p(void *ptr, uint64_t v)
+static inline void stq_unaligned_p(void *ptr, uint64_t v)
{
__builtin_memcpy(ptr, &v, sizeof(v));
}
diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
index 86cdd70c47f..497128c96c5 100644
--- a/accel/tcg/translator.c
+++ b/accel/tcg/translator.c
@@ -342,14 +342,14 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
case 2:
if (QEMU_IS_ALIGNED(pc, 2)) {
uint16_t t = qatomic_read((uint16_t *)host);
- stw_he_p(dest, t);
+ stw_unaligned_p(dest, t);
return true;
}
break;
case 4:
if (QEMU_IS_ALIGNED(pc, 4)) {
uint32_t t = qatomic_read((uint32_t *)host);
- stl_he_p(dest, t);
+ stl_unaligned_p(dest, t);
return true;
}
break;
@@ -357,7 +357,7 @@ static bool translator_ld(CPUArchState *env, DisasContextBase *db,
case 8:
if (QEMU_IS_ALIGNED(pc, 8)) {
uint64_t t = qatomic_read__nocheck((uint64_t *)host);
- stq_he_p(dest, t);
+ stq_unaligned_p(dest, t);
return true;
}
break;
diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c
index 24a3c3e942f..333d78d7455 100644
--- a/hw/display/ati_2d.c
+++ b/hw/display/ati_2d.c
@@ -222,7 +222,7 @@ void ati_2d_blt(ATIVGAState *s)
for (y = 0; y < s->regs.dst_height; y++) {
i = dst_x * bypp + (dst_y + y) * dst_pitch;
for (x = 0; x < s->regs.dst_width; x++, i += bypp) {
- stn_he_p(&dst_bits[i], bypp, filler);
+ stn_unaligned_p(&dst_bits[i], bypp, filler);
}
}
}
diff --git a/hw/display/sm501.c b/hw/display/sm501.c
index 51efe2ad41f..776fedebb13 100644
--- a/hw/display/sm501.c
+++ b/hw/display/sm501.c
@@ -768,7 +768,7 @@ static void sm501_2d_operation(SM501State *s)
for (y = 0; y < height; y++) {
i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
for (x = 0; x < width; x++, i += bypp) {
- stn_he_p(&d[i], bypp, ~ldn_he_p(&d[i], bypp));
+ stn_unaligned_p(&d[i], bypp, ~ldn_unaligned_p(&d[i], bypp));
}
}
} else if (!rop_mode && rop == 0x99) {
@@ -781,8 +781,9 @@ static void sm501_2d_operation(SM501State *s)
i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
j = (src_x + (src_y + y) * src_pitch) * bypp;
for (x = 0; x < width; x++, i += bypp, j += bypp) {
- stn_he_p(&d[i], bypp,
- ~(ldn_he_p(&sp[j], bypp) ^ ldn_he_p(&d[i], bypp)));
+ stn_unaligned_p(&d[i], bypp,
+ ~(ldn_unaligned_p(&sp[j], bypp)
+ ^ ldn_unaligned_p(&d[i], bypp)));
}
}
} else if (!rop_mode && rop == 0xee) {
@@ -795,8 +796,9 @@ static void sm501_2d_operation(SM501State *s)
i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
j = (src_x + (src_y + y) * src_pitch) * bypp;
for (x = 0; x < width; x++, i += bypp, j += bypp) {
- stn_he_p(&d[i], bypp,
- ldn_he_p(&sp[j], bypp) | ldn_he_p(&d[i], bypp));
+ stn_unaligned_p(&d[i], bypp,
+ ldn_unaligned_p(&sp[j], bypp)
+ | ldn_unaligned_p(&d[i], bypp));
}
}
} else {
@@ -818,8 +820,9 @@ static void sm501_2d_operation(SM501State *s)
if (width == 1 && height == 1) {
unsigned int si = (src_x + src_y * src_pitch) * bypp;
unsigned int di = (dst_x + dst_y * dst_pitch) * bypp;
- stn_he_p(&s->local_mem[dst_base + di], bypp,
- ldn_he_p(&s->local_mem[src_base + si], bypp));
+ stn_unaligned_p(&s->local_mem[dst_base + di], bypp,
+ ldn_unaligned_p(&s->local_mem[src_base + si],
+ bypp));
break;
}
/* If reverse blit do simple check for overlaps */
@@ -917,7 +920,7 @@ static void sm501_2d_operation(SM501State *s)
for (y = 0; y < height; y++) {
i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
for (x = 0; x < width; x++, i += bypp) {
- stn_he_p(&d[i], bypp, color);
+ stn_unaligned_p(&d[i], bypp, color);
}
}
}
diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
index 9c9ac8b0d92..44adff10eb9 100644
--- a/hw/remote/vfio-user-obj.c
+++ b/hw/remote/vfio-user-obj.c
@@ -392,7 +392,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
access_size = memory_access_size(mr, size, offset);
if (is_write) {
- val = ldn_he_p(ptr, access_size);
+ val = ldn_unaligned_p(ptr, access_size);
result = memory_region_dispatch_write(mr, offset, val,
size_memop(access_size),
@@ -402,7 +402,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t *buf, hwaddr offset,
size_memop(access_size),
MEMTXATTRS_UNSPECIFIED);
- stn_he_p(ptr, access_size, val);
+ stn_unaligned_p(ptr, access_size, val);
}
if (release_lock) {
diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c
index f5e8e92df75..805bcbc9467 100644
--- a/hw/vmapple/virtio-blk.c
+++ b/hw/vmapple/virtio-blk.c
@@ -79,7 +79,7 @@ static void vmapple_virtio_blk_get_config(VirtIODevice *vdev, uint8_t *config)
g_assert(dev->parent_obj.config_size >= endof(struct virtio_blk_config, zoned));
/* Apple abuses the field for max_secure_erase_sectors as type id */
- stl_he_p(&blkcfg->max_secure_erase_sectors, dev->apple_type);
+ stl_unaligned_p(&blkcfg->max_secure_erase_sectors, dev->apple_type);
}
static void vmapple_virtio_blk_class_init(ObjectClass *klass, const void *data)
diff --git a/net/checksum.c b/net/checksum.c
index 18be31c26e5..56314f5e76d 100644
--- a/net/checksum.c
+++ b/net/checksum.c
@@ -110,7 +110,7 @@ void net_checksum_calculate(void *data, int length, int csum_flag)
/* Calculate IP checksum */
if (csum_flag & CSUM_IP) {
- stw_he_p(&ip->ip_sum, 0);
+ stw_unaligned_p(&ip->ip_sum, 0);
csum = net_raw_checksum((uint8_t *)ip, IP_HDR_GET_LEN(ip));
stw_be_p(&ip->ip_sum, csum);
}
@@ -142,7 +142,7 @@ void net_checksum_calculate(void *data, int length, int csum_flag)
}
/* Set csum to 0 */
- stw_he_p(&tcp->th_sum, 0);
+ stw_unaligned_p(&tcp->th_sum, 0);
csum = net_checksum_tcpudp(ip_len, ip->ip_p,
(uint8_t *)&ip->ip_src,
@@ -166,7 +166,7 @@ void net_checksum_calculate(void *data, int length, int csum_flag)
}
/* Set csum to 0 */
- stw_he_p(&udp->uh_sum, 0);
+ stw_unaligned_p(&udp->uh_sum, 0);
csum = net_checksum_tcpudp(ip_len, ip->ip_p,
(uint8_t *)&ip->ip_src,
diff --git a/system/memory.c b/system/memory.c
index 8b84661ae36..653d46e8e79 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -1365,7 +1365,7 @@ static uint64_t memory_region_ram_device_read(void *opaque,
hwaddr addr, unsigned size)
{
MemoryRegion *mr = opaque;
- uint64_t data = ldn_he_p(mr->ram_block->host + addr, size);
+ uint64_t data = ldn_unaligned_p(mr->ram_block->host + addr, size);
trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
@@ -1379,7 +1379,7 @@ static void memory_region_ram_device_write(void *opaque, hwaddr addr,
trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
- stn_he_p(mr->ram_block->host + addr, size, data);
+ stn_unaligned_p(mr->ram_block->host + addr, size, data);
}
static const MemoryRegionOps ram_device_mem_ops = {
diff --git a/system/physmem.c b/system/physmem.c
index fb69cdb57d9..d62af6f2480 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -3284,7 +3284,7 @@ static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
/*
* Assure Coverity (and ourselves) that we are not going to OVERRUN
- * the buffer by following ldn_he_p().
+ * the buffer by following ldn_unaligned_p().
*/
#ifdef QEMU_STATIC_ANALYSIS
assert((*l == 1 && len >= 1) ||
@@ -3292,7 +3292,7 @@ static MemTxResult flatview_write_continue_step(MemTxAttrs attrs,
(*l == 4 && len >= 4) ||
(*l == 8 && len >= 8));
#endif
- val = ldn_he_p(buf, *l);
+ val = ldn_unaligned_p(buf, *l);
result = memory_region_dispatch_write(mr, mr_addr, val,
size_memop(*l), attrs);
if (release_lock) {
@@ -3379,7 +3379,7 @@ static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
/*
* Assure Coverity (and ourselves) that we are not going to OVERRUN
- * the buffer by following stn_he_p().
+ * the buffer by following stn_unaligned_p().
*/
#ifdef QEMU_STATIC_ANALYSIS
assert((*l == 1 && len >= 1) ||
@@ -3387,7 +3387,7 @@ static MemTxResult flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
(*l == 4 && len >= 4) ||
(*l == 8 && len >= 8));
#endif
- stn_he_p(buf, *l, val);
+ stn_unaligned_p(buf, *l, val);
if (release_lock) {
bql_unlock();
diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c
index b645aebccef..62d825dc05f 100644
--- a/ui/vnc-enc-tight.c
+++ b/ui/vnc-enc-tight.c
@@ -912,7 +912,7 @@ static void tight_pack24(VncState *vs, uint8_t *buf, size_t count, size_t *ret)
}
while (count--) {
- pix = ldl_he_p(buf8);
+ pix = ldl_unaligned_p(buf8);
*buf++ = (char)(pix >> rshift);
*buf++ = (char)(pix >> gshift);
*buf++ = (char)(pix >> bshift);
diff --git a/util/bufferiszero.c b/util/bufferiszero.c
index 9548dd3ad1b..d7775dc891d 100644
--- a/util/bufferiszero.c
+++ b/util/bufferiszero.c
@@ -38,10 +38,10 @@ static bool buffer_is_zero_int_lt256(const void *buf, size_t len)
* the beginning and end of the buffer.
*/
if (unlikely(len <= 8)) {
- return (ldl_he_p(buf) | ldl_he_p(buf + len - 4)) == 0;
+ return (ldl_unaligned_p(buf) | ldl_unaligned_p(buf + len - 4)) == 0;
}
- t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
+ t = ldq_unaligned_p(buf) | ldq_unaligned_p(buf + len - 8);
p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
@@ -58,7 +58,7 @@ static bool buffer_is_zero_int_ge256(const void *buf, size_t len)
* Use unaligned memory access functions to handle
* the beginning and end of the buffer.
*/
- uint64_t t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
+ uint64_t t = ldq_unaligned_p(buf) | ldq_unaligned_p(buf + len - 8);
const uint64_t *p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
const uint64_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
diff --git a/accel/tcg/ldst_atomicity.c.inc b/accel/tcg/ldst_atomicity.c.inc
index c735add2615..eeb18c716c0 100644
--- a/accel/tcg/ldst_atomicity.c.inc
+++ b/accel/tcg/ldst_atomicity.c.inc
@@ -415,7 +415,7 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
- return lduw_he_p(pv);
+ return lduw_unaligned_p(pv);
case MO_16:
/* The only case remaining is MO_ATOM_WITHIN16. */
if (!HAVE_al8_fast && (pi & 3) == 1) {
@@ -512,7 +512,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
}
switch (atmax) {
case MO_8:
- return ldq_he_p(pv);
+ return ldq_unaligned_p(pv);
case MO_16:
return load_atom_8_by_2(pv);
case MO_32:
@@ -875,7 +875,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
atmax = required_atomicity(cpu, pi, memop);
if (atmax == MO_8) {
- stw_he_p(pv, val);
+ stw_unaligned_p(pv, val);
return;
}
@@ -928,7 +928,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
- stl_he_p(pv, val);
+ stl_unaligned_p(pv, val);
return;
case MO_16:
store_atom_4_by_2(pv, val);
@@ -996,7 +996,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
atmax = required_atomicity(cpu, pi, memop);
switch (atmax) {
case MO_8:
- stq_he_p(pv, val);
+ stq_unaligned_p(pv, val);
return;
case MO_16:
store_atom_8_by_2(pv, val);
--
2.52.0
On 12/29/25 03:18, Philippe Mathieu-Daudé wrote:
> Rename the API methods using the explicit 'unaligned'
> description instead of 'he' which stands for 'host
> endianness'.
>
> Inspired-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
>  include/qemu/bswap.h                 | 32 ++++++++++++++--------------
>  include/qemu/memory_ldst_unaligned.h | 16 +++++++-------
>  accel/tcg/translator.c               |  6 +++---
>  hw/display/ati_2d.c                  |  2 +-
>  hw/display/sm501.c                   | 19 ++++++++++-------
>  hw/remote/vfio-user-obj.c            |  4 ++--
>  hw/vmapple/virtio-blk.c              |  2 +-
>  net/checksum.c                       |  6 +++---
>  system/memory.c                      |  4 ++--
>  system/physmem.c                     |  8 +++----
>  ui/vnc-enc-tight.c                   |  2 +-
>  util/bufferiszero.c                  |  6 +++---
>  accel/tcg/ldst_atomicity.c.inc       | 10 ++++-----
>  13 files changed, 60 insertions(+), 57 deletions(-)

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~
Il dom 28 dic 2025, 17:19 Philippe Mathieu-Daudé <philmd@linaro.org> ha
scritto:
> Rename the API methods using the explicit 'unaligned'
> description instead of 'he' which stands for 'host
> endianness'.
>
> Inspired-by: Paolo Bonzini <pbonzini@redhat.com>
> Signed-off-by: Philippe Mathieu-Daudé <philmd@linaro.org>
> ---
> include/qemu/bswap.h | 32 ++++++++++++++--------------
> include/qemu/memory_ldst_unaligned.h | 16 +++++++-------
>
No "memory_" since it's for host pointers and not guest memory addresses.
Paolo
accel/tcg/translator.c | 6 +++---
> hw/display/ati_2d.c | 2 +-
> hw/display/sm501.c | 19 ++++++++++-------
> hw/remote/vfio-user-obj.c | 4 ++--
> hw/vmapple/virtio-blk.c | 2 +-
> net/checksum.c | 6 +++---
> system/memory.c | 4 ++--
> system/physmem.c | 8 +++----
> ui/vnc-enc-tight.c | 2 +-
> util/bufferiszero.c | 6 +++---
> accel/tcg/ldst_atomicity.c.inc | 10 ++++-----
> 13 files changed, 60 insertions(+), 57 deletions(-)
>
> diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
> index e8b944988c3..f7c806085fb 100644
> --- a/include/qemu/bswap.h
> +++ b/include/qemu/bswap.h
> @@ -237,82 +237,82 @@ static inline void stb_p(void *ptr, uint8_t v)
>
> static inline int lduw_le_p(const void *ptr)
> {
> - return (uint16_t)le_bswap(lduw_he_p(ptr), 16);
> + return (uint16_t)le_bswap(lduw_unaligned_p(ptr), 16);
> }
>
> static inline int ldsw_le_p(const void *ptr)
> {
> - return (int16_t)le_bswap(lduw_he_p(ptr), 16);
> + return (int16_t)le_bswap(lduw_unaligned_p(ptr), 16);
> }
>
> static inline int ldl_le_p(const void *ptr)
> {
> - return le_bswap(ldl_he_p(ptr), 32);
> + return le_bswap(ldl_unaligned_p(ptr), 32);
> }
>
> static inline uint64_t ldq_le_p(const void *ptr)
> {
> - return le_bswap(ldq_he_p(ptr), 64);
> + return le_bswap(ldq_unaligned_p(ptr), 64);
> }
>
> static inline void stw_le_p(void *ptr, uint16_t v)
> {
> - stw_he_p(ptr, le_bswap(v, 16));
> + stw_unaligned_p(ptr, le_bswap(v, 16));
> }
>
> static inline void st24_le_p(void *ptr, uint32_t v)
> {
> - st24_he_p(ptr, le_bswap24(v));
> + st24_unaligned_p(ptr, le_bswap24(v));
> }
>
> static inline void stl_le_p(void *ptr, uint32_t v)
> {
> - stl_he_p(ptr, le_bswap(v, 32));
> + stl_unaligned_p(ptr, le_bswap(v, 32));
> }
>
> static inline void stq_le_p(void *ptr, uint64_t v)
> {
> - stq_he_p(ptr, le_bswap(v, 64));
> + stq_unaligned_p(ptr, le_bswap(v, 64));
> }
>
> static inline int lduw_be_p(const void *ptr)
> {
> - return (uint16_t)be_bswap(lduw_he_p(ptr), 16);
> + return (uint16_t)be_bswap(lduw_unaligned_p(ptr), 16);
> }
>
> static inline int ldsw_be_p(const void *ptr)
> {
> - return (int16_t)be_bswap(lduw_he_p(ptr), 16);
> + return (int16_t)be_bswap(lduw_unaligned_p(ptr), 16);
> }
>
> static inline int ldl_be_p(const void *ptr)
> {
> - return be_bswap(ldl_he_p(ptr), 32);
> + return be_bswap(ldl_unaligned_p(ptr), 32);
> }
>
> static inline uint64_t ldq_be_p(const void *ptr)
> {
> - return be_bswap(ldq_he_p(ptr), 64);
> + return be_bswap(ldq_unaligned_p(ptr), 64);
> }
>
> static inline void stw_be_p(void *ptr, uint16_t v)
> {
> - stw_he_p(ptr, be_bswap(v, 16));
> + stw_unaligned_p(ptr, be_bswap(v, 16));
> }
>
> static inline void st24_be_p(void *ptr, uint32_t v)
> {
> - st24_he_p(ptr, be_bswap24(v));
> + st24_unaligned_p(ptr, be_bswap24(v));
> }
>
> static inline void stl_be_p(void *ptr, uint32_t v)
> {
> - stl_he_p(ptr, be_bswap(v, 32));
> + stl_unaligned_p(ptr, be_bswap(v, 32));
> }
>
> static inline void stq_be_p(void *ptr, uint64_t v)
> {
> - stq_he_p(ptr, be_bswap(v, 64));
> + stq_unaligned_p(ptr, be_bswap(v, 64));
> }
>
> /* Store v to p as a sz byte value in host order */
> diff --git a/include/qemu/memory_ldst_unaligned.h
> b/include/qemu/memory_ldst_unaligned.h
> index f6b64e8fe9c..6b0cc3e2777 100644
> --- a/include/qemu/memory_ldst_unaligned.h
> +++ b/include/qemu/memory_ldst_unaligned.h
> @@ -16,50 +16,50 @@
> * of good performance.
> */
>
> -static inline int lduw_he_p(const void *ptr)
> +static inline int lduw_unaligned_p(const void *ptr)
> {
> uint16_t r;
> __builtin_memcpy(&r, ptr, sizeof(r));
> return r;
> }
>
> -static inline int ldsw_he_p(const void *ptr)
> +static inline int ldsw_unaligned_p(const void *ptr)
> {
> int16_t r;
> __builtin_memcpy(&r, ptr, sizeof(r));
> return r;
> }
>
> -static inline void stw_he_p(void *ptr, uint16_t v)
> +static inline void stw_unaligned_p(void *ptr, uint16_t v)
> {
> __builtin_memcpy(ptr, &v, sizeof(v));
> }
>
> -static inline void st24_he_p(void *ptr, uint32_t v)
> +static inline void st24_unaligned_p(void *ptr, uint32_t v)
> {
> __builtin_memcpy(ptr, &v, 3);
> }
>
> -static inline int ldl_he_p(const void *ptr)
> +static inline int ldl_unaligned_p(const void *ptr)
> {
> int32_t r;
> __builtin_memcpy(&r, ptr, sizeof(r));
> return r;
> }
>
> -static inline void stl_he_p(void *ptr, uint32_t v)
> +static inline void stl_unaligned_p(void *ptr, uint32_t v)
> {
> __builtin_memcpy(ptr, &v, sizeof(v));
> }
>
> -static inline uint64_t ldq_he_p(const void *ptr)
> +static inline uint64_t ldq_unaligned_p(const void *ptr)
> {
> uint64_t r;
> __builtin_memcpy(&r, ptr, sizeof(r));
> return r;
> }
>
> -static inline void stq_he_p(void *ptr, uint64_t v)
> +static inline void stq_unaligned_p(void *ptr, uint64_t v)
> {
> __builtin_memcpy(ptr, &v, sizeof(v));
> }
> diff --git a/accel/tcg/translator.c b/accel/tcg/translator.c
> index 86cdd70c47f..497128c96c5 100644
> --- a/accel/tcg/translator.c
> +++ b/accel/tcg/translator.c
> @@ -342,14 +342,14 @@ static bool translator_ld(CPUArchState *env,
> DisasContextBase *db,
> case 2:
> if (QEMU_IS_ALIGNED(pc, 2)) {
> uint16_t t = qatomic_read((uint16_t *)host);
> - stw_he_p(dest, t);
> + stw_unaligned_p(dest, t);
> return true;
> }
> break;
> case 4:
> if (QEMU_IS_ALIGNED(pc, 4)) {
> uint32_t t = qatomic_read((uint32_t *)host);
> - stl_he_p(dest, t);
> + stl_unaligned_p(dest, t);
> return true;
> }
> break;
> @@ -357,7 +357,7 @@ static bool translator_ld(CPUArchState *env,
> DisasContextBase *db,
> case 8:
> if (QEMU_IS_ALIGNED(pc, 8)) {
> uint64_t t = qatomic_read__nocheck((uint64_t *)host);
> - stq_he_p(dest, t);
> + stq_unaligned_p(dest, t);
> return true;
> }
> break;
> diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c
> index 24a3c3e942f..333d78d7455 100644
> --- a/hw/display/ati_2d.c
> +++ b/hw/display/ati_2d.c
> @@ -222,7 +222,7 @@ void ati_2d_blt(ATIVGAState *s)
> for (y = 0; y < s->regs.dst_height; y++) {
> i = dst_x * bypp + (dst_y + y) * dst_pitch;
> for (x = 0; x < s->regs.dst_width; x++, i += bypp) {
> - stn_he_p(&dst_bits[i], bypp, filler);
> + stn_unaligned_p(&dst_bits[i], bypp, filler);
> }
> }
> }
> diff --git a/hw/display/sm501.c b/hw/display/sm501.c
> index 51efe2ad41f..776fedebb13 100644
> --- a/hw/display/sm501.c
> +++ b/hw/display/sm501.c
> @@ -768,7 +768,7 @@ static void sm501_2d_operation(SM501State *s)
> for (y = 0; y < height; y++) {
> i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
> for (x = 0; x < width; x++, i += bypp) {
> - stn_he_p(&d[i], bypp, ~ldn_he_p(&d[i], bypp));
> + stn_unaligned_p(&d[i], bypp, ~ldn_unaligned_p(&d[i],
> bypp));
> }
> }
> } else if (!rop_mode && rop == 0x99) {
> @@ -781,8 +781,9 @@ static void sm501_2d_operation(SM501State *s)
> i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
> j = (src_x + (src_y + y) * src_pitch) * bypp;
> for (x = 0; x < width; x++, i += bypp, j += bypp) {
> - stn_he_p(&d[i], bypp,
> - ~(ldn_he_p(&sp[j], bypp) ^ ldn_he_p(&d[i],
> bypp)));
> + stn_unaligned_p(&d[i], bypp,
> + ~(ldn_unaligned_p(&sp[j], bypp)
> + ^ ldn_unaligned_p(&d[i], bypp)));
> }
> }
> } else if (!rop_mode && rop == 0xee) {
> @@ -795,8 +796,9 @@ static void sm501_2d_operation(SM501State *s)
> i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
> j = (src_x + (src_y + y) * src_pitch) * bypp;
> for (x = 0; x < width; x++, i += bypp, j += bypp) {
> - stn_he_p(&d[i], bypp,
> - ldn_he_p(&sp[j], bypp) | ldn_he_p(&d[i],
> bypp));
> + stn_unaligned_p(&d[i], bypp,
> + ldn_unaligned_p(&sp[j], bypp)
> + | ldn_unaligned_p(&d[i], bypp));
> }
> }
> } else {
> @@ -818,8 +820,9 @@ static void sm501_2d_operation(SM501State *s)
> if (width == 1 && height == 1) {
> unsigned int si = (src_x + src_y * src_pitch) * bypp;
> unsigned int di = (dst_x + dst_y * dst_pitch) * bypp;
> - stn_he_p(&s->local_mem[dst_base + di], bypp,
> - ldn_he_p(&s->local_mem[src_base + si], bypp));
> + stn_unaligned_p(&s->local_mem[dst_base + di], bypp,
> + ldn_unaligned_p(&s->local_mem[src_base +
> si],
> + bypp));
> break;
> }
> /* If reverse blit do simple check for overlaps */
> @@ -917,7 +920,7 @@ static void sm501_2d_operation(SM501State *s)
> for (y = 0; y < height; y++) {
> i = (dst_x + (dst_y + y) * dst_pitch) * bypp;
> for (x = 0; x < width; x++, i += bypp) {
> - stn_he_p(&d[i], bypp, color);
> + stn_unaligned_p(&d[i], bypp, color);
> }
> }
> }
> diff --git a/hw/remote/vfio-user-obj.c b/hw/remote/vfio-user-obj.c
> index 9c9ac8b0d92..44adff10eb9 100644
> --- a/hw/remote/vfio-user-obj.c
> +++ b/hw/remote/vfio-user-obj.c
> @@ -392,7 +392,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t
> *buf, hwaddr offset,
> access_size = memory_access_size(mr, size, offset);
>
> if (is_write) {
> - val = ldn_he_p(ptr, access_size);
> + val = ldn_unaligned_p(ptr, access_size);
>
> result = memory_region_dispatch_write(mr, offset, val,
> size_memop(access_size),
> @@ -402,7 +402,7 @@ static int vfu_object_mr_rw(MemoryRegion *mr, uint8_t
> *buf, hwaddr offset,
> size_memop(access_size),
> MEMTXATTRS_UNSPECIFIED);
>
> - stn_he_p(ptr, access_size, val);
> + stn_unaligned_p(ptr, access_size, val);
> }
>
> if (release_lock) {
> diff --git a/hw/vmapple/virtio-blk.c b/hw/vmapple/virtio-blk.c
> index f5e8e92df75..805bcbc9467 100644
> --- a/hw/vmapple/virtio-blk.c
> +++ b/hw/vmapple/virtio-blk.c
> @@ -79,7 +79,7 @@ static void vmapple_virtio_blk_get_config(VirtIODevice
> *vdev, uint8_t *config)
> g_assert(dev->parent_obj.config_size >= endof(struct
> virtio_blk_config, zoned));
>
> /* Apple abuses the field for max_secure_erase_sectors as type id */
> - stl_he_p(&blkcfg->max_secure_erase_sectors, dev->apple_type);
> + stl_unaligned_p(&blkcfg->max_secure_erase_sectors, dev->apple_type);
> }
>
> static void vmapple_virtio_blk_class_init(ObjectClass *klass, const void
> *data)
> diff --git a/net/checksum.c b/net/checksum.c
> index 18be31c26e5..56314f5e76d 100644
> --- a/net/checksum.c
> +++ b/net/checksum.c
> @@ -110,7 +110,7 @@ void net_checksum_calculate(void *data, int length,
> int csum_flag)
>
> /* Calculate IP checksum */
> if (csum_flag & CSUM_IP) {
> - stw_he_p(&ip->ip_sum, 0);
> + stw_unaligned_p(&ip->ip_sum, 0);
> csum = net_raw_checksum((uint8_t *)ip, IP_HDR_GET_LEN(ip));
> stw_be_p(&ip->ip_sum, csum);
> }
> @@ -142,7 +142,7 @@ void net_checksum_calculate(void *data, int length,
> int csum_flag)
> }
>
> /* Set csum to 0 */
> - stw_he_p(&tcp->th_sum, 0);
> + stw_unaligned_p(&tcp->th_sum, 0);
>
> csum = net_checksum_tcpudp(ip_len, ip->ip_p,
> (uint8_t *)&ip->ip_src,
> @@ -166,7 +166,7 @@ void net_checksum_calculate(void *data, int length,
> int csum_flag)
> }
>
> /* Set csum to 0 */
> - stw_he_p(&udp->uh_sum, 0);
> + stw_unaligned_p(&udp->uh_sum, 0);
>
> csum = net_checksum_tcpudp(ip_len, ip->ip_p,
> (uint8_t *)&ip->ip_src,
> diff --git a/system/memory.c b/system/memory.c
> index 8b84661ae36..653d46e8e79 100644
> --- a/system/memory.c
> +++ b/system/memory.c
> @@ -1365,7 +1365,7 @@ static uint64_t memory_region_ram_device_read(void
> *opaque,
> hwaddr addr, unsigned size)
> {
> MemoryRegion *mr = opaque;
> - uint64_t data = ldn_he_p(mr->ram_block->host + addr, size);
> + uint64_t data = ldn_unaligned_p(mr->ram_block->host + addr, size);
>
> trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data,
> size);
>
> @@ -1379,7 +1379,7 @@ static void memory_region_ram_device_write(void
> *opaque, hwaddr addr,
>
> trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data,
> size);
>
> - stn_he_p(mr->ram_block->host + addr, size, data);
> + stn_unaligned_p(mr->ram_block->host + addr, size, data);
> }
>
> static const MemoryRegionOps ram_device_mem_ops = {
> diff --git a/system/physmem.c b/system/physmem.c
> index fb69cdb57d9..d62af6f2480 100644
> --- a/system/physmem.c
> +++ b/system/physmem.c
> @@ -3284,7 +3284,7 @@ static MemTxResult
> flatview_write_continue_step(MemTxAttrs attrs,
>
> /*
> * Assure Coverity (and ourselves) that we are not going to
> OVERRUN
> - * the buffer by following ldn_he_p().
> + * the buffer by following ldn_unaligned_p().
> */
> #ifdef QEMU_STATIC_ANALYSIS
> assert((*l == 1 && len >= 1) ||
> @@ -3292,7 +3292,7 @@ static MemTxResult
> flatview_write_continue_step(MemTxAttrs attrs,
> (*l == 4 && len >= 4) ||
> (*l == 8 && len >= 8));
> #endif
> - val = ldn_he_p(buf, *l);
> + val = ldn_unaligned_p(buf, *l);
> result = memory_region_dispatch_write(mr, mr_addr, val,
> size_memop(*l), attrs);
> if (release_lock) {
> @@ -3379,7 +3379,7 @@ static MemTxResult
> flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
>
> /*
> * Assure Coverity (and ourselves) that we are not going to
> OVERRUN
> - * the buffer by following stn_he_p().
> + * the buffer by following stn_unaligned_p().
> */
> #ifdef QEMU_STATIC_ANALYSIS
> assert((*l == 1 && len >= 1) ||
> @@ -3387,7 +3387,7 @@ static MemTxResult
> flatview_read_continue_step(MemTxAttrs attrs, uint8_t *buf,
> (*l == 4 && len >= 4) ||
> (*l == 8 && len >= 8));
> #endif
> - stn_he_p(buf, *l, val);
> + stn_unaligned_p(buf, *l, val);
>
> if (release_lock) {
> bql_unlock();
> diff --git a/ui/vnc-enc-tight.c b/ui/vnc-enc-tight.c
> index b645aebccef..62d825dc05f 100644
> --- a/ui/vnc-enc-tight.c
> +++ b/ui/vnc-enc-tight.c
> @@ -912,7 +912,7 @@ static void tight_pack24(VncState *vs, uint8_t *buf,
> size_t count, size_t *ret)
> }
>
> while (count--) {
> - pix = ldl_he_p(buf8);
> + pix = ldl_unaligned_p(buf8);
> *buf++ = (char)(pix >> rshift);
> *buf++ = (char)(pix >> gshift);
> *buf++ = (char)(pix >> bshift);
> diff --git a/util/bufferiszero.c b/util/bufferiszero.c
> index 9548dd3ad1b..d7775dc891d 100644
> --- a/util/bufferiszero.c
> +++ b/util/bufferiszero.c
> @@ -38,10 +38,10 @@ static bool buffer_is_zero_int_lt256(const void *buf,
> size_t len)
> * the beginning and end of the buffer.
> */
> if (unlikely(len <= 8)) {
> - return (ldl_he_p(buf) | ldl_he_p(buf + len - 4)) == 0;
> + return (ldl_unaligned_p(buf) | ldl_unaligned_p(buf + len - 4)) ==
> 0;
> }
>
> - t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
> + t = ldq_unaligned_p(buf) | ldq_unaligned_p(buf + len - 8);
> p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
> e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
>
> @@ -58,7 +58,7 @@ static bool buffer_is_zero_int_ge256(const void *buf,
> size_t len)
> * Use unaligned memory access functions to handle
> * the beginning and end of the buffer.
> */
> - uint64_t t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
> + uint64_t t = ldq_unaligned_p(buf) | ldq_unaligned_p(buf + len - 8);
> const uint64_t *p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
> const uint64_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
>
> diff --git a/accel/tcg/ldst_atomicity.c.inc
> b/accel/tcg/ldst_atomicity.c.inc
> index c735add2615..eeb18c716c0 100644
> --- a/accel/tcg/ldst_atomicity.c.inc
> +++ b/accel/tcg/ldst_atomicity.c.inc
> @@ -415,7 +415,7 @@ static uint16_t load_atom_2(CPUState *cpu, uintptr_t
> ra,
> atmax = required_atomicity(cpu, pi, memop);
> switch (atmax) {
> case MO_8:
> - return lduw_he_p(pv);
> + return lduw_unaligned_p(pv);
> case MO_16:
> /* The only case remaining is MO_ATOM_WITHIN16. */
> if (!HAVE_al8_fast && (pi & 3) == 1) {
> @@ -512,7 +512,7 @@ static uint64_t load_atom_8(CPUState *cpu, uintptr_t
> ra,
> }
> switch (atmax) {
> case MO_8:
> - return ldq_he_p(pv);
> + return ldq_unaligned_p(pv);
> case MO_16:
> return load_atom_8_by_2(pv);
> case MO_32:
> @@ -875,7 +875,7 @@ static void store_atom_2(CPUState *cpu, uintptr_t ra,
>
> atmax = required_atomicity(cpu, pi, memop);
> if (atmax == MO_8) {
> - stw_he_p(pv, val);
> + stw_unaligned_p(pv, val);
> return;
> }
>
> @@ -928,7 +928,7 @@ static void store_atom_4(CPUState *cpu, uintptr_t ra,
> atmax = required_atomicity(cpu, pi, memop);
> switch (atmax) {
> case MO_8:
> - stl_he_p(pv, val);
> + stl_unaligned_p(pv, val);
> return;
> case MO_16:
> store_atom_4_by_2(pv, val);
> @@ -996,7 +996,7 @@ static void store_atom_8(CPUState *cpu, uintptr_t ra,
> atmax = required_atomicity(cpu, pi, memop);
> switch (atmax) {
> case MO_8:
> - stq_he_p(pv, val);
> + stq_unaligned_p(pv, val);
> return;
> case MO_16:
> store_atom_8_by_2(pv, val);
> --
> 2.52.0
>
>
© 2016 - 2026 Red Hat, Inc.