There's a common pattern in QEMU where a function needs to perform
a data load or store of an N byte integer in a particular endianness.
At the moment this is handled by doing a switch() on the size and
calling the appropriate ld*_p or st*_p function for each size.
Provide a new family of functions ldn_*_p() and stn_*_p() which
take the size as an argument and do the switch() themselves.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
---
include/exec/cpu-all.h | 4 +++
include/qemu/bswap.h | 52 +++++++++++++++++++++++++++++++++++++
docs/devel/loads-stores.rst | 15 +++++++++++
3 files changed, 71 insertions(+)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index a635f532f97..07ec3808342 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -133,6 +133,8 @@ static inline void tswap64s(uint64_t *s)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
+#define ldn_p(p, sz) ldn_be_p(p, sz)
+#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
@@ -145,6 +147,8 @@ static inline void tswap64s(uint64_t *s)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
+#define ldn_p(p, sz) ldn_le_p(p, sz)
+#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
/* MMU memory access macros */
diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
index 3f28f661b15..a684c1a7a29 100644
--- a/include/qemu/bswap.h
+++ b/include/qemu/bswap.h
@@ -290,6 +290,15 @@ typedef union {
* For accessors that take a guest address rather than a
* host address, see the cpu_{ld,st}_* accessors defined in
* cpu_ldst.h.
+ *
+ * For cases where the size to be used is not fixed at compile time,
+ * there are
+ * stn{endian}_p(ptr, sz, val)
+ * which stores @val to @ptr as an @endian-order number @sz bytes in size
+ * and
+ * ldn{endian}_p(ptr, sz)
+ * which loads @sz bytes from @ptr as an unsigned @endian-order number
+ * and returns it in a uint64_t.
*/
static inline int ldub_p(const void *ptr)
@@ -495,6 +504,49 @@ static inline unsigned long leul_to_cpu(unsigned long v)
#endif
}
+/* Store v to p as a sz byte value in host order */
+#define DO_STN_LDN_P(END) \
+ static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \
+ { \
+ switch (sz) { \
+ case 1: \
+ stb_p(ptr, v); \
+ break; \
+ case 2: \
+ stw_ ## END ## _p(ptr, v); \
+ break; \
+ case 4: \
+ stl_ ## END ## _p(ptr, v); \
+ break; \
+ case 8: \
+ stq_ ## END ## _p(ptr, v); \
+ break; \
+ default: \
+ g_assert_not_reached(); \
+ } \
+ } \
+ static inline uint64_t ldn_## END ## _p(const void *ptr, int sz) \
+ { \
+ switch (sz) { \
+ case 1: \
+ return ldub_p(ptr); \
+ case 2: \
+ return lduw_ ## END ## _p(ptr); \
+ case 4: \
+ return (uint32_t)ldl_ ## END ## _p(ptr); \
+ case 8: \
+ return ldq_ ## END ## _p(ptr); \
+ default: \
+ g_assert_not_reached(); \
+ } \
+ }
+
+DO_STN_LDN_P(he)
+DO_STN_LDN_P(le)
+DO_STN_LDN_P(be)
+
+#undef DO_STN_LDN_P
+
#undef le_bswap
#undef be_bswap
#undef le_bswaps
diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
index 6a990cc2438..57d8c524bfe 100644
--- a/docs/devel/loads-stores.rst
+++ b/docs/devel/loads-stores.rst
@@ -53,9 +53,24 @@ The ``_{endian}`` infix is omitted for target-endian accesses.
The target endian accessors are only available to source
files which are built per-target.
+There are also functions which take the size as an argument:
+
+load: ``ldn{endian}_p(ptr, sz)``
+
+which performs an unsigned load of ``sz`` bytes from ``ptr``
+as an ``{endian}`` order value and returns it in a uint64_t.
+
+store: ``stn{endian}_p(ptr, sz, val)``
+
+which stores ``val`` to ``ptr`` as an ``{endian}`` order value
+of size ``sz`` bytes.
+
+
Regexes for git grep
- ``\<ldf\?[us]\?[bwlq]\(_[hbl]e\)\?_p\>``
- ``\<stf\?[bwlq]\(_[hbl]e\)\?_p\>``
+ - ``\<ldn_\([hbl]e\)\?_p\>``
+ - ``\<stn_\([hbl]e\)\?_p\>``
``cpu_{ld,st}_*``
~~~~~~~~~~~~~~~~~
--
2.17.1
Hi Peter,
On 06/11/2018 02:10 PM, Peter Maydell wrote:
> There's a common pattern in QEMU where a function needs to perform
> a data load or store of an N byte integer in a particular endianness.
> At the moment this is handled by doing a switch() on the size and
> calling the appropriate ld*_p or st*_p function for each size.
>
> Provide a new family of functions ldn_*_p() and stn_*_p() which
> take the size as an argument and do the switch() themselves.
>
> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
> ---
> include/exec/cpu-all.h | 4 +++
> include/qemu/bswap.h | 52 +++++++++++++++++++++++++++++++++++++
> docs/devel/loads-stores.rst | 15 +++++++++++
> 3 files changed, 71 insertions(+)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index a635f532f97..07ec3808342 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -133,6 +133,8 @@ static inline void tswap64s(uint64_t *s)
> #define stq_p(p, v) stq_be_p(p, v)
> #define stfl_p(p, v) stfl_be_p(p, v)
> #define stfq_p(p, v) stfq_be_p(p, v)
> +#define ldn_p(p, sz ldn_be_p(p, sz)
> +#define stn_p(p, sz, v) stn_be_p(p, sz, v)
> #else
> #define lduw_p(p) lduw_le_p(p)
> #define ldsw_p(p) ldsw_le_p(p)
> @@ -145,6 +147,8 @@ static inline void tswap64s(uint64_t *s)
> #define stq_p(p, v) stq_le_p(p, v)
> #define stfl_p(p, v) stfl_le_p(p, v)
> #define stfq_p(p, v) stfq_le_p(p, v)
> +#define ldn_p(p, sz) ldn_le_p(p, sz)
> +#define stn_p(p, sz, v) stn_le_p(p, sz, v)
> #endif
>
> /* MMU memory access macros */
> diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h
> index 3f28f661b15..a684c1a7a29 100644
> --- a/include/qemu/bswap.h
> +++ b/include/qemu/bswap.h
> @@ -290,6 +290,15 @@ typedef union {
> * For accessors that take a guest address rather than a
> * host address, see the cpu_{ld,st}_* accessors defined in
> * cpu_ldst.h.
> + *
> + * For cases where the size to be used is not fixed at compile time,
> + * there are
> + * stn{endian}_p(ptr, sz, val)
> + * which stores @val to @ptr as an @endian-order number @sz bytes in size
> + * and
> + * ldn{endian}_p(ptr, sz)
> + * which loads @sz bytes from @ptr as an unsigned @endian-order number
> + * and returns it in a uint64_t.
> */
>
> static inline int ldub_p(const void *ptr)
> @@ -495,6 +504,49 @@ static inline unsigned long leul_to_cpu(unsigned long v)
> #endif
> }
>
> +/* Store v to p as a sz byte value in host order */
> +#define DO_STN_LDN_P(END) \
> + static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \
> + { \
> + switch (sz) { \
> + case 1: \
> + stb_p(ptr, v); \
> + break; \
> + case 2: \
> + stw_ ## END ## _p(ptr, v); \
> + break; \
> + case 4: \
> + stl_ ## END ## _p(ptr, v); \
> + break; \
> + case 8: \
> + stq_ ## END ## _p(ptr, v); \
> + break; \
> + default: \
> + g_assert_not_reached(); \
As with the recent discussion with Markus about whether to use abort() or
g_assert_not_reached(), I'd prefer to keep abort() here (which is what
exec.c currently uses).
[http://lists.nongnu.org/archive/html/qemu-devel/2018-06/msg01869.html]
> + } \
> + } \
> + static inline uint64_t ldn_## END ## _p(const void *ptr, int sz) \
> + { \
> + switch (sz) { \
> + case 1: \
> + return ldub_p(ptr); \
> + case 2: \
> + return lduw_ ## END ## _p(ptr); \
> + case 4: \
> + return (uint32_t)ldl_ ## END ## _p(ptr); \
> + case 8: \
> + return ldq_ ## END ## _p(ptr); \
> + default: \
> + g_assert_not_reached(); \
Ditto.
> + } \
> + }
> +
> +DO_STN_LDN_P(he)
> +DO_STN_LDN_P(le)
> +DO_STN_LDN_P(be)
> +
> +#undef DO_STN_LDN_P
> +
> #undef le_bswap
> #undef be_bswap
> #undef le_bswaps
> diff --git a/docs/devel/loads-stores.rst b/docs/devel/loads-stores.rst
> index 6a990cc2438..57d8c524bfe 100644
> --- a/docs/devel/loads-stores.rst
> +++ b/docs/devel/loads-stores.rst
> @@ -53,9 +53,24 @@ The ``_{endian}`` infix is omitted for target-endian accesses.
> The target endian accessors are only available to source
> files which are built per-target.
>
> +There are also functions which take the size as an argument:
> +
> +load: ``ldn{endian}_p(ptr, sz)``
> +
> +which performs an unsigned load of ``sz`` bytes from ``ptr``
> +as an ``{endian}`` order value and returns it in a uint64_t.
> +
> +store: ``stn{endian}_p(ptr, sz, val)``
> +
> +which stores ``val`` to ``ptr`` as an ``{endian}`` order value
> +of size ``sz`` bytes.
> +
> +
> Regexes for git grep
> - ``\<ldf\?[us]\?[bwlq]\(_[hbl]e\)\?_p\>``
> - ``\<stf\?[bwlq]\(_[hbl]e\)\?_p\>``
> + - ``\<ldn_\([hbl]e\)?_p\>``
> + - ``\<stn_\([hbl]e\)?_p\>``
>
> ``cpu_{ld,st}_*``
> ~~~~~~~~~~~~~~~~~
>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
On 11 June 2018 at 18:43, Philippe Mathieu-Daudé <f4bug@amsat.org> wrote:
> Hi Peter,
>
> On 06/11/2018 02:10 PM, Peter Maydell wrote:
>> +/* Store v to p as a sz byte value in host order */
>> +#define DO_STN_LDN_P(END) \
>> + static inline void stn_## END ## _p(void *ptr, int sz, uint64_t v) \
>> + { \
>> + switch (sz) { \
>> + case 1: \
>> + stb_p(ptr, v); \
>> + break; \
>> + case 2: \
>> + stw_ ## END ## _p(ptr, v); \
>> + break; \
>> + case 4: \
>> + stl_ ## END ## _p(ptr, v); \
>> + break; \
>> + case 8: \
>> + stq_ ## END ## _p(ptr, v); \
>> + break; \
>> + default: \
>> + g_assert_not_reached(); \
>
> As with the recent discussion with Markus about whether to use abort() or
> g_assert_not_reached(), I'd prefer to keep abort() here (which is what
> exec.c currently uses).
>
> [http://lists.nongnu.org/archive/html/qemu-devel/2018-06/msg01869.html]
I couldn't find anything obvious about the merits of abort() vs
g_assert_not_reached() in that email. g_assert_not_reached() is clear
about what it's doing and we use it in hundreds of places in the
codebase, whereas abort() is used for various things, including
situations where flow-of-execution clearly can get to that location.
So I prefer g_assert_not_reached().
thanks
-- PMM
On 11 June 2018 at 18:10, Peter Maydell <peter.maydell@linaro.org> wrote:
> There's a common pattern in QEMU where a function needs to perform
> a data load or store of an N byte integer in a particular endianness.
> At the moment this is handled by doing a switch() on the size and
> calling the appropriate ld*_p or st*_p function for each size.
>
> Provide a new family of functions ldn_*_p() and stn_*_p() which
> take the size as an argument and do the switch() themselves.
>
> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
> ---
> include/exec/cpu-all.h | 4 +++
> include/qemu/bswap.h | 52 +++++++++++++++++++++++++++++++++++++
> docs/devel/loads-stores.rst | 15 +++++++++++
> 3 files changed, 71 insertions(+)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index a635f532f97..07ec3808342 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -133,6 +133,8 @@ static inline void tswap64s(uint64_t *s)
> #define stq_p(p, v) stq_be_p(p, v)
> #define stfl_p(p, v) stfl_be_p(p, v)
> #define stfq_p(p, v) stfq_be_p(p, v)
> +#define ldn_p(p, sz ldn_be_p(p, sz)

Silly typo here -- missing ')'.

thanks
-- PMM
On 06/11/2018 07:52 AM, Peter Maydell wrote:
> On 11 June 2018 at 18:10, Peter Maydell <peter.maydell@linaro.org> wrote:
>> There's a common pattern in QEMU where a function needs to perform
>> a data load or store of an N byte integer in a particular endianness.
>> At the moment this is handled by doing a switch() on the size and
>> calling the appropriate ld*_p or st*_p function for each size.
>>
>> Provide a new family of functions ldn_*_p() and stn_*_p() which
>> take the size as an argument and do the switch() themselves.
>>
>> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
>> ---
>> include/exec/cpu-all.h | 4 +++
>> include/qemu/bswap.h | 52 +++++++++++++++++++++++++++++++++++++
>> docs/devel/loads-stores.rst | 15 +++++++++++
>> 3 files changed, 71 insertions(+)
>>
>> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
>> index a635f532f97..07ec3808342 100644
>> --- a/include/exec/cpu-all.h
>> +++ b/include/exec/cpu-all.h
>> @@ -133,6 +133,8 @@ static inline void tswap64s(uint64_t *s)
>> #define stq_p(p, v) stq_be_p(p, v)
>> #define stfl_p(p, v) stfl_be_p(p, v)
>> #define stfq_p(p, v) stfq_be_p(p, v)
>> +#define ldn_p(p, sz ldn_be_p(p, sz)
>
> Silly typo here -- missing ')'.

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

r~
© 2016 - 2025 Red Hat, Inc.