All three of these records have tail padding, which leaks stack rubble into
the trace buffer. Introduce explicit _pad fields, and let the compiler zero
them automatically via the designated initialisers.
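
(Illustration only, not part of the patch: a standalone sketch of the
technique, with a made-up record layout and a hypothetical emit() helper
standing in for __trace_var().  The key point is that members omitted from a
designated initialiser are zero-initialised, so turning implicit tail padding
into a named _pad member guarantees no stack rubble is copied out.)

#include <stdint.h>
#include <stdio.h>

static void emit(const void *buf, size_t len)  /* stand-in for __trace_var() */
{
    const unsigned char *p = buf;
    size_t i;

    for ( i = 0; i < len; ++i )
        printf("%02x ", p[i]);
    printf("\n");
}

int main(void)
{
    struct {
        uint64_t avgload;
        uint16_t id;
        uint16_t _pad[3];  /* was 6 bytes of compiler-inserted tail padding */
    } d = {
        .avgload = 0x1234,
        .id = 7,
        /* _pad is deliberately not named: unnamed members are
         * zero-initialised, so the trailing 6 bytes are guaranteed zero. */
    };

    emit(&d, sizeof(d));
    return 0;
}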
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: George Dunlap <George.Dunlap@eu.citrix.com>
CC: Ian Jackson <iwj@xenproject.org>
CC: Jan Beulich <JBeulich@suse.com>
CC: Stefano Stabellini <sstabellini@kernel.org>
CC: Wei Liu <wl@xen.org>
CC: Julien Grall <julien@xen.org>
CC: Dario Faggioli <dfaggioli@suse.com>
---
xen/common/sched/credit2.c | 46 +++++++++++++++++++++++++++-------------------
1 file changed, 27 insertions(+), 19 deletions(-)
diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index 6396b38e044c..d5f41bc3d603 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -1106,12 +1106,14 @@ _runq_assign(struct csched2_unit *svc, struct csched2_runqueue_data *rqd)
if ( unlikely(tb_init_done) )
{
struct {
- unsigned unit:16, dom:16;
- unsigned rqi:16;
- } d;
- d.dom = svc->unit->domain->domain_id;
- d.unit = svc->unit->unit_id;
- d.rqi=rqd->id;
+ uint16_t unit, dom;
+ uint16_t rqi, _pad;
+ } d = {
+ .unit = svc->unit->unit_id,
+ .dom = svc->unit->domain->domain_id,
+ .rqi = rqd->id,
+ };
+
__trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1,
sizeof(d),
(unsigned char *)&d);
@@ -1336,13 +1338,16 @@ update_runq_load(const struct scheduler *ops,
{
struct {
uint64_t rq_avgload, b_avgload;
- unsigned rq_load:16, rq_id:8, shift:8;
- } d;
- d.rq_id = rqd->id;
- d.rq_load = rqd->load;
- d.rq_avgload = rqd->avgload;
- d.b_avgload = rqd->b_avgload;
- d.shift = P;
+ uint16_t rq_load; uint8_t rq_id, shift;
+ uint32_t _pad;
+ } d = {
+ .rq_avgload = rqd->avgload,
+ .b_avgload = rqd->b_avgload,
+ .rq_load = rqd->load,
+ .rq_id = rqd->id,
+ .shift = P,
+ };
+
__trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
sizeof(d),
(unsigned char *)&d);
@@ -2799,12 +2804,15 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
{
struct {
uint64_t lb_avgload, ob_avgload;
- unsigned lrq_id:16, orq_id:16;
- } d;
- d.lrq_id = st.lrqd->id;
- d.lb_avgload = st.lrqd->b_avgload;
- d.orq_id = st.orqd->id;
- d.ob_avgload = st.orqd->b_avgload;
+ uint16_t lrq_id, orq_id;
+ uint32_t _pad;
+ } d = {
+ .lb_avgload = st.lrqd->b_avgload,
+ .ob_avgload = st.orqd->b_avgload,
+ .lrq_id = st.lrqd->id,
+ .orq_id = st.orqd->id,
+ };
+
__trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
sizeof(d),
(unsigned char *)&d);
--
2.11.0
On 17.09.2021 10:45, Andrew Cooper wrote:
> @@ -1336,13 +1338,16 @@ update_runq_load(const struct scheduler *ops,
> {
> struct {
> uint64_t rq_avgload, b_avgload;
> - unsigned rq_load:16, rq_id:8, shift:8;
> - } d;
> - d.rq_id = rqd->id;
> - d.rq_load = rqd->load;
> - d.rq_avgload = rqd->avgload;
> - d.b_avgload = rqd->b_avgload;
> - d.shift = P;
> + uint16_t rq_load; uint8_t rq_id, shift;
Split into two lines? Preferably with this adjustment:
Reviewed-by: Jan Beulich <jbeulich@suse.com>
I'd like to note that the remaining uses of "unsigned int" or plain
"int" in some of the instances you don't touch assume
that int is exactly 32 bits wide, while generally we assume only that it
is at least 32 bits wide.
This is one of the cases where fixed width types are imo mandatory
to use.
Jan
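
(Illustrative aside, not code from the tree: a hypothetical pair of records
showing the difference.  On typical ABIs the bit-field record's size follows
sizeof(unsigned int), so it is only 4 bytes when int happens to be exactly
32 bits wide; the fixed-width variant is 4 bytes on any conforming
implementation.)

#include <stdint.h>
#include <stdio.h>

struct bitfield_rec {
    unsigned lrq_id:16, orq_id:16;   /* allocation unit is unsigned int */
};

struct fixed_rec {
    uint16_t lrq_id, orq_id;         /* exactly 16 bits each, everywhere */
};

int main(void)
{
    printf("bitfield_rec: %zu bytes\n", sizeof(struct bitfield_rec));
    printf("fixed_rec:    %zu bytes\n", sizeof(struct fixed_rec));
    return 0;
}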
On 17/09/2021 14:10, Jan Beulich wrote:
> On 17.09.2021 10:45, Andrew Cooper wrote:
>> @@ -1336,13 +1338,16 @@ update_runq_load(const struct scheduler *ops,
>> {
>> struct {
>> uint64_t rq_avgload, b_avgload;
>> - unsigned rq_load:16, rq_id:8, shift:8;
>> - } d;
>> - d.rq_id = rqd->id;
>> - d.rq_load = rqd->load;
>> - d.rq_avgload = rqd->avgload;
>> - d.b_avgload = rqd->b_avgload;
>> - d.shift = P;
>> + uint16_t rq_load; uint8_t rq_id, shift;
> Split into two lines? Preferably with this adjustment:
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
Thanks.
> I'd like to note that the remaining uses of "unsigned int" or plain
> "int" in some of the instances you don't touch assume
> that int is exactly 32 bits wide, while generally we assume only that it
> is at least 32 bits wide.
> This is one of the cases where fixed width types are imo mandatory
> to use.
See patch 5. There was far too much cleanup to merge with this patch.
~Andrew