All three of these records have tail padding, which leaks stack rubble into
the trace buffer.  Make the padding explicit with a _pad field, and switch to
designated initialisers so the compiler zeroes _pad (and any other omitted
field) automatically.

Signed-off-by: Andrew Cooper <[email protected]>
---
CC: George Dunlap <[email protected]>
CC: Ian Jackson <[email protected]>
CC: Jan Beulich <[email protected]>
CC: Stefano Stabellini <[email protected]>
CC: Wei Liu <[email protected]>
CC: Julien Grall <[email protected]>
CC: Dario Faggioli <[email protected]>
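
For reference only (not part of the patch): a minimal sketch of the mechanism
being relied on.  Naming the padding turns it into a real struct member, and C
guarantees that members omitted from an initialiser are zero-initialised,
whereas an uninitialised stack object leaves its padding bytes holding
whatever was there before.  The struct and function names below are
illustrative, not taken from the Xen tree.

    #include <stdint.h>
    #include <string.h>

    /* Layout mirroring the problem: on an ABI where uint64_t has 8-byte
     * alignment, sizeof(struct leaky) == 16, with 6 implicit tail padding
     * bytes after 'small'. */
    struct leaky {
        uint64_t big;
        uint16_t small;
    };

    /* The fix: make the padding an explicit member.  8 + 2 + 6 == 16, so
     * no implicit padding remains. */
    struct sealed {
        uint64_t big;
        uint16_t small;
        uint16_t _pad[3];
    };

    void demo(unsigned char *trace_buf)
    {
        struct leaky l;            /* uninitialised stack object */

        l.big = 1;
        l.small = 2;
        /* Bytes 10..15 of 'l' still hold stack rubble; this copy leaks
         * them into the trace buffer. */
        memcpy(trace_buf, &l, sizeof(l));

        struct sealed s = {
            .big   = 1,
            .small = 2,
            /* ._pad omitted: members not named in an initialiser are
             * zeroed (C99 6.7.8p21). */
        };
        /* Every byte of 's' is now well defined. */
        memcpy(trace_buf, &s, sizeof(s));
    }

The same reasoning applies to each record below: the explicit _pad field is
never named in the initialiser, so it is guaranteed to read as zero.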
---
 xen/common/sched/credit2.c | 46 +++++++++++++++++++++++++++-------------------
 1 file changed, 27 insertions(+), 19 deletions(-)

diff --git a/xen/common/sched/credit2.c b/xen/common/sched/credit2.c
index 6396b38e044c..d5f41bc3d603 100644
--- a/xen/common/sched/credit2.c
+++ b/xen/common/sched/credit2.c
@@ -1106,12 +1106,14 @@ _runq_assign(struct csched2_unit *svc, struct csched2_runqueue_data *rqd)
     if ( unlikely(tb_init_done) )
     {
         struct {
-            unsigned unit:16, dom:16;
-            unsigned rqi:16;
-        } d;
-        d.dom = svc->unit->domain->domain_id;
-        d.unit = svc->unit->unit_id;
-        d.rqi=rqd->id;
+            uint16_t unit, dom;
+            uint16_t rqi, _pad;
+        } d = {
+            .unit = svc->unit->unit_id,
+            .dom  = svc->unit->domain->domain_id,
+            .rqi  = rqd->id,
+        };
+
         __trace_var(TRC_CSCHED2_RUNQ_ASSIGN, 1,
                     sizeof(d),
                     (unsigned char *)&d);
@@ -1336,13 +1338,16 @@ update_runq_load(const struct scheduler *ops,
     {
         struct {
             uint64_t rq_avgload, b_avgload;
-            unsigned rq_load:16, rq_id:8, shift:8;
-        } d;
-        d.rq_id = rqd->id;
-        d.rq_load = rqd->load;
-        d.rq_avgload = rqd->avgload;
-        d.b_avgload = rqd->b_avgload;
-        d.shift = P;
+            uint16_t rq_load; uint8_t rq_id, shift;
+            uint32_t _pad;
+        } d = {
+            .rq_avgload  = rqd->avgload,
+            .b_avgload   = rqd->b_avgload,
+            .rq_load     = rqd->load,
+            .rq_id       = rqd->id,
+            .shift       = P,
+        };
+
         __trace_var(TRC_CSCHED2_UPDATE_RUNQ_LOAD, 1,
                     sizeof(d),
                     (unsigned char *)&d);
@@ -2799,12 +2804,15 @@ static void balance_load(const struct scheduler *ops, int cpu, s_time_t now)
     {
         struct {
             uint64_t lb_avgload, ob_avgload;
-            unsigned lrq_id:16, orq_id:16;
-        } d;
-        d.lrq_id = st.lrqd->id;
-        d.lb_avgload = st.lrqd->b_avgload;
-        d.orq_id = st.orqd->id;
-        d.ob_avgload = st.orqd->b_avgload;
+            uint16_t lrq_id, orq_id;
+            uint32_t _pad;
+        } d = {
+            .lb_avgload  = st.lrqd->b_avgload,
+            .ob_avgload  = st.orqd->b_avgload,
+            .lrq_id      = st.lrqd->id,
+            .orq_id      = st.orqd->id,
+        };
+
         __trace_var(TRC_CSCHED2_LOAD_BALANCE, 1,
                     sizeof(d),
                     (unsigned char *)&d);
-- 
2.11.0

