[PATCH 1/2] don't leave locks uninitialized upon allocation failure; [PATCH 2/2] don't pass name into registration function — Jan
Even if a specific struct lock_profile instance can't be allocated, the lock itself should still be functional. As this isn't a production use feature, also log a message in the event that the profiling struct can't be allocated. Fixes: d98feda5c756 ("Make lock profiling usable again") Signed-off-by: Jan Beulich <jbeulich@suse.com> --- a/xen/include/xen/spinlock.h +++ b/xen/include/xen/spinlock.h @@ -103,10 +103,16 @@ struct lock_profile_qhead { do { \ struct lock_profile *prof; \ prof = xzalloc(struct lock_profile); \ - if (!prof) break; \ + (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof); \ + if ( !prof ) \ + { \ + printk(XENLOG_WARNING \ + "lock profiling unavailable for %p(%d)'s " #l "\n", \ + s, (s)->profile_head.idx); \ + break; \ + } \ prof->name = #l; \ prof->lock = &(s)->l; \ - (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof); \ prof->next = (s)->profile_head.elem_q; \ (s)->profile_head.elem_q = prof; \ } while(0)
On 23/07/2020 11:51, Jan Beulich wrote: > Even if a specific struct lock_profile instance can't be allocated, the > lock itself should still be functional. As this isn't a production use > feature, also log a message in the event that the profiling struct can't > be allocated. > > Fixes: d98feda5c756 ("Make lock profiling usable again") > Signed-off-by: Jan Beulich <jbeulich@suse.com> > > --- a/xen/include/xen/spinlock.h > +++ b/xen/include/xen/spinlock.h > @@ -103,10 +103,16 @@ struct lock_profile_qhead { > do { \ > struct lock_profile *prof; \ > prof = xzalloc(struct lock_profile); \ > - if (!prof) break; \ > + (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof); \ > + if ( !prof ) \ > + { \ > + printk(XENLOG_WARNING \ > + "lock profiling unavailable for %p(%d)'s " #l "\n", \ > + s, (s)->profile_head.idx); \ You'll end up with far less .rodata if you use %s and pass #l in as a parameter. Either way, Acked-by: Andrew Cooper <andrew.cooper3@citrix.com> > + break; \ > + } \ > prof->name = #l; \ > prof->lock = &(s)->l; \ > - (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof); \ > prof->next = (s)->profile_head.elem_q; \ > (s)->profile_head.elem_q = prof; \ > } while(0) >
On 23.07.2020 13:23, Andrew Cooper wrote: > On 23/07/2020 11:51, Jan Beulich wrote: >> Even if a specific struct lock_profile instance can't be allocated, the >> lock itself should still be functional. As this isn't a production use >> feature, also log a message in the event that the profiling struct can't >> be allocated. >> >> Fixes: d98feda5c756 ("Make lock profiling usable again") >> Signed-off-by: Jan Beulich <jbeulich@suse.com> >> >> --- a/xen/include/xen/spinlock.h >> +++ b/xen/include/xen/spinlock.h >> @@ -103,10 +103,16 @@ struct lock_profile_qhead { >> do { \ >> struct lock_profile *prof; \ >> prof = xzalloc(struct lock_profile); \ >> - if (!prof) break; \ >> + (s)->l = (spinlock_t)_SPIN_LOCK_UNLOCKED(prof); \ >> + if ( !prof ) \ >> + { \ >> + printk(XENLOG_WARNING \ >> + "lock profiling unavailable for %p(%d)'s " #l "\n", \ >> + s, (s)->profile_head.idx); \ > > You'll end up with far less .rodata if you use %s and pass #l in as a > parameter. Well, "far less" perhaps goes a little far, as we currently have just three use sites, but I see your point and hence will switch. > Either way, Acked-by: Andrew Cooper <andrew.cooper3@citrix.com> Thanks, Jan
The type uniquely identifies the associated name, hence the name fields can be statically initialized. Also constify not just the involved struct field, but also struct lock_profile's. Rather than specifying lock_profile_ancs[]' dimension at definition time, add a suitable build time check, such that at least missing tail additions to the initializer can be spotted easily. Signed-off-by: Jan Beulich <jbeulich@suse.com> --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -392,7 +392,7 @@ struct domain *domain_create(domid_t dom d->max_vcpus = config->max_vcpus; } - lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain"); + lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid); if ( (err = xsm_alloc_security_domain(d)) != 0 ) goto fail; --- a/xen/common/spinlock.c +++ b/xen/common/spinlock.c @@ -338,7 +338,7 @@ void _spin_unlock_recursive(spinlock_t * struct lock_profile_anc { struct lock_profile_qhead *head_q; /* first head of this type */ - char *name; /* descriptive string for print */ + const char *name; /* descriptive string for print */ }; typedef void lock_profile_subfunc( @@ -348,7 +348,10 @@ extern struct lock_profile *__lock_profi extern struct lock_profile *__lock_profile_end; static s_time_t lock_profile_start; -static struct lock_profile_anc lock_profile_ancs[LOCKPROF_TYPE_N]; +static struct lock_profile_anc lock_profile_ancs[] = { + [LOCKPROF_TYPE_GLOBAL] = { .name = "Global" }, + [LOCKPROF_TYPE_PERDOM] = { .name = "Domain" }, +}; static struct lock_profile_qhead lock_profile_glb_q; static spinlock_t lock_profile_lock = SPIN_LOCK_UNLOCKED; @@ -473,13 +476,12 @@ int spinlock_profile_control(struct xen_ } void _lock_profile_register_struct( - int32_t type, struct lock_profile_qhead *qhead, int32_t idx, char *name) + int32_t type, struct lock_profile_qhead *qhead, int32_t idx) { qhead->idx = idx; spin_lock(&lock_profile_lock); qhead->head_q = lock_profile_ancs[type].head_q; lock_profile_ancs[type].head_q = qhead; - 
lock_profile_ancs[type].name = name; spin_unlock(&lock_profile_lock); } @@ -504,6 +506,8 @@ static int __init lock_prof_init(void) { struct lock_profile **q; + BUILD_BUG_ON(ARRAY_SIZE(lock_profile_ancs) != LOCKPROF_TYPE_N); + for ( q = &__lock_profile_start; q < &__lock_profile_end; q++ ) { (*q)->next = lock_profile_glb_q.elem_q; @@ -511,9 +515,8 @@ static int __init lock_prof_init(void) (*q)->lock->profile = *q; } - _lock_profile_register_struct( - LOCKPROF_TYPE_GLOBAL, &lock_profile_glb_q, - 0, "Global lock"); + _lock_profile_register_struct(LOCKPROF_TYPE_GLOBAL, + &lock_profile_glb_q, 0); return 0; } --- a/xen/include/xen/spinlock.h +++ b/xen/include/xen/spinlock.h @@ -72,7 +72,7 @@ struct spinlock; struct lock_profile { struct lock_profile *next; /* forward link */ - char *name; /* lock name */ + const char *name; /* lock name */ struct spinlock *lock; /* the lock itself */ u64 lock_cnt; /* # of complete locking ops */ u64 block_cnt; /* # of complete wait for lock */ @@ -118,11 +118,11 @@ struct lock_profile_qhead { } while(0) void _lock_profile_register_struct( - int32_t, struct lock_profile_qhead *, int32_t, char *); + int32_t, struct lock_profile_qhead *, int32_t); void _lock_profile_deregister_struct(int32_t, struct lock_profile_qhead *); -#define lock_profile_register_struct(type, ptr, idx, print) \ - _lock_profile_register_struct(type, &((ptr)->profile_head), idx, print) +#define lock_profile_register_struct(type, ptr, idx) \ + _lock_profile_register_struct(type, &((ptr)->profile_head), idx) #define lock_profile_deregister_struct(type, ptr) \ _lock_profile_deregister_struct(type, &((ptr)->profile_head)) @@ -138,7 +138,7 @@ struct lock_profile_qhead { }; #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l)) -#define lock_profile_register_struct(type, ptr, idx, print) +#define lock_profile_register_struct(type, ptr, idx) #define lock_profile_deregister_struct(type, ptr) #define 
spinlock_profile_printall(key)
On 23/07/2020 11:52, Jan Beulich wrote: > The type uniquely identifies the associated name, hence the name fields > can be statically initialized. > > Also constify not just the involved struct field, but also struct > lock_profile's. Rather than specifying lock_profile_ancs[]' dimension at > definition time, add a suitable build time check, such that at least > missing tail additions to the initializer can be spotted easily. > > Signed-off-by: Jan Beulich <jbeulich@suse.com> Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
© 2016 - 2024 Red Hat, Inc.