The cpupool id is an unsigned value in the public interface header, so
there is no reason why it is a signed value in struct cpupool.
Switch it to unsigned int.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- new patch
---
xen/common/sched/core.c | 2 +-
xen/common/sched/cpupool.c | 48 +++++++++++++++++++-------------------
xen/common/sched/private.h | 8 +++----
xen/include/xen/sched.h | 4 ++--
4 files changed, 31 insertions(+), 31 deletions(-)
diff --git a/xen/common/sched/core.c b/xen/common/sched/core.c
index f8c81592af..6063f6d9ea 100644
--- a/xen/common/sched/core.c
+++ b/xen/common/sched/core.c
@@ -757,7 +757,7 @@ void sched_destroy_vcpu(struct vcpu *v)
}
}
-int sched_init_domain(struct domain *d, int poolid)
+int sched_init_domain(struct domain *d, unsigned int poolid)
{
void *sdom;
int ret;
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 84f326ea63..01fa71dd00 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -187,7 +187,7 @@ static struct cpupool *alloc_cpupool_struct(void)
* the searched id is returned
* returns NULL if not found.
*/
-static struct cpupool *__cpupool_find_by_id(int id, bool exact)
+static struct cpupool *__cpupool_find_by_id(unsigned int id, bool exact)
{
struct cpupool **q;
@@ -200,12 +200,12 @@ static struct cpupool *__cpupool_find_by_id(int id, bool exact)
return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? *q : NULL;
}
-static struct cpupool *cpupool_find_by_id(int poolid)
+static struct cpupool *cpupool_find_by_id(unsigned int poolid)
{
return __cpupool_find_by_id(poolid, true);
}
-static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
+static struct cpupool *__cpupool_get_by_id(unsigned int poolid, bool exact)
{
struct cpupool *c;
spin_lock(&cpupool_lock);
@@ -216,12 +216,12 @@ static struct cpupool *__cpupool_get_by_id(int poolid, bool exact)
return c;
}
-struct cpupool *cpupool_get_by_id(int poolid)
+struct cpupool *cpupool_get_by_id(unsigned int poolid)
{
return __cpupool_get_by_id(poolid, true);
}
-static struct cpupool *cpupool_get_next_by_id(int poolid)
+static struct cpupool *cpupool_get_next_by_id(unsigned int poolid)
{
return __cpupool_get_by_id(poolid, false);
}
@@ -243,11 +243,11 @@ void cpupool_put(struct cpupool *pool)
* - unknown scheduler
*/
static struct cpupool *cpupool_create(
- int poolid, unsigned int sched_id, int *perr)
+ unsigned int poolid, unsigned int sched_id, int *perr)
{
struct cpupool *c;
struct cpupool **q;
- int last = 0;
+ unsigned int last = 0;
*perr = -ENOMEM;
if ( (c = alloc_cpupool_struct()) == NULL )
@@ -256,7 +256,7 @@ static struct cpupool *cpupool_create(
/* One reference for caller, one reference for cpupool_destroy(). */
atomic_set(&c->refcnt, 2);
- debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
+ debugtrace_printk("cpupool_create(pool=%u,sched=%u)\n", poolid, sched_id);
spin_lock(&cpupool_lock);
@@ -295,7 +295,7 @@ static struct cpupool *cpupool_create(
spin_unlock(&cpupool_lock);
- debugtrace_printk("Created cpupool %d with scheduler %s (%s)\n",
+ debugtrace_printk("Created cpupool %u with scheduler %s (%s)\n",
c->cpupool_id, c->sched->name, c->sched->opt_name);
*perr = 0;
@@ -337,7 +337,7 @@ static int cpupool_destroy(struct cpupool *c)
cpupool_put(c);
- debugtrace_printk("cpupool_destroy(pool=%d)\n", c->cpupool_id);
+ debugtrace_printk("cpupool_destroy(pool=%u)\n", c->cpupool_id);
return 0;
}
@@ -521,7 +521,7 @@ static long cpupool_unassign_cpu_helper(void *info)
struct cpupool *c = info;
long ret;
- debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ debugtrace_printk("cpupool_unassign_cpu(pool=%u,cpu=%d)\n",
cpupool_cpu_moving->cpupool_id, cpupool_moving_cpu);
spin_lock(&cpupool_lock);
@@ -551,7 +551,7 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
int ret;
unsigned int master_cpu;
- debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d)\n",
+ debugtrace_printk("cpupool_unassign_cpu(pool=%u,cpu=%d)\n",
c->cpupool_id, cpu);
if ( !cpu_online(cpu) )
@@ -561,7 +561,7 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
ret = cpupool_unassign_cpu_start(c, master_cpu);
if ( ret )
{
- debugtrace_printk("cpupool_unassign_cpu(pool=%d,cpu=%d) ret %d\n",
+ debugtrace_printk("cpupool_unassign_cpu(pool=%u,cpu=%d) ret %d\n",
c->cpupool_id, cpu, ret);
return ret;
}
@@ -582,7 +582,7 @@ static int cpupool_unassign_cpu(struct cpupool *c, unsigned int cpu)
* - pool does not exist
* - no cpu assigned to pool
*/
-int cpupool_add_domain(struct domain *d, int poolid)
+int cpupool_add_domain(struct domain *d, unsigned int poolid)
{
struct cpupool *c;
int rc;
@@ -604,7 +604,7 @@ int cpupool_add_domain(struct domain *d, int poolid)
rc = 0;
}
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool_add_domain(dom=%d,pool=%d) n_dom %d rc %d\n",
+ debugtrace_printk("cpupool_add_domain(dom=%d,pool=%u) n_dom %d rc %d\n",
d->domain_id, poolid, n_dom, rc);
return rc;
}
@@ -614,7 +614,7 @@ int cpupool_add_domain(struct domain *d, int poolid)
*/
void cpupool_rm_domain(struct domain *d)
{
- int cpupool_id;
+ unsigned int cpupool_id;
int n_dom;
if ( d->cpupool == NULL )
@@ -625,7 +625,7 @@ void cpupool_rm_domain(struct domain *d)
n_dom = d->cpupool->n_dom;
d->cpupool = NULL;
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool_rm_domain(dom=%d,pool=%d) n_dom %d\n",
+ debugtrace_printk("cpupool_rm_domain(dom=%d,pool=%u) n_dom %d\n",
d->domain_id, cpupool_id, n_dom);
return;
}
@@ -767,7 +767,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
case XEN_SYSCTL_CPUPOOL_OP_CREATE:
{
- int poolid;
+ unsigned int poolid;
poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
CPUPOOLID_NONE: op->cpupool_id;
@@ -811,7 +811,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
const cpumask_t *cpus;
cpu = op->cpu;
- debugtrace_printk("cpupool_assign_cpu(pool=%d,cpu=%d)\n",
+ debugtrace_printk("cpupool_assign_cpu(pool=%u,cpu=%u)\n",
op->cpupool_id, cpu);
spin_lock(&cpupool_lock);
@@ -844,7 +844,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
addcpu_out:
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool_assign_cpu(pool=%d,cpu=%d) ret %d\n",
+ debugtrace_printk("cpupool_assign_cpu(pool=%u,cpu=%u) ret %d\n",
op->cpupool_id, cpu, ret);
}
@@ -885,7 +885,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
rcu_unlock_domain(d);
break;
}
- debugtrace_printk("cpupool move_domain(dom=%d)->pool=%d\n",
+ debugtrace_printk("cpupool move_domain(dom=%d)->pool=%u\n",
d->domain_id, op->cpupool_id);
ret = -ENOENT;
spin_lock(&cpupool_lock);
@@ -895,7 +895,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
ret = cpupool_move_domain_locked(d, c);
spin_unlock(&cpupool_lock);
- debugtrace_printk("cpupool move_domain(dom=%d)->pool=%d ret %d\n",
+ debugtrace_printk("cpupool move_domain(dom=%d)->pool=%u ret %d\n",
d->domain_id, op->cpupool_id, ret);
rcu_unlock_domain(d);
}
@@ -916,7 +916,7 @@ int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op)
return ret;
}
-int cpupool_get_id(const struct domain *d)
+unsigned int cpupool_get_id(const struct domain *d)
{
return d->cpupool ? d->cpupool->cpupool_id : CPUPOOLID_NONE;
}
@@ -946,7 +946,7 @@ void dump_runq(unsigned char key)
for_each_cpupool(c)
{
- printk("Cpupool %d:\n", (*c)->cpupool_id);
+ printk("Cpupool %u:\n", (*c)->cpupool_id);
printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
sched_gran_print((*c)->gran, cpupool_get_granularity(*c));
schedule_dump(*c);
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index 685992cab9..e69d9be1e8 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
struct cpupool
{
- int cpupool_id;
-#define CPUPOOLID_NONE (-1)
+ unsigned int cpupool_id;
+#define CPUPOOLID_NONE (~0U)
unsigned int n_dom;
cpumask_var_t cpu_valid; /* all cpus assigned to pool */
cpumask_var_t res_valid; /* all scheduling resources of pool */
@@ -601,9 +601,9 @@ int cpu_disable_scheduler(unsigned int cpu);
int schedule_cpu_add(unsigned int cpu, struct cpupool *c);
int schedule_cpu_rm(unsigned int cpu);
int sched_move_domain(struct domain *d, struct cpupool *c);
-struct cpupool *cpupool_get_by_id(int poolid);
+struct cpupool *cpupool_get_by_id(unsigned int poolid);
void cpupool_put(struct cpupool *pool);
-int cpupool_add_domain(struct domain *d, int poolid);
+int cpupool_add_domain(struct domain *d, unsigned int poolid);
void cpupool_rm_domain(struct domain *d);
#endif /* __XEN_SCHED_IF_H__ */
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index a345cc01f8..b2878e7b2a 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -691,7 +691,7 @@ void noreturn asm_domain_crash_synchronous(unsigned long addr);
void scheduler_init(void);
int sched_init_vcpu(struct vcpu *v);
void sched_destroy_vcpu(struct vcpu *v);
-int sched_init_domain(struct domain *d, int poolid);
+int sched_init_domain(struct domain *d, unsigned int poolid);
void sched_destroy_domain(struct domain *d);
long sched_adjust(struct domain *, struct xen_domctl_scheduler_op *);
long sched_adjust_global(struct xen_sysctl_scheduler_op *);
@@ -1089,7 +1089,7 @@ static always_inline bool is_cpufreq_controller(const struct domain *d)
int cpupool_move_domain(struct domain *d, struct cpupool *c);
int cpupool_do_sysctl(struct xen_sysctl_cpupool_op *op);
-int cpupool_get_id(const struct domain *d);
+unsigned int cpupool_get_id(const struct domain *d);
const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool);
extern void dump_runq(unsigned char key);
--
2.26.2
On 01.12.2020 09:21, Juergen Gross wrote:
> @@ -243,11 +243,11 @@ void cpupool_put(struct cpupool *pool)
> * - unknown scheduler
> */
> static struct cpupool *cpupool_create(
> - int poolid, unsigned int sched_id, int *perr)
> + unsigned int poolid, unsigned int sched_id, int *perr)
> {
> struct cpupool *c;
> struct cpupool **q;
> - int last = 0;
> + unsigned int last = 0;
>
> *perr = -ENOMEM;
> if ( (c = alloc_cpupool_struct()) == NULL )
> @@ -256,7 +256,7 @@ static struct cpupool *cpupool_create(
> /* One reference for caller, one reference for cpupool_destroy(). */
> atomic_set(&c->refcnt, 2);
>
> - debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
> + debugtrace_printk("cpupool_create(pool=%u,sched=%u)\n", poolid, sched_id);
>
> spin_lock(&cpupool_lock);
Below from here we have
c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
which I think can (a) wrap to zero and (b) cause a pool with id
CPUPOOLID_NONE to be created. The former is bad in any event, and
the latter will cause confusion at least with cpupool_add_domain()
and cpupool_get_id(). I realize this is a tangential problem, i.e.
may want fixing in a separate change.
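For illustration, a minimal sketch of the kind of guard meant here; this is
not the actual fix from the series, duplicate-id checking is left out, and
the error label as well as the -ENOSPC choice are assumptions:

    if ( poolid == CPUPOOLID_NONE )
    {
        /* last + 1 must neither wrap nor become the reserved value. */
        if ( last >= CPUPOOLID_NONE - 1 )
        {
            *perr = -ENOSPC;        /* hypothetical error code */
            goto err;               /* hypothetical cleanup label */
        }
        c->cpupool_id = last + 1;
    }
    else
        c->cpupool_id = poolid;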
> --- a/xen/common/sched/private.h
> +++ b/xen/common/sched/private.h
> @@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
>
> struct cpupool
> {
> - int cpupool_id;
> -#define CPUPOOLID_NONE (-1)
> + unsigned int cpupool_id;
> +#define CPUPOOLID_NONE (~0U)
How about using XEN_SYSCTL_CPUPOOL_PAR_ANY here? Furthermore,
together with the remark above, I think you also want to consider
the case of sizeof(unsigned int) > sizeof(uint32_t).
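Spelled out, that suggestion would amount to something like the following
(illustrative only; whether the public sysctl definitions are visible at
this point of private.h is an assumption):

    /* Tie the internal sentinel to the public interface value. */
    #define CPUPOOLID_NONE    XEN_SYSCTL_CPUPOOL_PAR_ANY

    /*
     * Width remark: XEN_SYSCTL_CPUPOOL_PAR_ANY is a 32-bit all-ones value,
     * while ~0U is all-ones of whatever width unsigned int has, so the two
     * coincide only as long as unsigned int is exactly 32 bits wide.
     */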
Jan
On 01.12.20 09:55, Jan Beulich wrote:
> On 01.12.2020 09:21, Juergen Gross wrote:
>> @@ -243,11 +243,11 @@ void cpupool_put(struct cpupool *pool)
>> * - unknown scheduler
>> */
>> static struct cpupool *cpupool_create(
>> - int poolid, unsigned int sched_id, int *perr)
>> + unsigned int poolid, unsigned int sched_id, int *perr)
>> {
>> struct cpupool *c;
>> struct cpupool **q;
>> - int last = 0;
>> + unsigned int last = 0;
>>
>> *perr = -ENOMEM;
>> if ( (c = alloc_cpupool_struct()) == NULL )
>> @@ -256,7 +256,7 @@ static struct cpupool *cpupool_create(
>> /* One reference for caller, one reference for cpupool_destroy(). */
>> atomic_set(&c->refcnt, 2);
>>
>> - debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
>> + debugtrace_printk("cpupool_create(pool=%u,sched=%u)\n", poolid, sched_id);
>>
>> spin_lock(&cpupool_lock);
>
> Below from here we have
>
> c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
>
> which I think can (a) wrap to zero and (b) cause a pool with id
> CPUPOOLID_NONE to be created. The former is bad in any event, and
> the latter will cause confusion at least with cpupool_add_domain()
> and cpupool_get_id(). I realize this is a tangential problem, i.e.
> may want fixing in a separate change.
Yes, this is an issue today already, and it is fixed in patch 5.
>
>> --- a/xen/common/sched/private.h
>> +++ b/xen/common/sched/private.h
>> @@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
>>
>> struct cpupool
>> {
>> - int cpupool_id;
>> -#define CPUPOOLID_NONE (-1)
>> + unsigned int cpupool_id;
>> +#define CPUPOOLID_NONE (~0U)
>
> How about using XEN_SYSCTL_CPUPOOL_PAR_ANY here? Furthermore,
> together with the remark above, I think you also want to consider
> the case of sizeof(unsigned int) > sizeof(uint32_t).
With patch 5 this should be completely fine.
Juergen
On 01.12.2020 10:01, Jürgen Groß wrote:
> On 01.12.20 09:55, Jan Beulich wrote:
>> On 01.12.2020 09:21, Juergen Gross wrote:
>>> --- a/xen/common/sched/private.h
>>> +++ b/xen/common/sched/private.h
>>> @@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
>>>
>>> struct cpupool
>>> {
>>> - int cpupool_id;
>>> -#define CPUPOOLID_NONE (-1)
>>> + unsigned int cpupool_id;
>>> +#define CPUPOOLID_NONE (~0U)
>>
>> How about using XEN_SYSCTL_CPUPOOL_PAR_ANY here? Furthermore,
>> together with the remark above, I think you also want to consider
>> the case of sizeof(unsigned int) > sizeof(uint32_t).
>
> With patch 5 this should be completely fine.
I don't think so, as there still will be CPUPOOLID_NONE !=
XEN_SYSCTL_CPUPOOL_PAR_ANY in the mentioned case.
Jan
On 07.12.20 10:59, Jan Beulich wrote:
> On 01.12.2020 10:01, Jürgen Groß wrote:
>> On 01.12.20 09:55, Jan Beulich wrote:
>>> On 01.12.2020 09:21, Juergen Gross wrote:
>>>> --- a/xen/common/sched/private.h
>>>> +++ b/xen/common/sched/private.h
>>>> @@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
>>>>
>>>> struct cpupool
>>>> {
>>>> - int cpupool_id;
>>>> -#define CPUPOOLID_NONE (-1)
>>>> + unsigned int cpupool_id;
>>>> +#define CPUPOOLID_NONE (~0U)
>>>
>>> How about using XEN_SYSCTL_CPUPOOL_PAR_ANY here? Furthermore,
>>> together with the remark above, I think you also want to consider
>>> the case of sizeof(unsigned int) > sizeof(uint32_t).
>>
>> With patch 5 this should be completely fine.
>
> I don't think so, as there still will be CPUPOOLID_NONE !=
> XEN_SYSCTL_CPUPOOL_PAR_ANY in the mentioned case.
I don't see that being relevant, as we have in cpupool_do_sysctl():
poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
CPUPOOLID_NONE: op->cpupool_id;
Juergen
On 07.12.2020 15:48, Jürgen Groß wrote:
> On 07.12.20 10:59, Jan Beulich wrote:
>> On 01.12.2020 10:01, Jürgen Groß wrote:
>>> On 01.12.20 09:55, Jan Beulich wrote:
>>>> On 01.12.2020 09:21, Juergen Gross wrote:
>>>>> --- a/xen/common/sched/private.h
>>>>> +++ b/xen/common/sched/private.h
>>>>> @@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
>>>>>
>>>>> struct cpupool
>>>>> {
>>>>> - int cpupool_id;
>>>>> -#define CPUPOOLID_NONE (-1)
>>>>> + unsigned int cpupool_id;
>>>>> +#define CPUPOOLID_NONE (~0U)
>>>>
>>>> How about using XEN_SYSCTL_CPUPOOL_PAR_ANY here? Furthermore,
>>>> together with the remark above, I think you also want to consider
>>>> the case of sizeof(unsigned int) > sizeof(uint32_t).
>>>
>>> With patch 5 this should be completely fine.
>>
>> I don't think so, as there still will be CPUPOOLID_NONE !=
>> XEN_SYSCTL_CPUPOOL_PAR_ANY in the mentioned case.
>
> I don't see that being relevant, as we have in cpupool_do_sysctl():
>
> poolid = (op->cpupool_id == XEN_SYSCTL_CPUPOOL_PAR_ANY) ?
> CPUPOOLID_NONE: op->cpupool_id;
Oh, sorry for the noise then. I forgot about this transformation.
Jan
On 01.12.2020 10:01, Jürgen Groß wrote:
> On 01.12.20 09:55, Jan Beulich wrote:
>> On 01.12.2020 09:21, Juergen Gross wrote:
>>> @@ -243,11 +243,11 @@ void cpupool_put(struct cpupool *pool)
>>> * - unknown scheduler
>>> */
>>> static struct cpupool *cpupool_create(
>>> - int poolid, unsigned int sched_id, int *perr)
>>> + unsigned int poolid, unsigned int sched_id, int *perr)
>>> {
>>> struct cpupool *c;
>>> struct cpupool **q;
>>> - int last = 0;
>>> + unsigned int last = 0;
>>>
>>> *perr = -ENOMEM;
>>> if ( (c = alloc_cpupool_struct()) == NULL )
>>> @@ -256,7 +256,7 @@ static struct cpupool *cpupool_create(
>>> /* One reference for caller, one reference for cpupool_destroy(). */
>>> atomic_set(&c->refcnt, 2);
>>>
>>> - debugtrace_printk("cpupool_create(pool=%d,sched=%u)\n", poolid, sched_id);
>>> + debugtrace_printk("cpupool_create(pool=%u,sched=%u)\n", poolid, sched_id);
>>>
>>> spin_lock(&cpupool_lock);
>>
>> Below from here we have
>>
>> c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
>>
>> which I think can (a) wrap to zero and (b) cause a pool with id
>> CPUPOOLID_NONE to be created. The former is bad in any event, and
>> the latter will cause confusion at least with cpupool_add_domain()
>> and cpupool_get_id(). I realize this is a tangential problem, i.e.
>> may want fixing in a separate change.
>
> Yes, this is an issue today already, and it is fixed in patch 5.
>
>>
>>> --- a/xen/common/sched/private.h
>>> +++ b/xen/common/sched/private.h
>>> @@ -505,8 +505,8 @@ static inline void sched_unit_unpause(const struct sched_unit *unit)
>>>
>>> struct cpupool
>>> {
>>> - int cpupool_id;
>>> -#define CPUPOOLID_NONE (-1)
>>> + unsigned int cpupool_id;
>>> +#define CPUPOOLID_NONE (~0U)
>>
>> How about using XEN_SYSCTL_CPUPOOL_PAR_ANY here? Furthermore,
>> together with the remark above, I think you also want to consider
>> the case of sizeof(unsigned int) > sizeof(uint32_t).
>
> With patch 5 this should be completely fine.
Ah - I didn't expect this kind of fix in a patch with that title,
but yes.
Jan
On Tue, 2020-12-01 at 09:21 +0100, Juergen Gross wrote:
> The cpupool id is an unsigned value in the public interface header,
> so
> there is no reason why it is a signed value in struct cpupool.
>
> Switch it to unsigned int.
>
I think we can add:

"No functional change intended"

> Signed-off-by: Juergen Gross <jgross@suse.com>
>
IAC:

Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

Regards
--
Dario Faggioli, Ph.D
http://about.me/dario.faggioli
Virtualization Software Engineer
SUSE Labs, SUSE    https://www.suse.com/
-------------------------------------------------------------------
<<This happens because _I_ choose it to happen!>> (Raistlin Majere)
On 04.12.2020 16:52, Dario Faggioli wrote:
> On Tue, 2020-12-01 at 09:21 +0100, Juergen Gross wrote:
>> The cpupool id is an unsigned value in the public interface header,
>> so
>> there is no reason why it is a signed value in struct cpupool.
>>
>> Switch it to unsigned int.
>>
> I think we can add:
>
> "No functional change intended"
>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>>
> IAC:
>
> Reviewed-by: Dario Faggioli <dfaggioli@suse.com>

FAOD this applies without any further changes, i.e. not even my
suggestion regarding the definition of CPUPOOLID_NONE to
XEN_SYSCTL_CPUPOOL_PAR_ANY, or - not said explicitly in the earlier
reply - at least the avoidance of open-coding UINT_MAX?

Jan
On 04.12.2020 16:52, Dario Faggioli wrote:
> On Tue, 2020-12-01 at 09:21 +0100, Juergen Gross wrote:
>> The cpupool id is an unsigned value in the public interface header,
>> so
>> there is no reason why it is a signed value in struct cpupool.
>>
>> Switch it to unsigned int.
>>
> I think we can add:
>
> "No functional change intended"
I've not added this - there is an intentional change at least
for
if ( (poolid != CPUPOOLID_NONE) && (last >= poolid) )
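A stand-alone illustration of that difference (the id value is made up and
this only mirrors the comparison, not the surrounding loop):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        /*
         * An id of 0x80000000 handed in via the uint32_t sysctl field: the
         * old signed member saw it as INT_MIN, the new unsigned one keeps
         * its value.
         */
        int last_s = 0, poolid_s = INT_MIN;
        unsigned int last_u = 0, poolid_u = 0x80000000u;

        printf("old, signed:   last >= poolid -> %d\n", last_s >= poolid_s); /* 1 */
        printf("new, unsigned: last >= poolid -> %d\n", last_u >= poolid_u); /* 0 */
        return 0;
    }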
Jan