From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Cc: Juergen Gross <jgross@suse.com>, Dario Faggioli, George Dunlap
Subject: [PATCH v2 05/17] xen/cpupool: switch cpupool list to normal list interface
Date: Tue, 1 Dec 2020 09:21:16 +0100
Message-Id: <20201201082128.15239-6-jgross@suse.com>
X-Mailer: git-send-email 2.26.2
In-Reply-To: <20201201082128.15239-1-jgross@suse.com>
References: <20201201082128.15239-1-jgross@suse.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit

Instead of open-coding something like a linked list, just use the
available functionality from list.h.

The allocation of a new cpupool id is not aware of a possible wrap.
Fix that.

While adding the required new include to private.h, sort the includes.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Dario Faggioli
---
V2:
- new patch

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/common/sched/cpupool.c | 100 ++++++++++++++++++++-----------------
 xen/common/sched/private.h |   4 +-
 2 files changed, 57 insertions(+), 47 deletions(-)
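
Illustrative note, not part of the patch (the actual changes follow below):
the sketch is a minimal, self-contained model of the two ideas above, an
intrusive list_head embedded in the pool structure (the pattern provided by
xen/list.h) and a wrap-aware search for the first free id.  The helper
macros and the names ID_NONE, struct pool and next_free_id() are simplified
stand-ins invented for this sketch; they are not the Xen implementation.

#include <stddef.h>
#include <stdio.h>

/* Tiny stand-in for the intrusive list interface of xen/list.h. */
struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Simplified: the real macro derives the type from 'pos' via typeof(). */
#define list_for_each_entry(pos, head, type, member)                    \
    for ( (pos) = list_entry((head)->next, type, member);               \
          &(pos)->member != (head);                                     \
          (pos) = list_entry((pos)->member.next, type, member) )

/* Insert 'item' in front of 'head', i.e. at the tail of the list. */
static void list_add_tail(struct list_head *item, struct list_head *head)
{
    item->prev = head->prev;
    item->next = head;
    head->prev->next = item;
    head->prev = item;
}

#define ID_NONE 0xffffffffu      /* stand-in for CPUPOOLID_NONE */

struct pool {
    unsigned int id;
    struct list_head list;       /* link into the global, id-sorted list */
};

static struct list_head pool_list = LIST_HEAD_INIT(pool_list);

/*
 * Return the next free id.  Normally that is the last id + 1; once the id
 * space has wrapped, walk the sorted list and use the first hole.
 */
static unsigned int next_free_id(void)
{
    struct pool *p;
    unsigned int last_id;

    if ( pool_list.next == &pool_list )
        return 0;                                  /* empty list */

    last_id = list_entry(pool_list.prev, struct pool, list)->id;
    if ( last_id != ID_NONE - 1 )
        return last_id + 1;                        /* no wrap yet */

    list_for_each_entry(p, &pool_list, struct pool, list)
    {
        struct pool *n = list_entry(p->list.next, struct pool, list);

        /* Stop at the first hole, or at the end if the space is full. */
        if ( &n->list == &pool_list || p->id + 1 != n->id )
            break;
    }

    return p->id + 1;      /* yields ID_NONE (failure) if no hole found */
}

int main(void)
{
    struct pool a = { .id = 0 }, b = { .id = 1 }, c = { .id = ID_NONE - 1 };

    list_add_tail(&a.list, &pool_list);
    list_add_tail(&b.list, &pool_list);
    list_add_tail(&c.list, &pool_list);    /* simulate a wrapped id space */

    /* With ids {0, 1, ID_NONE - 1} in use, the first free id is 2. */
    printf("next free id: %u\n", next_free_id());

    return 0;
}
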
diff --git a/xen/common/sched/cpupool.c b/xen/common/sched/cpupool.c
index 01fa71dd00..714cd47ae9 100644
--- a/xen/common/sched/cpupool.c
+++ b/xen/common/sched/cpupool.c
@@ -16,6 +16,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -23,13 +24,10 @@
 
 #include "private.h"
 
-#define for_each_cpupool(ptr)    \
-    for ((ptr) = &cpupool_list; *(ptr) != NULL; (ptr) = &((*(ptr))->next))
-
 struct cpupool *cpupool0;                /* Initial cpupool with Dom0 */
 cpumask_t cpupool_free_cpus;             /* cpus not in any cpupool */
 
-static struct cpupool *cpupool_list;     /* linked list, sorted by poolid */
+static LIST_HEAD(cpupool_list);          /* linked list, sorted by poolid */
 
 static int cpupool_moving_cpu = -1;
 static struct cpupool *cpupool_cpu_moving = NULL;
@@ -189,15 +187,15 @@ static struct cpupool *alloc_cpupool_struct(void)
  */
 static struct cpupool *__cpupool_find_by_id(unsigned int id, bool exact)
 {
-    struct cpupool **q;
+    struct cpupool *q;
 
     ASSERT(spin_is_locked(&cpupool_lock));
 
-    for_each_cpupool(q)
-        if ( (*q)->cpupool_id >= id )
-            break;
+    list_for_each_entry(q, &cpupool_list, list)
+        if ( q->cpupool_id == id || (!exact && q->cpupool_id > id) )
+            return q;
 
-    return (!exact || (*q == NULL) || ((*q)->cpupool_id == id)) ? *q : NULL;
+    return NULL;
 }
 
 static struct cpupool *cpupool_find_by_id(unsigned int poolid)
@@ -246,8 +244,7 @@ static struct cpupool *cpupool_create(
     unsigned int poolid, unsigned int sched_id, int *perr)
 {
     struct cpupool *c;
-    struct cpupool **q;
-    unsigned int last = 0;
+    struct cpupool *q;
 
     *perr = -ENOMEM;
     if ( (c = alloc_cpupool_struct()) == NULL )
@@ -260,23 +257,42 @@ static struct cpupool *cpupool_create(
 
     spin_lock(&cpupool_lock);
 
-    for_each_cpupool(q)
+    if ( poolid != CPUPOOLID_NONE )
     {
-        last = (*q)->cpupool_id;
-        if ( (poolid != CPUPOOLID_NONE) && (last >= poolid) )
-            break;
+        q = __cpupool_find_by_id(poolid, false);
+        if ( !q )
+            list_add_tail(&c->list, &cpupool_list);
+        else
+        {
+            list_add_tail(&c->list, &q->list);
+            if ( q->cpupool_id == poolid )
+            {
+                *perr = -EEXIST;
+                goto err;
+            }
+        }
+
+        c->cpupool_id = poolid;
     }
-    if ( *q != NULL )
+    else
     {
-        if ( (*q)->cpupool_id == poolid )
+        /* Cpupool 0 is created with specified id at boot and never removed. */
+        ASSERT(!list_empty(&cpupool_list));
+
+        q = list_last_entry(&cpupool_list, struct cpupool, list);
+        /* In case of wrap search for first free id. */
+        if ( q->cpupool_id == CPUPOOLID_NONE - 1 )
         {
-            *perr = -EEXIST;
-            goto err;
+            list_for_each_entry(q, &cpupool_list, list)
+                if ( q->cpupool_id + 1 != list_next_entry(q, list)->cpupool_id )
+                    break;
         }
-        c->next = *q;
+
+        list_add(&c->list, &q->list);
+
+        c->cpupool_id = q->cpupool_id + 1;
     }
 
-    c->cpupool_id = (poolid == CPUPOOLID_NONE) ? (last + 1) : poolid;
     if ( poolid == 0 )
     {
         c->sched = scheduler_get_default();
@@ -291,8 +307,6 @@ static struct cpupool *cpupool_create(
     c->gran = opt_sched_granularity;
     c->sched_gran = sched_granularity;
 
-    *q = c;
-
     spin_unlock(&cpupool_lock);
 
     debugtrace_printk("Created cpupool %u with scheduler %s (%s)\n",
@@ -302,6 +316,8 @@ static struct cpupool *cpupool_create(
     return c;
 
  err:
+    list_del(&c->list);
+
     spin_unlock(&cpupool_lock);
     free_cpupool_struct(c);
     return NULL;
@@ -312,27 +328,19 @@ static struct cpupool *cpupool_create(
  * possible failures:
  * - pool still in use
  * - cpus still assigned to pool
- * - pool not in list
  */
 static int cpupool_destroy(struct cpupool *c)
 {
-    struct cpupool **q;
-
     spin_lock(&cpupool_lock);
-    for_each_cpupool(q)
-        if ( *q == c )
-            break;
-    if ( *q != c )
-    {
-        spin_unlock(&cpupool_lock);
-        return -ENOENT;
-    }
+
     if ( (c->n_dom != 0) || cpumask_weight(c->cpu_valid) )
     {
         spin_unlock(&cpupool_lock);
         return -EBUSY;
     }
-    *q = c->next;
+
+    list_del(&c->list);
+
     spin_unlock(&cpupool_lock);
 
     cpupool_put(c);
@@ -732,17 +740,17 @@ static int cpupool_cpu_remove_prologue(unsigned int cpu)
  */
 static void cpupool_cpu_remove_forced(unsigned int cpu)
 {
-    struct cpupool **c;
+    struct cpupool *c;
     int ret;
     unsigned int master_cpu = sched_get_resource_cpu(cpu);
 
-    for_each_cpupool ( c )
+    list_for_each_entry(c, &cpupool_list, list)
     {
-        if ( cpumask_test_cpu(master_cpu, (*c)->cpu_valid) )
+        if ( cpumask_test_cpu(master_cpu, c->cpu_valid) )
         {
-            ret = cpupool_unassign_cpu_start(*c, master_cpu);
+            ret = cpupool_unassign_cpu_start(c, master_cpu);
             BUG_ON(ret);
-            ret = cpupool_unassign_cpu_finish(*c);
+            ret = cpupool_unassign_cpu_finish(c);
             BUG_ON(ret);
         }
     }
@@ -929,7 +937,7 @@ const cpumask_t *cpupool_valid_cpus(const struct cpupool *pool)
 void dump_runq(unsigned char key)
 {
     s_time_t now = NOW();
-    struct cpupool **c;
+    struct cpupool *c;
 
     spin_lock(&cpupool_lock);
 
@@ -944,12 +952,12 @@ void dump_runq(unsigned char key)
         schedule_dump(NULL);
     }
 
-    for_each_cpupool(c)
+    list_for_each_entry(c, &cpupool_list, list)
     {
-        printk("Cpupool %u:\n", (*c)->cpupool_id);
-        printk("Cpus: %*pbl\n", CPUMASK_PR((*c)->cpu_valid));
-        sched_gran_print((*c)->gran, cpupool_get_granularity(*c));
-        schedule_dump(*c);
+        printk("Cpupool %u:\n", c->cpupool_id);
+        printk("Cpus: %*pbl\n", CPUMASK_PR(c->cpu_valid));
+        sched_gran_print(c->gran, cpupool_get_granularity(c));
+        schedule_dump(c);
     }
 
     spin_unlock(&cpupool_lock);
diff --git a/xen/common/sched/private.h b/xen/common/sched/private.h
index e69d9be1e8..6953cefa6e 100644
--- a/xen/common/sched/private.h
+++ b/xen/common/sched/private.h
@@ -8,8 +8,9 @@
 #ifndef __XEN_SCHED_IF_H__
 #define __XEN_SCHED_IF_H__
 
-#include
 #include
+#include
+#include
 #include
 
 /* cpus currently in no cpupool */
@@ -510,6 +511,7 @@ struct cpupool
     unsigned int     n_dom;
     cpumask_var_t    cpu_valid;      /* all cpus assigned to pool */
     cpumask_var_t    res_valid;      /* all scheduling resources of pool */
+    struct list_head list;
     struct cpupool   *next;
     struct scheduler *sched;
     atomic_t         refcnt;
-- 
2.26.2