From nobody Sun Feb 8 06:56:18 2026
From: Joel Fernandes
To: Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie,
 Simona Vetter, Jonathan Corbet, Alex Deucher, Christian König,
 Jani Nikula, Joonas Lahtinen, Rodrigo Vivi, Tvrtko Ursulin, Huang Rui,
 Matthew Auld, Matthew Brost, Lucas De Marchi, Thomas Hellström,
 Helge Deller
Cc: Danilo Krummrich, Alice Ryhl, Miguel Ojeda, Alex Gaynor, Boqun Feng,
 Gary Guo, Björn Roy Baron, Benno Lossin, Andreas Hindborg, Trevor Gross,
 John Hubbard, Alistair Popple, Timur Tabi, Edwin Peer, Alexandre Courbot,
 Andrea Righi, Philipp Stanner, Elle Rhumsaa, Daniel Almeida,
 joel@joelfernandes.org, nouveau@lists.freedesktop.org,
 dri-devel@lists.freedesktop.org, rust-for-linux@vger.kernel.org,
 linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
 amd-gfx@lists.freedesktop.org, intel-gfx@lists.freedesktop.org,
 intel-xe@lists.freedesktop.org, linux-fbdev@vger.kernel.org,
 Joel Fernandes
Subject: [PATCH RFC v5 1/6] rust: clist: Add support to interface with C linked lists
Date: Fri, 19 Dec 2025 15:38:00 -0500
Message-Id: <20251219203805.1246586-2-joelagnelf@nvidia.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20251219203805.1246586-1-joelagnelf@nvidia.com>
References: <20251219203805.1246586-1-joelagnelf@nvidia.com>
Add a new module `clist` for working with C's circular doubly linked
lists. Provide low-level iteration over list nodes; typed iteration
over the actual items is provided through a `clist_create` macro that
assists in creating the `CList` type.

Signed-off-by: Joel Fernandes
---
 MAINTAINERS            |   7 +
 rust/helpers/helpers.c |   1 +
 rust/helpers/list.c    |  12 ++
 rust/kernel/clist.rs   | 357 +++++++++++++++++++++++++++++++++++++++++
 rust/kernel/lib.rs     |   1 +
 5 files changed, 378 insertions(+)
 create mode 100644 rust/helpers/list.c
 create mode 100644 rust/kernel/clist.rs

diff --git a/MAINTAINERS b/MAINTAINERS
index 5b11839cba9d..1b72e539ab34 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -22928,6 +22928,13 @@ F: rust/kernel/init.rs
 F: rust/pin-init/
 K: \bpin-init\b|pin_init\b|PinInit
 
+RUST TO C LIST INTERFACES
+M: Joel Fernandes
+M: Alexandre Courbot
+L: rust-for-linux@vger.kernel.org
+S: Maintained
+F: rust/kernel/clist.rs
+
 RXRPC SOCKETS (AF_RXRPC)
 M: David Howells
 M: Marc Dionne
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 79c72762ad9c..634fa2386bbb 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -32,6 +32,7 @@
 #include "io.c"
 #include "jump_label.c"
 #include "kunit.c"
+#include "list.c"
 #include "maple_tree.c"
 #include "mm.c"
 #include "mutex.c"
diff --git a/rust/helpers/list.c b/rust/helpers/list.c
new file mode 100644
index 000000000000..6044979c7a2e
--- /dev/null
+++ b/rust/helpers/list.c
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Helpers for the C circular doubly linked list implementation.
+ */
+
+#include <linux/list.h>
+
+void rust_helper_list_add_tail(struct list_head *new, struct list_head *head)
+{
+        list_add_tail(new, head);
+}
diff --git a/rust/kernel/clist.rs b/rust/kernel/clist.rs
new file mode 100644
index 000000000000..b4ee3149903a
--- /dev/null
+++ b/rust/kernel/clist.rs
@@ -0,0 +1,357 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! A C circular doubly linked intrusive list interface for Rust code.
+//!
+//! # Examples
+//!
+//! ```
+//! use kernel::{
+//!     bindings,
+//!     clist::init_list_head,
+//!     clist_create,
+//!     types::Opaque, //
+//! };
+//! # // Create a test list with values (0, 10, 20) - normally done by C code but it is
+//! # // emulated here for doctests using the C bindings.
+//! # use core::mem::MaybeUninit;
+//! #
+//! # /// C struct with embedded `list_head` (typically allocated by C code).
+//! # #[repr(C)]
+//! # pub(crate) struct SampleItemC {
+//! #     pub value: i32,
+//! #     pub link: bindings::list_head,
+//! # }
+//! #
+//! # let mut head = MaybeUninit::<bindings::list_head>::uninit();
+//! #
+//! # let head = head.as_mut_ptr();
+//! # // SAFETY: `head` and all the items are test objects allocated in this scope.
+//! # unsafe { init_list_head(head) };
+//! #
+//! # let mut items = [
+//! #     MaybeUninit::<SampleItemC>::uninit(),
+//! #     MaybeUninit::<SampleItemC>::uninit(),
+//! #     MaybeUninit::<SampleItemC>::uninit(),
+//! # ];
+//! #
+//! # for (i, item) in items.iter_mut().enumerate() {
+//! #     let ptr = item.as_mut_ptr();
+//! #     // SAFETY: the pointers are to allocated test objects with a `list_head` field.
+//! #     unsafe {
+//! #         (*ptr).value = i as i32 * 10;
+//! #         // addr_of_mut!() computes the address of `link` directly, as `link` is uninitialized.
+//! #         init_list_head(core::ptr::addr_of_mut!((*ptr).link));
+//! #         bindings::list_add_tail(&mut (*ptr).link, head);
+//! #     }
+//! # }
+//!
+//! // Rust wrapper for the C struct.
+//! // The list item struct in this example is defined in C code as:
+//! //     struct SampleItemC {
+//! //         int value;
+//! //         struct list_head link;
+//! //     };
+//! //
+//! #[repr(transparent)]
+//! pub(crate) struct Item(Opaque<SampleItemC>);
+//!
+//! impl Item {
+//!     pub(crate) fn value(&self) -> i32 {
+//!         // SAFETY: [`Item`] has the same layout as [`SampleItemC`].
+//!         unsafe { (*self.0.get()).value }
+//!     }
+//! }
+//!
+//! // Create a typed [`CList`] from the sentinel head.
+//! // SAFETY: `head` is valid; items are [`SampleItemC`] with an embedded `link` field.
+//! let list = unsafe { clist_create!(head, Item, SampleItemC, link) };
+//!
+//! // Iterate directly over typed items.
+//! let mut found_0 = false;
+//! let mut found_10 = false;
+//! let mut found_20 = false;
+//!
+//! for item in list.iter() {
+//!     let val = item.value();
+//!     if val == 0 { found_0 = true; }
+//!     if val == 10 { found_10 = true; }
+//!     if val == 20 { found_20 = true; }
+//! }
+//!
+//! assert!(found_0 && found_10 && found_20);
+//! ```
+
+use core::{
+    iter::FusedIterator,
+    marker::PhantomData, //
+};
+
+use crate::{
+    bindings,
+    types::Opaque, //
+};
+
+use pin_init::PinInit;
+
+/// Initialize a `list_head` object to point to itself.
+///
+/// # Safety
+///
+/// `list` must be a valid pointer to a `list_head` object.
+#[inline]
+pub unsafe fn init_list_head(list: *mut bindings::list_head) {
+    // SAFETY: The caller guarantees `list` is a valid pointer to a `list_head`.
+    unsafe {
+        (*list).next = list;
+        (*list).prev = list;
+    }
+}
+
+/// Wraps a `list_head` object for use in intrusive linked lists.
+///
+/// # Invariants
+///
+/// - [`CListHead`] represents an allocated and valid `list_head` structure.
+/// - Once a [`CListHead`] is created in Rust, it will not be modified by non-Rust code.
+/// - The `list_head` nodes of all individual items are not modified for the lifetime of the
+///   [`CListHead`].
+#[repr(transparent)]
+pub struct CListHead(Opaque<bindings::list_head>);
+
+impl CListHead {
+    /// Create a `&CListHead` reference from a raw `list_head` pointer.
+    ///
+    /// # Safety
+    ///
+    /// - `ptr` must be a valid pointer to an allocated and initialized `list_head` structure.
+    /// - `ptr` must remain valid and unmodified for the lifetime `'a`.
+    #[inline]
+    pub unsafe fn from_raw<'a>(ptr: *mut bindings::list_head) -> &'a Self {
+        // SAFETY:
+        // - [`CListHead`] has the same layout as `list_head`.
+        // - `ptr` is valid and unmodified for `'a`.
+        unsafe { &*ptr.cast() }
+    }
+
+    /// Get the raw `list_head` pointer.
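+    ///
+    /// The returned pointer stays valid and unmodified for the lifetime of `self`,
+    /// per the type invariants.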
+    #[inline]
+    pub fn as_raw(&self) -> *mut bindings::list_head {
+        self.0.get()
+    }
+
+    /// Get the next [`CListHead`] in the list.
+    #[inline]
+    pub fn next(&self) -> &Self {
+        let raw = self.as_raw();
+        // SAFETY:
+        // - `self.as_raw()` is valid per the type invariants.
+        // - The `next` pointer is guaranteed to be non-NULL.
+        unsafe { Self::from_raw((*raw).next) }
+    }
+
+    /// Get the previous [`CListHead`] in the list.
+    #[inline]
+    pub fn prev(&self) -> &Self {
+        let raw = self.as_raw();
+        // SAFETY:
+        // - `self.as_raw()` is valid per the type invariants.
+        // - The `prev` pointer is guaranteed to be non-NULL.
+        unsafe { Self::from_raw((*raw).prev) }
+    }
+
+    /// Check if this node is linked in a list (not isolated).
+    #[inline]
+    pub fn is_linked(&self) -> bool {
+        let raw = self.as_raw();
+        // SAFETY: `self.as_raw()` is valid per the type invariants.
+        unsafe { (*raw).next != raw && (*raw).prev != raw }
+    }
+
+    /// Fallible pin-initializer that initializes the head and then calls a user closure.
+    ///
+    /// Initializes the list head first, then passes `&CListHead` to the closure.
+    /// This hides the raw FFI pointer from the user.
+    pub fn try_init<E>(
+        init_func: impl FnOnce(&CListHead) -> Result<(), E>,
+    ) -> impl PinInit<Self, E> {
+        // SAFETY: `init_list_head` initializes the `list_head` to point to itself.
+        // After initialization, we create a reference to pass to the closure.
+        unsafe {
+            pin_init::pin_init_from_closure(move |slot: *mut Self| {
+                init_list_head(slot.cast());
+                // SAFETY: `slot` is now initialized, so it is safe to create a reference.
+                init_func(&*slot)
+            })
+        }
+    }
+}
+
+// SAFETY: [`CListHead`] can be sent to any thread.
+unsafe impl Send for CListHead {}
+
+// SAFETY: [`CListHead`] can be shared among threads as it is not modified
+// by non-Rust code per the type invariants.
+unsafe impl Sync for CListHead {}
+
+impl PartialEq for CListHead {
+    fn eq(&self, other: &Self) -> bool {
+        self.as_raw() == other.as_raw()
+    }
+}
+
+impl Eq for CListHead {}
+
+/// Low-level iterator over `list_head` nodes.
+///
+/// An iterator used to iterate over a C intrusive linked list (`list_head`). The caller has to
+/// convert each returned [`CListHead`] to an item (using the `container_of` macro or similar).
+///
+/// # Invariants
+///
+/// [`CListHeadIter`] iterates over an allocated, initialized and valid list.
+struct CListHeadIter<'a> {
+    current_head: &'a CListHead,
+    list_head: &'a CListHead,
+}
+
+impl<'a> Iterator for CListHeadIter<'a> {
+    type Item = &'a CListHead;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        // Advance to the next node.
+        let next = self.current_head.next();
+
+        // Check if we've circled back to the sentinel head.
+        if next == self.list_head {
+            None
+        } else {
+            self.current_head = next;
+            Some(self.current_head)
+        }
+    }
+}
+
+impl<'a> FusedIterator for CListHeadIter<'a> {}
+
+/// A typed C linked list with a sentinel head.
+///
+/// A sentinel head represents the entire linked list and can be used for
+/// iteration over items of type `T`; it is not associated with a specific item.
+///
+/// The const generic `OFFSET` specifies the byte offset of the `list_head` field within
+/// the struct that `T` wraps.
+///
+/// # Invariants
+///
+/// - `head` is an allocated and valid C `list_head` structure that is the list's sentinel.
+/// - `OFFSET` is the byte offset of the `list_head` field within the struct that `T` wraps.
+/// - All the list's `list_head` nodes are allocated and have valid next/prev pointers.
+/// - The underlying `list_head` (and the entire list) is not modified for the lifetime `'a`.
+pub struct CList<'a, T, const OFFSET: usize> {
+    head: &'a CListHead,
+    _phantom: PhantomData<&'a T>,
+}
+
+impl<'a, T, const OFFSET: usize> CList<'a, T, OFFSET> {
+    /// Create a typed [`CList`] from a raw sentinel `list_head` pointer.
+    ///
+    /// # Safety
+    ///
+    /// - `ptr` must be a valid pointer to an allocated and initialized `list_head` structure
+    ///   representing a list sentinel.
+    /// - `ptr` must remain valid and unmodified for the lifetime `'a`.
+    /// - The list must contain items whose `list_head` field is at byte offset `OFFSET`.
+    /// - `T` must be `#[repr(transparent)]` over the C struct.
+    #[inline]
+    pub unsafe fn from_raw(ptr: *mut bindings::list_head) -> Self {
+        Self {
+            // SAFETY: The caller guarantees `ptr` is a valid, sentinel `list_head` object.
+            head: unsafe { CListHead::from_raw(ptr) },
+            _phantom: PhantomData,
+        }
+    }
+
+    /// Get the raw sentinel `list_head` pointer.
+    #[inline]
+    pub fn as_raw(&self) -> *mut bindings::list_head {
+        self.head.as_raw()
+    }
+
+    /// Check if the list is empty.
+    #[inline]
+    pub fn is_empty(&self) -> bool {
+        let raw = self.as_raw();
+        // SAFETY: `self.as_raw()` is valid per the type invariants.
+        unsafe { (*raw).next == raw }
+    }
+
+    /// Create an iterator over typed items.
+    #[inline]
+    pub fn iter(&self) -> CListIter<'a, T, OFFSET> {
+        CListIter {
+            head_iter: CListHeadIter {
+                current_head: self.head,
+                list_head: self.head,
+            },
+            _phantom: PhantomData,
+        }
+    }
+}
+
+/// High-level iterator over typed list items.
+pub struct CListIter<'a, T, const OFFSET: usize> {
+    head_iter: CListHeadIter<'a>,
+    _phantom: PhantomData<&'a T>,
+}
+
+impl<'a, T, const OFFSET: usize> Iterator for CListIter<'a, T, OFFSET> {
+    type Item = &'a T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        let head = self.head_iter.next()?;
+
+        // Convert to an item using `OFFSET`.
+        // SAFETY: The item pointer calculation from `OFFSET` (computed using `offset_of!`)
+        // is valid per the invariants.
+        Some(unsafe { &*head.as_raw().byte_sub(OFFSET).cast::<T>() })
+    }
+}
+
+impl<'a, T, const OFFSET: usize> FusedIterator for CListIter<'a, T, OFFSET> {}
+
+/// Create a C circular doubly linked list interface [`CList`] from a raw `list_head` pointer.
+///
+/// This macro creates a [`CList`] that can iterate over items of type `$rust_type`
+/// linked via the `$field` field in the underlying C struct `$c_type`.
+///
+/// # Arguments
+///
+/// - `$head`: Raw pointer to the sentinel `list_head` object (`*mut bindings::list_head`).
+/// - `$rust_type`: Each item's Rust wrapper type.
+/// - `$c_type`: Each item's C struct type that contains the embedded `list_head`.
+/// - `$field`: The name of the `list_head` field within the C struct.
+///
+/// # Safety
+///
+/// The caller must ensure:
+/// - `$head` is a valid, initialized sentinel `list_head` pointing to a list that remains
+///   unmodified for the lifetime of the Rust [`CList`].
+/// - The list contains items of type `$c_type` linked via an embedded `$field`.
+/// - `$rust_type` is `#[repr(transparent)]` over `$c_type` or has a compatible layout.
+/// - The macro is called from an unsafe block.
+///
+/// # Examples
+///
+/// Refer to the examples in the [`crate::clist`] module documentation.
+#[macro_export]
+macro_rules! clist_create {
+    ($head:expr, $rust_type:ty, $c_type:ty, $($field:tt).+) => {{
+        // Compile-time check that the field path is a `list_head`.
+        let _: fn(*const $c_type) -> *const $crate::bindings::list_head =
+            |p| ::core::ptr::addr_of!((*p).$($field).+);
+
+        // Calculate the offset and create the `CList`.
+        const OFFSET: usize = ::core::mem::offset_of!($c_type, $($field).+);
+        $crate::clist::CList::<$rust_type, OFFSET>::from_raw($head)
+    }};
+}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index f812cf120042..cd7e6a1055b0 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -75,6 +75,7 @@
 pub mod bug;
 #[doc(hidden)]
 pub mod build_assert;
+pub mod clist;
 pub mod clk;
 #[cfg(CONFIG_CONFIGFS_FS)]
 pub mod configfs;
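For readers coming from the C side, the list consumed by the doctest above is
built with the ordinary `list_head` pattern. The following is a minimal sketch
only, not part of this patch; `struct sample_item`, `sample_items` and
`sample_add()` are hypothetical names mirroring the doctest's `SampleItemC`:

    /* Hypothetical C-side producer of a list later wrapped by clist_create!(). */
    #include <linux/list.h>
    #include <linux/slab.h>

    struct sample_item {
            int value;
            struct list_head link;  /* embedded node; its byte offset is what OFFSET captures */
    };

    static LIST_HEAD(sample_items); /* sentinel head; points to itself when empty */

    static int sample_add(int value)
    {
            struct sample_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

            if (!item)
                    return -ENOMEM;

            item->value = value;
            /* Append before the sentinel, i.e. at the tail of the list. */
            list_add_tail(&item->link, &sample_items);
            return 0;
    }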
-- 
2.34.1

From nobody Sun Feb 8 06:56:18 2026
From: Joel Fernandes
To: Maarten Lankhorst, Maxime Ripard, Thomas Zimmermann, David Airlie,
 Simona Vetter, Jonathan Corbet, Alex Deucher, Christian König,
 Jani Nikula, Joonas Lahtinen, Rodrigo Vivi, Tvrtko Ursulin, Huang Rui,
 Matthew Auld, Matthew Brost, Lucas De Marchi, Thomas Hellström,
 Helge Deller
Cc: Danilo Krummrich, Alice Ryhl, Miguel Ojeda, Alex Gaynor, Boqun Feng,
 Gary Guo, Björn Roy Baron, Benno Lossin, Andreas Hindborg, Trevor Gross,
 John Hubbard, Alistair Popple, Timur Tabi, Edwin Peer, Alexandre Courbot,
 Andrea Righi, Philipp Stanner, Elle Rhumsaa, Daniel Almeida,
 joel@joelfernandes.org, nouveau@lists.freedesktop.org,
 dri-devel@lists.freedesktop.org, rust-for-linux@vger.kernel.org,
 linux-kernel@vger.kernel.org, linux-doc@vger.kernel.org,
 amd-gfx@lists.freedesktop.org, intel-gfx@lists.freedesktop.org,
 intel-xe@lists.freedesktop.org, linux-fbdev@vger.kernel.org,
 Joel Fernandes
Subject: [PATCH RFC v5 2/6] gpu: Move DRM buddy allocator one level up
Date: Fri, 19 Dec 2025 15:38:01 -0500
Message-Id: <20251219203805.1246586-3-joelagnelf@nvidia.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20251219203805.1246586-1-joelagnelf@nvidia.com>
References: <20251219203805.1246586-1-joelagnelf@nvidia.com>
Move the DRM buddy allocator one level up so that it can be used by GPU
drivers (for example, nova-core) that have use cases other than DRM
(such as VFIO vGPU support). Modify the API, structures and Kconfigs to
use "gpu_buddy" terminology, and adapt the drivers and tests to the new
API.

This commit cannot be split further without breaking bisectability;
however, no functional change is intended. Verified by running the
KUnit tests and by build-testing various configurations.
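To illustrate the renamed interface, a minimal consumer would look roughly
like the sketch below. This is illustrative only: the allocation entry point
is elided because it lies outside this excerpt, and the 8 MiB size is an
arbitrary assumption, not anything mandated by the allocator.

    struct gpu_buddy mm;
    LIST_HEAD(blocks);
    int err;

    /* Manage 8 MiB in 4 KiB minimum chunks. */
    err = gpu_buddy_init(&mm, SZ_8M, SZ_4K);
    if (err)
            return err;

    /* ... allocate blocks onto &blocks via the allocator's alloc entry point ... */

    /* Return the blocks; without GPU_BUDDY_CLEARED they are marked dirty. */
    gpu_buddy_free_list(&mm, &blocks, 0);
    gpu_buddy_fini(&mm);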
Signed-off-by: Joel Fernandes
---
 Documentation/gpu/drm-mm.rst                  |   10 +-
 drivers/gpu/Kconfig                           |   13 +
 drivers/gpu/Makefile                          |    2 +
 drivers/gpu/buddy.c                           | 1310 +++++++++++++++++
 drivers/gpu/drm/Kconfig                       |    1 +
 drivers/gpu/drm/Kconfig.debug                 |    4 +-
 drivers/gpu/drm/amd/amdgpu/Kconfig            |    1 +
 drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c       |    2 +-
 .../gpu/drm/amd/amdgpu/amdgpu_res_cursor.h    |   12 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c  |   80 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h  |   20 +-
 drivers/gpu/drm/drm_buddy.c                   | 1287 +---------------
 drivers/gpu/drm/i915/Kconfig                  |    1 +
 drivers/gpu/drm/i915/i915_scatterlist.c       |   10 +-
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c |   55 +-
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.h |    6 +-
 .../drm/i915/selftests/intel_memory_region.c  |   20 +-
 drivers/gpu/drm/tests/Makefile                |    1 -
 .../gpu/drm/ttm/tests/ttm_bo_validate_test.c  |    5 +-
 drivers/gpu/drm/ttm/tests/ttm_mock_manager.c  |   18 +-
 drivers/gpu/drm/ttm/tests/ttm_mock_manager.h  |    4 +-
 drivers/gpu/drm/xe/Kconfig                    |    1 +
 drivers/gpu/drm/xe/xe_res_cursor.h            |   34 +-
 drivers/gpu/drm/xe/xe_svm.c                   |   12 +-
 drivers/gpu/drm/xe/xe_ttm_vram_mgr.c          |   73 +-
 drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h    |    4 +-
 drivers/gpu/tests/Makefile                    |    3 +
 .../gpu_buddy_test.c}                         |  390 ++---
 drivers/gpu/tests/gpu_random.c                |   48 +
 drivers/gpu/tests/gpu_random.h                |   28 +
 drivers/video/Kconfig                         |    2 +
 include/drm/drm_buddy.h                       |  163 +-
 include/linux/gpu_buddy.h                     |  177 +++
 33 files changed, 1997 insertions(+), 1800 deletions(-)
 create mode 100644 drivers/gpu/Kconfig
 create mode 100644 drivers/gpu/buddy.c
 create mode 100644 drivers/gpu/tests/Makefile
 rename drivers/gpu/{drm/tests/drm_buddy_test.c => tests/gpu_buddy_test.c} (68%)
 create mode 100644 drivers/gpu/tests/gpu_random.c
 create mode 100644 drivers/gpu/tests/gpu_random.h
 create mode 100644 include/linux/gpu_buddy.h

diff --git a/Documentation/gpu/drm-mm.rst b/Documentation/gpu/drm-mm.rst
index d55751cad67c..8e0d31230b29 100644
--- a/Documentation/gpu/drm-mm.rst
+++ b/Documentation/gpu/drm-mm.rst
@@ -509,8 +509,14 @@ DRM GPUVM Function References
 DRM Buddy Allocator
 ===================
 
-DRM Buddy Function References
------------------------------
+Buddy Allocator Function References (GPU buddy)
+-----------------------------------------------
+
+.. kernel-doc:: drivers/gpu/buddy.c
+   :export:
+
+DRM Buddy Specific Logging Function References
+----------------------------------------------
 
 .. kernel-doc:: drivers/gpu/drm/drm_buddy.c
    :export:
diff --git a/drivers/gpu/Kconfig b/drivers/gpu/Kconfig
new file mode 100644
index 000000000000..59bac03b0df6
--- /dev/null
+++ b/drivers/gpu/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+
+config GPU_BUDDY
+        tristate
+        help
+          A page-based buddy allocator for GPU memory.
+
+config GPU_BUDDY_KUNIT_TEST
+        tristate "KUnit tests for GPU buddy allocator" if !KUNIT_ALL_TESTS
+        depends on GPU_BUDDY && KUNIT
+        default KUNIT_ALL_TESTS
+        help
+          KUnit tests for the GPU buddy allocator.
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index 36a54d456630..5063caccabdf 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -6,3 +6,5 @@ obj-y += host1x/ drm/ vga/
 obj-$(CONFIG_IMX_IPUV3_CORE) += ipu-v3/
 obj-$(CONFIG_TRACE_GPU_MEM) += trace/
 obj-$(CONFIG_NOVA_CORE) += nova-core/
+obj-$(CONFIG_GPU_BUDDY) += buddy.o
+obj-y += tests/
diff --git a/drivers/gpu/buddy.c b/drivers/gpu/buddy.c
new file mode 100644
index 000000000000..1347c0436617
--- /dev/null
+++ b/drivers/gpu/buddy.c
@@ -0,0 +1,1310 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+static struct kmem_cache *slab_blocks;
+
+static struct gpu_buddy_block *gpu_block_alloc(struct gpu_buddy *mm,
+                                               struct gpu_buddy_block *parent,
+                                               unsigned int order,
+                                               u64 offset)
+{
+        struct gpu_buddy_block *block;
+
+        BUG_ON(order > GPU_BUDDY_MAX_ORDER);
+
+        block = kmem_cache_zalloc(slab_blocks, GFP_KERNEL);
+        if (!block)
+                return NULL;
+
+        block->header = offset;
+        block->header |= order;
+        block->parent = parent;
+
+        RB_CLEAR_NODE(&block->rb);
+
+        BUG_ON(block->header & GPU_BUDDY_HEADER_UNUSED);
+        return block;
+}
+
+static void gpu_block_free(struct gpu_buddy *mm,
+                           struct gpu_buddy_block *block)
+{
+        kmem_cache_free(slab_blocks, block);
+}
+
+static enum gpu_buddy_free_tree
+get_block_tree(struct gpu_buddy_block *block)
+{
+        return gpu_buddy_block_is_clear(block) ?
+               GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE;
+}
+
+static struct gpu_buddy_block *
+rbtree_get_free_block(const struct rb_node *node)
+{
+        return node ? rb_entry(node, struct gpu_buddy_block, rb) : NULL;
+}
+
+static struct gpu_buddy_block *
+rbtree_last_free_block(struct rb_root *root)
+{
+        return rbtree_get_free_block(rb_last(root));
+}
+
+static bool rbtree_is_empty(struct rb_root *root)
+{
+        return RB_EMPTY_ROOT(root);
+}
+
+static bool gpu_buddy_block_offset_less(const struct gpu_buddy_block *block,
+                                        const struct gpu_buddy_block *node)
+{
+        return gpu_buddy_block_offset(block) < gpu_buddy_block_offset(node);
+}
+
+static bool rbtree_block_offset_less(struct rb_node *block,
+                                     const struct rb_node *node)
+{
+        return gpu_buddy_block_offset_less(rbtree_get_free_block(block),
+                                           rbtree_get_free_block(node));
+}
+
+static void rbtree_insert(struct gpu_buddy *mm,
+                          struct gpu_buddy_block *block,
+                          enum gpu_buddy_free_tree tree)
+{
+        rb_add(&block->rb,
+               &mm->free_trees[tree][gpu_buddy_block_order(block)],
+               rbtree_block_offset_less);
+}
+
+static void rbtree_remove(struct gpu_buddy *mm,
+                          struct gpu_buddy_block *block)
+{
+        unsigned int order = gpu_buddy_block_order(block);
+        enum gpu_buddy_free_tree tree;
+        struct rb_root *root;
+
+        tree = get_block_tree(block);
+        root = &mm->free_trees[tree][order];
+
+        rb_erase(&block->rb, root);
+        RB_CLEAR_NODE(&block->rb);
+}
+
+static void clear_reset(struct gpu_buddy_block *block)
+{
+        block->header &= ~GPU_BUDDY_HEADER_CLEAR;
+}
+
+static void mark_cleared(struct gpu_buddy_block *block)
+{
+        block->header |= GPU_BUDDY_HEADER_CLEAR;
+}
+
+static void mark_allocated(struct gpu_buddy *mm,
+                           struct gpu_buddy_block *block)
+{
+        block->header &= ~GPU_BUDDY_HEADER_STATE;
+        block->header |= GPU_BUDDY_ALLOCATED;
+
+        rbtree_remove(mm, block);
+}
+
+static void mark_free(struct gpu_buddy *mm,
+                      struct gpu_buddy_block *block)
+{
+        enum gpu_buddy_free_tree tree;
+
+        block->header &= ~GPU_BUDDY_HEADER_STATE;
+        block->header |= GPU_BUDDY_FREE;
+
+        tree = get_block_tree(block);
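+        /* Free blocks are kept per order in one of two rbtrees: clear or dirty. */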
+        rbtree_insert(mm, block, tree);
+}
+
+static void mark_split(struct gpu_buddy *mm,
+                       struct gpu_buddy_block *block)
+{
+        block->header &= ~GPU_BUDDY_HEADER_STATE;
+        block->header |= GPU_BUDDY_SPLIT;
+
+        rbtree_remove(mm, block);
+}
+
+static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+        return s1 <= e2 && e1 >= s2;
+}
+
+static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
+{
+        return s1 <= s2 && e1 >= e2;
+}
+
+static struct gpu_buddy_block *
+__get_buddy(struct gpu_buddy_block *block)
+{
+        struct gpu_buddy_block *parent;
+
+        parent = block->parent;
+        if (!parent)
+                return NULL;
+
+        if (parent->left == block)
+                return parent->right;
+
+        return parent->left;
+}
+
+static unsigned int __gpu_buddy_free(struct gpu_buddy *mm,
+                                     struct gpu_buddy_block *block,
+                                     bool force_merge)
+{
+        struct gpu_buddy_block *parent;
+        unsigned int order;
+
+        while ((parent = block->parent)) {
+                struct gpu_buddy_block *buddy;
+
+                buddy = __get_buddy(block);
+
+                if (!gpu_buddy_block_is_free(buddy))
+                        break;
+
+                if (!force_merge) {
+                        /*
+                         * Check the clear state of the block and its buddy, and
+                         * exit the loop if their states differ.
+                         */
+                        if (gpu_buddy_block_is_clear(block) !=
+                            gpu_buddy_block_is_clear(buddy))
+                                break;
+
+                        if (gpu_buddy_block_is_clear(block))
+                                mark_cleared(parent);
+                }
+
+                rbtree_remove(mm, buddy);
+                if (force_merge && gpu_buddy_block_is_clear(buddy))
+                        mm->clear_avail -= gpu_buddy_block_size(mm, buddy);
+
+                gpu_block_free(mm, block);
+                gpu_block_free(mm, buddy);
+
+                block = parent;
+        }
+
+        order = gpu_buddy_block_order(block);
+        mark_free(mm, block);
+
+        return order;
+}
+
+static int __force_merge(struct gpu_buddy *mm,
+                         u64 start,
+                         u64 end,
+                         unsigned int min_order)
+{
+        unsigned int tree, order;
+        int i;
+
+        if (!min_order)
+                return -ENOMEM;
+
+        if (min_order > mm->max_order)
+                return -EINVAL;
+
+        for_each_free_tree(tree) {
+                for (i = min_order - 1; i >= 0; i--) {
+                        struct rb_node *iter = rb_last(&mm->free_trees[tree][i]);
+
+                        while (iter) {
+                                struct gpu_buddy_block *block, *buddy;
+                                u64 block_start, block_end;
+
+                                block = rbtree_get_free_block(iter);
+                                iter = rb_prev(iter);
+
+                                if (!block || !block->parent)
+                                        continue;
+
+                                block_start = gpu_buddy_block_offset(block);
+                                block_end = block_start + gpu_buddy_block_size(mm, block) - 1;
+
+                                if (!contains(start, end, block_start, block_end))
+                                        continue;
+
+                                buddy = __get_buddy(block);
+                                if (!gpu_buddy_block_is_free(buddy))
+                                        continue;
+
+                                WARN_ON(gpu_buddy_block_is_clear(block) ==
+                                        gpu_buddy_block_is_clear(buddy));
+
+                                /*
+                                 * Advance to the next node when the current node is the buddy,
+                                 * as freeing the block will also remove its buddy from the tree.
+                                 */
+                                if (iter == &buddy->rb)
+                                        iter = rb_prev(iter);
+
+                                rbtree_remove(mm, block);
+                                if (gpu_buddy_block_is_clear(block))
+                                        mm->clear_avail -= gpu_buddy_block_size(mm, block);
+
+                                order = __gpu_buddy_free(mm, block, true);
+                                if (order >= min_order)
+                                        return 0;
+                        }
+                }
+        }
+
+        return -ENOMEM;
+}
+
+/**
+ * gpu_buddy_init - init memory manager
+ *
+ * @mm: GPU buddy manager to initialize
+ * @size: size in bytes to manage
+ * @chunk_size: minimum page size in bytes for our allocations
+ *
+ * Initializes the memory manager and its resources.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
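+ *
+ * The managed range is split into power-of-two root blocks, so @size does
+ * not itself need to be a power of two.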
+ */
+int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size)
+{
+        unsigned int i, j, root_count = 0;
+        u64 offset = 0;
+
+        if (size < chunk_size)
+                return -EINVAL;
+
+        if (chunk_size < SZ_4K)
+                return -EINVAL;
+
+        if (!is_power_of_2(chunk_size))
+                return -EINVAL;
+
+        size = round_down(size, chunk_size);
+
+        mm->size = size;
+        mm->avail = size;
+        mm->clear_avail = 0;
+        mm->chunk_size = chunk_size;
+        mm->max_order = ilog2(size) - ilog2(chunk_size);
+
+        BUG_ON(mm->max_order > GPU_BUDDY_MAX_ORDER);
+
+        mm->free_trees = kmalloc_array(GPU_BUDDY_MAX_FREE_TREES,
+                                       sizeof(*mm->free_trees),
+                                       GFP_KERNEL);
+        if (!mm->free_trees)
+                return -ENOMEM;
+
+        for_each_free_tree(i) {
+                mm->free_trees[i] = kmalloc_array(mm->max_order + 1,
+                                                  sizeof(struct rb_root),
+                                                  GFP_KERNEL);
+                if (!mm->free_trees[i])
+                        goto out_free_tree;
+
+                for (j = 0; j <= mm->max_order; ++j)
+                        mm->free_trees[i][j] = RB_ROOT;
+        }
+
+        mm->n_roots = hweight64(size);
+
+        mm->roots = kmalloc_array(mm->n_roots,
+                                  sizeof(struct gpu_buddy_block *),
+                                  GFP_KERNEL);
+        if (!mm->roots)
+                goto out_free_tree;
+
+        /*
+         * Split into power-of-two blocks, in case we are given a size that is
+         * not itself a power-of-two.
+         */
+        do {
+                struct gpu_buddy_block *root;
+                unsigned int order;
+                u64 root_size;
+
+                order = ilog2(size) - ilog2(chunk_size);
+                root_size = chunk_size << order;
+
+                root = gpu_block_alloc(mm, NULL, order, offset);
+                if (!root)
+                        goto out_free_roots;
+
+                mark_free(mm, root);
+
+                BUG_ON(root_count > mm->max_order);
+                BUG_ON(gpu_buddy_block_size(mm, root) < chunk_size);
+
+                mm->roots[root_count] = root;
+
+                offset += root_size;
+                size -= root_size;
+                root_count++;
+        } while (size);
+
+        return 0;
+
+out_free_roots:
+        while (root_count--)
+                gpu_block_free(mm, mm->roots[root_count]);
+        kfree(mm->roots);
+out_free_tree:
+        while (i--)
+                kfree(mm->free_trees[i]);
+        kfree(mm->free_trees);
+        return -ENOMEM;
+}
+EXPORT_SYMBOL(gpu_buddy_init);
+
+/**
+ * gpu_buddy_fini - tear down the memory manager
+ *
+ * @mm: GPU buddy manager to free
+ *
+ * Clean up the memory manager's resources and the free trees.
+ */
+void gpu_buddy_fini(struct gpu_buddy *mm)
+{
+        u64 root_size, size, start;
+        unsigned int order;
+        int i;
+
+        size = mm->size;
+
+        for (i = 0; i < mm->n_roots; ++i) {
+                order = ilog2(size) - ilog2(mm->chunk_size);
+                start = gpu_buddy_block_offset(mm->roots[i]);
+                __force_merge(mm, start, start + size, order);
+
+                if (WARN_ON(!gpu_buddy_block_is_free(mm->roots[i])))
+                        kunit_fail_current_test("buddy_fini() root");
+
+                gpu_block_free(mm, mm->roots[i]);
+
+                root_size = mm->chunk_size << order;
+                size -= root_size;
+        }
+
+        WARN_ON(mm->avail != mm->size);
+
+        for_each_free_tree(i)
+                kfree(mm->free_trees[i]);
+        kfree(mm->roots);
+}
+EXPORT_SYMBOL(gpu_buddy_fini);
+
+static int split_block(struct gpu_buddy *mm,
+                       struct gpu_buddy_block *block)
+{
+        unsigned int block_order = gpu_buddy_block_order(block) - 1;
+        u64 offset = gpu_buddy_block_offset(block);
+
+        BUG_ON(!gpu_buddy_block_is_free(block));
+        BUG_ON(!gpu_buddy_block_order(block));
+
+        block->left = gpu_block_alloc(mm, block, block_order, offset);
+        if (!block->left)
+                return -ENOMEM;
+
+        block->right = gpu_block_alloc(mm, block, block_order,
+                                       offset + (mm->chunk_size << block_order));
+        if (!block->right) {
+                gpu_block_free(mm, block->left);
+                return -ENOMEM;
+        }
+
+        mark_split(mm, block);
+
+        if (gpu_buddy_block_is_clear(block)) {
+                mark_cleared(block->left);
+                mark_cleared(block->right);
+                clear_reset(block);
+        }
+
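+        /* Both halves inherit the parent's clear state and rejoin the free trees. */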
+        mark_free(mm, block->left);
+        mark_free(mm, block->right);
+
+        return 0;
+}
+
+/**
+ * gpu_get_buddy - get buddy address
+ *
+ * @block: GPU buddy block
+ *
+ * Returns the corresponding buddy block for @block, or NULL
+ * if this is a root block and can't be merged further.
+ * Requires some kind of locking to protect against
+ * any concurrent allocate and free operations.
+ */
+struct gpu_buddy_block *
+gpu_get_buddy(struct gpu_buddy_block *block)
+{
+        return __get_buddy(block);
+}
+EXPORT_SYMBOL(gpu_get_buddy);
+
+/**
+ * gpu_buddy_reset_clear - reset blocks clear state
+ *
+ * @mm: GPU buddy manager
+ * @is_clear: blocks clear state
+ *
+ * Reset the clear state based on @is_clear value for each block
+ * in the freetree.
+ */
+void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear)
+{
+        enum gpu_buddy_free_tree src_tree, dst_tree;
+        u64 root_size, size, start;
+        unsigned int order;
+        int i;
+
+        size = mm->size;
+        for (i = 0; i < mm->n_roots; ++i) {
+                order = ilog2(size) - ilog2(mm->chunk_size);
+                start = gpu_buddy_block_offset(mm->roots[i]);
+                __force_merge(mm, start, start + size, order);
+
+                root_size = mm->chunk_size << order;
+                size -= root_size;
+        }
+
+        src_tree = is_clear ? GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE;
+        dst_tree = is_clear ? GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE;
+
+        for (i = 0; i <= mm->max_order; ++i) {
+                struct rb_root *root = &mm->free_trees[src_tree][i];
+                struct gpu_buddy_block *block, *tmp;
+
+                rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+                        rbtree_remove(mm, block);
+                        if (is_clear) {
+                                mark_cleared(block);
+                                mm->clear_avail += gpu_buddy_block_size(mm, block);
+                        } else {
+                                clear_reset(block);
+                                mm->clear_avail -= gpu_buddy_block_size(mm, block);
+                        }
+
+                        rbtree_insert(mm, block, dst_tree);
+                }
+        }
+}
+EXPORT_SYMBOL(gpu_buddy_reset_clear);
+
+/**
+ * gpu_buddy_free_block - free a block
+ *
+ * @mm: GPU buddy manager
+ * @block: block to be freed
+ */
+void gpu_buddy_free_block(struct gpu_buddy *mm,
+                          struct gpu_buddy_block *block)
+{
+        BUG_ON(!gpu_buddy_block_is_allocated(block));
+        mm->avail += gpu_buddy_block_size(mm, block);
+        if (gpu_buddy_block_is_clear(block))
+                mm->clear_avail += gpu_buddy_block_size(mm, block);
+
+        __gpu_buddy_free(mm, block, false);
+}
+EXPORT_SYMBOL(gpu_buddy_free_block);
+
+static void __gpu_buddy_free_list(struct gpu_buddy *mm,
+                                  struct list_head *objects,
+                                  bool mark_clear,
+                                  bool mark_dirty)
+{
+        struct gpu_buddy_block *block, *on;
+
+        WARN_ON(mark_dirty && mark_clear);
+
+        list_for_each_entry_safe(block, on, objects, link) {
+                if (mark_clear)
+                        mark_cleared(block);
+                else if (mark_dirty)
+                        clear_reset(block);
+                gpu_buddy_free_block(mm, block);
+                cond_resched();
+        }
+        INIT_LIST_HEAD(objects);
+}
+
+static void gpu_buddy_free_list_internal(struct gpu_buddy *mm,
+                                         struct list_head *objects)
+{
+        /*
+         * Don't touch the clear/dirty bit, since allocation is still internal
+         * at this point. For example we might have just failed part of the
+         * allocation.
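+         * The caller-facing gpu_buddy_free_list() below commits the final
+         * clear/dirty state instead.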
+         */
+        __gpu_buddy_free_list(mm, objects, false, false);
+}
+
+/**
+ * gpu_buddy_free_list - free blocks
+ *
+ * @mm: GPU buddy manager
+ * @objects: input list head to free blocks
+ * @flags: optional flags like GPU_BUDDY_CLEARED
+ */
+void gpu_buddy_free_list(struct gpu_buddy *mm,
+                         struct list_head *objects,
+                         unsigned int flags)
+{
+        bool mark_clear = flags & GPU_BUDDY_CLEARED;
+
+        __gpu_buddy_free_list(mm, objects, mark_clear, !mark_clear);
+}
+EXPORT_SYMBOL(gpu_buddy_free_list);
+
+static bool block_incompatible(struct gpu_buddy_block *block, unsigned int flags)
+{
+        bool needs_clear = flags & GPU_BUDDY_CLEAR_ALLOCATION;
+
+        return needs_clear != gpu_buddy_block_is_clear(block);
+}
+
+static struct gpu_buddy_block *
+__alloc_range_bias(struct gpu_buddy *mm,
+                   u64 start, u64 end,
+                   unsigned int order,
+                   unsigned long flags,
+                   bool fallback)
+{
+        u64 req_size = mm->chunk_size << order;
+        struct gpu_buddy_block *block;
+        struct gpu_buddy_block *buddy;
+        LIST_HEAD(dfs);
+        int err;
+        int i;
+
+        end = end - 1;
+
+        for (i = 0; i < mm->n_roots; ++i)
+                list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+        do {
+                u64 block_start;
+                u64 block_end;
+
+                block = list_first_entry_or_null(&dfs,
+                                                 struct gpu_buddy_block,
+                                                 tmp_link);
+                if (!block)
+                        break;
+
+                list_del(&block->tmp_link);
+
+                if (gpu_buddy_block_order(block) < order)
+                        continue;
+
+                block_start = gpu_buddy_block_offset(block);
+                block_end = block_start + gpu_buddy_block_size(mm, block) - 1;
+
+                if (!overlaps(start, end, block_start, block_end))
+                        continue;
+
+                if (gpu_buddy_block_is_allocated(block))
+                        continue;
+
+                if (block_start < start || block_end > end) {
+                        u64 adjusted_start = max(block_start, start);
+                        u64 adjusted_end = min(block_end, end);
+
+                        if (round_down(adjusted_end + 1, req_size) <=
+                            round_up(adjusted_start, req_size))
+                                continue;
+                }
+
+                if (!fallback && block_incompatible(block, flags))
+                        continue;
+
+                if (contains(start, end, block_start, block_end) &&
+                    order == gpu_buddy_block_order(block)) {
+                        /*
+                         * Find the free block within the range.
+                         */
+                        if (gpu_buddy_block_is_free(block))
+                                return block;
+
+                        continue;
+                }
+
+                if (!gpu_buddy_block_is_split(block)) {
+                        err = split_block(mm, block);
+                        if (unlikely(err))
+                                goto err_undo;
+                }
+
+                list_add(&block->right->tmp_link, &dfs);
+                list_add(&block->left->tmp_link, &dfs);
+        } while (1);
+
+        return ERR_PTR(-ENOSPC);
+
+err_undo:
+        /*
+         * We really don't want to leave around a bunch of split blocks, since
+         * bigger is better, so make sure we merge everything back before we
+         * free the allocated blocks.
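+         * Leaving split nodes behind would fragment the pool and defeat
+         * later higher-order allocations.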
+ */ + buddy =3D __get_buddy(block); + if (buddy && + (gpu_buddy_block_is_free(block) && + gpu_buddy_block_is_free(buddy))) + __gpu_buddy_free(mm, block, false); + return ERR_PTR(err); +} + +static struct gpu_buddy_block * +__gpu_buddy_alloc_range_bias(struct gpu_buddy *mm, + u64 start, u64 end, + unsigned int order, + unsigned long flags) +{ + struct gpu_buddy_block *block; + bool fallback =3D false; + + block =3D __alloc_range_bias(mm, start, end, order, + flags, fallback); + if (IS_ERR(block)) + return __alloc_range_bias(mm, start, end, order, + flags, !fallback); + + return block; +} + +static struct gpu_buddy_block * +get_maxblock(struct gpu_buddy *mm, + unsigned int order, + enum gpu_buddy_free_tree tree) +{ + struct gpu_buddy_block *max_block =3D NULL, *block =3D NULL; + struct rb_root *root; + unsigned int i; + + for (i =3D order; i <=3D mm->max_order; ++i) { + root =3D &mm->free_trees[tree][i]; + block =3D rbtree_last_free_block(root); + if (!block) + continue; + + if (!max_block) { + max_block =3D block; + continue; + } + + if (gpu_buddy_block_offset(block) > + gpu_buddy_block_offset(max_block)) { + max_block =3D block; + } + } + + return max_block; +} + +static struct gpu_buddy_block * +alloc_from_freetree(struct gpu_buddy *mm, + unsigned int order, + unsigned long flags) +{ + struct gpu_buddy_block *block =3D NULL; + struct rb_root *root; + enum gpu_buddy_free_tree tree; + unsigned int tmp; + int err; + + tree =3D (flags & GPU_BUDDY_CLEAR_ALLOCATION) ? + GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE; + + if (flags & GPU_BUDDY_TOPDOWN_ALLOCATION) { + block =3D get_maxblock(mm, order, tree); + if (block) + /* Store the obtained block order */ + tmp =3D gpu_buddy_block_order(block); + } else { + for (tmp =3D order; tmp <=3D mm->max_order; ++tmp) { + /* Get RB tree root for this order and tree */ + root =3D &mm->free_trees[tree][tmp]; + block =3D rbtree_last_free_block(root); + if (block) + break; + } + } + + if (!block) { + /* Try allocating from the other tree */ + tree =3D (tree =3D=3D GPU_BUDDY_CLEAR_TREE) ? 
+ GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE; + + for (tmp =3D order; tmp <=3D mm->max_order; ++tmp) { + root =3D &mm->free_trees[tree][tmp]; + block =3D rbtree_last_free_block(root); + if (block) + break; + } + + if (!block) + return ERR_PTR(-ENOSPC); + } + + BUG_ON(!gpu_buddy_block_is_free(block)); + + while (tmp !=3D order) { + err =3D split_block(mm, block); + if (unlikely(err)) + goto err_undo; + + block =3D block->right; + tmp--; + } + return block; + +err_undo: + if (tmp !=3D order) + __gpu_buddy_free(mm, block, false); + return ERR_PTR(err); +} + +static int __alloc_range(struct gpu_buddy *mm, + struct list_head *dfs, + u64 start, u64 size, + struct list_head *blocks, + u64 *total_allocated_on_err) +{ + struct gpu_buddy_block *block; + struct gpu_buddy_block *buddy; + u64 total_allocated =3D 0; + LIST_HEAD(allocated); + u64 end; + int err; + + end =3D start + size - 1; + + do { + u64 block_start; + u64 block_end; + + block =3D list_first_entry_or_null(dfs, + struct gpu_buddy_block, + tmp_link); + if (!block) + break; + + list_del(&block->tmp_link); + + block_start =3D gpu_buddy_block_offset(block); + block_end =3D block_start + gpu_buddy_block_size(mm, block) - 1; + + if (!overlaps(start, end, block_start, block_end)) + continue; + + if (gpu_buddy_block_is_allocated(block)) { + err =3D -ENOSPC; + goto err_free; + } + + if (contains(start, end, block_start, block_end)) { + if (gpu_buddy_block_is_free(block)) { + mark_allocated(mm, block); + total_allocated +=3D gpu_buddy_block_size(mm, block); + mm->avail -=3D gpu_buddy_block_size(mm, block); + if (gpu_buddy_block_is_clear(block)) + mm->clear_avail -=3D gpu_buddy_block_size(mm, block); + list_add_tail(&block->link, &allocated); + continue; + } else if (!mm->clear_avail) { + err =3D -ENOSPC; + goto err_free; + } + } + + if (!gpu_buddy_block_is_split(block)) { + err =3D split_block(mm, block); + if (unlikely(err)) + goto err_undo; + } + + list_add(&block->right->tmp_link, dfs); + list_add(&block->left->tmp_link, dfs); + } while (1); + + if (total_allocated < size) { + err =3D -ENOSPC; + goto err_free; + } + + list_splice_tail(&allocated, blocks); + + return 0; + +err_undo: + /* + * We really don't want to leave around a bunch of split blocks, since + * bigger is better, so make sure we merge everything back before we + * free the allocated blocks. 
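
For reference, an exact-range reservation sketch in the style of
amdgpu_vram_mgr_do_reserve() later in this patch; rsv_start and rsv_size are
hypothetical. Because start + size == end, gpu_buddy_alloc_blocks() (defined
later in this file) routes the request through __alloc_range() above and either
claims the whole range or fails:

	u64 rsv_start, rsv_size;	/* hypothetical reserved region */
	LIST_HEAD(reserved);
	int err;

	err = gpu_buddy_alloc_blocks(mm, rsv_start, rsv_start + rsv_size,
				     rsv_size, mm->chunk_size, &reserved,
				     GPU_BUDDY_RANGE_ALLOCATION);
	if (err)
		/* part of the range is already allocated */
		return err;
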
+	 */
+	buddy = __get_buddy(block);
+	if (buddy &&
+	    (gpu_buddy_block_is_free(block) &&
+	     gpu_buddy_block_is_free(buddy)))
+		__gpu_buddy_free(mm, block, false);
+
+err_free:
+	if (err == -ENOSPC && total_allocated_on_err) {
+		list_splice_tail(&allocated, blocks);
+		*total_allocated_on_err = total_allocated;
+	} else {
+		gpu_buddy_free_list_internal(mm, &allocated);
+	}
+
+	return err;
+}
+
+static int __gpu_buddy_alloc_range(struct gpu_buddy *mm,
+				   u64 start,
+				   u64 size,
+				   u64 *total_allocated_on_err,
+				   struct list_head *blocks)
+{
+	LIST_HEAD(dfs);
+	int i;
+
+	for (i = 0; i < mm->n_roots; ++i)
+		list_add_tail(&mm->roots[i]->tmp_link, &dfs);
+
+	return __alloc_range(mm, &dfs, start, size,
+			     blocks, total_allocated_on_err);
+}
+
+static int __alloc_contig_try_harder(struct gpu_buddy *mm,
+				     u64 size,
+				     u64 min_block_size,
+				     struct list_head *blocks)
+{
+	u64 rhs_offset, lhs_offset, lhs_size, filled;
+	struct gpu_buddy_block *block;
+	unsigned int tree, order;
+	LIST_HEAD(blocks_lhs);
+	unsigned long pages;
+	u64 modify_size;
+	int err;
+
+	modify_size = rounddown_pow_of_two(size);
+	pages = modify_size >> ilog2(mm->chunk_size);
+	order = fls(pages) - 1;
+	if (order == 0)
+		return -ENOSPC;
+
+	for_each_free_tree(tree) {
+		struct rb_root *root;
+		struct rb_node *iter;
+
+		root = &mm->free_trees[tree][order];
+		if (rbtree_is_empty(root))
+			continue;
+
+		iter = rb_last(root);
+		while (iter) {
+			block = rbtree_get_free_block(iter);
+
+			/* Allocate blocks traversing RHS */
+			rhs_offset = gpu_buddy_block_offset(block);
+			err = __gpu_buddy_alloc_range(mm, rhs_offset, size,
+						      &filled, blocks);
+			if (!err || err != -ENOSPC)
+				return err;
+
+			lhs_size = max((size - filled), min_block_size);
+			if (!IS_ALIGNED(lhs_size, min_block_size))
+				lhs_size = round_up(lhs_size, min_block_size);
+
+			/* Allocate blocks traversing LHS */
+			lhs_offset = gpu_buddy_block_offset(block) - lhs_size;
+			err = __gpu_buddy_alloc_range(mm, lhs_offset, lhs_size,
+						      NULL, &blocks_lhs);
+			if (!err) {
+				list_splice(&blocks_lhs, blocks);
+				return 0;
+			} else if (err != -ENOSPC) {
+				gpu_buddy_free_list_internal(mm, blocks);
+				return err;
+			}
+			/* Free blocks for the next iteration */
+			gpu_buddy_free_list_internal(mm, blocks);
+
+			iter = rb_prev(iter);
+		}
+	}
+
+	return -ENOSPC;
+}
+
+/**
+ * gpu_buddy_block_trim - free unused pages
+ *
+ * @mm: GPU buddy manager
+ * @start: start address to begin the trimming.
+ * @new_size: original size requested
+ * @blocks: Input and output list of allocated blocks.
+ *          MUST contain a single block as input to be trimmed.
+ *          On success it will contain the newly allocated blocks
+ *          making up @new_size. Blocks always appear in
+ *          ascending order.
+ *
+ * For contiguous allocations we round the size up to the nearest
+ * power-of-two value; drivers consume the *actual* size, so the
+ * remaining portion is unused and can optionally be freed with
+ * this function.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
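
A hedged sketch of how a driver might use gpu_buddy_block_trim() after a
contiguous allocation; the 300 MiB figure is illustrative:

	/*
	 * @blocks holds exactly one 512 MiB block after a contiguous
	 * request for 300 MiB was rounded up; trim the unused tail.
	 */
	err = gpu_buddy_block_trim(mm, NULL, 300 * SZ_1M, &blocks);
	/* on failure the original block simply stays allocated in full */
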
+ */
+int gpu_buddy_block_trim(struct gpu_buddy *mm,
+			 u64 *start,
+			 u64 new_size,
+			 struct list_head *blocks)
+{
+	struct gpu_buddy_block *parent;
+	struct gpu_buddy_block *block;
+	u64 block_start, block_end;
+	LIST_HEAD(dfs);
+	u64 new_start;
+	int err;
+
+	if (!list_is_singular(blocks))
+		return -EINVAL;
+
+	block = list_first_entry(blocks,
+				 struct gpu_buddy_block,
+				 link);
+
+	block_start = gpu_buddy_block_offset(block);
+	block_end = block_start + gpu_buddy_block_size(mm, block);
+
+	if (WARN_ON(!gpu_buddy_block_is_allocated(block)))
+		return -EINVAL;
+
+	if (new_size > gpu_buddy_block_size(mm, block))
+		return -EINVAL;
+
+	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
+		return -EINVAL;
+
+	if (new_size == gpu_buddy_block_size(mm, block))
+		return 0;
+
+	new_start = block_start;
+	if (start) {
+		new_start = *start;
+
+		if (new_start < block_start)
+			return -EINVAL;
+
+		if (!IS_ALIGNED(new_start, mm->chunk_size))
+			return -EINVAL;
+
+		if (range_overflows(new_start, new_size, block_end))
+			return -EINVAL;
+	}
+
+	list_del(&block->link);
+	mark_free(mm, block);
+	mm->avail += gpu_buddy_block_size(mm, block);
+	if (gpu_buddy_block_is_clear(block))
+		mm->clear_avail += gpu_buddy_block_size(mm, block);
+
+	/* Prevent recursively freeing this node */
+	parent = block->parent;
+	block->parent = NULL;
+
+	list_add(&block->tmp_link, &dfs);
+	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
+	if (err) {
+		mark_allocated(mm, block);
+		mm->avail -= gpu_buddy_block_size(mm, block);
+		if (gpu_buddy_block_is_clear(block))
+			mm->clear_avail -= gpu_buddy_block_size(mm, block);
+		list_add(&block->link, blocks);
+	}
+
+	block->parent = parent;
+	return err;
+}
+EXPORT_SYMBOL(gpu_buddy_block_trim);
+
+static struct gpu_buddy_block *
+__gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
+			 u64 start, u64 end,
+			 unsigned int order,
+			 unsigned long flags)
+{
+	if (flags & GPU_BUDDY_RANGE_ALLOCATION)
+		/* Allocate traversing within the range */
+		return __gpu_buddy_alloc_range_bias(mm, start, end,
+						    order, flags);
+	else
+		/* Allocate from freetree */
+		return alloc_from_freetree(mm, order, flags);
+}
+
+/**
+ * gpu_buddy_alloc_blocks - allocate power-of-two blocks
+ *
+ * @mm: GPU buddy manager to allocate from
+ * @start: start of the allowed range for this block
+ * @end: end of the allowed range for this block
+ * @size: size of the allocation in bytes
+ * @min_block_size: alignment of the allocation
+ * @blocks: output list head to add allocated blocks
+ * @flags: GPU_BUDDY_*_ALLOCATION flags
+ *
+ * __alloc_range_bias() is called when range limitations are enforced;
+ * it traverses the tree and returns the desired block.
+ *
+ * alloc_from_freetree() is called when *no* range restrictions
+ * are enforced; it picks the block straight from the freetree.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
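
An end-to-end lifecycle sketch of the allocator, loosely following the amdgpu
conversion later in this patch (hypothetical sizes; assumes gpu_buddy_init()
keeps the SZ_4K minimum chunk size of the drm_buddy code it replaces):

	struct gpu_buddy mm;
	LIST_HEAD(blocks);
	int err;

	err = gpu_buddy_init(&mm, SZ_4G, SZ_4K);
	if (err)
		return err;

	/* 64 MiB, taken from the top of the managed range */
	err = gpu_buddy_alloc_blocks(&mm, 0, mm.size, SZ_64M, SZ_4K,
				     &blocks, GPU_BUDDY_TOPDOWN_ALLOCATION);
	if (!err)
		gpu_buddy_free_list(&mm, &blocks, 0);

	gpu_buddy_fini(&mm);
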
+ */ +int gpu_buddy_alloc_blocks(struct gpu_buddy *mm, + u64 start, u64 end, u64 size, + u64 min_block_size, + struct list_head *blocks, + unsigned long flags) +{ + struct gpu_buddy_block *block =3D NULL; + u64 original_size, original_min_size; + unsigned int min_order, order; + LIST_HEAD(allocated); + unsigned long pages; + int err; + + if (size < mm->chunk_size) + return -EINVAL; + + if (min_block_size < mm->chunk_size) + return -EINVAL; + + if (!is_power_of_2(min_block_size)) + return -EINVAL; + + if (!IS_ALIGNED(start | end | size, mm->chunk_size)) + return -EINVAL; + + if (end > mm->size) + return -EINVAL; + + if (range_overflows(start, size, mm->size)) + return -EINVAL; + + /* Actual range allocation */ + if (start + size =3D=3D end) { + if (!IS_ALIGNED(start | end, min_block_size)) + return -EINVAL; + + return __gpu_buddy_alloc_range(mm, start, size, NULL, blocks); + } + + original_size =3D size; + original_min_size =3D min_block_size; + + /* Roundup the size to power of 2 */ + if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION) { + size =3D roundup_pow_of_two(size); + min_block_size =3D size; + /* Align size value to min_block_size */ + } else if (!IS_ALIGNED(size, min_block_size)) { + size =3D round_up(size, min_block_size); + } + + pages =3D size >> ilog2(mm->chunk_size); + order =3D fls(pages) - 1; + min_order =3D ilog2(min_block_size) - ilog2(mm->chunk_size); + + do { + order =3D min(order, (unsigned int)fls(pages) - 1); + BUG_ON(order > mm->max_order); + BUG_ON(order < min_order); + + do { + block =3D __gpu_buddy_alloc_blocks(mm, start, + end, + order, + flags); + if (!IS_ERR(block)) + break; + + if (order-- =3D=3D min_order) { + /* Try allocation through force merge method */ + if (mm->clear_avail && + !__force_merge(mm, start, end, min_order)) { + block =3D __gpu_buddy_alloc_blocks(mm, start, + end, + min_order, + flags); + if (!IS_ERR(block)) { + order =3D min_order; + break; + } + } + + /* + * Try contiguous block allocation through + * try harder method. 
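
A hedged sketch of a contiguous request exercising this fallback; the size is
deliberately not a power of two, so the round-up, try-harder and trim paths
described here are all relevant:

	err = gpu_buddy_alloc_blocks(mm, 0, mm->size, 300 * SZ_1M, SZ_4K,
				     &blocks,
				     GPU_BUDDY_CONTIGUOUS_ALLOCATION);
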
+				 */
+				if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION &&
+				    !(flags & GPU_BUDDY_RANGE_ALLOCATION))
+					return __alloc_contig_try_harder(mm,
+									 original_size,
+									 original_min_size,
+									 blocks);
+				err = -ENOSPC;
+				goto err_free;
+			}
+		} while (1);
+
+		mark_allocated(mm, block);
+		mm->avail -= gpu_buddy_block_size(mm, block);
+		if (gpu_buddy_block_is_clear(block))
+			mm->clear_avail -= gpu_buddy_block_size(mm, block);
+		kmemleak_update_trace(block);
+		list_add_tail(&block->link, &allocated);
+
+		pages -= BIT(order);
+
+		if (!pages)
+			break;
+	} while (1);
+
+	/* Trim the allocated block to the required size */
+	if (!(flags & GPU_BUDDY_TRIM_DISABLE) &&
+	    original_size != size) {
+		struct list_head *trim_list;
+		LIST_HEAD(temp);
+		u64 trim_size;
+
+		trim_list = &allocated;
+		trim_size = original_size;
+
+		if (!list_is_singular(&allocated)) {
+			block = list_last_entry(&allocated, typeof(*block), link);
+			list_move(&block->link, &temp);
+			trim_list = &temp;
+			trim_size = gpu_buddy_block_size(mm, block) -
+				    (size - original_size);
+		}
+
+		gpu_buddy_block_trim(mm,
+				     NULL,
+				     trim_size,
+				     trim_list);
+
+		if (!list_empty(&temp))
+			list_splice_tail(trim_list, &allocated);
+	}
+
+	list_splice_tail(&allocated, blocks);
+	return 0;
+
+err_free:
+	gpu_buddy_free_list_internal(mm, &allocated);
+	return err;
+}
+EXPORT_SYMBOL(gpu_buddy_alloc_blocks);
+
+/**
+ * gpu_buddy_block_print - print block information
+ *
+ * @mm: GPU buddy manager
+ * @block: GPU buddy block
+ */
+void gpu_buddy_block_print(struct gpu_buddy *mm,
+			   struct gpu_buddy_block *block)
+{
+	u64 start = gpu_buddy_block_offset(block);
+	u64 size = gpu_buddy_block_size(mm, block);
+
+	pr_info("%#018llx-%#018llx: %llu\n", start, start + size, size);
+}
+EXPORT_SYMBOL(gpu_buddy_block_print);
+
+/**
+ * gpu_buddy_print - print allocator state
+ *
+ * @mm: GPU buddy manager
+ */
+void gpu_buddy_print(struct gpu_buddy *mm)
+{
+	int order;
+
+	pr_info("chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB\n",
+		mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avail >> 20);
+
+	for (order = mm->max_order; order >= 0; order--) {
+		struct gpu_buddy_block *block, *tmp;
+		struct rb_root *root;
+		u64 count = 0, free;
+		unsigned int tree;
+
+		for_each_free_tree(tree) {
+			root = &mm->free_trees[tree][order];
+
+			rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) {
+				BUG_ON(!gpu_buddy_block_is_free(block));
+				count++;
+			}
+		}
+
+		free = count * (mm->chunk_size << order);
+		if (free < SZ_1M)
+			pr_info("order-%2d free: %8llu KiB, blocks: %llu\n",
+				order, free >> 10, count);
+		else
+			pr_info("order-%2d free: %8llu MiB, blocks: %llu\n",
+				order, free >> 20, count);
+	}
+}
+EXPORT_SYMBOL(gpu_buddy_print);
+
+static void gpu_buddy_module_exit(void)
+{
+	kmem_cache_destroy(slab_blocks);
+}
+
+static int __init gpu_buddy_module_init(void)
+{
+	slab_blocks = KMEM_CACHE(gpu_buddy_block, 0);
+	if (!slab_blocks)
+		return -ENOMEM;
+
+	return 0;
+}
+
+module_init(gpu_buddy_module_init);
+module_exit(gpu_buddy_module_exit);
+
+MODULE_DESCRIPTION("GPU Buddy Allocator");
+MODULE_LICENSE("Dual MIT/GPL");
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 7e6bc0b3a589..0475defb37f0 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -220,6 +220,7 @@ config DRM_GPUSVM
 config DRM_BUDDY
 	tristate
 	depends on DRM
+	select GPU_BUDDY
 	help
 	  A page based buddy allocator
 
diff --git a/drivers/gpu/drm/Kconfig.debug b/drivers/gpu/drm/Kconfig.debug
index 05dc43c0b8c5..1f4c408c7920
100644 --- a/drivers/gpu/drm/Kconfig.debug +++ b/drivers/gpu/drm/Kconfig.debug @@ -71,6 +71,7 @@ config DRM_KUNIT_TEST select DRM_KUNIT_TEST_HELPERS select DRM_LIB_RANDOM select DRM_SYSFB_HELPER + select GPU_BUDDY select PRIME_NUMBERS default KUNIT_ALL_TESTS help @@ -88,10 +89,11 @@ config DRM_TTM_KUNIT_TEST tristate "KUnit tests for TTM" if !KUNIT_ALL_TESTS default n depends on DRM && KUNIT && MMU && (UML || COMPILE_TEST) - select DRM_TTM select DRM_BUDDY + select DRM_TTM select DRM_EXPORT_FOR_TESTS if m select DRM_KUNIT_TEST_HELPERS + select GPU_BUDDY default KUNIT_ALL_TESTS help Enables unit tests for TTM, a GPU memory manager subsystem used diff --git a/drivers/gpu/drm/amd/amdgpu/Kconfig b/drivers/gpu/drm/amd/amdgp= u/Kconfig index 7f515be5185d..bb131543e1d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/Kconfig +++ b/drivers/gpu/drm/amd/amdgpu/Kconfig @@ -23,6 +23,7 @@ config DRM_AMDGPU select CRC16 select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE + select GPU_BUDDY select DRM_BUDDY select DRM_SUBALLOC_HELPER select DRM_EXEC diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/= amdgpu/amdgpu_ras.c index 2a6cf7963dde..e0bd8a68877f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -5654,7 +5654,7 @@ int amdgpu_ras_add_critical_region(struct amdgpu_devi= ce *adev, struct amdgpu_ras *con =3D amdgpu_ras_get_context(adev); struct amdgpu_vram_mgr_resource *vres; struct ras_critical_region *region; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; int ret =3D 0; =20 if (!bo || !bo->tbo.resource) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/d= rm/amd/amdgpu/amdgpu_res_cursor.h index be2e56ce1355..8908d9e08a30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -55,7 +55,7 @@ static inline void amdgpu_res_first(struct ttm_resource *= res, uint64_t start, uint64_t size, struct amdgpu_res_cursor *cur) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct list_head *head, *next; struct drm_mm_node *node; =20 @@ -71,7 +71,7 @@ static inline void amdgpu_res_first(struct ttm_resource *= res, head =3D &to_amdgpu_vram_mgr_resource(res)->blocks; =20 block =3D list_first_entry_or_null(head, - struct drm_buddy_block, + struct gpu_buddy_block, link); if (!block) goto fallback; @@ -81,7 +81,7 @@ static inline void amdgpu_res_first(struct ttm_resource *= res, =20 next =3D block->link.next; if (next !=3D head) - block =3D list_entry(next, struct drm_buddy_block, link); + block =3D list_entry(next, struct gpu_buddy_block, link); } =20 cur->start =3D amdgpu_vram_mgr_block_start(block) + start; @@ -125,7 +125,7 @@ static inline void amdgpu_res_first(struct ttm_resource= *res, */ static inline void amdgpu_res_next(struct amdgpu_res_cursor *cur, uint64_t= size) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct drm_mm_node *node; struct list_head *next; =20 @@ -146,7 +146,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cu= rsor *cur, uint64_t size) block =3D cur->node; =20 next =3D block->link.next; - block =3D list_entry(next, struct drm_buddy_block, link); + block =3D list_entry(next, struct gpu_buddy_block, link); =20 cur->node =3D block; cur->start =3D amdgpu_vram_mgr_block_start(block); @@ -175,7 +175,7 @@ static inline void amdgpu_res_next(struct amdgpu_res_cu= rsor *cur, uint64_t size) */ static inline bool amdgpu_res_cleared(struct amdgpu_res_cursor *cur) { - struct 
drm_buddy_block *block; + struct gpu_buddy_block *block; =20 switch (cur->mem_type) { case TTM_PL_VRAM: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm= /amd/amdgpu/amdgpu_vram_mgr.c index 9d934c07fa6b..6c06a9c9b13f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -23,6 +23,8 @@ */ =20 #include + +#include #include #include =20 @@ -52,15 +54,15 @@ to_amdgpu_device(struct amdgpu_vram_mgr *mgr) return container_of(mgr, struct amdgpu_device, mman.vram_mgr); } =20 -static inline struct drm_buddy_block * +static inline struct gpu_buddy_block * amdgpu_vram_mgr_first_block(struct list_head *list) { - return list_first_entry_or_null(list, struct drm_buddy_block, link); + return list_first_entry_or_null(list, struct gpu_buddy_block, link); } =20 static inline bool amdgpu_is_vram_mgr_blocks_contiguous(struct list_head *= head) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 start, size; =20 block =3D amdgpu_vram_mgr_first_block(head); @@ -71,7 +73,7 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(s= truct list_head *head) start =3D amdgpu_vram_mgr_block_start(block); size =3D amdgpu_vram_mgr_block_size(block); =20 - block =3D list_entry(block->link.next, struct drm_buddy_block, link); + block =3D list_entry(block->link.next, struct gpu_buddy_block, link); if (start + size !=3D amdgpu_vram_mgr_block_start(block)) return false; } @@ -81,7 +83,7 @@ static inline bool amdgpu_is_vram_mgr_blocks_contiguous(s= truct list_head *head) =20 static inline u64 amdgpu_vram_mgr_blocks_size(struct list_head *head) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 size =3D 0; =20 list_for_each_entry(block, head, link) @@ -254,7 +256,7 @@ const struct attribute_group amdgpu_vram_mgr_attr_group= =3D { * Calculate how many bytes of the DRM BUDDY block are inside visible VRAM */ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev, - struct drm_buddy_block *block) + struct gpu_buddy_block *block) { u64 start =3D amdgpu_vram_mgr_block_start(block); u64 end =3D start + amdgpu_vram_mgr_block_size(block); @@ -279,7 +281,7 @@ u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *b= o) struct amdgpu_device *adev =3D amdgpu_ttm_adev(bo->tbo.bdev); struct ttm_resource *res =3D bo->tbo.resource; struct amdgpu_vram_mgr_resource *vres =3D to_amdgpu_vram_mgr_resource(res= ); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 usage =3D 0; =20 if (amdgpu_gmc_vram_full_visible(&adev->gmc)) @@ -299,15 +301,15 @@ static void amdgpu_vram_mgr_do_reserve(struct ttm_res= ource_manager *man) { struct amdgpu_vram_mgr *mgr =3D to_vram_mgr(man); struct amdgpu_device *adev =3D to_amdgpu_device(mgr); - struct drm_buddy *mm =3D &mgr->mm; + struct gpu_buddy *mm =3D &mgr->mm; struct amdgpu_vram_reservation *rsv, *temp; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; uint64_t vis_usage; =20 list_for_each_entry_safe(rsv, temp, &mgr->reservations_pending, blocks) { - if (drm_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, + if (gpu_buddy_alloc_blocks(mm, rsv->start, rsv->start + rsv->size, rsv->size, mm->chunk_size, &rsv->allocated, - DRM_BUDDY_RANGE_ALLOCATION)) + GPU_BUDDY_RANGE_ALLOCATION)) continue; =20 block =3D amdgpu_vram_mgr_first_block(&rsv->allocated); @@ -403,7 +405,7 @@ int amdgpu_vram_mgr_query_address_block_info(struct amd= gpu_vram_mgr *mgr, uint64_t address, struct amdgpu_vram_block_info *info) { struct amdgpu_vram_mgr_resource *vres; 
- struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 start, size; int ret =3D -ENOENT; =20 @@ -450,8 +452,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, struct amdgpu_vram_mgr_resource *vres; u64 size, remaining_size, lpfn, fpfn; unsigned int adjust_dcc_size =3D 0; - struct drm_buddy *mm =3D &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm =3D &mgr->mm; + struct gpu_buddy_block *block; unsigned long pages_per_block; int r; =20 @@ -493,17 +495,17 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_ma= nager *man, INIT_LIST_HEAD(&vres->blocks); =20 if (place->flags & TTM_PL_FLAG_TOPDOWN) - vres->flags |=3D DRM_BUDDY_TOPDOWN_ALLOCATION; + vres->flags |=3D GPU_BUDDY_TOPDOWN_ALLOCATION; =20 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS) - vres->flags |=3D DRM_BUDDY_CONTIGUOUS_ALLOCATION; + vres->flags |=3D GPU_BUDDY_CONTIGUOUS_ALLOCATION; =20 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED) - vres->flags |=3D DRM_BUDDY_CLEAR_ALLOCATION; + vres->flags |=3D GPU_BUDDY_CLEAR_ALLOCATION; =20 if (fpfn || lpfn !=3D mgr->mm.size) /* Allocate blocks in desired range */ - vres->flags |=3D DRM_BUDDY_RANGE_ALLOCATION; + vres->flags |=3D GPU_BUDDY_RANGE_ALLOCATION; =20 if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC && adev->gmc.gmc_funcs->get_dcc_alignment) @@ -516,7 +518,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, dcc_size =3D roundup_pow_of_two(vres->base.size + adjust_dcc_size); remaining_size =3D (u64)dcc_size; =20 - vres->flags |=3D DRM_BUDDY_TRIM_DISABLE; + vres->flags |=3D GPU_BUDDY_TRIM_DISABLE; } =20 mutex_lock(&mgr->lock); @@ -536,7 +538,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, =20 BUG_ON(min_block_size < mm->chunk_size); =20 - r =3D drm_buddy_alloc_blocks(mm, fpfn, + r =3D gpu_buddy_alloc_blocks(mm, fpfn, lpfn, size, min_block_size, @@ -545,7 +547,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, =20 if (unlikely(r =3D=3D -ENOSPC) && pages_per_block =3D=3D ~0ul && !(place->flags & TTM_PL_FLAG_CONTIGUOUS)) { - vres->flags &=3D ~DRM_BUDDY_CONTIGUOUS_ALLOCATION; + vres->flags &=3D ~GPU_BUDDY_CONTIGUOUS_ALLOCATION; pages_per_block =3D max_t(u32, 2UL << (20UL - PAGE_SHIFT), tbo->page_alignment); =20 @@ -566,7 +568,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, list_add_tail(&vres->vres_node, &mgr->allocated_vres_list); =20 if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) { - struct drm_buddy_block *dcc_block; + struct gpu_buddy_block *dcc_block; unsigned long dcc_start; u64 trim_start; =20 @@ -576,7 +578,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block), adjust_dcc_size); trim_start =3D (u64)dcc_start; - drm_buddy_block_trim(mm, &trim_start, + gpu_buddy_block_trim(mm, &trim_start, (u64)vres->base.size, &vres->blocks); } @@ -614,7 +616,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_mana= ger *man, return 0; =20 error_free_blocks: - drm_buddy_free_list(mm, &vres->blocks, 0); + gpu_buddy_free_list(mm, &vres->blocks, 0); mutex_unlock(&mgr->lock); error_fini: ttm_resource_fini(man, &vres->base); @@ -637,8 +639,8 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_man= ager *man, struct amdgpu_vram_mgr_resource *vres =3D to_amdgpu_vram_mgr_resource(res= ); struct amdgpu_vram_mgr *mgr =3D to_vram_mgr(man); struct amdgpu_device *adev =3D to_amdgpu_device(mgr); - struct drm_buddy *mm =3D &mgr->mm; - struct drm_buddy_block *block; + 
struct gpu_buddy *mm =3D &mgr->mm; + struct gpu_buddy_block *block; uint64_t vis_usage =3D 0; =20 mutex_lock(&mgr->lock); @@ -649,7 +651,7 @@ static void amdgpu_vram_mgr_del(struct ttm_resource_man= ager *man, list_for_each_entry(block, &vres->blocks, link) vis_usage +=3D amdgpu_vram_mgr_vis_size(adev, block); =20 - drm_buddy_free_list(mm, &vres->blocks, vres->flags); + gpu_buddy_free_list(mm, &vres->blocks, vres->flags); amdgpu_vram_mgr_do_reserve(man); mutex_unlock(&mgr->lock); =20 @@ -688,7 +690,7 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *ade= v, if (!*sgt) return -ENOMEM; =20 - /* Determine the number of DRM_BUDDY blocks to export */ + /* Determine the number of GPU_BUDDY blocks to export */ amdgpu_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -704,10 +706,10 @@ int amdgpu_vram_mgr_alloc_sgt(struct amdgpu_device *a= dev, sg->length =3D 0; =20 /* - * Walk down DRM_BUDDY blocks to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_BUDDY block + * Walk down GPU_BUDDY blocks to populate scatterlist nodes + * @note: Use iterator api to get first the GPU_BUDDY block * and the number of bytes from it. Access the following - * DRM_BUDDY block(s) if more buffer needs to exported + * GPU_BUDDY block(s) if more buffer needs to exported */ amdgpu_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { @@ -792,10 +794,10 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram= _mgr *mgr) void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) { struct amdgpu_vram_mgr *mgr =3D &adev->mman.vram_mgr; - struct drm_buddy *mm =3D &mgr->mm; + struct gpu_buddy *mm =3D &mgr->mm; =20 mutex_lock(&mgr->lock); - drm_buddy_reset_clear(mm, false); + gpu_buddy_reset_clear(mm, false); mutex_unlock(&mgr->lock); } =20 @@ -815,7 +817,7 @@ static bool amdgpu_vram_mgr_intersects(struct ttm_resou= rce_manager *man, size_t size) { struct amdgpu_vram_mgr_resource *mgr =3D to_amdgpu_vram_mgr_resource(res); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; =20 /* Check each drm buddy block individually */ list_for_each_entry(block, &mgr->blocks, link) { @@ -848,7 +850,7 @@ static bool amdgpu_vram_mgr_compatible(struct ttm_resou= rce_manager *man, size_t size) { struct amdgpu_vram_mgr_resource *mgr =3D to_amdgpu_vram_mgr_resource(res); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; =20 /* Check each drm buddy block individually */ list_for_each_entry(block, &mgr->blocks, link) { @@ -877,7 +879,7 @@ static void amdgpu_vram_mgr_debug(struct ttm_resource_m= anager *man, struct drm_printer *printer) { struct amdgpu_vram_mgr *mgr =3D to_vram_mgr(man); - struct drm_buddy *mm =3D &mgr->mm; + struct gpu_buddy *mm =3D &mgr->mm; struct amdgpu_vram_reservation *rsv; =20 drm_printf(printer, " vis usage:%llu\n", @@ -930,7 +932,7 @@ int amdgpu_vram_mgr_init(struct amdgpu_device *adev) mgr->default_page_size =3D PAGE_SIZE; =20 man->func =3D &amdgpu_vram_mgr_func; - err =3D drm_buddy_init(&mgr->mm, man->size, PAGE_SIZE); + err =3D gpu_buddy_init(&mgr->mm, man->size, PAGE_SIZE); if (err) return err; =20 @@ -965,11 +967,11 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) kfree(rsv); =20 list_for_each_entry_safe(rsv, temp, &mgr->reserved_pages, blocks) { - drm_buddy_free_list(&mgr->mm, &rsv->allocated, 0); + gpu_buddy_free_list(&mgr->mm, &rsv->allocated, 0); kfree(rsv); } if (!adev->gmc.is_app_apu) - drm_buddy_fini(&mgr->mm); + gpu_buddy_fini(&mgr->mm); mutex_unlock(&mgr->lock); =20 
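
The driver-side conversion, as the amdgpu hunks above show, is mechanical.
Sketched for an arbitrary driver (identifiers such as min_sz are placeholders,
not from this patch):

	-	struct drm_buddy mm;
	-	drm_buddy_init(&mm, size, PAGE_SIZE);
	-	drm_buddy_alloc_blocks(&mm, fpfn, lpfn, size, min_sz,
	-			       &blocks, DRM_BUDDY_RANGE_ALLOCATION);
	+	struct gpu_buddy mm;
	+	gpu_buddy_init(&mm, size, PAGE_SIZE);
	+	gpu_buddy_alloc_blocks(&mm, fpfn, lpfn, size, min_sz,
	+			       &blocks, GPU_BUDDY_RANGE_ALLOCATION);
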
ttm_resource_manager_cleanup(man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h b/drivers/gpu/drm= /amd/amdgpu/amdgpu_vram_mgr.h index 5f5fd9a911c2..429a21a2e9b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.h @@ -24,11 +24,11 @@ #ifndef __AMDGPU_VRAM_MGR_H__ #define __AMDGPU_VRAM_MGR_H__ =20 -#include +#include =20 struct amdgpu_vram_mgr { struct ttm_resource_manager manager; - struct drm_buddy mm; + struct gpu_buddy mm; /* protects access to buffer objects */ struct mutex lock; struct list_head reservations_pending; @@ -57,19 +57,19 @@ struct amdgpu_vram_mgr_resource { struct amdgpu_vres_task task; }; =20 -static inline u64 amdgpu_vram_mgr_block_start(struct drm_buddy_block *bloc= k) +static inline u64 amdgpu_vram_mgr_block_start(struct gpu_buddy_block *bloc= k) { - return drm_buddy_block_offset(block); + return gpu_buddy_block_offset(block); } =20 -static inline u64 amdgpu_vram_mgr_block_size(struct drm_buddy_block *block) +static inline u64 amdgpu_vram_mgr_block_size(struct gpu_buddy_block *block) { - return (u64)PAGE_SIZE << drm_buddy_block_order(block); + return (u64)PAGE_SIZE << gpu_buddy_block_order(block); } =20 -static inline bool amdgpu_vram_mgr_is_cleared(struct drm_buddy_block *bloc= k) +static inline bool amdgpu_vram_mgr_is_cleared(struct gpu_buddy_block *bloc= k) { - return drm_buddy_block_is_clear(block); + return gpu_buddy_block_is_clear(block); } =20 static inline struct amdgpu_vram_mgr_resource * @@ -82,8 +82,8 @@ static inline void amdgpu_vram_mgr_set_cleared(struct ttm= _resource *res) { struct amdgpu_vram_mgr_resource *ares =3D to_amdgpu_vram_mgr_resource(res= ); =20 - WARN_ON(ares->flags & DRM_BUDDY_CLEARED); - ares->flags |=3D DRM_BUDDY_CLEARED; + WARN_ON(ares->flags & GPU_BUDDY_CLEARED); + ares->flags |=3D GPU_BUDDY_CLEARED; } =20 int amdgpu_vram_mgr_query_address_block_info(struct amdgpu_vram_mgr *mgr, diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index 2f279b46bd2c..056514801caf 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -3,1262 +3,25 @@ * Copyright =C2=A9 2021 Intel Corporation */ =20 -#include - #include -#include #include #include =20 #include #include =20 -enum drm_buddy_free_tree { - DRM_BUDDY_CLEAR_TREE =3D 0, - DRM_BUDDY_DIRTY_TREE, - DRM_BUDDY_MAX_FREE_TREES, -}; - -static struct kmem_cache *slab_blocks; - -#define for_each_free_tree(tree) \ - for ((tree) =3D 0; (tree) < DRM_BUDDY_MAX_FREE_TREES; (tree)++) - -static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, - struct drm_buddy_block *parent, - unsigned int order, - u64 offset) -{ - struct drm_buddy_block *block; - - BUG_ON(order > DRM_BUDDY_MAX_ORDER); - - block =3D kmem_cache_zalloc(slab_blocks, GFP_KERNEL); - if (!block) - return NULL; - - block->header =3D offset; - block->header |=3D order; - block->parent =3D parent; - - RB_CLEAR_NODE(&block->rb); - - BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED); - return block; -} - -static void drm_block_free(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - kmem_cache_free(slab_blocks, block); -} - -static enum drm_buddy_free_tree -get_block_tree(struct drm_buddy_block *block) -{ - return drm_buddy_block_is_clear(block) ? - DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; -} - -static struct drm_buddy_block * -rbtree_get_free_block(const struct rb_node *node) -{ - return node ? 
rb_entry(node, struct drm_buddy_block, rb) : NULL; -} - -static struct drm_buddy_block * -rbtree_last_free_block(struct rb_root *root) -{ - return rbtree_get_free_block(rb_last(root)); -} - -static bool rbtree_is_empty(struct rb_root *root) -{ - return RB_EMPTY_ROOT(root); -} - -static bool drm_buddy_block_offset_less(const struct drm_buddy_block *bloc= k, - const struct drm_buddy_block *node) -{ - return drm_buddy_block_offset(block) < drm_buddy_block_offset(node); -} - -static bool rbtree_block_offset_less(struct rb_node *block, - const struct rb_node *node) -{ - return drm_buddy_block_offset_less(rbtree_get_free_block(block), - rbtree_get_free_block(node)); -} - -static void rbtree_insert(struct drm_buddy *mm, - struct drm_buddy_block *block, - enum drm_buddy_free_tree tree) -{ - rb_add(&block->rb, - &mm->free_trees[tree][drm_buddy_block_order(block)], - rbtree_block_offset_less); -} - -static void rbtree_remove(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - unsigned int order =3D drm_buddy_block_order(block); - enum drm_buddy_free_tree tree; - struct rb_root *root; - - tree =3D get_block_tree(block); - root =3D &mm->free_trees[tree][order]; - - rb_erase(&block->rb, root); - RB_CLEAR_NODE(&block->rb); -} - -static void clear_reset(struct drm_buddy_block *block) -{ - block->header &=3D ~DRM_BUDDY_HEADER_CLEAR; -} - -static void mark_cleared(struct drm_buddy_block *block) -{ - block->header |=3D DRM_BUDDY_HEADER_CLEAR; -} - -static void mark_allocated(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - block->header &=3D ~DRM_BUDDY_HEADER_STATE; - block->header |=3D DRM_BUDDY_ALLOCATED; - - rbtree_remove(mm, block); -} - -static void mark_free(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - enum drm_buddy_free_tree tree; - - block->header &=3D ~DRM_BUDDY_HEADER_STATE; - block->header |=3D DRM_BUDDY_FREE; - - tree =3D get_block_tree(block); - rbtree_insert(mm, block, tree); -} - -static void mark_split(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - block->header &=3D ~DRM_BUDDY_HEADER_STATE; - block->header |=3D DRM_BUDDY_SPLIT; - - rbtree_remove(mm, block); -} - -static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2) -{ - return s1 <=3D e2 && e1 >=3D s2; -} - -static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2) -{ - return s1 <=3D s2 && e1 >=3D e2; -} - -static struct drm_buddy_block * -__get_buddy(struct drm_buddy_block *block) -{ - struct drm_buddy_block *parent; - - parent =3D block->parent; - if (!parent) - return NULL; - - if (parent->left =3D=3D block) - return parent->right; - - return parent->left; -} - -static unsigned int __drm_buddy_free(struct drm_buddy *mm, - struct drm_buddy_block *block, - bool force_merge) -{ - struct drm_buddy_block *parent; - unsigned int order; - - while ((parent =3D block->parent)) { - struct drm_buddy_block *buddy; - - buddy =3D __get_buddy(block); - - if (!drm_buddy_block_is_free(buddy)) - break; - - if (!force_merge) { - /* - * Check the block and its buddy clear state and exit - * the loop if they both have the dissimilar state. 
- */ - if (drm_buddy_block_is_clear(block) !=3D - drm_buddy_block_is_clear(buddy)) - break; - - if (drm_buddy_block_is_clear(block)) - mark_cleared(parent); - } - - rbtree_remove(mm, buddy); - if (force_merge && drm_buddy_block_is_clear(buddy)) - mm->clear_avail -=3D drm_buddy_block_size(mm, buddy); - - drm_block_free(mm, block); - drm_block_free(mm, buddy); - - block =3D parent; - } - - order =3D drm_buddy_block_order(block); - mark_free(mm, block); - - return order; -} - -static int __force_merge(struct drm_buddy *mm, - u64 start, - u64 end, - unsigned int min_order) -{ - unsigned int tree, order; - int i; - - if (!min_order) - return -ENOMEM; - - if (min_order > mm->max_order) - return -EINVAL; - - for_each_free_tree(tree) { - for (i =3D min_order - 1; i >=3D 0; i--) { - struct rb_node *iter =3D rb_last(&mm->free_trees[tree][i]); - - while (iter) { - struct drm_buddy_block *block, *buddy; - u64 block_start, block_end; - - block =3D rbtree_get_free_block(iter); - iter =3D rb_prev(iter); - - if (!block || !block->parent) - continue; - - block_start =3D drm_buddy_block_offset(block); - block_end =3D block_start + drm_buddy_block_size(mm, block) - 1; - - if (!contains(start, end, block_start, block_end)) - continue; - - buddy =3D __get_buddy(block); - if (!drm_buddy_block_is_free(buddy)) - continue; - - WARN_ON(drm_buddy_block_is_clear(block) =3D=3D - drm_buddy_block_is_clear(buddy)); - - /* - * Advance to the next node when the current node is the buddy, - * as freeing the block will also remove its buddy from the tree. - */ - if (iter =3D=3D &buddy->rb) - iter =3D rb_prev(iter); - - rbtree_remove(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -=3D drm_buddy_block_size(mm, block); - - order =3D __drm_buddy_free(mm, block, true); - if (order >=3D min_order) - return 0; - } - } - } - - return -ENOMEM; -} - -/** - * drm_buddy_init - init memory manager - * - * @mm: DRM buddy manager to initialize - * @size: size in bytes to manage - * @chunk_size: minimum page size in bytes for our allocations - * - * Initializes the memory manager and its resources. - * - * Returns: - * 0 on success, error code on failure. - */ -int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size) -{ - unsigned int i, j, root_count =3D 0; - u64 offset =3D 0; - - if (size < chunk_size) - return -EINVAL; - - if (chunk_size < SZ_4K) - return -EINVAL; - - if (!is_power_of_2(chunk_size)) - return -EINVAL; - - size =3D round_down(size, chunk_size); - - mm->size =3D size; - mm->avail =3D size; - mm->clear_avail =3D 0; - mm->chunk_size =3D chunk_size; - mm->max_order =3D ilog2(size) - ilog2(chunk_size); - - BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER); - - mm->free_trees =3D kmalloc_array(DRM_BUDDY_MAX_FREE_TREES, - sizeof(*mm->free_trees), - GFP_KERNEL); - if (!mm->free_trees) - return -ENOMEM; - - for_each_free_tree(i) { - mm->free_trees[i] =3D kmalloc_array(mm->max_order + 1, - sizeof(struct rb_root), - GFP_KERNEL); - if (!mm->free_trees[i]) - goto out_free_tree; - - for (j =3D 0; j <=3D mm->max_order; ++j) - mm->free_trees[i][j] =3D RB_ROOT; - } - - mm->n_roots =3D hweight64(size); - - mm->roots =3D kmalloc_array(mm->n_roots, - sizeof(struct drm_buddy_block *), - GFP_KERNEL); - if (!mm->roots) - goto out_free_tree; - - /* - * Split into power-of-two blocks, in case we are given a size that is - * not itself a power-of-two. 
- */ - do { - struct drm_buddy_block *root; - unsigned int order; - u64 root_size; - - order =3D ilog2(size) - ilog2(chunk_size); - root_size =3D chunk_size << order; - - root =3D drm_block_alloc(mm, NULL, order, offset); - if (!root) - goto out_free_roots; - - mark_free(mm, root); - - BUG_ON(root_count > mm->max_order); - BUG_ON(drm_buddy_block_size(mm, root) < chunk_size); - - mm->roots[root_count] =3D root; - - offset +=3D root_size; - size -=3D root_size; - root_count++; - } while (size); - - return 0; - -out_free_roots: - while (root_count--) - drm_block_free(mm, mm->roots[root_count]); - kfree(mm->roots); -out_free_tree: - while (i--) - kfree(mm->free_trees[i]); - kfree(mm->free_trees); - return -ENOMEM; -} -EXPORT_SYMBOL(drm_buddy_init); - -/** - * drm_buddy_fini - tear down the memory manager - * - * @mm: DRM buddy manager to free - * - * Cleanup memory manager resources and the freetree - */ -void drm_buddy_fini(struct drm_buddy *mm) -{ - u64 root_size, size, start; - unsigned int order; - int i; - - size =3D mm->size; - - for (i =3D 0; i < mm->n_roots; ++i) { - order =3D ilog2(size) - ilog2(mm->chunk_size); - start =3D drm_buddy_block_offset(mm->roots[i]); - __force_merge(mm, start, start + size, order); - - if (WARN_ON(!drm_buddy_block_is_free(mm->roots[i]))) - kunit_fail_current_test("buddy_fini() root"); - - drm_block_free(mm, mm->roots[i]); - - root_size =3D mm->chunk_size << order; - size -=3D root_size; - } - - WARN_ON(mm->avail !=3D mm->size); - - for_each_free_tree(i) - kfree(mm->free_trees[i]); - kfree(mm->roots); -} -EXPORT_SYMBOL(drm_buddy_fini); - -static int split_block(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - unsigned int block_order =3D drm_buddy_block_order(block) - 1; - u64 offset =3D drm_buddy_block_offset(block); - - BUG_ON(!drm_buddy_block_is_free(block)); - BUG_ON(!drm_buddy_block_order(block)); - - block->left =3D drm_block_alloc(mm, block, block_order, offset); - if (!block->left) - return -ENOMEM; - - block->right =3D drm_block_alloc(mm, block, block_order, - offset + (mm->chunk_size << block_order)); - if (!block->right) { - drm_block_free(mm, block->left); - return -ENOMEM; - } - - mark_split(mm, block); - - if (drm_buddy_block_is_clear(block)) { - mark_cleared(block->left); - mark_cleared(block->right); - clear_reset(block); - } - - mark_free(mm, block->left); - mark_free(mm, block->right); - - return 0; -} - -/** - * drm_get_buddy - get buddy address - * - * @block: DRM buddy block - * - * Returns the corresponding buddy block for @block, or NULL - * if this is a root block and can't be merged further. - * Requires some kind of locking to protect against - * any concurrent allocate and free operations. - */ -struct drm_buddy_block * -drm_get_buddy(struct drm_buddy_block *block) -{ - return __get_buddy(block); -} -EXPORT_SYMBOL(drm_get_buddy); - -/** - * drm_buddy_reset_clear - reset blocks clear state - * - * @mm: DRM buddy manager - * @is_clear: blocks clear state - * - * Reset the clear state based on @is_clear value for each block - * in the freetree. 
- */ -void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) -{ - enum drm_buddy_free_tree src_tree, dst_tree; - u64 root_size, size, start; - unsigned int order; - int i; - - size =3D mm->size; - for (i =3D 0; i < mm->n_roots; ++i) { - order =3D ilog2(size) - ilog2(mm->chunk_size); - start =3D drm_buddy_block_offset(mm->roots[i]); - __force_merge(mm, start, start + size, order); - - root_size =3D mm->chunk_size << order; - size -=3D root_size; - } - - src_tree =3D is_clear ? DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; - dst_tree =3D is_clear ? DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; - - for (i =3D 0; i <=3D mm->max_order; ++i) { - struct rb_root *root =3D &mm->free_trees[src_tree][i]; - struct drm_buddy_block *block, *tmp; - - rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { - rbtree_remove(mm, block); - if (is_clear) { - mark_cleared(block); - mm->clear_avail +=3D drm_buddy_block_size(mm, block); - } else { - clear_reset(block); - mm->clear_avail -=3D drm_buddy_block_size(mm, block); - } - - rbtree_insert(mm, block, dst_tree); - } - } -} -EXPORT_SYMBOL(drm_buddy_reset_clear); - -/** - * drm_buddy_free_block - free a block - * - * @mm: DRM buddy manager - * @block: block to be freed - */ -void drm_buddy_free_block(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - BUG_ON(!drm_buddy_block_is_allocated(block)); - mm->avail +=3D drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail +=3D drm_buddy_block_size(mm, block); - - __drm_buddy_free(mm, block, false); -} -EXPORT_SYMBOL(drm_buddy_free_block); - -static void __drm_buddy_free_list(struct drm_buddy *mm, - struct list_head *objects, - bool mark_clear, - bool mark_dirty) -{ - struct drm_buddy_block *block, *on; - - WARN_ON(mark_dirty && mark_clear); - - list_for_each_entry_safe(block, on, objects, link) { - if (mark_clear) - mark_cleared(block); - else if (mark_dirty) - clear_reset(block); - drm_buddy_free_block(mm, block); - cond_resched(); - } - INIT_LIST_HEAD(objects); -} - -static void drm_buddy_free_list_internal(struct drm_buddy *mm, - struct list_head *objects) -{ - /* - * Don't touch the clear/dirty bit, since allocation is still internal - * at this point. For example we might have just failed part of the - * allocation. 
- */ - __drm_buddy_free_list(mm, objects, false, false); -} - -/** - * drm_buddy_free_list - free blocks - * - * @mm: DRM buddy manager - * @objects: input list head to free blocks - * @flags: optional flags like DRM_BUDDY_CLEARED - */ -void drm_buddy_free_list(struct drm_buddy *mm, - struct list_head *objects, - unsigned int flags) -{ - bool mark_clear =3D flags & DRM_BUDDY_CLEARED; - - __drm_buddy_free_list(mm, objects, mark_clear, !mark_clear); -} -EXPORT_SYMBOL(drm_buddy_free_list); - -static bool block_incompatible(struct drm_buddy_block *block, unsigned int= flags) -{ - bool needs_clear =3D flags & DRM_BUDDY_CLEAR_ALLOCATION; - - return needs_clear !=3D drm_buddy_block_is_clear(block); -} - -static struct drm_buddy_block * -__alloc_range_bias(struct drm_buddy *mm, - u64 start, u64 end, - unsigned int order, - unsigned long flags, - bool fallback) -{ - u64 req_size =3D mm->chunk_size << order; - struct drm_buddy_block *block; - struct drm_buddy_block *buddy; - LIST_HEAD(dfs); - int err; - int i; - - end =3D end - 1; - - for (i =3D 0; i < mm->n_roots; ++i) - list_add_tail(&mm->roots[i]->tmp_link, &dfs); - - do { - u64 block_start; - u64 block_end; - - block =3D list_first_entry_or_null(&dfs, - struct drm_buddy_block, - tmp_link); - if (!block) - break; - - list_del(&block->tmp_link); - - if (drm_buddy_block_order(block) < order) - continue; - - block_start =3D drm_buddy_block_offset(block); - block_end =3D block_start + drm_buddy_block_size(mm, block) - 1; - - if (!overlaps(start, end, block_start, block_end)) - continue; - - if (drm_buddy_block_is_allocated(block)) - continue; - - if (block_start < start || block_end > end) { - u64 adjusted_start =3D max(block_start, start); - u64 adjusted_end =3D min(block_end, end); - - if (round_down(adjusted_end + 1, req_size) <=3D - round_up(adjusted_start, req_size)) - continue; - } - - if (!fallback && block_incompatible(block, flags)) - continue; - - if (contains(start, end, block_start, block_end) && - order =3D=3D drm_buddy_block_order(block)) { - /* - * Find the free block within the range. - */ - if (drm_buddy_block_is_free(block)) - return block; - - continue; - } - - if (!drm_buddy_block_is_split(block)) { - err =3D split_block(mm, block); - if (unlikely(err)) - goto err_undo; - } - - list_add(&block->right->tmp_link, &dfs); - list_add(&block->left->tmp_link, &dfs); - } while (1); - - return ERR_PTR(-ENOSPC); - -err_undo: - /* - * We really don't want to leave around a bunch of split blocks, since - * bigger is better, so make sure we merge everything back before we - * free the allocated blocks. 
- */ - buddy =3D __get_buddy(block); - if (buddy && - (drm_buddy_block_is_free(block) && - drm_buddy_block_is_free(buddy))) - __drm_buddy_free(mm, block, false); - return ERR_PTR(err); -} - -static struct drm_buddy_block * -__drm_buddy_alloc_range_bias(struct drm_buddy *mm, - u64 start, u64 end, - unsigned int order, - unsigned long flags) -{ - struct drm_buddy_block *block; - bool fallback =3D false; - - block =3D __alloc_range_bias(mm, start, end, order, - flags, fallback); - if (IS_ERR(block)) - return __alloc_range_bias(mm, start, end, order, - flags, !fallback); - - return block; -} - -static struct drm_buddy_block * -get_maxblock(struct drm_buddy *mm, - unsigned int order, - enum drm_buddy_free_tree tree) -{ - struct drm_buddy_block *max_block =3D NULL, *block =3D NULL; - struct rb_root *root; - unsigned int i; - - for (i =3D order; i <=3D mm->max_order; ++i) { - root =3D &mm->free_trees[tree][i]; - block =3D rbtree_last_free_block(root); - if (!block) - continue; - - if (!max_block) { - max_block =3D block; - continue; - } - - if (drm_buddy_block_offset(block) > - drm_buddy_block_offset(max_block)) { - max_block =3D block; - } - } - - return max_block; -} - -static struct drm_buddy_block * -alloc_from_freetree(struct drm_buddy *mm, - unsigned int order, - unsigned long flags) -{ - struct drm_buddy_block *block =3D NULL; - struct rb_root *root; - enum drm_buddy_free_tree tree; - unsigned int tmp; - int err; - - tree =3D (flags & DRM_BUDDY_CLEAR_ALLOCATION) ? - DRM_BUDDY_CLEAR_TREE : DRM_BUDDY_DIRTY_TREE; - - if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) { - block =3D get_maxblock(mm, order, tree); - if (block) - /* Store the obtained block order */ - tmp =3D drm_buddy_block_order(block); - } else { - for (tmp =3D order; tmp <=3D mm->max_order; ++tmp) { - /* Get RB tree root for this order and tree */ - root =3D &mm->free_trees[tree][tmp]; - block =3D rbtree_last_free_block(root); - if (block) - break; - } - } - - if (!block) { - /* Try allocating from the other tree */ - tree =3D (tree =3D=3D DRM_BUDDY_CLEAR_TREE) ? 
- DRM_BUDDY_DIRTY_TREE : DRM_BUDDY_CLEAR_TREE; - - for (tmp =3D order; tmp <=3D mm->max_order; ++tmp) { - root =3D &mm->free_trees[tree][tmp]; - block =3D rbtree_last_free_block(root); - if (block) - break; - } - - if (!block) - return ERR_PTR(-ENOSPC); - } - - BUG_ON(!drm_buddy_block_is_free(block)); - - while (tmp !=3D order) { - err =3D split_block(mm, block); - if (unlikely(err)) - goto err_undo; - - block =3D block->right; - tmp--; - } - return block; - -err_undo: - if (tmp !=3D order) - __drm_buddy_free(mm, block, false); - return ERR_PTR(err); -} - -static int __alloc_range(struct drm_buddy *mm, - struct list_head *dfs, - u64 start, u64 size, - struct list_head *blocks, - u64 *total_allocated_on_err) -{ - struct drm_buddy_block *block; - struct drm_buddy_block *buddy; - u64 total_allocated =3D 0; - LIST_HEAD(allocated); - u64 end; - int err; - - end =3D start + size - 1; - - do { - u64 block_start; - u64 block_end; - - block =3D list_first_entry_or_null(dfs, - struct drm_buddy_block, - tmp_link); - if (!block) - break; - - list_del(&block->tmp_link); - - block_start =3D drm_buddy_block_offset(block); - block_end =3D block_start + drm_buddy_block_size(mm, block) - 1; - - if (!overlaps(start, end, block_start, block_end)) - continue; - - if (drm_buddy_block_is_allocated(block)) { - err =3D -ENOSPC; - goto err_free; - } - - if (contains(start, end, block_start, block_end)) { - if (drm_buddy_block_is_free(block)) { - mark_allocated(mm, block); - total_allocated +=3D drm_buddy_block_size(mm, block); - mm->avail -=3D drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -=3D drm_buddy_block_size(mm, block); - list_add_tail(&block->link, &allocated); - continue; - } else if (!mm->clear_avail) { - err =3D -ENOSPC; - goto err_free; - } - } - - if (!drm_buddy_block_is_split(block)) { - err =3D split_block(mm, block); - if (unlikely(err)) - goto err_undo; - } - - list_add(&block->right->tmp_link, dfs); - list_add(&block->left->tmp_link, dfs); - } while (1); - - if (total_allocated < size) { - err =3D -ENOSPC; - goto err_free; - } - - list_splice_tail(&allocated, blocks); - - return 0; - -err_undo: - /* - * We really don't want to leave around a bunch of split blocks, since - * bigger is better, so make sure we merge everything back before we - * free the allocated blocks. 
- */ - buddy =3D __get_buddy(block); - if (buddy && - (drm_buddy_block_is_free(block) && - drm_buddy_block_is_free(buddy))) - __drm_buddy_free(mm, block, false); - -err_free: - if (err =3D=3D -ENOSPC && total_allocated_on_err) { - list_splice_tail(&allocated, blocks); - *total_allocated_on_err =3D total_allocated; - } else { - drm_buddy_free_list_internal(mm, &allocated); - } - - return err; -} - -static int __drm_buddy_alloc_range(struct drm_buddy *mm, - u64 start, - u64 size, - u64 *total_allocated_on_err, - struct list_head *blocks) -{ - LIST_HEAD(dfs); - int i; - - for (i =3D 0; i < mm->n_roots; ++i) - list_add_tail(&mm->roots[i]->tmp_link, &dfs); - - return __alloc_range(mm, &dfs, start, size, - blocks, total_allocated_on_err); -} - -static int __alloc_contig_try_harder(struct drm_buddy *mm, - u64 size, - u64 min_block_size, - struct list_head *blocks) -{ - u64 rhs_offset, lhs_offset, lhs_size, filled; - struct drm_buddy_block *block; - unsigned int tree, order; - LIST_HEAD(blocks_lhs); - unsigned long pages; - u64 modify_size; - int err; - - modify_size =3D rounddown_pow_of_two(size); - pages =3D modify_size >> ilog2(mm->chunk_size); - order =3D fls(pages) - 1; - if (order =3D=3D 0) - return -ENOSPC; - - for_each_free_tree(tree) { - struct rb_root *root; - struct rb_node *iter; - - root =3D &mm->free_trees[tree][order]; - if (rbtree_is_empty(root)) - continue; - - iter =3D rb_last(root); - while (iter) { - block =3D rbtree_get_free_block(iter); - - /* Allocate blocks traversing RHS */ - rhs_offset =3D drm_buddy_block_offset(block); - err =3D __drm_buddy_alloc_range(mm, rhs_offset, size, - &filled, blocks); - if (!err || err !=3D -ENOSPC) - return err; - - lhs_size =3D max((size - filled), min_block_size); - if (!IS_ALIGNED(lhs_size, min_block_size)) - lhs_size =3D round_up(lhs_size, min_block_size); - - /* Allocate blocks traversing LHS */ - lhs_offset =3D drm_buddy_block_offset(block) - lhs_size; - err =3D __drm_buddy_alloc_range(mm, lhs_offset, lhs_size, - NULL, &blocks_lhs); - if (!err) { - list_splice(&blocks_lhs, blocks); - return 0; - } else if (err !=3D -ENOSPC) { - drm_buddy_free_list_internal(mm, blocks); - return err; - } - /* Free blocks for the next iteration */ - drm_buddy_free_list_internal(mm, blocks); - - iter =3D rb_prev(iter); - } - } - - return -ENOSPC; -} - -/** - * drm_buddy_block_trim - free unused pages - * - * @mm: DRM buddy manager - * @start: start address to begin the trimming. - * @new_size: original size requested - * @blocks: Input and output list of allocated blocks. - * MUST contain single block as input to be trimmed. - * On success will contain the newly allocated blocks - * making up the @new_size. Blocks always appear in - * ascending order - * - * For contiguous allocation, we round up the size to the nearest - * power of two value, drivers consume *actual* size, so remaining - * portions are unused and can be optionally freed with this function - * - * Returns: - * 0 on success, error code on failure. 
- */ -int drm_buddy_block_trim(struct drm_buddy *mm, - u64 *start, - u64 new_size, - struct list_head *blocks) -{ - struct drm_buddy_block *parent; - struct drm_buddy_block *block; - u64 block_start, block_end; - LIST_HEAD(dfs); - u64 new_start; - int err; - - if (!list_is_singular(blocks)) - return -EINVAL; - - block =3D list_first_entry(blocks, - struct drm_buddy_block, - link); - - block_start =3D drm_buddy_block_offset(block); - block_end =3D block_start + drm_buddy_block_size(mm, block); - - if (WARN_ON(!drm_buddy_block_is_allocated(block))) - return -EINVAL; - - if (new_size > drm_buddy_block_size(mm, block)) - return -EINVAL; - - if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size)) - return -EINVAL; - - if (new_size =3D=3D drm_buddy_block_size(mm, block)) - return 0; - - new_start =3D block_start; - if (start) { - new_start =3D *start; - - if (new_start < block_start) - return -EINVAL; - - if (!IS_ALIGNED(new_start, mm->chunk_size)) - return -EINVAL; - - if (range_overflows(new_start, new_size, block_end)) - return -EINVAL; - } - - list_del(&block->link); - mark_free(mm, block); - mm->avail +=3D drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail +=3D drm_buddy_block_size(mm, block); - - /* Prevent recursively freeing this node */ - parent =3D block->parent; - block->parent =3D NULL; - - list_add(&block->tmp_link, &dfs); - err =3D __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL); - if (err) { - mark_allocated(mm, block); - mm->avail -=3D drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -=3D drm_buddy_block_size(mm, block); - list_add(&block->link, blocks); - } - - block->parent =3D parent; - return err; -} -EXPORT_SYMBOL(drm_buddy_block_trim); - -static struct drm_buddy_block * -__drm_buddy_alloc_blocks(struct drm_buddy *mm, - u64 start, u64 end, - unsigned int order, - unsigned long flags) -{ - if (flags & DRM_BUDDY_RANGE_ALLOCATION) - /* Allocate traversing within the range */ - return __drm_buddy_alloc_range_bias(mm, start, end, - order, flags); - else - /* Allocate from freetree */ - return alloc_from_freetree(mm, order, flags); -} - -/** - * drm_buddy_alloc_blocks - allocate power-of-two blocks - * - * @mm: DRM buddy manager to allocate from - * @start: start of the allowed range for this block - * @end: end of the allowed range for this block - * @size: size of the allocation in bytes - * @min_block_size: alignment of the allocation - * @blocks: output list head to add allocated blocks - * @flags: DRM_BUDDY_*_ALLOCATION flags - * - * alloc_range_bias() called on range limitations, which traverses - * the tree and returns the desired block. - * - * alloc_from_freetree() called when *no* range restrictions - * are enforced, which picks the block from the freetree. - * - * Returns: - * 0 on success, error code on failure. 
- */ -int drm_buddy_alloc_blocks(struct drm_buddy *mm, - u64 start, u64 end, u64 size, - u64 min_block_size, - struct list_head *blocks, - unsigned long flags) -{ - struct drm_buddy_block *block =3D NULL; - u64 original_size, original_min_size; - unsigned int min_order, order; - LIST_HEAD(allocated); - unsigned long pages; - int err; - - if (size < mm->chunk_size) - return -EINVAL; - - if (min_block_size < mm->chunk_size) - return -EINVAL; - - if (!is_power_of_2(min_block_size)) - return -EINVAL; - - if (!IS_ALIGNED(start | end | size, mm->chunk_size)) - return -EINVAL; - - if (end > mm->size) - return -EINVAL; - - if (range_overflows(start, size, mm->size)) - return -EINVAL; - - /* Actual range allocation */ - if (start + size =3D=3D end) { - if (!IS_ALIGNED(start | end, min_block_size)) - return -EINVAL; - - return __drm_buddy_alloc_range(mm, start, size, NULL, blocks); - } - - original_size =3D size; - original_min_size =3D min_block_size; - - /* Roundup the size to power of 2 */ - if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) { - size =3D roundup_pow_of_two(size); - min_block_size =3D size; - /* Align size value to min_block_size */ - } else if (!IS_ALIGNED(size, min_block_size)) { - size =3D round_up(size, min_block_size); - } - - pages =3D size >> ilog2(mm->chunk_size); - order =3D fls(pages) - 1; - min_order =3D ilog2(min_block_size) - ilog2(mm->chunk_size); - - do { - order =3D min(order, (unsigned int)fls(pages) - 1); - BUG_ON(order > mm->max_order); - BUG_ON(order < min_order); - - do { - block =3D __drm_buddy_alloc_blocks(mm, start, - end, - order, - flags); - if (!IS_ERR(block)) - break; - - if (order-- =3D=3D min_order) { - /* Try allocation through force merge method */ - if (mm->clear_avail && - !__force_merge(mm, start, end, min_order)) { - block =3D __drm_buddy_alloc_blocks(mm, start, - end, - min_order, - flags); - if (!IS_ERR(block)) { - order =3D min_order; - break; - } - } - - /* - * Try contiguous block allocation through - * try harder method. 
- */ - if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION && - !(flags & DRM_BUDDY_RANGE_ALLOCATION)) - return __alloc_contig_try_harder(mm, - original_size, - original_min_size, - blocks); - err =3D -ENOSPC; - goto err_free; - } - } while (1); - - mark_allocated(mm, block); - mm->avail -=3D drm_buddy_block_size(mm, block); - if (drm_buddy_block_is_clear(block)) - mm->clear_avail -=3D drm_buddy_block_size(mm, block); - kmemleak_update_trace(block); - list_add_tail(&block->link, &allocated); - - pages -=3D BIT(order); - - if (!pages) - break; - } while (1); - - /* Trim the allocated block to the required size */ - if (!(flags & DRM_BUDDY_TRIM_DISABLE) && - original_size !=3D size) { - struct list_head *trim_list; - LIST_HEAD(temp); - u64 trim_size; - - trim_list =3D &allocated; - trim_size =3D original_size; - - if (!list_is_singular(&allocated)) { - block =3D list_last_entry(&allocated, typeof(*block), link); - list_move(&block->link, &temp); - trim_list =3D &temp; - trim_size =3D drm_buddy_block_size(mm, block) - - (size - original_size); - } - - drm_buddy_block_trim(mm, - NULL, - trim_size, - trim_list); - - if (!list_empty(&temp)) - list_splice_tail(trim_list, &allocated); - } - - list_splice_tail(&allocated, blocks); - return 0; - -err_free: - drm_buddy_free_list_internal(mm, &allocated); - return err; -} -EXPORT_SYMBOL(drm_buddy_alloc_blocks); - /** * drm_buddy_block_print - print block information * - * @mm: DRM buddy manager - * @block: DRM buddy block + * @mm: GPU buddy manager + * @block: GPU buddy block * @p: DRM printer to use */ -void drm_buddy_block_print(struct drm_buddy *mm, - struct drm_buddy_block *block, +void drm_buddy_block_print(struct gpu_buddy *mm, struct gpu_buddy_block *b= lock, struct drm_printer *p) { - u64 start =3D drm_buddy_block_offset(block); - u64 size =3D drm_buddy_block_size(mm, block); + u64 start =3D gpu_buddy_block_offset(block); + u64 size =3D gpu_buddy_block_size(mm, block); =20 drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size); } @@ -1267,27 +30,32 @@ EXPORT_SYMBOL(drm_buddy_block_print); /** * drm_buddy_print - print allocator state * - * @mm: DRM buddy manager + * @mm: GPU buddy manager * @p: DRM printer to use */ -void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p) +void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p) { int order; =20 - drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_= free: %lluMiB\n", - mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, mm->clear_avai= l >> 20); + drm_printf( + p, + "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB, clear_free: %lluMiB= \n", + mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20, + mm->clear_avail >> 20); =20 for (order =3D mm->max_order; order >=3D 0; order--) { - struct drm_buddy_block *block, *tmp; + struct gpu_buddy_block *block, *tmp; struct rb_root *root; u64 count =3D 0, free; unsigned int tree; =20 - for_each_free_tree(tree) { + for_each_free_tree(tree) + { root =3D &mm->free_trees[tree][order]; =20 - rbtree_postorder_for_each_entry_safe(block, tmp, root, rb) { - BUG_ON(!drm_buddy_block_is_free(block)); + rbtree_postorder_for_each_entry_safe(block, tmp, root, + rb) { + BUG_ON(!gpu_buddy_block_is_free(block)); count++; } } @@ -1305,22 +73,5 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_= printer *p) } EXPORT_SYMBOL(drm_buddy_print); =20 -static void drm_buddy_module_exit(void) -{ - kmem_cache_destroy(slab_blocks); -} - -static int __init drm_buddy_module_init(void) -{ - slab_blocks =3D 
KMEM_CACHE(drm_buddy_block, 0); - if (!slab_blocks) - return -ENOMEM; - - return 0; -} - -module_init(drm_buddy_module_init); -module_exit(drm_buddy_module_exit); - -MODULE_DESCRIPTION("DRM Buddy Allocator"); +MODULE_DESCRIPTION("DRM-specific GPU Buddy Allocator Print Helpers"); MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 5e939004b646..859aeca87c19 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -38,6 +38,7 @@ config DRM_I915 select CEC_CORE if CEC_NOTIFIER select VMAP_PFN select DRM_TTM + select GPU_BUDDY select DRM_BUDDY select AUXILIARY_BUS help diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915= /i915_scatterlist.c index 4d830740946d..6a34dae13769 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -7,7 +7,7 @@ #include "i915_scatterlist.h" #include "i915_ttm_buddy_manager.h" =20 -#include +#include #include =20 #include @@ -167,9 +167,9 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(st= ruct ttm_resource *res, struct i915_ttm_buddy_resource *bman_res =3D to_ttm_buddy_resource(res); const u64 size =3D res->size; const u32 max_segment =3D round_down(UINT_MAX, page_alignment); - struct drm_buddy *mm =3D bman_res->mm; + struct gpu_buddy *mm =3D bman_res->mm; struct list_head *blocks =3D &bman_res->blocks; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct i915_refct_sgt *rsgt; struct scatterlist *sg; struct sg_table *st; @@ -202,8 +202,8 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(st= ruct ttm_resource *res, list_for_each_entry(block, blocks, link) { u64 block_size, offset; =20 - block_size =3D min_t(u64, size, drm_buddy_block_size(mm, block)); - offset =3D drm_buddy_block_offset(block); + block_size =3D min_t(u64, size, gpu_buddy_block_size(mm, block)); + offset =3D gpu_buddy_block_offset(block); =20 while (block_size) { u64 len; diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/dr= m/i915/i915_ttm_buddy_manager.c index d5c6e6605086..f43d7f2771ad 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -4,6 +4,7 @@ */ =20 #include +#include =20 #include #include @@ -16,7 +17,7 @@ =20 struct i915_ttm_buddy_manager { struct ttm_resource_manager manager; - struct drm_buddy mm; + struct gpu_buddy mm; struct list_head reserved; struct mutex lock; unsigned long visible_size; @@ -38,7 +39,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_m= anager *man, { struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); struct i915_ttm_buddy_resource *bman_res; - struct drm_buddy *mm =3D &bman->mm; + struct gpu_buddy *mm =3D &bman->mm; unsigned long n_pages, lpfn; u64 min_page_size; u64 size; @@ -57,13 +58,13 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource= _manager *man, bman_res->mm =3D mm; =20 if (place->flags & TTM_PL_FLAG_TOPDOWN) - bman_res->flags |=3D DRM_BUDDY_TOPDOWN_ALLOCATION; + bman_res->flags |=3D GPU_BUDDY_TOPDOWN_ALLOCATION; =20 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) - bman_res->flags |=3D DRM_BUDDY_CONTIGUOUS_ALLOCATION; + bman_res->flags |=3D GPU_BUDDY_CONTIGUOUS_ALLOCATION; =20 if (place->fpfn || lpfn !=3D man->size) - bman_res->flags |=3D DRM_BUDDY_RANGE_ALLOCATION; + bman_res->flags |=3D GPU_BUDDY_RANGE_ALLOCATION; =20 GEM_BUG_ON(!bman_res->base.size); size =3D bman_res->base.size; @@ -89,7 +90,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_m= 
anager *man, goto err_free_res; } =20 - err =3D drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, + err =3D gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, (u64)lpfn << PAGE_SHIFT, (u64)n_pages << PAGE_SHIFT, min_page_size, @@ -101,15 +102,15 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resour= ce_manager *man, if (lpfn <=3D bman->visible_size) { bman_res->used_visible_size =3D PFN_UP(bman_res->base.size); } else { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; =20 list_for_each_entry(block, &bman_res->blocks, link) { unsigned long start =3D - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; =20 if (start < bman->visible_size) { unsigned long end =3D start + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); =20 bman_res->used_visible_size +=3D min(end, bman->visible_size) - start; @@ -126,7 +127,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource= _manager *man, return 0; =20 err_free_blocks: - drm_buddy_free_list(mm, &bman_res->blocks, 0); + gpu_buddy_free_list(mm, &bman_res->blocks, 0); mutex_unlock(&bman->lock); err_free_res: ttm_resource_fini(man, &bman_res->base); @@ -141,7 +142,7 @@ static void i915_ttm_buddy_man_free(struct ttm_resource= _manager *man, struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); =20 mutex_lock(&bman->lock); - drm_buddy_free_list(&bman->mm, &bman_res->blocks, 0); + gpu_buddy_free_list(&bman->mm, &bman_res->blocks, 0); bman->visible_avail +=3D bman_res->used_visible_size; mutex_unlock(&bman->lock); =20 @@ -156,8 +157,8 @@ static bool i915_ttm_buddy_man_intersects(struct ttm_re= source_manager *man, { struct i915_ttm_buddy_resource *bman_res =3D to_ttm_buddy_resource(res); struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); - struct drm_buddy *mm =3D &bman->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm =3D &bman->mm; + struct gpu_buddy_block *block; =20 if (!place->fpfn && !place->lpfn) return true; @@ -176,9 +177,9 @@ static bool i915_ttm_buddy_man_intersects(struct ttm_re= source_manager *man, /* Check each drm buddy block individually */ list_for_each_entry(block, &bman_res->blocks, link) { unsigned long fpfn =3D - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn =3D fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); =20 if (place->fpfn < lpfn && place->lpfn > fpfn) return true; @@ -194,8 +195,8 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_re= source_manager *man, { struct i915_ttm_buddy_resource *bman_res =3D to_ttm_buddy_resource(res); struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); - struct drm_buddy *mm =3D &bman->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm =3D &bman->mm; + struct gpu_buddy_block *block; =20 if (!place->fpfn && !place->lpfn) return true; @@ -209,9 +210,9 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_re= source_manager *man, /* Check each drm buddy block individually */ list_for_each_entry(block, &bman_res->blocks, link) { unsigned long fpfn =3D - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn =3D fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); =20 if (fpfn < place->fpfn || lpfn > place->lpfn) return false; @@ -224,7 +225,7 @@ static void 
i915_ttm_buddy_man_debug(struct ttm_resourc= e_manager *man, struct drm_printer *printer) { struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); - struct drm_buddy_block *block; + struct gpu_buddy_block *block; =20 mutex_lock(&bman->lock); drm_printf(printer, "default_page_size: %lluKiB\n", @@ -293,7 +294,7 @@ int i915_ttm_buddy_man_init(struct ttm_device *bdev, if (!bman) return -ENOMEM; =20 - err =3D drm_buddy_init(&bman->mm, size, chunk_size); + err =3D gpu_buddy_init(&bman->mm, size, chunk_size); if (err) goto err_free_bman; =20 @@ -333,7 +334,7 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, un= signed int type) { struct ttm_resource_manager *man =3D ttm_manager_type(bdev, type); struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); - struct drm_buddy *mm =3D &bman->mm; + struct gpu_buddy *mm =3D &bman->mm; int ret; =20 ttm_resource_manager_set_used(man, false); @@ -345,8 +346,8 @@ int i915_ttm_buddy_man_fini(struct ttm_device *bdev, un= signed int type) ttm_set_driver_manager(bdev, type, NULL); =20 mutex_lock(&bman->lock); - drm_buddy_free_list(mm, &bman->reserved, 0); - drm_buddy_fini(mm); + gpu_buddy_free_list(mm, &bman->reserved, 0); + gpu_buddy_fini(mm); bman->visible_avail +=3D bman->visible_reserved; WARN_ON_ONCE(bman->visible_avail !=3D bman->visible_size); mutex_unlock(&bman->lock); @@ -371,15 +372,15 @@ int i915_ttm_buddy_man_reserve(struct ttm_resource_ma= nager *man, u64 start, u64 size) { struct i915_ttm_buddy_manager *bman =3D to_buddy_manager(man); - struct drm_buddy *mm =3D &bman->mm; + struct gpu_buddy *mm =3D &bman->mm; unsigned long fpfn =3D start >> PAGE_SHIFT; unsigned long flags =3D 0; int ret; =20 - flags |=3D DRM_BUDDY_RANGE_ALLOCATION; + flags |=3D GPU_BUDDY_RANGE_ALLOCATION; =20 mutex_lock(&bman->lock); - ret =3D drm_buddy_alloc_blocks(mm, start, + ret =3D gpu_buddy_alloc_blocks(mm, start, start + size, size, mm->chunk_size, &bman->reserved, diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h b/drivers/gpu/dr= m/i915/i915_ttm_buddy_manager.h index d64620712830..4a92dcf09766 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.h @@ -13,14 +13,14 @@ =20 struct ttm_device; struct ttm_resource_manager; -struct drm_buddy; +struct gpu_buddy; =20 /** * struct i915_ttm_buddy_resource * * @base: struct ttm_resource base class we extend * @blocks: the list of struct i915_buddy_block for this resource/allocati= on - * @flags: DRM_BUDDY_*_ALLOCATION flags + * @flags: GPU_BUDDY_*_ALLOCATION flags * @used_visible_size: How much of this resource, if any, uses the CPU vis= ible * portion, in pages. 
* @mm: the struct i915_buddy_mm for this resource @@ -33,7 +33,7 @@ struct i915_ttm_buddy_resource { struct list_head blocks; unsigned long flags; unsigned long used_visible_size; - struct drm_buddy *mm; + struct gpu_buddy *mm; }; =20 /** diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers= /gpu/drm/i915/selftests/intel_memory_region.c index 7b856b5090f9..8307390943a2 100644 --- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -6,7 +6,7 @@ #include #include =20 -#include +#include =20 #include "../i915_selftest.h" =20 @@ -371,7 +371,7 @@ static int igt_mock_splintered_region(void *arg) struct drm_i915_private *i915 =3D mem->i915; struct i915_ttm_buddy_resource *res; struct drm_i915_gem_object *obj; - struct drm_buddy *mm; + struct gpu_buddy *mm; unsigned int expected_order; LIST_HEAD(objects); u64 size; @@ -447,8 +447,8 @@ static int igt_mock_max_segment(void *arg) struct drm_i915_private *i915 =3D mem->i915; struct i915_ttm_buddy_resource *res; struct drm_i915_gem_object *obj; - struct drm_buddy_block *block; - struct drm_buddy *mm; + struct gpu_buddy_block *block; + struct gpu_buddy *mm; struct list_head *blocks; struct scatterlist *sg; I915_RND_STATE(prng); @@ -487,8 +487,8 @@ static int igt_mock_max_segment(void *arg) mm =3D res->mm; size =3D 0; list_for_each_entry(block, blocks, link) { - if (drm_buddy_block_size(mm, block) > size) - size =3D drm_buddy_block_size(mm, block); + if (gpu_buddy_block_size(mm, block) > size) + size =3D gpu_buddy_block_size(mm, block); } if (size < max_segment) { pr_err("%s: Failed to create a huge contiguous block [> %u], largest blo= ck %lld\n", @@ -527,14 +527,14 @@ static u64 igt_object_mappable_total(struct drm_i915_= gem_object *obj) struct intel_memory_region *mr =3D obj->mm.region; struct i915_ttm_buddy_resource *bman_res =3D to_ttm_buddy_resource(obj->mm.res); - struct drm_buddy *mm =3D bman_res->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm =3D bman_res->mm; + struct gpu_buddy_block *block; u64 total; =20 total =3D 0; list_for_each_entry(block, &bman_res->blocks, link) { - u64 start =3D drm_buddy_block_offset(block); - u64 end =3D start + drm_buddy_block_size(mm, block); + u64 start =3D gpu_buddy_block_offset(block); + u64 end =3D start + gpu_buddy_block_size(mm, block); =20 if (start < resource_size(&mr->io)) total +=3D min_t(u64, end, resource_size(&mr->io)) - start; diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile index 87d5d5f9332a..d2e2e3d8349a 100644 --- a/drivers/gpu/drm/tests/Makefile +++ b/drivers/gpu/drm/tests/Makefile @@ -7,7 +7,6 @@ obj-$(CONFIG_DRM_KUNIT_TEST) +=3D \ drm_atomic_test.o \ drm_atomic_state_test.o \ drm_bridge_test.o \ - drm_buddy_test.o \ drm_cmdline_parser_test.o \ drm_connector_test.o \ drm_damage_helper_test.o \ diff --git a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c b/drivers/gpu= /drm/ttm/tests/ttm_bo_validate_test.c index 2eda87882e65..ffa12473077c 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c +++ b/drivers/gpu/drm/ttm/tests/ttm_bo_validate_test.c @@ -3,6 +3,7 @@ * Copyright =C2=A9 2023 Intel Corporation */ #include +#include #include =20 #include @@ -251,7 +252,7 @@ static void ttm_bo_validate_basic(struct kunit *test) NULL, &dummy_ttm_bo_destroy); KUNIT_EXPECT_EQ(test, err, 0); =20 - snd_place =3D ttm_place_kunit_init(test, snd_mem, DRM_BUDDY_TOPDOWN_ALLOC= ATION); + snd_place =3D ttm_place_kunit_init(test, snd_mem, GPU_BUDDY_TOPDOWN_ALLOC= ATION); 
snd_placement =3D ttm_placement_kunit_init(test, snd_place, 1); =20 err =3D ttm_bo_validate(bo, snd_placement, &ctx_val); @@ -263,7 +264,7 @@ static void ttm_bo_validate_basic(struct kunit *test) KUNIT_EXPECT_TRUE(test, ttm_tt_is_populated(bo->ttm)); KUNIT_EXPECT_EQ(test, bo->resource->mem_type, snd_mem); KUNIT_EXPECT_EQ(test, bo->resource->placement, - DRM_BUDDY_TOPDOWN_ALLOCATION); + GPU_BUDDY_TOPDOWN_ALLOCATION); =20 ttm_bo_fini(bo); ttm_mock_manager_fini(priv->ttm_dev, snd_mem); diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c b/drivers/gpu/drm= /ttm/tests/ttm_mock_manager.c index dd395229e388..294d56d9067e 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c +++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.c @@ -31,7 +31,7 @@ static int ttm_mock_manager_alloc(struct ttm_resource_man= ager *man, { struct ttm_mock_manager *manager =3D to_mock_mgr(man); struct ttm_mock_resource *mock_res; - struct drm_buddy *mm =3D &manager->mm; + struct gpu_buddy *mm =3D &manager->mm; u64 lpfn, fpfn, alloc_size; int err; =20 @@ -47,14 +47,14 @@ static int ttm_mock_manager_alloc(struct ttm_resource_m= anager *man, INIT_LIST_HEAD(&mock_res->blocks); =20 if (place->flags & TTM_PL_FLAG_TOPDOWN) - mock_res->flags |=3D DRM_BUDDY_TOPDOWN_ALLOCATION; + mock_res->flags |=3D GPU_BUDDY_TOPDOWN_ALLOCATION; =20 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) - mock_res->flags |=3D DRM_BUDDY_CONTIGUOUS_ALLOCATION; + mock_res->flags |=3D GPU_BUDDY_CONTIGUOUS_ALLOCATION; =20 alloc_size =3D (uint64_t)mock_res->base.size; mutex_lock(&manager->lock); - err =3D drm_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size, + err =3D gpu_buddy_alloc_blocks(mm, fpfn, lpfn, alloc_size, manager->default_page_size, &mock_res->blocks, mock_res->flags); @@ -67,7 +67,7 @@ static int ttm_mock_manager_alloc(struct ttm_resource_man= ager *man, return 0; =20 error_free_blocks: - drm_buddy_free_list(mm, &mock_res->blocks, 0); + gpu_buddy_free_list(mm, &mock_res->blocks, 0); ttm_resource_fini(man, &mock_res->base); mutex_unlock(&manager->lock); =20 @@ -79,10 +79,10 @@ static void ttm_mock_manager_free(struct ttm_resource_m= anager *man, { struct ttm_mock_manager *manager =3D to_mock_mgr(man); struct ttm_mock_resource *mock_res =3D to_mock_mgr_resource(res); - struct drm_buddy *mm =3D &manager->mm; + struct gpu_buddy *mm =3D &manager->mm; =20 mutex_lock(&manager->lock); - drm_buddy_free_list(mm, &mock_res->blocks, 0); + gpu_buddy_free_list(mm, &mock_res->blocks, 0); mutex_unlock(&manager->lock); =20 ttm_resource_fini(man, res); @@ -106,7 +106,7 @@ int ttm_mock_manager_init(struct ttm_device *bdev, u32 = mem_type, u32 size) =20 mutex_init(&manager->lock); =20 - err =3D drm_buddy_init(&manager->mm, size, PAGE_SIZE); + err =3D gpu_buddy_init(&manager->mm, size, PAGE_SIZE); =20 if (err) { kfree(manager); @@ -142,7 +142,7 @@ void ttm_mock_manager_fini(struct ttm_device *bdev, u32= mem_type) ttm_resource_manager_set_used(man, false); =20 mutex_lock(&mock_man->lock); - drm_buddy_fini(&mock_man->mm); + gpu_buddy_fini(&mock_man->mm); mutex_unlock(&mock_man->lock); =20 ttm_set_driver_manager(bdev, mem_type, NULL); diff --git a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h b/drivers/gpu/drm= /ttm/tests/ttm_mock_manager.h index e4c95f86a467..08710756fd8e 100644 --- a/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h +++ b/drivers/gpu/drm/ttm/tests/ttm_mock_manager.h @@ -5,11 +5,11 @@ #ifndef TTM_MOCK_MANAGER_H #define TTM_MOCK_MANAGER_H =20 -#include +#include =20 struct ttm_mock_manager { struct ttm_resource_manager man; - struct drm_buddy mm; + struct 
gpu_buddy mm; u64 default_page_size; /* protects allocations of mock buffer objects */ struct mutex lock; diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 4b288eb3f5b0..982ef754742e 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -11,6 +11,7 @@ config DRM_XE # the shmem_readpage() which depends upon tmpfs select SHMEM select TMPFS + select GPU_BUDDY select DRM_BUDDY select DRM_CLIENT_SELECTION select DRM_KMS_HELPER diff --git a/drivers/gpu/drm/xe/xe_res_cursor.h b/drivers/gpu/drm/xe/xe_res= _cursor.h index 4e00008b7081..5f4ab08c0686 100644 --- a/drivers/gpu/drm/xe/xe_res_cursor.h +++ b/drivers/gpu/drm/xe/xe_res_cursor.h @@ -58,7 +58,7 @@ struct xe_res_cursor { /** @dma_addr: Current element in a struct drm_pagemap_addr array */ const struct drm_pagemap_addr *dma_addr; /** @mm: Buddy allocator for VRAM cursor */ - struct drm_buddy *mm; + struct gpu_buddy *mm; /** * @dma_start: DMA start address for the current segment. * This may be different to @dma_addr.addr since elements in @@ -69,7 +69,7 @@ struct xe_res_cursor { u64 dma_seg_size; }; =20 -static struct drm_buddy *xe_res_get_buddy(struct ttm_resource *res) +static struct gpu_buddy *xe_res_get_buddy(struct ttm_resource *res) { struct ttm_resource_manager *mgr; =20 @@ -104,30 +104,30 @@ static inline void xe_res_first(struct ttm_resource *= res, case XE_PL_STOLEN: case XE_PL_VRAM0: case XE_PL_VRAM1: { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct list_head *head, *next; - struct drm_buddy *mm =3D xe_res_get_buddy(res); + struct gpu_buddy *mm =3D xe_res_get_buddy(res); =20 head =3D &to_xe_ttm_vram_mgr_resource(res)->blocks; =20 block =3D list_first_entry_or_null(head, - struct drm_buddy_block, + struct gpu_buddy_block, link); if (!block) goto fallback; =20 - while (start >=3D drm_buddy_block_size(mm, block)) { - start -=3D drm_buddy_block_size(mm, block); + while (start >=3D gpu_buddy_block_size(mm, block)) { + start -=3D gpu_buddy_block_size(mm, block); =20 next =3D block->link.next; if (next !=3D head) - block =3D list_entry(next, struct drm_buddy_block, + block =3D list_entry(next, struct gpu_buddy_block, link); } =20 cur->mm =3D mm; - cur->start =3D drm_buddy_block_offset(block) + start; - cur->size =3D min(drm_buddy_block_size(mm, block) - start, + cur->start =3D gpu_buddy_block_offset(block) + start; + cur->size =3D min(gpu_buddy_block_size(mm, block) - start, size); cur->remaining =3D size; cur->node =3D block; @@ -259,7 +259,7 @@ static inline void xe_res_first_dma(const struct drm_pa= gemap_addr *dma_addr, */ static inline void xe_res_next(struct xe_res_cursor *cur, u64 size) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct list_head *next; u64 start; =20 @@ -295,18 +295,18 @@ static inline void xe_res_next(struct xe_res_cursor *= cur, u64 size) block =3D cur->node; =20 next =3D block->link.next; - block =3D list_entry(next, struct drm_buddy_block, link); + block =3D list_entry(next, struct gpu_buddy_block, link); =20 =20 - while (start >=3D drm_buddy_block_size(cur->mm, block)) { - start -=3D drm_buddy_block_size(cur->mm, block); + while (start >=3D gpu_buddy_block_size(cur->mm, block)) { + start -=3D gpu_buddy_block_size(cur->mm, block); =20 next =3D block->link.next; - block =3D list_entry(next, struct drm_buddy_block, link); + block =3D list_entry(next, struct gpu_buddy_block, link); } =20 - cur->start =3D drm_buddy_block_offset(block) + start; - cur->size =3D min(drm_buddy_block_size(cur->mm, block) - start, + cur->start 
=3D gpu_buddy_block_offset(block) + start; + cur->size =3D min(gpu_buddy_block_size(cur->mm, block) - start, cur->remaining); cur->node =3D block; break; diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c index 55c5a0eb82e1..26b59898d4f4 100644 --- a/drivers/gpu/drm/xe/xe_svm.c +++ b/drivers/gpu/drm/xe/xe_svm.c @@ -676,7 +676,7 @@ static u64 block_offset_to_pfn(struct xe_vram_region *v= r, u64 offset) return PHYS_PFN(offset + vr->hpa_base); } =20 -static struct drm_buddy *vram_to_buddy(struct xe_vram_region *vram) +static struct gpu_buddy *vram_to_buddy(struct xe_vram_region *vram) { return &vram->ttm.mm; } @@ -687,16 +687,16 @@ static int xe_svm_populate_devmem_pfn(struct drm_page= map_devmem *devmem_allocati struct xe_bo *bo =3D to_xe_bo(devmem_allocation); struct ttm_resource *res =3D bo->ttm.resource; struct list_head *blocks =3D &to_xe_ttm_vram_mgr_resource(res)->blocks; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; int j =3D 0; =20 list_for_each_entry(block, blocks, link) { struct xe_vram_region *vr =3D block->private; - struct drm_buddy *buddy =3D vram_to_buddy(vr); - u64 block_pfn =3D block_offset_to_pfn(vr, drm_buddy_block_offset(block)); + struct gpu_buddy *buddy =3D vram_to_buddy(vr); + u64 block_pfn =3D block_offset_to_pfn(vr, gpu_buddy_block_offset(block)); int i; =20 - for (i =3D 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i) + for (i =3D 0; i < gpu_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i) pfn[j++] =3D block_pfn + i; } =20 @@ -863,7 +863,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagema= p *dpagemap, struct xe_vram_region *vr =3D container_of(dpagemap, typeof(*vr), dpagema= p); struct xe_device *xe =3D vr->xe; struct device *dev =3D xe->drm.dev; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; struct xe_validation_ctx vctx; struct list_head *blocks; struct drm_exec exec; diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c b/drivers/gpu/drm/xe/xe_t= tm_vram_mgr.c index 9f70802fce92..8192957261e8 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr.c @@ -4,8 +4,9 @@ * Copyright (C) 2021-2022 Red Hat */ =20 -#include +#include #include +#include =20 #include #include @@ -17,16 +18,16 @@ #include "xe_ttm_vram_mgr.h" #include "xe_vram_types.h" =20 -static inline struct drm_buddy_block * +static inline struct gpu_buddy_block * xe_ttm_vram_mgr_first_block(struct list_head *list) { - return list_first_entry_or_null(list, struct drm_buddy_block, link); + return list_first_entry_or_null(list, struct gpu_buddy_block, link); } =20 -static inline bool xe_is_vram_mgr_blocks_contiguous(struct drm_buddy *mm, +static inline bool xe_is_vram_mgr_blocks_contiguous(struct gpu_buddy *mm, struct list_head *head) { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; u64 start, size; =20 block =3D xe_ttm_vram_mgr_first_block(head); @@ -34,12 +35,12 @@ static inline bool xe_is_vram_mgr_blocks_contiguous(str= uct drm_buddy *mm, return false; =20 while (head !=3D block->link.next) { - start =3D drm_buddy_block_offset(block); - size =3D drm_buddy_block_size(mm, block); + start =3D gpu_buddy_block_offset(block); + size =3D gpu_buddy_block_size(mm, block); =20 - block =3D list_entry(block->link.next, struct drm_buddy_block, + block =3D list_entry(block->link.next, struct gpu_buddy_block, link); - if (start + size !=3D drm_buddy_block_offset(block)) + if (start + size !=3D gpu_buddy_block_offset(block)) return false; } =20 @@ -53,7 +54,7 @@ static int 
xe_ttm_vram_mgr_new(struct ttm_resource_manage= r *man, { struct xe_ttm_vram_mgr *mgr =3D to_xe_ttm_vram_mgr(man); struct xe_ttm_vram_mgr_resource *vres; - struct drm_buddy *mm =3D &mgr->mm; + struct gpu_buddy *mm =3D &mgr->mm; u64 size, min_page_size; unsigned long lpfn; int err; @@ -80,10 +81,10 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_mana= ger *man, INIT_LIST_HEAD(&vres->blocks); =20 if (place->flags & TTM_PL_FLAG_TOPDOWN) - vres->flags |=3D DRM_BUDDY_TOPDOWN_ALLOCATION; + vres->flags |=3D GPU_BUDDY_TOPDOWN_ALLOCATION; =20 if (place->fpfn || lpfn !=3D man->size >> PAGE_SHIFT) - vres->flags |=3D DRM_BUDDY_RANGE_ALLOCATION; + vres->flags |=3D GPU_BUDDY_RANGE_ALLOCATION; =20 if (WARN_ON(!vres->base.size)) { err =3D -EINVAL; @@ -119,27 +120,27 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_ma= nager *man, lpfn =3D max_t(unsigned long, place->fpfn + (size >> PAGE_SHIFT), lpfn); } =20 - err =3D drm_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, + err =3D gpu_buddy_alloc_blocks(mm, (u64)place->fpfn << PAGE_SHIFT, (u64)lpfn << PAGE_SHIFT, size, min_page_size, &vres->blocks, vres->flags); if (err) goto error_unlock; =20 if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) + if (!gpu_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks)) size =3D vres->base.size; } =20 if (lpfn <=3D mgr->visible_size >> PAGE_SHIFT) { vres->used_visible_size =3D size; } else { - struct drm_buddy_block *block; + struct gpu_buddy_block *block; =20 list_for_each_entry(block, &vres->blocks, link) { - u64 start =3D drm_buddy_block_offset(block); + u64 start =3D gpu_buddy_block_offset(block); =20 if (start < mgr->visible_size) { - u64 end =3D start + drm_buddy_block_size(mm, block); + u64 end =3D start + gpu_buddy_block_size(mm, block); =20 vres->used_visible_size +=3D min(end, mgr->visible_size) - start; @@ -159,11 +160,11 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_ma= nager *man, * the object. 
*/ if (vres->base.placement & TTM_PL_FLAG_CONTIGUOUS) { - struct drm_buddy_block *block =3D list_first_entry(&vres->blocks, + struct gpu_buddy_block *block =3D list_first_entry(&vres->blocks, typeof(*block), link); =20 - vres->base.start =3D drm_buddy_block_offset(block) >> PAGE_SHIFT; + vres->base.start =3D gpu_buddy_block_offset(block) >> PAGE_SHIFT; } else { vres->base.start =3D XE_BO_INVALID_OFFSET; } @@ -185,10 +186,10 @@ static void xe_ttm_vram_mgr_del(struct ttm_resource_m= anager *man, struct xe_ttm_vram_mgr_resource *vres =3D to_xe_ttm_vram_mgr_resource(res); struct xe_ttm_vram_mgr *mgr =3D to_xe_ttm_vram_mgr(man); - struct drm_buddy *mm =3D &mgr->mm; + struct gpu_buddy *mm =3D &mgr->mm; =20 mutex_lock(&mgr->lock); - drm_buddy_free_list(mm, &vres->blocks, 0); + gpu_buddy_free_list(mm, &vres->blocks, 0); mgr->visible_avail +=3D vres->used_visible_size; mutex_unlock(&mgr->lock); =20 @@ -201,7 +202,7 @@ static void xe_ttm_vram_mgr_debug(struct ttm_resource_m= anager *man, struct drm_printer *printer) { struct xe_ttm_vram_mgr *mgr =3D to_xe_ttm_vram_mgr(man); - struct drm_buddy *mm =3D &mgr->mm; + struct gpu_buddy *mm =3D &mgr->mm; =20 mutex_lock(&mgr->lock); drm_printf(printer, "default_page_size: %lluKiB\n", @@ -224,8 +225,8 @@ static bool xe_ttm_vram_mgr_intersects(struct ttm_resou= rce_manager *man, struct xe_ttm_vram_mgr *mgr =3D to_xe_ttm_vram_mgr(man); struct xe_ttm_vram_mgr_resource *vres =3D to_xe_ttm_vram_mgr_resource(res); - struct drm_buddy *mm =3D &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm =3D &mgr->mm; + struct gpu_buddy_block *block; =20 if (!place->fpfn && !place->lpfn) return true; @@ -235,9 +236,9 @@ static bool xe_ttm_vram_mgr_intersects(struct ttm_resou= rce_manager *man, =20 list_for_each_entry(block, &vres->blocks, link) { unsigned long fpfn =3D - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn =3D fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); =20 if (place->fpfn < lpfn && place->lpfn > fpfn) return true; @@ -254,8 +255,8 @@ static bool xe_ttm_vram_mgr_compatible(struct ttm_resou= rce_manager *man, struct xe_ttm_vram_mgr *mgr =3D to_xe_ttm_vram_mgr(man); struct xe_ttm_vram_mgr_resource *vres =3D to_xe_ttm_vram_mgr_resource(res); - struct drm_buddy *mm =3D &mgr->mm; - struct drm_buddy_block *block; + struct gpu_buddy *mm =3D &mgr->mm; + struct gpu_buddy_block *block; =20 if (!place->fpfn && !place->lpfn) return true; @@ -265,9 +266,9 @@ static bool xe_ttm_vram_mgr_compatible(struct ttm_resou= rce_manager *man, =20 list_for_each_entry(block, &vres->blocks, link) { unsigned long fpfn =3D - drm_buddy_block_offset(block) >> PAGE_SHIFT; + gpu_buddy_block_offset(block) >> PAGE_SHIFT; unsigned long lpfn =3D fpfn + - (drm_buddy_block_size(mm, block) >> PAGE_SHIFT); + (gpu_buddy_block_size(mm, block) >> PAGE_SHIFT); =20 if (fpfn < place->fpfn || lpfn > place->lpfn) return false; @@ -297,7 +298,7 @@ static void xe_ttm_vram_mgr_fini(struct drm_device *dev= , void *arg) =20 WARN_ON_ONCE(mgr->visible_avail !=3D mgr->visible_size); =20 - drm_buddy_fini(&mgr->mm); + gpu_buddy_fini(&mgr->mm); =20 ttm_resource_manager_cleanup(&mgr->manager); =20 @@ -328,7 +329,7 @@ int __xe_ttm_vram_mgr_init(struct xe_device *xe, struct= xe_ttm_vram_mgr *mgr, mgr->visible_avail =3D io_size; =20 ttm_resource_manager_init(man, &xe->ttm, size); - err =3D drm_buddy_init(&mgr->mm, man->size, default_page_size); + err =3D gpu_buddy_init(&mgr->mm, man->size, 
default_page_size); if (err) return err; =20 @@ -376,7 +377,7 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe, if (!*sgt) return -ENOMEM; =20 - /* Determine the number of DRM_BUDDY blocks to export */ + /* Determine the number of GPU_BUDDY blocks to export */ xe_res_first(res, offset, length, &cursor); while (cursor.remaining) { num_entries++; @@ -393,10 +394,10 @@ int xe_ttm_vram_mgr_alloc_sgt(struct xe_device *xe, sg->length =3D 0; =20 /* - * Walk down DRM_BUDDY blocks to populate scatterlist nodes - * @note: Use iterator api to get first the DRM_BUDDY block + * Walk down GPU_BUDDY blocks to populate scatterlist nodes + * @note: Use iterator api to first get the GPU_BUDDY block * and the number of bytes from it. Access the following - * DRM_BUDDY block(s) if more buffer needs to exported + * GPU_BUDDY block(s) if more of the buffer needs to be exported */ xe_res_first(res, offset, length, &cursor); for_each_sgtable_sg((*sgt), sg, i) { diff --git a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h b/drivers/gpu/drm/x= e/xe_ttm_vram_mgr_types.h index a71e14818ec2..9106da056b49 100644 --- a/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h +++ b/drivers/gpu/drm/xe/xe_ttm_vram_mgr_types.h @@ -6,7 +6,7 @@ #ifndef _XE_TTM_VRAM_MGR_TYPES_H_ #define _XE_TTM_VRAM_MGR_TYPES_H_ =20 -#include +#include =20 /** @@ -18,7 +18,7 @@ struct xe_ttm_vram_mgr { /** @manager: Base TTM resource manager */ struct ttm_resource_manager manager; /** @mm: DRM buddy allocator which manages the VRAM */ - struct drm_buddy mm; + struct gpu_buddy mm; /** @visible_size: Probed size of the CPU visible portion */ u64 visible_size; /** @visible_avail: CPU visible portion still unallocated */ diff --git a/drivers/gpu/tests/Makefile b/drivers/gpu/tests/Makefile new file mode 100644 index 000000000000..31a5ff44cb4e --- /dev/null +++ b/drivers/gpu/tests/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_GPU_BUDDY_KUNIT_TEST) +=3D gpu_buddy_test.o gpu_random.o diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/tests/gpu= _buddy_test.c similarity index 68% rename from drivers/gpu/drm/tests/drm_buddy_test.c rename to drivers/gpu/tests/gpu_buddy_test.c index 5f40b5343bd8..dcd4741a905d 100644 --- a/drivers/gpu/drm/tests/drm_buddy_test.c +++ b/drivers/gpu/tests/gpu_buddy_test.c @@ -10,9 +10,9 @@ #include #include =20 -#include +#include =20 -#include "../lib/drm_random.h" +#include "gpu_random.h" =20 static unsigned int random_seed; =20 @@ -21,9 +21,9 @@ static inline u64 get_size(int order, u64 chunk_size) return (1 << order) * chunk_size; } =20 -static void drm_test_buddy_fragmentation_performance(struct kunit *test) +static void gpu_test_buddy_fragmentation_performance(struct kunit *test) { - struct drm_buddy_block *block, *tmp; + struct gpu_buddy_block *block, *tmp; int num_blocks, i, ret, count =3D 0; LIST_HEAD(allocated_blocks); unsigned long elapsed_ms; @@ -32,7 +32,7 @@ static void drm_test_buddy_fragmentation_performance(stru= ct kunit *test) LIST_HEAD(clear_list); LIST_HEAD(dirty_list); LIST_HEAD(free_list); - struct drm_buddy mm; + struct gpu_buddy mm; u64 mm_size =3D SZ_4G; ktime_t start, end; =20 @@ -47,7 +47,7 @@ static void drm_test_buddy_fragmentation_performance(stru= ct kunit *test) * quickly the allocator can satisfy larger, aligned requests from a pool= of * highly fragmented space.
*/ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); =20 num_blocks =3D mm_size / SZ_64K; @@ -55,7 +55,7 @@ static void drm_test_buddy_fragmentation_performance(stru= ct kunit *test) start =3D ktime_get(); /* Allocate with maximum fragmentation - 8K blocks with 64K alignment */ for (i =3D 0; i < num_blocks; i++) - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_= 8K, SZ_64K, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_= 8K, SZ_64K, &allocated_blocks, 0), "buddy_alloc hit an error size=3D%u\n", SZ_8K); =20 @@ -68,21 +68,21 @@ static void drm_test_buddy_fragmentation_performance(st= ruct kunit *test) } =20 /* Free with different flags to ensure no coalescing */ - drm_buddy_free_list(&mm, &clear_list, DRM_BUDDY_CLEARED); - drm_buddy_free_list(&mm, &dirty_list, 0); + gpu_buddy_free_list(&mm, &clear_list, GPU_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &dirty_list, 0); =20 for (i =3D 0; i < num_blocks; i++) - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_= 64K, SZ_64K, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_= 64K, SZ_64K, &test_blocks, 0), "buddy_alloc hit an error size=3D%u\n", SZ_64K); - drm_buddy_free_list(&mm, &test_blocks, 0); + gpu_buddy_free_list(&mm, &test_blocks, 0); =20 end =3D ktime_get(); elapsed_ms =3D ktime_to_ms(ktime_sub(end, start)); =20 kunit_info(test, "Fragmented allocation took %lu ms\n", elapsed_ms); =20 - drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); =20 /* * Reverse free order under fragmentation @@ -96,13 +96,13 @@ static void drm_test_buddy_fragmentation_performance(st= ruct kunit *test) * deallocation occurs in the opposite order of allocation, exposing the * cost difference between a linear freelist scan and an ordered tree loo= kup. 
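For scale, num_blocks here is SZ_4G / SZ_64K = 65536, so each timed phase performs on the order of 64K allocator operations; if every free has to rescan a freelist linearly, total work grows roughly quadratically with that count, which is exactly the behaviour the reverse-ordered free is meant to expose against the ordered tree lookup.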
*/ - ret =3D drm_buddy_init(&mm, mm_size, SZ_4K); + ret =3D gpu_buddy_init(&mm, mm_size, SZ_4K); KUNIT_ASSERT_EQ(test, ret, 0); =20 start =3D ktime_get(); /* Allocate maximum fragmentation */ for (i =3D 0; i < num_blocks; i++) - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, SZ_= 8K, SZ_64K, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_= 8K, SZ_64K, &allocated_blocks, 0), "buddy_alloc hit an error size=3D%u\n", SZ_8K); =20 @@ -111,28 +111,28 @@ static void drm_test_buddy_fragmentation_performance(= struct kunit *test) list_move_tail(&block->link, &free_list); count++; } - drm_buddy_free_list(&mm, &free_list, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &free_list, GPU_BUDDY_CLEARED); =20 list_for_each_entry_safe_reverse(block, tmp, &allocated_blocks, link) list_move(&block->link, &reverse_list); - drm_buddy_free_list(&mm, &reverse_list, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &reverse_list, GPU_BUDDY_CLEARED); =20 end =3D ktime_get(); elapsed_ms =3D ktime_to_ms(ktime_sub(end, start)); =20 kunit_info(test, "Reverse-ordered free took %lu ms\n", elapsed_ms); =20 - drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_range_bias(struct kunit *test) +static void gpu_test_buddy_alloc_range_bias(struct kunit *test) { u32 mm_size, size, ps, bias_size, bias_start, bias_end, bias_rem; - DRM_RND_STATE(prng, random_seed); + GPU_RND_STATE(prng, random_seed); unsigned int i, count, *order; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned long flags; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(allocated); =20 bias_size =3D SZ_1M; @@ -142,11 +142,11 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) =20 kunit_info(test, "mm_size=3D%u, ps=3D%u\n", mm_size, ps); =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), "buddy_init failed\n"); =20 count =3D mm_size / bias_size; - order =3D drm_random_order(count, &prng); + order =3D gpu_random_order(count, &prng); KUNIT_EXPECT_TRUE(test, order); =20 /* @@ -166,79 +166,79 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) =20 /* internal round_up too big */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_size + ps, bias_size, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, bias_size, bias_size); =20 /* size too big */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_size + ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc didn't fail with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, bias_size + ps, ps); =20 /* bias range too small for size */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start + ps, + gpu_buddy_alloc_blocks(&mm, bias_start + ps, bias_end, bias_size, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc didn't fail with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start + ps, bias_end, bias_size, ps); =20 /* bias misaligned */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start + ps, + gpu_buddy_alloc_blocks(&mm, bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + 
GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc h didn't fail with bias(%x-%x), size=3D%u, ps=3D%u\= n", bias_start + ps, bias_end - ps, bias_size >> 1, bias_size >> 1); =20 /* single big page */ KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_size, bias_size, &tmp, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc i failed with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, bias_size, bias_size); - drm_buddy_free_list(&mm, &tmp, 0); + gpu_buddy_free_list(&mm, &tmp, 0); =20 /* single page with internal round_up */ KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, ps, bias_size, &tmp, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, ps, bias_size); - drm_buddy_free_list(&mm, &tmp, 0); + gpu_buddy_free_list(&mm, &tmp, 0); =20 /* random size within */ size =3D max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); if (size) KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &tmp, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, size, ps); =20 bias_rem -=3D size; /* too big for current avail */ KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, bias_rem + ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc didn't fail with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, bias_rem + ps, ps); =20 @@ -248,10 +248,10 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) size =3D max(size, ps); =20 KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, size, ps); /* @@ -259,15 +259,15 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) * unallocated, and ideally not always on the bias * boundaries. */ - drm_buddy_free_list(&mm, &tmp, 0); + gpu_buddy_free_list(&mm, &tmp, 0); } else { list_splice_tail(&tmp, &allocated); } } =20 kfree(order); - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); =20 /* * Something more free-form. Idea is to pick a random starting bias @@ -278,7 +278,7 @@ static void drm_test_buddy_alloc_range_bias(struct kuni= t *test) * allocated nodes in the middle of the address space. 
*/ =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), "buddy_init failed\n"); =20 bias_start =3D round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); @@ -290,10 +290,10 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) u32 size =3D max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); =20 KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc failed with bias(%x-%x), size=3D%u, ps=3D%u\n", bias_start, bias_end, size, ps); bias_rem -=3D size; @@ -319,24 +319,24 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) KUNIT_ASSERT_EQ(test, bias_start, 0); KUNIT_ASSERT_EQ(test, bias_end, mm_size); KUNIT_ASSERT_TRUE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, bias_end, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc passed with bias(%x-%x), size=3D%u\n", bias_start, bias_end, ps); =20 - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); =20 /* - * Allocate cleared blocks in the bias range when the DRM buddy's clear a= vail is + * Allocate cleared blocks in the bias range when the GPU buddy's clear a= vail is * zero. This will validate the bias range allocation in scenarios like s= ystem boot * when no cleared blocks are available and exercise the fallback path to= o. The resulting * blocks should always be dirty. */ =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps), "buddy_init failed\n"); =20 bias_start =3D round_up(prandom_u32_state(&prng) % (mm_size - ps), ps); @@ -344,11 +344,11 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) bias_end =3D max(bias_end, bias_start + ps); bias_rem =3D bias_end - bias_start; =20 - flags =3D DRM_BUDDY_CLEAR_ALLOCATION | DRM_BUDDY_RANGE_ALLOCATION; + flags =3D GPU_BUDDY_CLEAR_ALLOCATION | GPU_BUDDY_RANGE_ALLOCATION; size =3D max(round_up(prandom_u32_state(&prng) % bias_rem, ps), ps); =20 KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, bias_start, + gpu_buddy_alloc_blocks(&mm, bias_start, bias_end, size, ps, &allocated, flags), @@ -356,27 +356,27 @@ static void drm_test_buddy_alloc_range_bias(struct ku= nit *test) bias_start, bias_end, size, ps); =20 list_for_each_entry(block, &allocated, link) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); =20 - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_clear(struct kunit *test) +static void gpu_test_buddy_alloc_clear(struct kunit *test) { unsigned long n_pages, total, i =3D 0; const unsigned long ps =3D SZ_4K; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; const int max_order =3D 12; LIST_HEAD(allocated); - struct drm_buddy mm; + struct gpu_buddy mm; unsigned int order; u32 mm_size, size; LIST_HEAD(dirty); LIST_HEAD(clean); =20 mm_size =3D SZ_4K << max_order; - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); =20 KUNIT_EXPECT_EQ(test, mm.max_order, 
max_order); =20 @@ -389,11 +389,11 @@ static void drm_test_buddy_alloc_clear(struct kunit *= test) * is indeed all dirty pages and vice versa. Free it all again, * keeping the dirty/clear status. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 5 * ps, ps, &allocated, - DRM_BUDDY_TOPDOWN_ALLOCATION), + GPU_BUDDY_TOPDOWN_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", 5 * ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); =20 n_pages =3D 10; do { @@ -406,37 +406,37 @@ static void drm_test_buddy_alloc_clear(struct kunit *= test) flags =3D 0; } else { list =3D &clean; - flags =3D DRM_BUDDY_CLEAR_ALLOCATION; + flags =3D GPU_BUDDY_CLEAR_ALLOCATION; } =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, ps, ps, list, flags), "buddy_alloc hit an error size=3D%lu\n", ps); } while (++i < n_pages); =20 list_for_each_entry(block, &clean, link) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), true); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), true); =20 list_for_each_entry(block, &dirty, link) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); =20 - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); =20 /* * Trying to go over the clear limit for some allocation. * The allocation should never fail with reasonable page-size. */ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 10 * ps, ps, &clean, - DRM_BUDDY_CLEAR_ALLOCATION), + GPU_BUDDY_CLEAR_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", 10 * ps); =20 - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); - drm_buddy_free_list(&mm, &dirty, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &dirty, 0); + gpu_buddy_fini(&mm); =20 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); =20 /* * Create a new mm. 
Intentionally fragment the address space by creating @@ -458,34 +458,34 @@ static void drm_test_buddy_alloc_clear(struct kunit *= test) else list =3D &clean; =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, ps, ps, list, 0), "buddy_alloc hit an error size=3D%lu\n", ps); } while (++i < n_pages); =20 - drm_buddy_free_list(&mm, &clean, DRM_BUDDY_CLEARED); - drm_buddy_free_list(&mm, &dirty, 0); + gpu_buddy_free_list(&mm, &clean, GPU_BUDDY_CLEARED); + gpu_buddy_free_list(&mm, &dirty, 0); =20 order =3D 1; do { size =3D SZ_4K << order; =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, size, size, &allocated, - DRM_BUDDY_CLEAR_ALLOCATION), + GPU_BUDDY_CLEAR_ALLOCATION), "buddy_alloc hit an error size=3D%u\n", size); total =3D 0; list_for_each_entry(block, &allocated, link) { if (size !=3D mm_size) - KUNIT_EXPECT_EQ(test, drm_buddy_block_is_clear(block), false); - total +=3D drm_buddy_block_size(&mm, block); + KUNIT_EXPECT_EQ(test, gpu_buddy_block_is_clear(block), false); + total +=3D gpu_buddy_block_size(&mm, block); } KUNIT_EXPECT_EQ(test, total, size); =20 - drm_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_free_list(&mm, &allocated, 0); } while (++order <=3D max_order); =20 - drm_buddy_fini(&mm); + gpu_buddy_fini(&mm); =20 /* * Create a new mm with a non power-of-two size. Allocate a random size f= rom each @@ -494,44 +494,44 @@ static void drm_test_buddy_alloc_clear(struct kunit *= test) */ mm_size =3D (SZ_4K << max_order) + (SZ_4K << (max_order - 2)); =20 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); KUNIT_EXPECT_EQ(test, mm.max_order, max_order); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_= order, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_4K << max_= order, 4 * ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", 4 * ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, SZ_4K << max_= order, + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, SZ_4K << max_= order, 2 * ps, ps, &allocated, - DRM_BUDDY_CLEAR_ALLOCATION), + GPU_BUDDY_CLEAR_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", 2 * ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, SZ_4K << max_ord= er, mm_size, + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, SZ_4K << max_ord= er, mm_size, ps, ps, &allocated, - DRM_BUDDY_RANGE_ALLOCATION), + GPU_BUDDY_RANGE_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", ps); - drm_buddy_free_list(&mm, &allocated, DRM_BUDDY_CLEARED); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, GPU_BUDDY_CLEARED); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_contiguous(struct kunit *test) +static void gpu_test_buddy_alloc_contiguous(struct kunit *test) { const unsigned long ps =3D SZ_4K, mm_size =3D 16 * 3 * SZ_4K; unsigned long i, n_pages, total; - struct drm_buddy_block *block; - struct drm_buddy mm; + struct gpu_buddy_block *block; + struct gpu_buddy mm; LIST_HEAD(left); LIST_HEAD(middle); 
LIST_HEAD(right); LIST_HEAD(allocated); =20 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, mm_size, ps)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps)); =20 /* * Idea is to fragment the address space by alternating block * allocations between three different lists; one for left, middle and * right. We can then free a list to simulate fragmentation. In - * particular we want to exercise the DRM_BUDDY_CONTIGUOUS_ALLOCATION, + * particular we want to exercise the GPU_BUDDY_CONTIGUOUS_ALLOCATION, * including the try_harder path. */ =20 @@ -548,66 +548,66 @@ static void drm_test_buddy_alloc_contiguous(struct ku= nit *test) else list =3D &right; KUNIT_ASSERT_FALSE_MSG(test, - drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_alloc_blocks(&mm, 0, mm_size, ps, ps, list, 0), "buddy_alloc hit an error size=3D%lu\n", ps); } while (++i < n_pages); =20 - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=3D%lu\n", 3 * ps); =20 - drm_buddy_free_list(&mm, &middle, 0); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_free_list(&mm, &middle, 0); + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=3D%lu\n", 3 * ps); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 2 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=3D%lu\n", 2 * ps); =20 - drm_buddy_free_list(&mm, &right, 0); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_free_list(&mm, &right, 0); + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc didn't error size=3D%lu\n", 3 * ps); /* * At this point we should have enough contiguous space for 2 blocks, * however they are never buddies (since we freed middle and right) so * will require the try_harder logic to find them. 
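Illustratively, after the interleaved left/middle/right allocations above, freeing the middle and right lists leaves a repeating layout like

	| L | free | free | L | free | free | ...

where each free pair is physically contiguous but the two blocks descend from different parents in the buddy tree, so they can never merge into a single higher-order block; only the try_harder scan can stitch a 2 * ps request together from them.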
*/ - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 2 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", 2 * ps); =20 - drm_buddy_free_list(&mm, &left, 0); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, 0, mm_size, + gpu_buddy_free_list(&mm, &left, 0); + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, 3 * ps, ps, &allocated, - DRM_BUDDY_CONTIGUOUS_ALLOCATION), + GPU_BUDDY_CONTIGUOUS_ALLOCATION), "buddy_alloc hit an error size=3D%lu\n", 3 * ps); =20 total =3D 0; list_for_each_entry(block, &allocated, link) - total +=3D drm_buddy_block_size(&mm, block); + total +=3D gpu_buddy_block_size(&mm, block); =20 KUNIT_ASSERT_EQ(test, total, ps * 2 + ps * 3); =20 - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_pathological(struct kunit *test) +static void gpu_test_buddy_alloc_pathological(struct kunit *test) { u64 mm_size, size, start =3D 0; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; const int max_order =3D 3; unsigned long flags =3D 0; int order, top; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(blocks); LIST_HEAD(holes); LIST_HEAD(tmp); @@ -620,7 +620,7 @@ static void drm_test_buddy_alloc_pathological(struct ku= nit *test) */ =20 mm_size =3D SZ_4K << max_order; - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); =20 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); @@ -630,18 +630,18 @@ static void drm_test_buddy_alloc_pathological(struct = kunit *test) block =3D list_first_entry_or_null(&blocks, typeof(*block), link); if (block) { list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); } =20 for (order =3D top; order--;) { size =3D get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=3D%d, top=3D%d\n", order, top); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_move_tail(&block->link, &blocks); @@ -649,45 +649,45 @@ static void drm_test_buddy_alloc_pathological(struct = kunit *test) =20 /* There should be one final page for this sub-allocation */ size =3D get_size(0, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM for hole\n"); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_move_tail(&block->link, &holes); =20 size =3D get_size(top, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should= be 
full!", top, max_order); } =20 - drm_buddy_free_list(&mm, &holes, 0); + gpu_buddy_free_list(&mm, &holes, 0); =20 /* Nothing larger than blocks of chunk_size now available */ for (order =3D 1; order <=3D max_order; order++) { size =3D get_size(order, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded at order %d, it should be ful= l!", order); } =20 list_splice_tail(&holes, &blocks); - drm_buddy_free_list(&mm, &blocks, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_pessimistic(struct kunit *test) +static void gpu_test_buddy_alloc_pessimistic(struct kunit *test) { u64 mm_size, size, start =3D 0; - struct drm_buddy_block *block, *bn; + struct gpu_buddy_block *block, *bn; const unsigned int max_order =3D 16; unsigned long flags =3D 0; - struct drm_buddy mm; + struct gpu_buddy mm; unsigned int order; LIST_HEAD(blocks); LIST_HEAD(tmp); @@ -699,19 +699,19 @@ static void drm_test_buddy_alloc_pessimistic(struct k= unit *test) */ =20 mm_size =3D SZ_4K << max_order; - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); =20 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); =20 for (order =3D 0; order < max_order; order++) { size =3D get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=3D%d\n", order); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_move_tail(&block->link, &blocks); @@ -719,11 +719,11 @@ static void drm_test_buddy_alloc_pessimistic(struct k= unit *test) =20 /* And now the last remaining block available */ size =3D get_size(0, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM on final alloc\n"); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_move_tail(&block->link, &blocks); @@ -731,58 +731,58 @@ static void drm_test_buddy_alloc_pessimistic(struct k= unit *test) /* Should be completely full! 
*/ for (order =3D max_order; order--;) { size =3D get_size(order, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded, it should be full!"); } =20 block =3D list_last_entry(&blocks, typeof(*block), link); list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); =20 /* As we free in increasing size, we make available larger blocks */ order =3D 1; list_for_each_entry_safe(block, bn, &blocks, link) { list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); =20 size =3D get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=3D%d\n", order); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_del(&block->link); - drm_buddy_free_block(&mm, block); + gpu_buddy_free_block(&mm, block); order++; } =20 /* To confirm, now the whole mm should be available */ size =3D get_size(max_order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc (realloc) hit -ENOMEM with order=3D%d\n", max_order); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_del(&block->link); - drm_buddy_free_block(&mm, block); - drm_buddy_free_list(&mm, &blocks, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_block(&mm, block); + gpu_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_optimistic(struct kunit *test) +static void gpu_test_buddy_alloc_optimistic(struct kunit *test) { u64 mm_size, size, start =3D 0; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned long flags =3D 0; const int max_order =3D 16; - struct drm_buddy mm; + struct gpu_buddy mm; LIST_HEAD(blocks); LIST_HEAD(tmp); int order; @@ -794,19 +794,19 @@ static void drm_test_buddy_alloc_optimistic(struct ku= nit *test) =20 mm_size =3D SZ_4K * ((1 << (max_order + 1)) - 1); =20 - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, SZ_4K), + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K), "buddy_init failed\n"); =20 KUNIT_EXPECT_EQ(test, mm.max_order, max_order); =20 for (order =3D 0; order <=3D max_order; order++) { size =3D get_size(order, mm.chunk_size); - KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc hit -ENOMEM with order=3D%d\n", order); =20 - block =3D list_first_entry_or_null(&tmp, struct drm_buddy_block, link); + block =3D list_first_entry_or_null(&tmp, struct gpu_buddy_block, link); KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n"); =20 list_move_tail(&block->link, &blocks); @@ -814,80 +814,80 @@ static void drm_test_buddy_alloc_optimistic(struct ku= nit *test) =20 /* Should be completely 
full! */ size =3D get_size(0, mm.chunk_size); - KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size, + KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags), "buddy_alloc unexpectedly succeeded, it should be full!"); =20 - drm_buddy_free_list(&mm, &blocks, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &blocks, 0); + gpu_buddy_fini(&mm); } =20 -static void drm_test_buddy_alloc_limit(struct kunit *test) +static void gpu_test_buddy_alloc_limit(struct kunit *test) { u64 size =3D U64_MAX, start =3D 0; - struct drm_buddy_block *block; + struct gpu_buddy_block *block; unsigned long flags =3D 0; LIST_HEAD(allocated); - struct drm_buddy mm; + struct gpu_buddy mm; =20 - KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, SZ_4K)); + KUNIT_EXPECT_FALSE(test, gpu_buddy_init(&mm, size, SZ_4K)); =20 - KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER, + KUNIT_EXPECT_EQ_MSG(test, mm.max_order, GPU_BUDDY_MAX_ORDER, "mm.max_order(%d) !=3D %d\n", mm.max_order, - DRM_BUDDY_MAX_ORDER); + GPU_BUDDY_MAX_ORDER); =20 size =3D mm.chunk_size << mm.max_order; - KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size, + KUNIT_EXPECT_FALSE(test, gpu_buddy_alloc_blocks(&mm, start, size, size, mm.chunk_size, &allocated, flags)); =20 - block =3D list_first_entry_or_null(&allocated, struct drm_buddy_block, li= nk); + block =3D list_first_entry_or_null(&allocated, struct gpu_buddy_block, li= nk); KUNIT_EXPECT_TRUE(test, block); =20 - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order, + KUNIT_EXPECT_EQ_MSG(test, gpu_buddy_block_order(block), mm.max_order, "block order(%d) !=3D %d\n", - drm_buddy_block_order(block), mm.max_order); + gpu_buddy_block_order(block), mm.max_order); =20 - KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block), + KUNIT_EXPECT_EQ_MSG(test, gpu_buddy_block_size(&mm, block), BIT_ULL(mm.max_order) * mm.chunk_size, "block size(%llu) !=3D %llu\n", - drm_buddy_block_size(&mm, block), + gpu_buddy_block_size(&mm, block), BIT_ULL(mm.max_order) * mm.chunk_size); =20 - drm_buddy_free_list(&mm, &allocated, 0); - drm_buddy_fini(&mm); + gpu_buddy_free_list(&mm, &allocated, 0); + gpu_buddy_fini(&mm); } =20 -static int drm_buddy_suite_init(struct kunit_suite *suite) +static int gpu_buddy_suite_init(struct kunit_suite *suite) { while (!random_seed) random_seed =3D get_random_u32(); =20 - kunit_info(suite, "Testing DRM buddy manager, with random_seed=3D0x%x\n", + kunit_info(suite, "Testing GPU buddy manager, with random_seed=3D0x%x\n", random_seed); =20 return 0; } =20 -static struct kunit_case drm_buddy_tests[] =3D { - KUNIT_CASE(drm_test_buddy_alloc_limit), - KUNIT_CASE(drm_test_buddy_alloc_optimistic), - KUNIT_CASE(drm_test_buddy_alloc_pessimistic), - KUNIT_CASE(drm_test_buddy_alloc_pathological), - KUNIT_CASE(drm_test_buddy_alloc_contiguous), - KUNIT_CASE(drm_test_buddy_alloc_clear), - KUNIT_CASE(drm_test_buddy_alloc_range_bias), - KUNIT_CASE(drm_test_buddy_fragmentation_performance), +static struct kunit_case gpu_buddy_tests[] =3D { + KUNIT_CASE(gpu_test_buddy_alloc_limit), + KUNIT_CASE(gpu_test_buddy_alloc_optimistic), + KUNIT_CASE(gpu_test_buddy_alloc_pessimistic), + KUNIT_CASE(gpu_test_buddy_alloc_pathological), + KUNIT_CASE(gpu_test_buddy_alloc_contiguous), + KUNIT_CASE(gpu_test_buddy_alloc_clear), + KUNIT_CASE(gpu_test_buddy_alloc_range_bias), + KUNIT_CASE(gpu_test_buddy_fragmentation_performance), {} }; =20 -static struct kunit_suite drm_buddy_test_suite =3D { - .name =3D "drm_buddy", - 
.suite_init =3D drm_buddy_suite_init, - .test_cases =3D drm_buddy_tests, +static struct kunit_suite gpu_buddy_test_suite =3D { + .name =3D "gpu_buddy", + .suite_init =3D gpu_buddy_suite_init, + .test_cases =3D gpu_buddy_tests, }; =20 -kunit_test_suite(drm_buddy_test_suite); +kunit_test_suite(gpu_buddy_test_suite); =20 MODULE_AUTHOR("Intel Corporation"); -MODULE_DESCRIPTION("Kunit test for drm_buddy functions"); +MODULE_DESCRIPTION("Kunit test for gpu_buddy functions"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/tests/gpu_random.c b/drivers/gpu/tests/gpu_random.c new file mode 100644 index 000000000000..54f1f6a3a6c1 --- /dev/null +++ b/drivers/gpu/tests/gpu_random.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#include "gpu_random.h" + +u32 gpu_prandom_u32_max_state(u32 ep_ro, struct rnd_state *state) +{ + return upper_32_bits((u64)prandom_u32_state(state) * ep_ro); +} +EXPORT_SYMBOL(gpu_prandom_u32_max_state); + +void gpu_random_reorder(unsigned int *order, unsigned int count, + struct rnd_state *state) +{ + unsigned int i, j; + + for (i =3D 0; i < count; ++i) { + BUILD_BUG_ON(sizeof(unsigned int) > sizeof(u32)); + j =3D gpu_prandom_u32_max_state(count, state); + swap(order[i], order[j]); + } +} +EXPORT_SYMBOL(gpu_random_reorder); + +unsigned int *gpu_random_order(unsigned int count, struct rnd_state *state) +{ + unsigned int *order, i; + + order =3D kmalloc_array(count, sizeof(*order), GFP_KERNEL); + if (!order) + return order; + + for (i =3D 0; i < count; i++) + order[i] =3D i; + + gpu_random_reorder(order, count, state); + return order; +} +EXPORT_SYMBOL(gpu_random_order); + +MODULE_DESCRIPTION("GPU Randomization Utilities"); +MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/gpu/tests/gpu_random.h b/drivers/gpu/tests/gpu_random.h new file mode 100644 index 000000000000..b68cf3448264 --- /dev/null +++ b/drivers/gpu/tests/gpu_random.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __GPU_RANDOM_H__ +#define __GPU_RANDOM_H__ + +/* This is a temporary home for a couple of utility functions that should + * be transposed to lib/ at the earliest convenience. 
+ */ + +#include + +#define GPU_RND_STATE_INITIALIZER(seed__) ({ \ + struct rnd_state state__; \ + prandom_seed_state(&state__, (seed__)); \ + state__; \ +}) + +#define GPU_RND_STATE(name__, seed__) \ + struct rnd_state name__ =3D GPU_RND_STATE_INITIALIZER(seed__) + +unsigned int *gpu_random_order(unsigned int count, + struct rnd_state *state); +void gpu_random_reorder(unsigned int *order, + unsigned int count, + struct rnd_state *state); +u32 gpu_prandom_u32_max_state(u32 ep_ro, + struct rnd_state *state); + +#endif /* !__GPU_RANDOM_H__ */ diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index d51777df12d1..6ae1383b0e2e 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -37,6 +37,8 @@ source "drivers/char/agp/Kconfig" =20 source "drivers/gpu/vga/Kconfig" =20 +source "drivers/gpu/Kconfig" + source "drivers/gpu/host1x/Kconfig" source "drivers/gpu/ipu-v3/Kconfig" source "drivers/gpu/nova-core/Kconfig" diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h index b909fa8f810a..3054369bebff 100644 --- a/include/drm/drm_buddy.h +++ b/include/drm/drm_buddy.h @@ -6,166 +6,13 @@ #ifndef __DRM_BUDDY_H__ #define __DRM_BUDDY_H__ =20 -#include -#include -#include -#include -#include +#include =20 struct drm_printer; =20 -#define DRM_BUDDY_RANGE_ALLOCATION BIT(0) -#define DRM_BUDDY_TOPDOWN_ALLOCATION BIT(1) -#define DRM_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) -#define DRM_BUDDY_CLEAR_ALLOCATION BIT(3) -#define DRM_BUDDY_CLEARED BIT(4) -#define DRM_BUDDY_TRIM_DISABLE BIT(5) - -struct drm_buddy_block { -#define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) -#define DRM_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) -#define DRM_BUDDY_ALLOCATED (1 << 10) -#define DRM_BUDDY_FREE (2 << 10) -#define DRM_BUDDY_SPLIT (3 << 10) -#define DRM_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 9) -/* Free to be used, if needed in the future */ -#define DRM_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) -#define DRM_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) - u64 header; - - struct drm_buddy_block *left; - struct drm_buddy_block *right; - struct drm_buddy_block *parent; - - void *private; /* owned by creator */ - - /* - * While the block is allocated by the user through drm_buddy_alloc*, - * the user has ownership of the link, for example to maintain within - * a list, if so desired. As soon as the block is freed with - * drm_buddy_free* ownership is given back to the mm. - */ - union { - struct rb_node rb; - struct list_head link; - }; - - struct list_head tmp_link; -}; - -/* Order-zero must be at least SZ_4K */ -#define DRM_BUDDY_MAX_ORDER (63 - 12) - -/* - * Binary Buddy System. - * - * Locking should be handled by the user, a simple mutex around - * drm_buddy_alloc* and drm_buddy_free* should suffice. - */ -struct drm_buddy { - /* Maintain a free list for each order. */ - struct rb_root **free_trees; - - /* - * Maintain explicit binary tree(s) to track the allocation of the - * address space. This gives us a simple way of finding a buddy block - * and performing the potentially recursive merge step when freeing a - * block. Nodes are either allocated or free, in which case they will - * also exist on the respective free list. - */ - struct drm_buddy_block **roots; - - /* - * Anything from here is public, and remains static for the lifetime of - * the mm. Everything above is considered do-not-touch. 
- */ - unsigned int n_roots; - unsigned int max_order; - - /* Must be at least SZ_4K */ - u64 chunk_size; - u64 size; - u64 avail; - u64 clear_avail; -}; - -static inline u64 -drm_buddy_block_offset(const struct drm_buddy_block *block) -{ - return block->header & DRM_BUDDY_HEADER_OFFSET; -} - -static inline unsigned int -drm_buddy_block_order(struct drm_buddy_block *block) -{ - return block->header & DRM_BUDDY_HEADER_ORDER; -} - -static inline unsigned int -drm_buddy_block_state(struct drm_buddy_block *block) -{ - return block->header & DRM_BUDDY_HEADER_STATE; -} - -static inline bool -drm_buddy_block_is_allocated(struct drm_buddy_block *block) -{ - return drm_buddy_block_state(block) =3D=3D DRM_BUDDY_ALLOCATED; -} - -static inline bool -drm_buddy_block_is_clear(struct drm_buddy_block *block) -{ - return block->header & DRM_BUDDY_HEADER_CLEAR; -} - -static inline bool -drm_buddy_block_is_free(struct drm_buddy_block *block) -{ - return drm_buddy_block_state(block) =3D=3D DRM_BUDDY_FREE; -} - -static inline bool -drm_buddy_block_is_split(struct drm_buddy_block *block) -{ - return drm_buddy_block_state(block) =3D=3D DRM_BUDDY_SPLIT; -} - -static inline u64 -drm_buddy_block_size(struct drm_buddy *mm, - struct drm_buddy_block *block) -{ - return mm->chunk_size << drm_buddy_block_order(block); -} - -int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size); - -void drm_buddy_fini(struct drm_buddy *mm); - -struct drm_buddy_block * -drm_get_buddy(struct drm_buddy_block *block); - -int drm_buddy_alloc_blocks(struct drm_buddy *mm, - u64 start, u64 end, u64 size, - u64 min_page_size, - struct list_head *blocks, - unsigned long flags); - -int drm_buddy_block_trim(struct drm_buddy *mm, - u64 *start, - u64 new_size, - struct list_head *blocks); - -void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear); - -void drm_buddy_free_block(struct drm_buddy *mm, struct drm_buddy_block *bl= ock); - -void drm_buddy_free_list(struct drm_buddy *mm, - struct list_head *objects, - unsigned int flags); - -void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p); -void drm_buddy_block_print(struct drm_buddy *mm, - struct drm_buddy_block *block, +/* DRM-specific GPU Buddy Allocator print helpers */ +void drm_buddy_print(struct gpu_buddy *mm, struct drm_printer *p); +void drm_buddy_block_print(struct gpu_buddy *mm, + struct gpu_buddy_block *block, struct drm_printer *p); #endif diff --git a/include/linux/gpu_buddy.h b/include/linux/gpu_buddy.h new file mode 100644 index 000000000000..3e4bd11ccb71 --- /dev/null +++ b/include/linux/gpu_buddy.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright =C2=A9 2021 Intel Corporation + */ + +#ifndef __GPU_BUDDY_H__ +#define __GPU_BUDDY_H__ + +#include +#include +#include +#include +#include + +#define GPU_BUDDY_RANGE_ALLOCATION BIT(0) +#define GPU_BUDDY_TOPDOWN_ALLOCATION BIT(1) +#define GPU_BUDDY_CONTIGUOUS_ALLOCATION BIT(2) +#define GPU_BUDDY_CLEAR_ALLOCATION BIT(3) +#define GPU_BUDDY_CLEARED BIT(4) +#define GPU_BUDDY_TRIM_DISABLE BIT(5) + +enum gpu_buddy_free_tree { + GPU_BUDDY_CLEAR_TREE =3D 0, + GPU_BUDDY_DIRTY_TREE, + GPU_BUDDY_MAX_FREE_TREES, +}; + +#define for_each_free_tree(tree) \ + for ((tree) =3D 0; (tree) < GPU_BUDDY_MAX_FREE_TREES; (tree)++) + +struct gpu_buddy_block { +#define GPU_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12) +#define GPU_BUDDY_HEADER_STATE GENMASK_ULL(11, 10) +#define GPU_BUDDY_ALLOCATED (1 << 10) +#define GPU_BUDDY_FREE (2 << 10) +#define GPU_BUDDY_SPLIT (3 << 10) +#define GPU_BUDDY_HEADER_CLEAR GENMASK_ULL(9, 
9) +/* Free to be used, if needed in the future */ +#define GPU_BUDDY_HEADER_UNUSED GENMASK_ULL(8, 6) +#define GPU_BUDDY_HEADER_ORDER GENMASK_ULL(5, 0) + u64 header; + + struct gpu_buddy_block *left; + struct gpu_buddy_block *right; + struct gpu_buddy_block *parent; + + void *private; /* owned by creator */ + + /* + * While the block is allocated by the user through gpu_buddy_alloc*, + * the user has ownership of the link, for example to maintain within + * a list, if so desired. As soon as the block is freed with + * gpu_buddy_free* ownership is given back to the mm. + */ + union { + struct rb_node rb; + struct list_head link; + }; + + struct list_head tmp_link; +}; + +/* Order-zero must be at least SZ_4K */ +#define GPU_BUDDY_MAX_ORDER (63 - 12) + +/* + * Binary Buddy System. + * + * Locking should be handled by the user, a simple mutex around + * gpu_buddy_alloc* and gpu_buddy_free* should suffice. + */ +struct gpu_buddy { + /* Maintain a free list for each order. */ + struct rb_root **free_trees; + + /* + * Maintain explicit binary tree(s) to track the allocation of the + * address space. This gives us a simple way of finding a buddy block + * and performing the potentially recursive merge step when freeing a + * block. Nodes are either allocated or free, in which case they will + * also exist on the respective free list. + */ + struct gpu_buddy_block **roots; + + /* + * Anything from here is public, and remains static for the lifetime of + * the mm. Everything above is considered do-not-touch. + */ + unsigned int n_roots; + unsigned int max_order; + + /* Must be at least SZ_4K */ + u64 chunk_size; + u64 size; + u64 avail; + u64 clear_avail; +}; + +static inline u64 +gpu_buddy_block_offset(const struct gpu_buddy_block *block) +{ + return block->header & GPU_BUDDY_HEADER_OFFSET; +} + +static inline unsigned int +gpu_buddy_block_order(struct gpu_buddy_block *block) +{ + return block->header & GPU_BUDDY_HEADER_ORDER; +} + +static inline unsigned int +gpu_buddy_block_state(struct gpu_buddy_block *block) +{ + return block->header & GPU_BUDDY_HEADER_STATE; +} + +static inline bool +gpu_buddy_block_is_allocated(struct gpu_buddy_block *block) +{ + return gpu_buddy_block_state(block) =3D=3D GPU_BUDDY_ALLOCATED; +} + +static inline bool +gpu_buddy_block_is_clear(struct gpu_buddy_block *block) +{ + return block->header & GPU_BUDDY_HEADER_CLEAR; +} + +static inline bool +gpu_buddy_block_is_free(struct gpu_buddy_block *block) +{ + return gpu_buddy_block_state(block) =3D=3D GPU_BUDDY_FREE; +} + +static inline bool +gpu_buddy_block_is_split(struct gpu_buddy_block *block) +{ + return gpu_buddy_block_state(block) =3D=3D GPU_BUDDY_SPLIT; +} + +static inline u64 +gpu_buddy_block_size(struct gpu_buddy *mm, + struct gpu_buddy_block *block) +{ + return mm->chunk_size << gpu_buddy_block_order(block); +} + +int gpu_buddy_init(struct gpu_buddy *mm, u64 size, u64 chunk_size); + +void gpu_buddy_fini(struct gpu_buddy *mm); + +struct gpu_buddy_block * +gpu_get_buddy(struct gpu_buddy_block *block); + +int gpu_buddy_alloc_blocks(struct gpu_buddy *mm, + u64 start, u64 end, u64 size, + u64 min_page_size, + struct list_head *blocks, + unsigned long flags); + +int gpu_buddy_block_trim(struct gpu_buddy *mm, + u64 *start, + u64 new_size, + struct list_head *blocks); + +void gpu_buddy_reset_clear(struct gpu_buddy *mm, bool is_clear); + +void gpu_buddy_free_block(struct gpu_buddy *mm, struct gpu_buddy_block *bl= ock); + +void gpu_buddy_free_list(struct gpu_buddy *mm, + struct list_head *objects, + unsigned int flags); + 
+void gpu_buddy_print(struct gpu_buddy *mm);
+void gpu_buddy_block_print(struct gpu_buddy *mm,
+ struct gpu_buddy_block *block);
+#endif
--=20
2.34.1
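Before the bindings patch below, a quick orientation on the renamed C entry
points: the gpu_buddy API keeps the drm_buddy calling convention, so a
typical caller initializes an mm, allocates into a list_head, frees the
list, and tears the mm down. The following sketch is hypothetical
(example_vram_smoke and its sizes are invented for illustration, not part
of this series) and uses only the declarations from
include/linux/gpu_buddy.h above; per the header comment, locking is left to
the caller, and this single-threaded sketch therefore takes no mutex:

    #include <linux/gpu_buddy.h>
    #include <linux/list.h>
    #include <linux/sizes.h>

    /* Hypothetical smoke test of the renamed allocator API. */
    static int example_vram_smoke(void)
    {
            struct gpu_buddy mm;
            LIST_HEAD(blocks);
            int err;

            /* Manage 1 GiB of VRAM with a 4 KiB minimum chunk. */
            err = gpu_buddy_init(&mm, SZ_1G, SZ_4K);
            if (err)
                    return err;

            /* One 16 MiB allocation anywhere in [0, 1 GiB). */
            err = gpu_buddy_alloc_blocks(&mm, 0, SZ_1G, SZ_16M, SZ_16M,
                                         &blocks, GPU_BUDDY_RANGE_ALLOCATION);
            if (!err)
                    gpu_buddy_free_list(&mm, &blocks, 0);

            gpu_buddy_fini(&mm);
            return err;
    }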
From nobody Sun Feb 8 06:56:18 2026
From: Joel Fernandes
Subject: [PATCH RFC v5 3/6] rust: gpu: Add GPU buddy allocator bindings
Date: Fri, 19 Dec 2025 15:38:02 -0500
Message-Id: <20251219203805.1246586-4-joelagnelf@nvidia.com>
X-Mailer: git-send-email 2.34.1
In-Reply-To: <20251219203805.1246586-1-joelagnelf@nvidia.com>
References: <20251219203805.1246586-1-joelagnelf@nvidia.com>
MIME-Version: 1.0
Content-Transfer-Encoding: quoted-printable
Content-Type: text/plain; charset="utf-8"

Add safe Rust abstractions over the Linux kernel's GPU buddy allocator
for physical memory management. The GPU buddy allocator implements a
binary buddy system useful for GPU physical memory allocation. nova-core
will use it for physical memory allocation.

Signed-off-by: Joel Fernandes
---
 rust/bindings/bindings_helper.h |  11 +
 rust/helpers/gpu.c              |  23 ++
 rust/helpers/helpers.c          |   1 +
 rust/kernel/gpu/buddy.rs        | 518 ++++++++++++++++++++++++++++++++
 rust/kernel/gpu/mod.rs          |   5 +
 rust/kernel/lib.rs              |   2 +
 6 files changed, 560 insertions(+)
 create mode 100644 rust/helpers/gpu.c
 create mode 100644 rust/kernel/gpu/buddy.rs
 create mode 100644 rust/kernel/gpu/mod.rs

diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index a067038b4b42..940b854a1f93 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -29,6 +29,7 @@
 #include
=20
 #include
+#include
 #include
 #include
 #include
@@ -144,6 +145,16 @@ const vm_flags_t RUST_CONST_HELPER_VM_MIXEDMAP =3D VM_MIXEDMAP;
 const vm_flags_t RUST_CONST_HELPER_VM_HUGEPAGE =3D VM_HUGEPAGE;
 const vm_flags_t RUST_CONST_HELPER_VM_NOHUGEPAGE =3D VM_NOHUGEPAGE;
=20
+#if IS_ENABLED(CONFIG_GPU_BUDDY)
+const unsigned long RUST_CONST_HELPER_GPU_BUDDY_RANGE_ALLOCATION =3D GPU_BUDDY_RANGE_ALLOCATION;
+const unsigned long RUST_CONST_HELPER_GPU_BUDDY_TOPDOWN_ALLOCATION =3D GPU_BUDDY_TOPDOWN_ALLOCATION;
+const unsigned long RUST_CONST_HELPER_GPU_BUDDY_CONTIGUOUS_ALLOCATION =3D
+ GPU_BUDDY_CONTIGUOUS_ALLOCATION;
+const unsigned long RUST_CONST_HELPER_GPU_BUDDY_CLEAR_ALLOCATION =3D GPU_BUDDY_CLEAR_ALLOCATION;
+const unsigned long RUST_CONST_HELPER_GPU_BUDDY_CLEARED =3D GPU_BUDDY_CLEARED;
+const unsigned long RUST_CONST_HELPER_GPU_BUDDY_TRIM_DISABLE =3D GPU_BUDDY_TRIM_DISABLE;
+#endif
+
 #if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_RUST)
 #include "../../drivers/android/binder/rust_binder.h"
 #include "../../drivers/android/binder/rust_binder_events.h"
diff --git a/rust/helpers/gpu.c b/rust/helpers/gpu.c
new file mode 100644
index 000000000000..415836b86abf
--- /dev/null
+++ b/rust/helpers/gpu.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include
+
+#ifdef CONFIG_GPU_BUDDY
+
+u64 rust_helper_gpu_buddy_block_offset(const struct gpu_buddy_block *block)
+{
+ return gpu_buddy_block_offset(block);
+}
+
+unsigned int rust_helper_gpu_buddy_block_order(struct gpu_buddy_block *block)
+{
+ return
gpu_buddy_block_order(block); +} + +u64 rust_helper_gpu_buddy_block_size(struct gpu_buddy *mm, + struct gpu_buddy_block *block) +{ + return gpu_buddy_block_size(mm, block); +} + +#endif /* CONFIG_GPU_BUDDY */ diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c index 634fa2386bbb..6db7c4c25afa 100644 --- a/rust/helpers/helpers.c +++ b/rust/helpers/helpers.c @@ -29,6 +29,7 @@ #include "err.c" #include "irq.c" #include "fs.c" +#include "gpu.c" #include "io.c" #include "jump_label.c" #include "kunit.c" diff --git a/rust/kernel/gpu/buddy.rs b/rust/kernel/gpu/buddy.rs new file mode 100644 index 000000000000..d75303baf4ec --- /dev/null +++ b/rust/kernel/gpu/buddy.rs @@ -0,0 +1,518 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! GPU buddy allocator bindings. +//! +//! C header: [`include/linux/gpu_buddy.h`](srctree/include/linux/gpu_budd= y.h) +//! +//! This module provides Rust abstractions over the Linux kernel's GPU bud= dy +//! allocator, which implements a binary buddy memory allocator. +//! +//! The buddy allocator manages a contiguous address space and allocates b= locks +//! in power-of-two sizes, useful for GPU physical memory management. +//! +//! # Examples +//! +//! ``` +//! use kernel::{ +//! gpu::buddy::{BuddyFlags, GpuBuddy, GpuBuddyAllocParams, GpuBuddyPa= rams}, +//! prelude::*, +//! sizes::*, // +//! }; +//! +//! // Create a 1GB buddy allocator with 4KB minimum chunk size. +//! let mut buddy =3D GpuBuddy::new(GpuBuddyParams { +//! physical_memory_size_bytes: SZ_1G as u64, +//! chunk_size_bytes: SZ_4K as u64, +//! })?; +//! +//! // Verify initial state. +//! assert_eq!(buddy.size(), SZ_1G as u64); +//! assert_eq!(buddy.chunk_size(), SZ_4K as u64); +//! let initial_free =3D buddy.free_memory_bytes(); +//! +//! // Base allocation params - reused across tests with field overrides. +//! let params =3D GpuBuddyAllocParams { +//! start_range_address: 0, +//! end_range_address: 0, // Entire range. +//! size_bytes: SZ_16M as u64, +//! min_block_size_bytes: SZ_16M as u64, +//! buddy_flags: BuddyFlags::try_new(BuddyFlags::RANGE_ALLOCATION)?, +//! }; +//! +//! // Test top-down allocation (allocates from highest addresses). +//! let topdown =3D buddy.alloc_blocks(GpuBuddyAllocParams { +//! buddy_flags: BuddyFlags::try_new(BuddyFlags::TOPDOWN_ALLOCATION)?, +//! ..params +//! })?; +//! assert_eq!(buddy.free_memory_bytes(), initial_free - SZ_16M as u64); +//! +//! for block in topdown.iter() { +//! assert_eq!(block.offset(), (SZ_1G - SZ_16M) as u64); +//! assert_eq!(block.order(), 12); // 2^12 pages +//! assert_eq!(block.size(), SZ_16M as u64); +//! } +//! drop(topdown); +//! assert_eq!(buddy.free_memory_bytes(), initial_free); +//! +//! // Allocate 16MB - should result in a single 16MB block at offset 0. +//! let allocated =3D buddy.alloc_blocks(params)?; +//! assert_eq!(buddy.free_memory_bytes(), initial_free - SZ_16M as u64); +//! +//! for block in allocated.iter() { +//! assert_eq!(block.offset(), 0); +//! assert_eq!(block.order(), 12); // 2^12 pages +//! assert_eq!(block.size(), SZ_16M as u64); +//! } +//! drop(allocated); +//! assert_eq!(buddy.free_memory_bytes(), initial_free); +//! +//! // Test non-contiguous allocation with fragmented memory. +//! // Create fragmentation by allocating 4MB blocks at [0,4M) and [8M,12M= ). +//! let params_4m =3D GpuBuddyAllocParams { +//! end_range_address: SZ_4M as u64, +//! size_bytes: SZ_4M as u64, +//! min_block_size_bytes: SZ_4M as u64, +//! ..params +//! }; +//! let frag1 =3D buddy.alloc_blocks(params_4m)?; +//! 
assert_eq!(buddy.free_memory_bytes(), initial_free - SZ_4M as u64); +//! +//! let frag2 =3D buddy.alloc_blocks(GpuBuddyAllocParams { +//! start_range_address: SZ_8M as u64, +//! end_range_address: (SZ_8M + SZ_4M) as u64, +//! ..params_4m +//! })?; +//! assert_eq!(buddy.free_memory_bytes(), initial_free - SZ_8M as u64); +//! +//! // Allocate 8MB without CONTIGUOUS - should return 2 blocks from the h= oles. +//! let fragmented =3D buddy.alloc_blocks(GpuBuddyAllocParams { +//! end_range_address: SZ_16M as u64, +//! size_bytes: SZ_8M as u64, +//! min_block_size_bytes: SZ_4M as u64, +//! ..params +//! })?; +//! assert_eq!(buddy.free_memory_bytes(), initial_free - (SZ_16M) as u64); +//! +//! let (mut count, mut total) =3D (0u32, 0u64); +//! for block in fragmented.iter() { +//! // The 8MB allocation should return 2 blocks, each 4MB. +//! assert_eq!(block.size(), SZ_4M as u64); +//! total +=3D block.size(); +//! count +=3D 1; +//! } +//! assert_eq!(total, SZ_8M as u64); +//! assert_eq!(count, 2); +//! drop(fragmented); +//! drop(frag2); +//! drop(frag1); +//! assert_eq!(buddy.free_memory_bytes(), initial_free); +//! +//! // Test CONTIGUOUS failure when only fragmented space available. +//! // Create a small buddy allocator with only 16MB of memory. +//! let mut small =3D GpuBuddy::new(GpuBuddyParams { +//! physical_memory_size_bytes: SZ_16M as u64, +//! chunk_size_bytes: SZ_4K as u64, +//! })?; +//! +//! // Allocate 4MB blocks at [0,4M) and [8M,12M) to create fragmented mem= ory. +//! let hole1 =3D small.alloc_blocks(params_4m)?; +//! let hole2 =3D small.alloc_blocks(GpuBuddyAllocParams { +//! start_range_address: SZ_8M as u64, +//! end_range_address: (SZ_8M + SZ_4M) as u64, +//! ..params_4m +//! })?; +//! +//! // 8MB contiguous should fail - only two non-contiguous 4MB holes exis= t. +//! let result =3D small.alloc_blocks(GpuBuddyAllocParams { +//! size_bytes: SZ_8M as u64, +//! min_block_size_bytes: SZ_4M as u64, +//! buddy_flags: BuddyFlags::try_new(BuddyFlags::CONTIGUOUS_ALLOCATION= )?, +//! ..params +//! }); +//! assert!(result.is_err()); +//! drop(hole2); +//! drop(hole1); +//! +//! # Ok::<(), Error>(()) +//! ``` + +use crate::{ + bindings, + clist::CListHead, + clist_create, + error::to_result, + new_mutex, + prelude::*, + sync::{ + lock::mutex::MutexGuard, + Arc, + Mutex, // + }, + types::Opaque, +}; + +/// Flags for GPU buddy allocator operations. +/// +/// These flags control the allocation behavior of the buddy allocator. +#[derive(Clone, Copy, Default, PartialEq, Eq)] +pub struct BuddyFlags(usize); + +impl BuddyFlags { + /// Range-based allocation from start to end addresses. + pub const RANGE_ALLOCATION: usize =3D bindings::GPU_BUDDY_RANGE_ALLOCA= TION; + + /// Allocate from top of address space downward. + pub const TOPDOWN_ALLOCATION: usize =3D bindings::GPU_BUDDY_TOPDOWN_AL= LOCATION; + + /// Allocate physically contiguous blocks. + pub const CONTIGUOUS_ALLOCATION: usize =3D bindings::GPU_BUDDY_CONTIGU= OUS_ALLOCATION; + + /// Request allocation from the cleared (zeroed) memory. The zero'ing = is not + /// done by the allocator, but by the caller before freeing old blocks. + pub const CLEAR_ALLOCATION: usize =3D bindings::GPU_BUDDY_CLEAR_ALLOCA= TION; + + /// Disable trimming of partially used blocks. + pub const TRIM_DISABLE: usize =3D bindings::GPU_BUDDY_TRIM_DISABLE; + + /// Mark blocks as cleared (zeroed) when freeing. When set during free, + /// indicates that the caller has already zeroed the memory. 
+ pub const CLEARED: usize =3D bindings::GPU_BUDDY_CLEARED; + + /// Create [`BuddyFlags`] from a raw value with validation. + /// + /// Use `|` operator to combine flags if needed, before calling this m= ethod. + pub fn try_new(flags: usize) -> Result { + // Flags must not exceed u32::MAX to satisfy the GPU buddy allocat= or C API. + if flags > u32::MAX as usize { + return Err(EINVAL); + } + + // `TOPDOWN_ALLOCATION` only works without `RANGE_ALLOCATION`. Whe= n both are + // set, `TOPDOWN_ALLOCATION` is silently ignored by the allocator.= Reject this. + if (flags & Self::RANGE_ALLOCATION) !=3D 0 && (flags & Self::TOPDO= WN_ALLOCATION) !=3D 0 { + return Err(EINVAL); + } + + Ok(Self(flags)) + } + + /// Get raw value of the flags. + pub(crate) fn as_raw(self) -> usize { + self.0 + } +} + +/// Parameters for creating a GPU buddy allocator. +#[derive(Clone, Copy)] +pub struct GpuBuddyParams { + /// Total physical memory size managed by the allocator in bytes. + pub physical_memory_size_bytes: u64, + /// Minimum allocation unit / chunk size in bytes, must be >=3D 4KB. + pub chunk_size_bytes: u64, +} + +/// Parameters for allocating blocks from a GPU buddy allocator. +#[derive(Clone, Copy)] +pub struct GpuBuddyAllocParams { + /// Start of allocation range in bytes. Use 0 for beginning. + pub start_range_address: u64, + /// End of allocation range in bytes. Use 0 for entire range. + pub end_range_address: u64, + /// Total size to allocate in bytes. + pub size_bytes: u64, + /// Minimum block size for fragmented allocations in bytes. + pub min_block_size_bytes: u64, + /// Buddy allocator behavior flags. + pub buddy_flags: BuddyFlags, +} + +/// Inner structure holding the actual buddy allocator. +/// +/// # Synchronization +/// +/// The C `gpu_buddy` API requires synchronization (see `include/linux/gpu= _buddy.h`). +/// The internal [`GpuBuddyGuard`] ensures that the lock is held for all +/// allocator and free operations, preventing races between concurrent all= ocations +/// and the freeing that occurs when [`AllocatedBlocks`] is dropped. +/// +/// # Invariants +/// +/// The inner [`Opaque`] contains a valid, initialized buddy allocator. +#[pin_data(PinnedDrop)] +struct GpuBuddyInner { + #[pin] + inner: Opaque, + #[pin] + lock: Mutex<()>, + /// Cached chunk size (does not change after init). + chunk_size: u64, + /// Cached total size (does not change after init). + size: u64, +} + +impl GpuBuddyInner { + /// Create a pin-initializer for the buddy allocator. + fn new(params: &GpuBuddyParams) -> impl PinInit { + let size =3D params.physical_memory_size_bytes; + let chunk_size =3D params.chunk_size_bytes; + + try_pin_init!(Self { + inner <- Opaque::try_ffi_init(|ptr| { + // SAFETY: ptr points to valid uninitialized memory from t= he pin-init + // infrastructure. gpu_buddy_init will initialize the stru= cture. + to_result(unsafe { bindings::gpu_buddy_init(ptr, size, chu= nk_size) }) + }), + lock <- new_mutex!(()), + chunk_size: chunk_size, + size: size, + }) + } + + /// Lock the mutex and return a guard for accessing the allocator. + fn lock(&self) -> GpuBuddyGuard<'_> { + GpuBuddyGuard { + inner: self, + _guard: self.lock.lock(), + } + } +} + +#[pinned_drop] +impl PinnedDrop for GpuBuddyInner { + fn drop(self: Pin<&mut Self>) { + let guard =3D self.lock(); + + // SAFETY: guard provides exclusive access to the allocator. + unsafe { + bindings::gpu_buddy_fini(guard.as_raw()); + } + } +} + +// SAFETY: [`GpuBuddyInner`] can be sent between threads. 
+unsafe impl Send for GpuBuddyInner {} + +// SAFETY: [`GpuBuddyInner`] is `Sync` because the internal [`GpuBuddyGuar= d`] +// serializes all access to the C allocator, preventing data races. +unsafe impl Sync for GpuBuddyInner {} + +/// Guard that proves the lock is held, enabling access to the allocator. +/// +/// # Invariants +/// +/// The inner `_guard` holds the lock for the duration of this guard's lif= etime. +pub(crate) struct GpuBuddyGuard<'a> { + inner: &'a GpuBuddyInner, + _guard: MutexGuard<'a, ()>, +} + +impl GpuBuddyGuard<'_> { + /// Get a raw pointer to the underlying C `gpu_buddy` structure. + fn as_raw(&self) -> *mut bindings::gpu_buddy { + self.inner.inner.get() + } +} + +/// GPU buddy allocator instance. +/// +/// This structure wraps the C `gpu_buddy` allocator using reference count= ing. +/// The allocator is automatically cleaned up when all references are drop= ped. +/// +/// # Invariants +/// +/// The inner [`Arc`] points to a valid, initialized GPU buddy allocator. +pub struct GpuBuddy(Arc); + +impl GpuBuddy { + /// Create a new buddy allocator. + /// + /// Creates a buddy allocator that manages a contiguous address space = of the given + /// size, with the specified minimum allocation unit (chunk_size must = be at least 4KB). + pub fn new(params: GpuBuddyParams) -> Result { + Ok(Self(Arc::pin_init( + GpuBuddyInner::new(¶ms), + GFP_KERNEL, + )?)) + } + + /// Get the chunk size (minimum allocation unit). + pub fn chunk_size(&self) -> u64 { + self.0.chunk_size + } + + /// Get the total managed size. + pub fn size(&self) -> u64 { + self.0.size + } + + /// Get the available (free) memory in bytes. + pub fn free_memory_bytes(&self) -> u64 { + let guard =3D self.0.lock(); + // SAFETY: guard provides exclusive access to the allocator. + unsafe { (*guard.as_raw()).avail } + } + + /// Allocate blocks from the buddy allocator. + /// + /// Returns an [`Arc`] structure that owns the alloca= ted blocks + /// and automatically frees them when all references are dropped. + pub fn alloc_blocks(&mut self, params: GpuBuddyAllocParams) -> Result<= Arc> { + let buddy_arc =3D Arc::clone(&self.0); + + // Create pin-initializer that initializes list and allocates bloc= ks. + let init =3D try_pin_init!(AllocatedBlocks { + list <- CListHead::try_init(|list| { + // Lock while allocating to serialize with concurrent free= s. + let guard =3D buddy_arc.lock(); + + // SAFETY: guard provides exclusive access, list is initia= lized. + to_result(unsafe { + bindings::gpu_buddy_alloc_blocks( + guard.as_raw(), + params.start_range_address, + params.end_range_address, + params.size_bytes, + params.min_block_size_bytes, + list.as_raw(), + params.buddy_flags.as_raw(), + ) + }) + }), + buddy: Arc::clone(&buddy_arc), + flags: params.buddy_flags, + }); + + Arc::pin_init(init, GFP_KERNEL) + } +} + +/// Allocated blocks from the buddy allocator with automatic cleanup. +/// +/// This structure owns a list of allocated blocks and ensures they are +/// automatically freed when dropped. Use `iter()` to iterate over all +/// allocated [`Block`] structures. +/// +/// # Invariants +/// +/// - `list` is an initialized, valid list head containing allocated block= s. +/// - `buddy` references a valid [`GpuBuddyInner`]. +#[pin_data(PinnedDrop)] +pub struct AllocatedBlocks { + #[pin] + list: CListHead, + buddy: Arc, + flags: BuddyFlags, +} + +impl AllocatedBlocks { + /// Check if the block list is empty. + pub fn is_empty(&self) -> bool { + // An empty list head points to itself. 
+ !self.list.is_linked() + } + + /// Iterate over allocated blocks. + /// + /// Returns an iterator yielding [`AllocatedBlock`] references. The bl= ocks + /// are only valid for the duration of the borrow of `self`. + pub fn iter(&self) -> impl Iterator> + '_ { + // SAFETY: list contains gpu_buddy_block items linked via __bindge= n_anon_1.link. + let clist =3D unsafe { + clist_create!( + self.list.as_raw(), + Block, + bindings::gpu_buddy_block, + __bindgen_anon_1.link + ) + }; + + clist + .iter() + .map(|block| AllocatedBlock { block, alloc: self }) + } +} + +#[pinned_drop] +impl PinnedDrop for AllocatedBlocks { + fn drop(self: Pin<&mut Self>) { + let guard =3D self.buddy.lock(); + + // SAFETY: + // - list is valid per the type's invariants. + // - guard provides exclusive access to the allocator. + // CAST: BuddyFlags were validated to fit in u32 at construction. + unsafe { + bindings::gpu_buddy_free_list( + guard.as_raw(), + self.list.as_raw(), + self.flags.as_raw() as u32, + ); + } + } +} + +/// A GPU buddy block. +/// +/// Transparent wrapper over C `gpu_buddy_block` structure. This type is r= eturned +/// as references from [`CListIter`] during iteration over [`AllocatedBloc= ks`]. +/// +/// # Invariants +/// +/// The inner [`Opaque`] contains a valid, allocated `gpu_buddy_block`. +#[repr(transparent)] +pub struct Block(Opaque); + +impl Block { + /// Get a raw pointer to the underlying C block. + fn as_raw(&self) -> *mut bindings::gpu_buddy_block { + self.0.get() + } + + /// Get the block's offset in the address space. + pub(crate) fn offset(&self) -> u64 { + // SAFETY: self.as_raw() is valid per the type's invariants. + unsafe { bindings::gpu_buddy_block_offset(self.as_raw()) } + } + + /// Get the block order. + pub(crate) fn order(&self) -> u32 { + // SAFETY: self.as_raw() is valid per the type's invariants. + unsafe { bindings::gpu_buddy_block_order(self.as_raw()) } + } +} + +// SAFETY: `Block` is a transparent wrapper over `gpu_buddy_block` which i= s not +// modified after allocation. It can be safely sent between threads. +unsafe impl Send for Block {} + +// SAFETY: `Block` is a transparent wrapper over `gpu_buddy_block` which i= s not +// modified after allocation. It can be safely shared among threads. +unsafe impl Sync for Block {} + +/// An allocated block with access to the allocation list. +/// +/// # Invariants +/// +/// - `block` is a valid reference to an allocated [`Block`]. +/// - `alloc` is a valid reference to the [`AllocatedBlocks`] that owns th= is block. +pub struct AllocatedBlock<'a> { + block: &'a Block, + alloc: &'a AllocatedBlocks, +} + +impl AllocatedBlock<'_> { + /// Get the block's offset in the address space. + pub fn offset(&self) -> u64 { + self.block.offset() + } + + /// Get the block order (size =3D chunk_size << order). + pub fn order(&self) -> u32 { + self.block.order() + } + + /// Get the block's size in bytes. + pub fn size(&self) -> u64 { + self.alloc.buddy.chunk_size << self.block.order() + } +} diff --git a/rust/kernel/gpu/mod.rs b/rust/kernel/gpu/mod.rs new file mode 100644 index 000000000000..8f25e6367edc --- /dev/null +++ b/rust/kernel/gpu/mod.rs @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! GPU subsystem abstractions. 
+
+pub mod buddy;
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index cd7e6a1055b0..d754d777f8ff 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -98,6 +98,8 @@
 pub mod firmware;
 pub mod fmt;
 pub mod fs;
+#[cfg(CONFIG_GPU_BUDDY)]
+pub mod gpu;
 #[cfg(CONFIG_I2C = "y")]
 pub mod i2c;
 pub mod id_pool;
-- 
2.34.1
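
[Editor's note: a minimal driver-side sketch of how the API in this patch
composes. It is not part of the patch; the `GpuBuddyParams` field names and
the `BuddyFlags::default()` constructor are assumptions for illustration,
since only the allocation-range fields appear in the diff above:]

    // Hypothetical usage sketch of the GpuBuddy API (field names assumed).
    fn buddy_demo() -> Result {
        // Manage a 64MB space with a 4KB minimum allocation unit.
        let mut buddy = GpuBuddy::new(GpuBuddyParams {
            size: 64 << 20,      // assumed field name
            chunk_size: 4 << 10, // assumed field name
        })?;

        // Allocate 1MB anywhere in the managed range.
        let blocks = buddy.alloc_blocks(GpuBuddyAllocParams {
            start_range_address: 0,
            end_range_address: buddy.size(),
            size_bytes: 1 << 20,
            min_block_size_bytes: buddy.chunk_size(),
            buddy_flags: BuddyFlags::default(), // assumed constructor
        })?;

        // Each block reports its offset and size (chunk_size << order).
        for b in blocks.iter() {
            pr_info!("block at {:#x}, {} bytes\n", b.offset(), b.size());
        }

        // Blocks are freed automatically when the last Arc reference drops.
        Ok(())
    }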
From nobody Sun Feb 8 06:56:18 2026
From: Joel Fernandes
Subject: [PATCH RFC v5 4/6] nova-core: mm: Add support to use PRAMIN windows to write to VRAM
Date: Fri, 19 Dec 2025 15:38:03 -0500
Message-Id: <20251219203805.1246586-5-joelagnelf@nvidia.com>
In-Reply-To: <20251219203805.1246586-1-joelagnelf@nvidia.com>
References: <20251219203805.1246586-1-joelagnelf@nvidia.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

PRAMIN apertures are a crucial mechanism for direct reads and writes
to VRAM. Add support for them.

Signed-off-by: Joel Fernandes
---
 drivers/gpu/nova-core/mm/mod.rs    |   5 +
 drivers/gpu/nova-core/mm/pramin.rs | 200 +++++++++++++++++++++++++++++
 drivers/gpu/nova-core/nova_core.rs |   1 +
 drivers/gpu/nova-core/regs.rs      |   5 +
 4 files changed, 211 insertions(+)
 create mode 100644 drivers/gpu/nova-core/mm/mod.rs
 create mode 100644 drivers/gpu/nova-core/mm/pramin.rs

diff --git a/drivers/gpu/nova-core/mm/mod.rs b/drivers/gpu/nova-core/mm/mod.rs
new file mode 100644
index 000000000000..7a5dd4220c67
--- /dev/null
+++ b/drivers/gpu/nova-core/mm/mod.rs
@@ -0,0 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Memory management subsystems for nova-core.
+
+pub(crate) mod pramin;
diff --git a/drivers/gpu/nova-core/mm/pramin.rs b/drivers/gpu/nova-core/mm/pramin.rs
new file mode 100644
index 000000000000..5878a3f80b9c
--- /dev/null
+++ b/drivers/gpu/nova-core/mm/pramin.rs
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#![expect(unused)]
+
+//! Direct VRAM access through the PRAMIN aperture.
+//!
+//! PRAMIN provides a 1MB sliding window into VRAM through BAR0, allowing the CPU to access
+//! video memory directly. The [`Window`] type automatically repositions the window when
+//! accessing different VRAM regions and restores the original position on drop. This allows
+//! the same window to be reused for multiple accesses.
+//!
+//! The PRAMIN aperture is a 1MB region at BAR0 + 0x700000 for all GPUs. The window base is
+//! controlled by the `NV_PBUS_BAR0_WINDOW` register and must be 64KB aligned.
+//!
+//! # Examples
+//!
+//! ## Basic read/write
+//!
+//! ```no_run
+//! use crate::driver::Bar0;
+//! use crate::mm::pramin;
+//!
+//! fn example(bar: &Bar0) -> Result<()> {
+//!     let mut pram_win = pramin::Window::new(bar);
+//!
+//!     // Write and read back.
+//!     pram_win.try_write32(0x100, 0xDEADBEEF)?;
+//!     let val = pram_win.try_read32(0x100)?;
+//!     assert_eq!(val, 0xDEADBEEF);
+//!
+//!     Ok(())
+//!     // Original window position restored on drop.
+//! }
+//! ```
+//!
+//! ## Auto-repositioning across VRAM regions
+//!
+//! ```no_run
+//! use crate::driver::Bar0;
+//! use crate::mm::pramin;
+//!
+//! fn example(bar: &Bar0) -> Result<()> {
+//!     let mut pram_win = pramin::Window::new(bar);
+//!
+//!     // Access first 1MB region.
+//!     pram_win.try_write32(0x100, 0x11111111)?;
+//!
+//!     // Access at 2MB - window auto-repositions.
+//!     pram_win.try_write32(0x200000, 0x22222222)?;
+//!
+//!     // Back to first region - window repositions again.
+//!     let val = pram_win.try_read32(0x100)?;
+//!     assert_eq!(val, 0x11111111);
+//!
+//!     Ok(())
+//! }
+//! ```
+
+use crate::{
+    driver::Bar0,
+    regs, //
+};
+
+use kernel::bits::genmask_u64;
+use kernel::prelude::*;
+use kernel::ptr::{Alignable, Alignment};
+use kernel::sizes::{SZ_1M, SZ_64K};
+
+/// PRAMIN aperture base offset in BAR0.
+const PRAMIN_BASE: usize = 0x700000;
+
+/// PRAMIN aperture size (1MB).
+const PRAMIN_SIZE: usize = SZ_1M;
+
+/// 64KB alignment for window base.
+const WINDOW_ALIGN: Alignment = Alignment::new::<SZ_64K>();
+
+/// Maximum addressable VRAM offset (40-bit address space).
+///
+/// The `NV_PBUS_BAR0_WINDOW` register has a 24-bit `window_base` field (bits 23:0) that stores
+/// bits [39:16] of the target VRAM address. This limits the addressable space to 2^40 bytes.
+const MAX_VRAM_OFFSET: usize = genmask_u64(0..=39) as usize;
+
+/// Generate a PRAMIN read accessor.
+macro_rules! define_pramin_read {
+    ($name:ident, $ty:ty) => {
+        #[doc = concat!("Read a `", stringify!($ty), "` from VRAM at the given offset.")]
+        pub(crate) fn $name(&mut self, vram_offset: usize) -> Result<$ty> {
+            let bar_offset = self.ensure_window(vram_offset, ::core::mem::size_of::<$ty>())?;
+            self.bar.$name(bar_offset)
+        }
+    };
+}
+
+/// Generate a PRAMIN write accessor.
+macro_rules! define_pramin_write {
+    ($name:ident, $ty:ty) => {
+        #[doc = concat!("Write a `", stringify!($ty), "` to VRAM at the given offset.")]
+        pub(crate) fn $name(&mut self, vram_offset: usize, value: $ty) -> Result {
+            let bar_offset = self.ensure_window(vram_offset, ::core::mem::size_of::<$ty>())?;
+            self.bar.$name(value, bar_offset)
+        }
+    };
+}
+
+/// PRAMIN window for direct VRAM access.
+///
+/// The window auto-repositions when accessing VRAM offsets outside the current 1MB range.
+/// The original window position is saved on creation and restored on drop.
+pub(crate) struct Window<'a> {
+    bar: &'a Bar0,
+    saved_base: usize,
+    current_base: usize,
+}
+
+impl<'a> Window<'a> {
+    /// Create a new PRAMIN window accessor.
+    ///
+    /// Saves the current window position for restoration on drop.
+    pub(crate) fn new(bar: &'a Bar0) -> Self {
+        let saved_base = Self::read_window_base(bar);
+
+        Self {
+            bar,
+            saved_base,
+            current_base: saved_base,
+        }
+    }
+
+    /// Read the current window base from the BAR0_WINDOW register.
+    fn read_window_base(bar: &Bar0) -> usize {
+        let reg = regs::NV_PBUS_BAR0_WINDOW::read(bar);
+        // CAST: u32 to usize is lossless.
+        (reg.window_base() as usize) << 16
+    }
+
+    /// Write a new window base to the BAR0_WINDOW register.
+    fn write_window_base(bar: &Bar0, base: usize) {
+        // CAST:
+        // - We have guaranteed that the base is within the addressable range (40 bits).
+        // - After >> 16, a 40-bit aligned base becomes 24 bits, which fits in u32.
+        regs::NV_PBUS_BAR0_WINDOW::default()
+            .set_window_base((base >> 16) as u32)
+            .write(bar);
+    }
+
+    /// Ensure the window covers the given VRAM offset, returning the BAR0 offset to use.
+    fn ensure_window(&mut self, vram_offset: usize, access_size: usize) -> Result<usize> {
+        // Validate that the VRAM offset is within the addressable range (40-bit address space).
+        let end_offset = vram_offset.checked_add(access_size).ok_or(EINVAL)?;
+        if end_offset > MAX_VRAM_OFFSET + 1 {
+            return Err(EINVAL);
+        }
+
+        // Calculate which 64KB-aligned base we need.
+        let needed_base = vram_offset.align_down(WINDOW_ALIGN);
+
+        // Calculate offset within the window.
+        let offset_in_window = vram_offset - needed_base;
+
+        // Check if the access fits in the 1MB window from this base.
+        if offset_in_window + access_size > PRAMIN_SIZE {
+            return Err(EINVAL);
+        }
+
+        // Reposition the window if needed.
+        if self.current_base != needed_base {
+            Self::write_window_base(self.bar, needed_base);
+            self.current_base = needed_base;
+        }
+
+        // Return the BAR0 offset to access.
+        Ok(PRAMIN_BASE + offset_in_window)
+    }
+
+    define_pramin_read!(try_read8, u8);
+    define_pramin_read!(try_read16, u16);
+    define_pramin_read!(try_read32, u32);
+    define_pramin_read!(try_read64, u64);
+
+    define_pramin_write!(try_write8, u8);
+    define_pramin_write!(try_write16, u16);
+    define_pramin_write!(try_write32, u32);
+    define_pramin_write!(try_write64, u64);
+}
+
+impl Drop for Window<'_> {
+    fn drop(&mut self) {
+        // Restore the original window base if it changed.
+        if self.current_base != self.saved_base {
+            Self::write_window_base(self.bar, self.saved_base);
+        }
+    }
+}
+
+// SAFETY: `Window` requires `&mut self` for all accessors.
+unsafe impl Send for Window<'_> {}
+
+// SAFETY: `Window` requires `&mut self` for all accessors.
+unsafe impl Sync for Window<'_> {}
diff --git a/drivers/gpu/nova-core/nova_core.rs b/drivers/gpu/nova-core/nova_core.rs
index b98a1c03f13d..3104fea04128 100644
--- a/drivers/gpu/nova-core/nova_core.rs
+++ b/drivers/gpu/nova-core/nova_core.rs
@@ -13,6 +13,7 @@
 mod gfw;
 mod gpu;
 mod gsp;
+mod mm;
 mod num;
 mod regs;
 mod sbuffer;
diff --git a/drivers/gpu/nova-core/regs.rs b/drivers/gpu/nova-core/regs.rs
index 82cc6c0790e5..c8b8fbdcf608 100644
--- a/drivers/gpu/nova-core/regs.rs
+++ b/drivers/gpu/nova-core/regs.rs
@@ -96,6 +96,11 @@ fn fmt(&self, f: &mut kernel::fmt::Formatter<'_>) -> kernel::fmt::Result {
     31:16   frts_err_code as u16;
 });
 
+register!(NV_PBUS_BAR0_WINDOW @ 0x00001700, "BAR0 window control for PRAMIN access" {
+    25:24   target as u8, "Target memory (0=VRAM, 1=SYS_MEM_COH, 2=SYS_MEM_NONCOH)";
+    23:0    window_base as u32, "Window base address (bits 39:16 of FB addr)";
+});
+
 // PFB
 
 // The following two registers together hold the physical system memory address that is used by the
-- 
2.34.1
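
[Editor's note: as a reading aid, each `define_pramin_read!` invocation above
generates a thin wrapper around `ensure_window`. A hand expansion of
`define_pramin_read!(try_read32, u32)` (a sketch, not compiler output) looks
like this:]

    /// Read a `u32` from VRAM at the given offset.
    pub(crate) fn try_read32(&mut self, vram_offset: usize) -> Result<u32> {
        // Reposition the 1MB window if needed and compute the BAR0 offset.
        let bar_offset = self.ensure_window(vram_offset, ::core::mem::size_of::<u32>())?;
        // Delegate to the Bar0 accessor of the same name.
        self.bar.try_read32(bar_offset)
    }

So, for example, `try_read32(0x123_4010)` repositions the window base to
`0x123_0000` and reads BAR0 offset `0x70_4010` (`0x700000 + 0x4010`).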
From nobody Sun Feb 8 06:56:18 2026
From: Joel Fernandes
Subject: [PATCH RFC v5 5/6] docs: gpu: nova-core: Document the PRAMIN aperture mechanism
Date: Fri, 19 Dec 2025 15:38:04 -0500
Message-Id: <20251219203805.1246586-6-joelagnelf@nvidia.com>
In-Reply-To: <20251219203805.1246586-1-joelagnelf@nvidia.com>
References: <20251219203805.1246586-1-joelagnelf@nvidia.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

Add documentation for the PRAMIN aperture mechanism used by nova-core
for direct VRAM access.

Nova only uses TARGET=VID_MEM for VRAM access.
The SYS_MEM target values are documented for completeness but not used
by the driver.

Signed-off-by: Joel Fernandes
---
 Documentation/gpu/nova/core/pramin.rst | 125 +++++++++++++++++++++++++
 Documentation/gpu/nova/index.rst       |   1 +
 2 files changed, 126 insertions(+)
 create mode 100644 Documentation/gpu/nova/core/pramin.rst

diff --git a/Documentation/gpu/nova/core/pramin.rst b/Documentation/gpu/nova/core/pramin.rst
new file mode 100644
index 000000000000..55ec9d920629
--- /dev/null
+++ b/Documentation/gpu/nova/core/pramin.rst
@@ -0,0 +1,125 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
+PRAMIN aperture mechanism
+=========================
+
+.. note::
+   The following description is approximate and current as of the Ampere family.
+   It may change for future generations and is intended to assist in understanding
+   the driver code.
+
+Introduction
+============
+
+PRAMIN is a hardware aperture mechanism that provides CPU access to GPU Video RAM (VRAM) before
+the GPU's Memory Management Unit (MMU) and page tables are initialized. This 1MB sliding window,
+located at a fixed offset within BAR0, is essential for setting up page tables and other critical
+GPU data structures without relying on the GPU's MMU.
+
+Architecture Overview
+=====================
+
+The PRAMIN aperture mechanism is logically implemented by the GPU's PBUS (PCIe Bus Controller Unit)
+and provides a CPU-accessible window into VRAM through the PCIe interface::
+
+  +-----------------+    PCIe    +------------------------------+
+  |       CPU       |<---------->|             GPU              |
+  +-----------------+            |                              |
+                                 |  +----------------------+    |
+                                 |  |        PBUS          |    |
+                                 |  |   (Bus Controller)   |    |
+                                 |  |                      |    |
+                                 |  |  +--------------+<------------ (window starts at
+                                 |  |  |   PRAMIN     |    |    |     BAR0 + 0x700000)
+                                 |  |  |   Window     |    |    |
+                                 |  |  |   (1MB)      |    |    |
+                                 |  |  +--------------+    |    |
+                                 |  |         |            |    |
+                                 |  +---------|------------+    |
+                                 |            |                 |
+                                 |            v                 |
+                                 |  +----------------------+<-------- (Program PRAMIN to any
+                                 |  |        VRAM          |    |      64KB-aligned VRAM boundary)
+                                 |  |    (Several GBs)     |    |
+                                 |  |                      |    |
+                                 |  |  FB[0x000000000000]  |    |
+                                 |  |  ...                 |    |
+                                 |  |  FB[0x7FFFFFFFFFF]   |    |
+                                 |  +----------------------+    |
+                                 +------------------------------+
+
+PBUS (PCIe Bus Controller) is responsible for, among other things, handling MMIO
+accesses to the BAR registers.
+
+PRAMIN Window Operation
+=======================
+
+The PRAMIN window provides a 1MB sliding aperture that can be repositioned over
+the entire VRAM address space using the ``NV_PBUS_BAR0_WINDOW`` register.
+
+Window Control Mechanism
+------------------------
+
+The window position is controlled via the PBUS ``BAR0_WINDOW`` register::
+
+  NV_PBUS_BAR0_WINDOW Register (0x1700):
+  +-------+--------+--------------------------------------+
+  | 31:26 | 25:24  |                 23:0                 |
+  | RSVD  | TARGET |              BASE_ADDR               |
+  |       |        |     (bits 39:16 of VRAM address)     |
+  +-------+--------+--------------------------------------+
+
+  BASE_ADDR field (bits 23:0):
+    - Contains bits [39:16] of the target VRAM address
+    - Provides 40-bit (1TB) address space coverage
+    - Must be programmed with 64KB-aligned addresses
+
+  TARGET field (bits 25:24):
+    - 0x0: VRAM (Video Memory)
+    - 0x1: SYS_MEM_COH (Coherent System Memory)
+    - 0x2: SYS_MEM_NONCOH (Non-coherent System Memory)
+    - 0x3: Reserved
+
+.. note::
+   Nova only uses TARGET=VRAM (0x0) for video memory access. The SYS_MEM
+   target values are documented here for hardware completeness but are
+   not used by the driver.
+
+64KB Alignment Requirement
+--------------------------
+
+The PRAMIN window must be aligned to 64KB boundaries in VRAM. This is enforced
+by the ``BASE_ADDR`` field representing bits [39:16] of the target address::
+
+  VRAM Address Calculation:
+    actual_vram_addr = (BASE_ADDR << 16) + pramin_offset
+  Where:
+    - BASE_ADDR: 24-bit value from NV_PBUS_BAR0_WINDOW[23:0]
+    - pramin_offset: 20-bit offset within the PRAMIN window [0x00000-0xFFFFF]
+
+  Example Window Positioning:
+  +---------------------------------------------------------+
+  |                       VRAM Space                        |
+  |                                                         |
+  | 0x000000000 +-----------------+ <-- 64KB aligned        |
+  |             |  PRAMIN Window  |                         |
+  |             |      (1MB)      |                         |
+  | 0x0000FFFFF +-----------------+                         |
+  |                                                         |
+  |        ^                                                |
+  |        | Window can slide                               |
+  |        v to any 64KB-aligned boundary                   |
+  |                                                         |
+  | 0x123400000 +-----------------+ <-- 64KB aligned        |
+  |             |  PRAMIN Window  |                         |
+  |             |      (1MB)      |                         |
+  | 0x1234FFFFF +-----------------+                         |
+  |                                                         |
+  |     ...                                                 |
+  |                                                         |
+  | 0x7FFFF0000 +-----------------+ <-- 64KB aligned        |
+  |             |  PRAMIN Window  |                         |
+  |             |      (1MB)      |                         |
+  | 0x7FFFFFFFF +-----------------+                         |
+  +---------------------------------------------------------+
diff --git a/Documentation/gpu/nova/index.rst b/Documentation/gpu/nova/index.rst
index e39cb3163581..b8254b1ffe2a 100644
--- a/Documentation/gpu/nova/index.rst
+++ b/Documentation/gpu/nova/index.rst
@@ -32,3 +32,4 @@ vGPU manager VFIO driver and the nova-drm driver.
    core/devinit
    core/fwsec
    core/falcon
+   core/pramin
-- 
2.34.1
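
[Editor's note: a quick numeric check of the address calculation documented
above, written as a Rust sketch with illustrative values chosen to match the
figure:]

    // actual_vram_addr = (BASE_ADDR << 16) + pramin_offset
    let base_addr: u64 = 0x12340;     // value written to NV_PBUS_BAR0_WINDOW[23:0]
    let pramin_offset: u64 = 0x00100; // offset within the 1MB window
    // Window base is 0x12340 << 16 = 0x1_2340_0000; the access lands at:
    assert_eq!((base_addr << 16) + pramin_offset, 0x1_2340_0100);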
From nobody Sun Feb 8 06:56:18 2026
From: Joel Fernandes
Subject: [PATCH RFC v5 6/6] nova-core: Add PRAMIN aperture self-tests
Date: Fri, 19 Dec 2025 15:38:05 -0500
Message-Id: <20251219203805.1246586-7-joelagnelf@nvidia.com>
In-Reply-To: <20251219203805.1246586-1-joelagnelf@nvidia.com>
References: <20251219203805.1246586-1-joelagnelf@nvidia.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

Add self-tests for the PRAMIN aperture mechanism to verify correct
operation during GPU probe. The tests validate:

- Byte-level read/write at odd-aligned locations
- Word write followed by byte-level readback (endianness verification)
- Window repositioning across 1MB boundaries

The tests are gated behind CONFIG_NOVA_PRAMIN_SELFTESTS which is
disabled by default. When enabled, tests run after GSP boot during
probe.

Also relax the module-level `#![expect(unused)]` to `#![allow(unused)]`,
since parts of the PRAMIN code are now actively used.

Signed-off-by: Joel Fernandes
---
 drivers/gpu/nova-core/Kconfig      |  11 ++++
 drivers/gpu/nova-core/gsp/boot.rs  |   4 ++
 drivers/gpu/nova-core/mm/pramin.rs | 102 ++++++++++++++++++++++++++++-
 3 files changed, 115 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/nova-core/Kconfig b/drivers/gpu/nova-core/Kconfig
index 20d3e6d0d796..a37335e3381c 100644
--- a/drivers/gpu/nova-core/Kconfig
+++ b/drivers/gpu/nova-core/Kconfig
@@ -14,3 +14,14 @@ config NOVA_CORE
 	  This driver is work in progress and may not be functional.
 
 	  If M is selected, the module will be called nova_core.
+
+config NOVA_PRAMIN_SELFTESTS
+	bool "PRAMIN self-tests"
+	depends on NOVA_CORE
+	default n
+	help
+	  Enable self-tests for the PRAMIN aperture mechanism. When enabled,
+	  basic tests are run during GPU probe after GSP boot to
+	  verify PRAMIN functionality.
+
+	  This is a testing option and should normally be disabled.
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs
index 54937606b5b0..b9750c86b6ed 100644
--- a/drivers/gpu/nova-core/gsp/boot.rs
+++ b/drivers/gpu/nova-core/gsp/boot.rs
@@ -239,6 +239,10 @@ pub(crate) fn boot(
         // Wait until GSP is fully initialized.
         commands::wait_gsp_init_done(&mut self.cmdq)?;
 
+        // Run PRAMIN aperture self-tests (disabled by default).
+        #[cfg(CONFIG_NOVA_PRAMIN_SELFTESTS)]
+        crate::mm::pramin::run_self_test(pdev.as_ref(), bar)?;
+
         // Obtain and display basic GPU information.
         let info = commands::get_gsp_info(&mut self.cmdq, bar)?;
         dev_info!(
diff --git a/drivers/gpu/nova-core/mm/pramin.rs b/drivers/gpu/nova-core/mm/pramin.rs
index 5878a3f80b9c..483832b8f87e 100644
--- a/drivers/gpu/nova-core/mm/pramin.rs
+++ b/drivers/gpu/nova-core/mm/pramin.rs
@@ -1,7 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
 
-#![expect(unused)]
-
 //! Direct VRAM access through the PRAMIN aperture.
 //!
 //! PRAMIN provides a 1MB sliding window into VRAM through BAR0, allowing the CPU to access
@@ -56,6 +54,8 @@
 //! }
 //! ```
 
+#![allow(unused)]
+
 use crate::{
     driver::Bar0,
     regs, //
@@ -198,3 +198,101 @@ unsafe impl Send for Window<'_> {}
 
 // SAFETY: `Window` requires `&mut self` for all accessors.
 unsafe impl Sync for Window<'_> {}
+
+/// Run PRAMIN self-tests during probe.
+#[cfg(CONFIG_NOVA_PRAMIN_SELFTESTS)]
+pub(crate) fn run_self_test(dev: &kernel::device::Device, bar: &Bar0) -> Result {
+    dev_info!(dev, "PRAMIN: Starting self-test...\n");
+
+    let mut win = Window::new(bar);
+
+    // Use offset 0x1000 as test area.
+    let base: usize = 0x1000;
+
+    // Test 1: Read/write at odd-aligned locations.
+    dev_info!(dev, "PRAMIN: Test 1 - Odd-aligned u8 read/write\n");
+    for i in 0u8..4 {
+        let offset = base + 1 + i as usize; // Offsets 0x1001, 0x1002, 0x1003, 0x1004
+        let val = 0xA0 + i;
+        win.try_write8(offset, val)?;
+        let read_val = win.try_read8(offset)?;
+        if read_val != val {
+            dev_err!(
+                dev,
+                "PRAMIN: FAIL - offset {:#x}: wrote {:#x}, read {:#x}\n",
+                offset,
+                val,
+                read_val
+            );
+            return Err(EIO);
+        }
+    }
+    dev_info!(dev, "PRAMIN: Test 1 PASSED\n");
+
+    // Test 2: Write u32 and read back as u8s.
+    dev_info!(dev, "PRAMIN: Test 2 - Write u32, read as u8s\n");
+    let test2_offset = base + 0x10;
+    let test2_val: u32 = 0xDEADBEEF;
+    win.try_write32(test2_offset, test2_val)?;
+
+    // Read back as individual bytes (little-endian: EF BE AD DE).
+    let expected_bytes: [u8; 4] = [0xEF, 0xBE, 0xAD, 0xDE];
+    for (i, &expected) in expected_bytes.iter().enumerate() {
+        let read_val = win.try_read8(test2_offset + i)?;
+        if read_val != expected {
+            dev_err!(
+                dev,
+                "PRAMIN: FAIL - offset {:#x}: expected {:#x}, read {:#x}\n",
+                test2_offset + i,
+                expected,
+                read_val
+            );
+            return Err(EIO);
+        }
+    }
+    dev_info!(dev, "PRAMIN: Test 2 PASSED\n");
+
+    // Test 3: Window repositioning across 1MB boundaries.
+    // Write to offset > 1MB to trigger window slide, then verify.
+    dev_info!(dev, "PRAMIN: Test 3 - Window repositioning\n");
+    let test3_offset_a = base; // First 1MB region
+    let test3_offset_b = 0x200000 + base; // 2MB + base (different 1MB region)
+    let val_a: u32 = 0x11111111;
+    let val_b: u32 = 0x22222222;
+
+    // Write to first region.
+    win.try_write32(test3_offset_a, val_a)?;
+
+    // Write to second region (triggers window reposition).
+    win.try_write32(test3_offset_b, val_b)?;
+
+    // Read back from second region.
+    let read_b = win.try_read32(test3_offset_b)?;
+    if read_b != val_b {
+        dev_err!(
+            dev,
+            "PRAMIN: FAIL - offset {:#x}: expected {:#x}, read {:#x}\n",
+            test3_offset_b,
+            val_b,
+            read_b
+        );
+        return Err(EIO);
+    }
+
+    // Read back from first region (triggers window reposition again).
+    let read_a = win.try_read32(test3_offset_a)?;
+    if read_a != val_a {
+        dev_err!(
+            dev,
+            "PRAMIN: FAIL - offset {:#x}: expected {:#x}, read {:#x}\n",
+            test3_offset_a,
+            val_a,
+            read_a
+        );
+        return Err(EIO);
+    }
+    dev_info!(dev, "PRAMIN: Test 3 PASSED\n");
+
+    dev_info!(dev, "PRAMIN: All self-tests PASSED\n");
+    Ok(())
+}
-- 
2.34.1
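
[Editor's note: to exercise these tests, a kernel config fragment like the
following should suffice, per the Kconfig hunk above:]

    CONFIG_NOVA_CORE=m
    CONFIG_NOVA_PRAMIN_SELFTESTS=y

On success, probe logs the per-test PASSED lines and finally
"PRAMIN: All self-tests PASSED", per the dev_info! calls in run_self_test().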