From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1E10037BE7F; Mon, 9 Feb 2026 14:38:47 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647927; cv=none; b=HN1IgLTMdGA+g8++TjCOoeR+KzVJ1RipGssdVozeV5wXXE2hOnoaHzUBWNi/SEg2yAe3rv9aEnht/9FUGKc5hEsnJUyyu63Uh28r5yX9IzlGFYmMCZ34p88oafIip1qq8T+yN2HHGbK/IHsat1JLRBJV3Kx10Bt6Ai9ED4fS8aE= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647927; c=relaxed/simple; bh=IuZIeAbMbsdgaivN59tt1avMvbqBct88rIBvqOT1fR4=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=UbckW9P6YEcQzSanTVcJ2QuUZ/yUSvG0X2q+442kqndsEUqF+Serk0T/XTdDOCepcaoPfH95TApxH7HwY2bqPLaacgjDq55XK5kNnJU1/6f0IrZGizL9Ga8yy6ZL5AQAeQ/W6Yvov8JEQuoIl/At+IHcWU0AMKAsfjmU2rBqNQ8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=Ej84H0zM; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="Ej84H0zM" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 7670FC19424; Mon, 9 Feb 2026 14:38:41 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647926; bh=IuZIeAbMbsdgaivN59tt1avMvbqBct88rIBvqOT1fR4=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=Ej84H0zMEMGgPiMbNYWOo0DChrVkdP7DvC+nXkl3vvKX71T8AxtfNA8lGZqiX6vds ReWeu7Hg0qUhCJFGdLXVHD9eO5jE9c7j1PvVrr5CrhkIxkBXm+ih5ylQ4gTq6q6/QO RgERqiHV4Hn+XmdjDD0XOQaE5lDpmtDdKKt/ILzpzZWgjewqfRk3aC2qJI12LDzaeZ 0/OmXxf2Y5tE3x4xbZ6Thjw7e65CpneFW0dAuyZn9E9/z2PXQVQVb5Tf8QPshgs3OM Pewd8FijlCH473aXx+2SQagSCXaE8Qxqw++NSgpHygGjUmYGDVeNqEY521s68ChpUg JdHOEq/YOHbPw== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:06 +0100 Subject: [PATCH v3 01/12] rust: xarray: minor formatting fixes Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-1-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=2161; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=IuZIeAbMbsdgaivN59tt1avMvbqBct88rIBvqOT1fR4=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFVZvzAtSDsr+CZ/ZQXchk3O+6YaG5edPjg5 +kyPR0aS+6JAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxVQAKCRDhuBo+eShj d2b7D/9i4AnYsinT630ZGFRy/Kh8bYrxUBt/LUmTe30OOzGERcX8Ib50IC1qO3zs69GiuFbqRFi 9QiIaBN/0HodwyMFQAtubXEGX8gOm5kXkl+4cMPkQLDIJ2oKaL2TfPMtVcECwU260gOp4t5u9Jw LUDhX8V8RR/W1v8ca7+hnqjwauXgTZkNy3fquLEDQwM9tIt+jLCxljAbiTJeaGbOSg/Qp4fse31 sgOgNzjJxhYqbSM0oAAPh0okko/NI9FXP5RazyAA3wSwVMu7ep7SUm9p6uVdDQXS8rYb1AGX8qd iZwB+5mmnE5GBrg0wGnWtWYuyX93FdjL85boyY3seBWIrgs4mHT3v6/NGDXxe+ftwWFruGucANT fNaymZGF+JgIawTniS/2g+uEa6c5G4qFNtuIQhWp5cawKpGEgF9r9OcfR2XMzcXugW1kdYv3bmn xf8gevt43sKknTy6JjKRi+wAkSo796bG5FV2k5r+lQs6C31hooZ7qVUNP7c+F8753wWcd5UGejX RKwmplbzFCh6mou5NQBraIYiSTy66YyxqLYro9HjYzE9FqPQ9bRCU5QXObA2Mre4dNT6YWILRsu FM0tWtdOIZOuDlL0lOmr+6DUXLf9OjO7Aa9xL8S8Km8QU4O92D0C11RM3ffiNhY436oiI5sIOfP 301iZwvcnhfZaOw== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Fix formatting in xarray module to comply with kernel coding guidelines: - Update use clauses to use vertical layout with each import on its own line. - Add trailing empty comments to preserve formatting and prevent rustfmt from collapsing imports. - Break long assert_eq! statement in documentation across multiple lines for better readability. Reviewed-by: Gary Guo Reviewed-by: Tamir Duberstein Acked-by: Tamir Duberstein Signed-off-by: Andreas Hindborg --- rust/kernel/xarray.rs | 36 +++++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index a49d6db288458..88625c9abf4ef 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -4,14 +4,33 @@ //! //! C header: [`include/linux/xarray.h`](srctree/include/linux/xarray.h) =20 -use crate::{ - alloc, bindings, build_assert, - error::{Error, Result}, +use core::{ + iter, + marker::PhantomData, + pin::Pin, + ptr::NonNull, // +}; +use kernel::{ + alloc, + bindings, + build_assert, // + error::{ + Error, + Result, // + }, ffi::c_void, - types::{ForeignOwnable, NotThreadSafe, Opaque}, + types::{ + ForeignOwnable, + NotThreadSafe, + Opaque, // + }, +}; +use pin_init::{ + pin_data, + pin_init, + pinned_drop, + PinInit, // }; -use core::{iter, marker::PhantomData, pin::Pin, ptr::NonNull}; -use pin_init::{pin_data, pin_init, pinned_drop, PinInit}; =20 /// An array which efficiently maps sparse integer indices to owned object= s. 
/// @@ -44,7 +63,10 @@ /// *guard.get_mut(0).unwrap() =3D 0xffff; /// assert_eq!(guard.get(0).copied(), Some(0xffff)); /// -/// assert_eq!(guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), Some= (0xffff)); +/// assert_eq!( +/// guard.store(0, beef, GFP_KERNEL)?.as_deref().copied(), +/// Some(0xffff) +/// ); /// assert_eq!(guard.get(0).copied(), Some(0xbeef)); /// /// guard.remove(0); --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 0A11D2222B2; Mon, 9 Feb 2026 14:39:22 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647963; cv=none; b=c8fLnDV6vqaCUpwRZganhXfqFo3Q35W7ewUBi/RFdDt/XjPP8MRNDcSyE/TnSN1Q82q+JveOLw5wjfCWJqP3G/gW6orp85tQPMtnjlIj4XY4vI5VBu0RME7wTPkTA+Shaks1M/kfFi5xv7uOAE726mjNoDmHMD+wmCvxZDsfwpI= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647963; c=relaxed/simple; bh=QeBXOf7iICj0Ad5qHcIJv0VhQMqjDmVdKEV04oiCJGE=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=UFL36CMtmjzyNOpaW/Xo3WSnbSA2nji4QJp/tJ9ZkpRrM/sxQ1NIzVuRJjym7mAF9lX1JsusjkRBHa3vjx6wZ4sshFaAJtr/K25WymH9/x5nWxtrAKY8WYHGNg2BNkLArDPwJS6AKJ6SZNmdbGD1nB+0mLXL2r4hpS8Qwm9xAaM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=KsCOB8hv; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="KsCOB8hv" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 4FFB5C16AAE; Mon, 9 Feb 2026 14:39:17 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647962; bh=QeBXOf7iICj0Ad5qHcIJv0VhQMqjDmVdKEV04oiCJGE=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=KsCOB8hvj7Gtk4r2EvVikB0I4H7vI12YU2LI1k0BQOQnCeokh6tjUPBHt71Q9pkdZ 1lNhcWa9XKHUI8c1dl/BxAg8wdd4stVJralvwL6pR28PRYVYPslxkrZ/GSWS2he9TO 3s6rWp7/K3KEbpBykpo4f+sVpz/vyJx3w9VsV81QqTq7sd8QwFVdaTtBKw8OcFuFro SedCpoe7++qniGqyVXi/+cdSI4xr69ogDjCnr0+9dBH4u/ExIVPzBsxc/7HdhrWNOY rNWc5pQf3vpU61ghIg0y8I57IxpcczFGnENVCPWsW+Kt5NmWivpJPsoJbtLRJR2F3a W3gCw7K8boG+g== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:07 +0100 Subject: [PATCH v3 02/12] rust: xarray: add debug format for `StoreError` Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-2-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=1047; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=QeBXOf7iICj0Ad5qHcIJv0VhQMqjDmVdKEV04oiCJGE=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFW0J5aHNJu6sg5tbQ2bMQpjZ6diEOzfohIH ThYiryfIliJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxVgAKCRDhuBo+eShj d/aCD/9U7agBCp8sIBnhY4v7lk+rLva9/so2zOxVLJn+Ju2B2HRFE10h0Jx9KyJVbbvbeIcxgJf BBq2DOvK+m0c5GSw2uewYmJaqcLA4mxfaOzgtfFsA94TV0MBmp9XydMMTvx4/zRi9BeLIu7Uko3 thaN5I1xUBYiVkhk3KlJ2BQujOvvZwmKqROn03pXpFBr0cBSKuHmFVVprzMRd7UpSMRqoBEoh60 9+Qe1KDMDPekp0CezkJurKJK/E5uVbdwSgRRAfXonDulmT2hhARoZBe4sJ1HaUczJUNWYzs2Wjr XOJO7pKsDEwGnK86gnShBPj/5VQ5PqicQmpGikFIwUcDDxWSLyA6nREmK8gl+fm0Fj7JQrvX/3O lg7dXPMeWFMCbN0aAitSXxaxYDIZf0xaTpeiUebEO+w8BU8zlW7ICOO7UUyhYuonKEg1aLQIiyP kTkcIlosy5t3uXD5hYKDLQ/ZnS3GQdxjEiw1uHS3W1Zd/I6P63lvkaPI9RIZbiiD/6kmKsgp1Zm wRMVg4srvYx1V2/ZkDxDE4b9qqXT/jxvkzKe5OuVwK4aa/P0kEs5NIO2g2vh4t7SJE7PlHA/eVT EW9eLL9eROuQmExyYOAAMvWNTC5fxcPKDH6gZWBkAwjID7zizU2AD1r3Puc1cMN6sBeNrOrr1+7 HZgrFfLb68bznRQ== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add a `Debug` implementation for `StoreError` to enable better error reporting and debugging. The implementation only displays the `error` field and omits the `value` field, as `T` may not implement `Debug`. Reviewed-by: Gary Guo Acked-by: Tamir Duberstein Signed-off-by: Andreas Hindborg --- rust/kernel/xarray.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index 88625c9abf4ef..d9762c6bef19c 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -193,6 +193,14 @@ pub struct StoreError { pub value: T, } =20 +impl core::fmt::Debug for StoreError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + f.debug_struct("StoreError") + .field("error", &self.error) + .finish() + } +} + impl From> for Error { fn from(value: StoreError) -> Self { value.error --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1AA8F36BCE2; Mon, 9 Feb 2026 14:38:53 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647933; cv=none; b=D6159vZS5t6nhuyTcLGwiMTd5Od2CixP5M+cgUkAN8xnltg1gdQMsPxTHDUSDA979e9ukpeXR8uK16pu/QuNYg/PHouzJ1L6Lmw3njT1MfN5ALK51k3CK/nUxRm/2b3ZIBIDexziPe4PVUW1tS9eVZ5kEArSoW3RqXaHqyfaROo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647933; c=relaxed/simple; bh=PrcApbk7BCNSPsQwnef2I6IdaZ2i07Y2ln8fzlwV9sQ=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=VJo40/Fp2hmRhBdR3UrFV7A/RsL75Upz255wj4/CT9nR7TGsx6/zQkcJrQrtBV/UFXqK+BixQ8jngwu23L10/UyNxHCN1GGORxuQGXxcNDQ3IIsPsIRl5EmppwrwUClqyPedMCWOsTsoV8nnyWnOM0ZRe0C61kecMvAUA88h/v8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=MgUYcmvE; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: 
smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="MgUYcmvE" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 8F823C19422; Mon, 9 Feb 2026 14:38:47 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647933; bh=PrcApbk7BCNSPsQwnef2I6IdaZ2i07Y2ln8fzlwV9sQ=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=MgUYcmvEUQ9An1F2rMEYfu+G6E7UYw7TRPeZBzGLNjCdhoccnuiCKATj05xiNBIa5 WW2426/lh800d7CPoGN7MiYWSKlcZbPxcVOUrE18x56MH2iZ21Pz55+lqdDDsaWTCj djG0guEOEV7aSvbwvUkJvz7AM7lJv0ioRrrY86rkFuNoK2Z5nSuN5nMJrkky1Kz5Ty 01oT52B0DvivieFrqElBXFDR5fOB1rpoleTMhAC8zZM8oqL81S3qez8nBVEgR7wHin HdNgoc3Sh59DHy47BYeSBYtXUQEsmpjB799aAYCZuF3sI5CcWKy1/3JTmUOq4fZEG9 XYJYElhzXAdNQ== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:08 +0100 Subject: [PATCH v3 03/12] rust: xarray: add `contains_index` method Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-3-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=1538; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=PrcApbk7BCNSPsQwnef2I6IdaZ2i07Y2ln8fzlwV9sQ=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFXwEdzwCsucoKq2KE54BrulAwrQmBpO4yTF HcVvqk8u0uJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxVwAKCRDhuBo+eShj d3PQD/oDY7iFQvP7waZGcxclTVtbgYcbDsPw2BXqzs4G4r0nEmsZ/bc3z3fctpmk5Eu4QBQBSmz GquJbOx7DS+XXhyisG/lSfDSZTcjM3B7djaHs6kv2X2GpCaUTA1D7we6YKiCLNQKKWSz8rPBedH AS0WmyqMBJGsnjNcTB6ho2HB80mJuc87vpqgnong594U46aZH98gBMZjCR3WL+eFNZlbl2hFTQZ Yxi8R/7glxzu6qhJhL1OVS2sNimeBM2TsYuj5g0a8QJCrRnNqOJum1BMAeieTDDW0j+3gnn1IOp k4kdrK3jzQ6DLIQiKbLvV/dsCJ1qX8+wny8gwKWuB4f19wSKV55RUPzmUnlH/qMQdKK0ucjZEH0 yXg1X/DtCzjDdp4cTqS90gIquSlMVVcyZurN06hK/O8WdYHh/crxpDtb7Wtq3HHrsxnMTppHeK+ H5iFzc0pNaUVCFYuMEDzDv3EnVGt2uGtkQE37lz8ttk2jGr2WdhUnR0ARFRLItdTm5SXRml0ayX cwBs5jBKuS70hr93mo9OvR/zboRwQUqq18WdAk5owQSaAEiwN0H5I49xml/jOydbTFc5S1udD5D QbYxPkEFxloK1KgcD15RJX98bVmLwNWlICo9n9iBiD+TPmMsnMIkqWVNHgThONOrkFG1q6/6SO/ 7naUgNlrGsokBFA== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add a convenience method `contains_index` to check whether an element exists at a given index in the XArray. This method provides a more ergonomic API compared to calling `get` and checking for `Some`. Signed-off-by: Andreas Hindborg --- rust/kernel/xarray.rs | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index d9762c6bef19c..ede48b5e1dba3 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -218,6 +218,27 @@ fn load(&self, index: usize, f: F) -> Option Some(f(ptr)) } =20 + /// Checks if the XArray contains an element at the specified index. 
+ /// + /// # Examples + /// + /// ``` + /// # use kernel::{alloc::{flags::GFP_KERNEL, kbox::KBox}, xarray::{Al= locKind, XArray}}; + /// let xa =3D KBox::pin_init(XArray::new(AllocKind::Alloc), GFP_KERNE= L)?; + /// + /// let mut guard =3D xa.lock(); + /// assert_eq!(guard.contains_index(42), false); + /// + /// guard.store(42, KBox::new(0u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// assert_eq!(guard.contains_index(42), true); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn contains_index(&self, index: usize) -> bool { + self.get(index).is_some() + } + /// Provides a reference to the element at the given index. pub fn get(&self, index: usize) -> Option> { self.load(index, |ptr| { --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 74C1A37BE93; Mon, 9 Feb 2026 14:39:35 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647975; cv=none; b=InBp6UezP+1dW9q+YJmeSvWQGd4XJHk9JpcuSda8YKFDVbbqwwAHnzIn5RL0m4AaPAPK5vyiWVAqEAEd4RaahCo/wyUMJMEPp7/jxm22AvuSc+8K9kI9of94w27dmL/+LXA9ssTFwUL+t/FPp3UAHqczhG8Rbrss4eoSbNdrIaA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647975; c=relaxed/simple; bh=2aA6TsP5TqOXxamqOrEM+ulf/tg/RPrxh0tZsqKQ8Qk=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=LSc6Octyd9r0n5XSR7Wr5cUhCYoLrW50bw+sjOT2fu87HZiKO+EIyRzh8YsOdUHa+xqcdpb5DND5FRldPOnEf98FCrEgrE7n/moo1vvKulZXZ9BNN+IkTE3QvXTgxnBfR9WU10/rcHh9mwYyC7fjVky+EtdxLi9bYzSQF0/ALfM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=f3/6c+CD; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="f3/6c+CD" Received: by smtp.kernel.org (Postfix) with ESMTPSA id AFD43C19423; Mon, 9 Feb 2026 14:39:29 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647975; bh=2aA6TsP5TqOXxamqOrEM+ulf/tg/RPrxh0tZsqKQ8Qk=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=f3/6c+CDPCNHcWfMSeuShETnmdtjswhGYyK9abyIPkMMLBHgipbzcxoFAn+ZhIw3W THwWkaLF5zs+2Cztg1ctGv0W03mRF8jvkMYsIUASGbyiOP/xL59u2z/vTd6hmaCsvZ aYCrroCi8cfkdVD6cJgS+mfq3Sxq1Au75al6b4C7EkPFqtJtn5ze1qlaTysLNVdmNR vVkuuhB6gkY5RQrr2akrRTx1196traXRoI3MPRJCkZIFN6dOzKNdnYu3cuGZNhouDD UX0hU8WZNM62DlEpSrxc0BAf635t9Rz3J9c393HtV7fOIkWOixLT0PFrJ7EttiYPC4 WhHTWO4b4ppnQ== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:09 +0100 Subject: [PATCH v3 04/12] rust: xarray: add `XArrayState` Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-4-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=3142; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=2aA6TsP5TqOXxamqOrEM+ulf/tg/RPrxh0tZsqKQ8Qk=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFY1Yb/s0Cx5dL+ceTrbgCsO2VEgXHKl81NG XaSBz9rJQKJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxWAAKCRDhuBo+eShj d6GqD/9zwzHqTXewAxq/4Wup8VvXE1kLNVxZpeJpW/OsViwlFsajqwWV7FfAutYCIWFTcnPkTnj 0KKlFWUqLdmeYmIhfzpePwVrz2E5PlCGWXKO1XlVgVp0Q8iinr6dpXyEQxvsz8pYWXlWCkQSQZL eR44WTMOlaPhzuCALGKg+ijVlbokb4zq/j21bPjYozplxqlXPZud2oHZC/Rl2FOchlj5Zn5SVn1 bgu9ICVxaFx1umTR83cLpYv5sZLz1+I285ESrQwJG4faCrDcA0GrB6nllbYOuf/3mTZC7tem+B6 1tY7L5iEy8CZKLz1m6GATAeqapHo94gkhOt13tuX08sI9dONC0VOBl6VCUWS5lthQX6j2ousEhA S8j3JKNfWmBtsjn9btygkJeN2mVG4sDN3iYuWnMxe/CNtrxMG6jHP/JFa97Ic1lMVbDZoMqXsAq n5qlRo7XpyBq+sYqYobnMzRXtKBl092PLTVDY0aY4dCKscf+MhHxF2GnyEuDR7UlMWSvHMwPfVu vuiZY7nQXW4LoyQXPak9P5Ot59iYBk6xlG2vB4x+f7MPFVwyQVOq3fNC0iOnsrtEd9Q36ZvV6op yazBSX4V79D0JXg+QCpSeCtGjNqI9IfmR26sZGs+X8DCdO8HerDf8MEImi1dwbbM54y1yQe6oZx uh9A7d6F+FrrOow== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add `XArrayState` as internal state for XArray iteration and entry operations. This struct wraps the C `xa_state` structure and holds a reference to a `Guard` to ensure exclusive access to the XArray for the lifetime of the state object. The `XAS_RESTART` constant is also exposed through the bindings helper to properly initialize the `xa_node` field. The struct and its constructor are marked with `#[expect(dead_code)]` as there are no users yet. We will remove this annotation in a later patch. Signed-off-by: Andreas Hindborg --- rust/bindings/bindings_helper.h | 1 + rust/kernel/xarray.rs | 41 +++++++++++++++++++++++++++++++++++++= +++- 2 files changed, 41 insertions(+), 1 deletion(-) diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helpe= r.h index a067038b4b422..58605c32e8102 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -117,6 +117,7 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRESENT =3D XA_PRE= SENT; =20 const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC =3D XA_FLAGS_ALLOC; const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 =3D XA_FLAGS_ALLOC1; +const size_t RUST_CONST_HELPER_XAS_RESTART =3D (size_t)XAS_RESTART; =20 const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE =3D VM_MERGEABLE; const vm_flags_t RUST_CONST_HELPER_VM_READ =3D VM_READ; diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index ede48b5e1dba3..d1246ec114898 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -8,7 +8,10 @@ iter, marker::PhantomData, pin::Pin, - ptr::NonNull, // + ptr::{ + null_mut, + NonNull, // + }, }; use kernel::{ alloc, @@ -319,6 +322,42 @@ pub fn store( } } =20 +/// Internal state for XArray iteration and entry operations. +/// +/// # Invariants +/// +/// - `state` is always a valid `bindings::xa_state`. +#[expect(dead_code)] +pub(crate) struct XArrayState<'a, 'b, T: ForeignOwnable> { + /// Holds a reference to the lock guard to ensure the lock is not drop= ped + /// while `Self` is live. 
+ _access: PhantomData<&'b Guard<'a, T>>, + state: bindings::xa_state, +} + +impl<'a, 'b, T: ForeignOwnable> XArrayState<'a, 'b, T> { + #[expect(dead_code)] + fn new(access: &'b Guard<'a, T>, index: usize) -> Self { + let ptr =3D access.xa.xa.get(); + // INVARIANT: We initialize `self.state` to a valid value below. + Self { + _access: PhantomData, + state: bindings::xa_state { + xa: ptr, + xa_index: index, + xa_shift: 0, + xa_sibs: 0, + xa_offset: 0, + xa_pad: 0, + xa_node: bindings::XAS_RESTART as *mut bindings::xa_node, + xa_alloc: null_mut(), + xa_update: None, + xa_lru: null_mut(), + }, + } + } +} + // SAFETY: `XArray` has no shared mutable state so it is `Send` iff `T`= is `Send`. unsafe impl Send for XArray {} =20 --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 11DEA37C0F1; Mon, 9 Feb 2026 14:39:04 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647945; cv=none; b=A5LI3r93SVtrEqBrobnN9V5004vyWsn5aY2iEJgl735qAtz/JKKI/nNP9r2pyN/968OY0nfCSPN+uFUD7PIYUXwChbNWaSiSdNOUmWvwCVAE9a+XZTuu+p9OYInfawTXT8rMT6dXxLMhvuyVoNrnu4beDk/za2zTUgyRx8T1xO0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647945; c=relaxed/simple; bh=hPUvMpxg3yU6YJ5+chgN6zUpPTceUM2JTPt9rO+kJpw=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=TtBM7TGNMo0TBFmLlFO32fg86FTH6Eivl9b5mdGLAJQyUT4pMCbFngQIGf8aBxXlMxj9KKZNadRSFzkVYt85rsCZ4mjCLfs4AZZ2eUtp1Jyp85kfYpW3KDvmK+JPhOqXwGL6JcfoXE6tM3hGER4UPCA1vJgLEQNUt0VWjnn83U8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=L35vImqr; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="L35vImqr" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 9EB47C19425; Mon, 9 Feb 2026 14:38:59 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647944; bh=hPUvMpxg3yU6YJ5+chgN6zUpPTceUM2JTPt9rO+kJpw=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=L35vImqrNtEsIYbz0XKnuOtX8nsW6acGFOYBDJpFKeRNY3s71iAPwhas4hXmvRsrm fUnTzOwPUKWy6mwIjyYM/GJQBfrTw8BDvOwTg81cXK0bGCqTABDXYCCJabUz4snejs 8GfEBKTGa6Sh5jhmRiaoXuMq99uN0npnM7jXIDgyLIBbWOrBWVcqbmjyZDsU2HgDSK LuNrHzoUs49Q2KsmGe0CgFE/4MNezLeEDQ6RTZYed6e1anXBiSXLDBp3ErhxT3vzR5 ydLTbir9G2edTPnS2FdVabJ7jHVRJxu6mU7FSgVDHyDkdLnOd1gTxQhAAwQNtphl2Y AhCUwBfJpHNaw== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:10 +0100 Subject: [PATCH v3 05/12] rust: xarray: use `xas_load` instead of `xa_load` in `Guard::load` Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-5-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor 
Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=2487; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=hPUvMpxg3yU6YJ5+chgN6zUpPTceUM2JTPt9rO+kJpw=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFZFhygcsP92+CC/iGZ/yyaRbPPG+QQI1WSU gLT911IMLiJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxWQAKCRDhuBo+eShj d2zRD/oDbAVgfgHXeRmBddcqnK4VqTy56UmL/gr82qWgR5P3fipSqmnmxnrSoE8pQBBd/bAtADt YgaB/4g53dINi8fCUbJ4Q3sXtImvJjn7pPenPU977QBqf3rla5HIyXEzQedjtj04MLFpMnUz9ed eHZ9/pe3NbZAc47xhNeoeAJrrkhOKQOBYrbHrHJ1kKb0iIwOziicEBVzZ9SE1EireZ+ENGlUjKf nI4uJ1SEEWrU0qvSmttePZZr0ffv1oVUeBcCHEOtI0oy6Mz9sMj4uEgkE5ixhKkItsC7Ebz3ssG o6JjPUs29vsmYOBHA2taOsswLKU924EXfw/QadLhFCHa+pFITxKYj2eTK899mShqfNunqF8GYyv W0TkAZLFRRcKnTwbrSTsCzpqzRaNDprv8tvbSX74iFI62a7WHiLg/Vj+V2LYsWQdHu/bbDfrFzs Bgb0IdWmYdRK28yQs9D5W3WpMIrAbqAQO5JHsGsKgeDovzaiiDpJYnhF+Ko1KyiQVYRe0CpJ8EZ Z2drb8mSKhfEd+W6NirmXEYLkbACwoDeaW3Vya0XG61wvEwtY6OxMk/pR6FTxPhbOhUfCJQAb4G EOx/PgNbFCu/GhsxYIwmyZpG0ZmDUsH/7CLINow74TjDXV43F7xjeUmaTOHEi828iBdO1Yi280u 1O2mVrr2mtqx/Fg== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Replace the call to `xa_load` with `xas_load` in `Guard::load`. The `xa_load` function takes the RCU lock internally, which we do not need, since the `Guard` already holds an exclusive lock on the `XArray`. The `xas_load` function operates on `xa_state` and assumes the required locks are already held. This change also removes the `#[expect(dead_code)]` annotation from `XArrayState` and its constructor, as they are now in use. Signed-off-by: Andreas Hindborg --- rust/kernel/xarray.rs | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index d1246ec114898..eadddafb180ec 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -215,10 +215,8 @@ fn load(&self, index: usize, f: F) -> Option where F: FnOnce(NonNull) -> U, { - // SAFETY: `self.xa.xa` is always valid by the type invariant. - let ptr =3D unsafe { bindings::xa_load(self.xa.xa.get(), index) }; - let ptr =3D NonNull::new(ptr.cast())?; - Some(f(ptr)) + let mut state =3D XArrayState::new(self, index); + Some(f(state.load()?)) } =20 /// Checks if the XArray contains an element at the specified index. @@ -327,7 +325,6 @@ pub fn store( /// # Invariants /// /// - `state` is always a valid `bindings::xa_state`. -#[expect(dead_code)] pub(crate) struct XArrayState<'a, 'b, T: ForeignOwnable> { /// Holds a reference to the lock guard to ensure the lock is not drop= ped /// while `Self` is live. @@ -336,7 +333,6 @@ pub(crate) struct XArrayState<'a, 'b, T: ForeignOwnable= > { } =20 impl<'a, 'b, T: ForeignOwnable> XArrayState<'a, 'b, T> { - #[expect(dead_code)] fn new(access: &'b Guard<'a, T>, index: usize) -> Self { let ptr =3D access.xa.xa.get(); // INVARIANT: We initialize `self.state` to a valid value below. @@ -356,6 +352,13 @@ fn new(access: &'b Guard<'a, T>, index: usize) -> Self= { }, } } + + fn load(&mut self) -> Option> { + // SAFETY: `state.state` is always valid by the type invariant of + // `XArrayState and we hold the xarray lock`. 
+ let ptr =3D unsafe { bindings::xas_load(&raw mut self.state) }; + NonNull::new(ptr.cast()) + } } =20 // SAFETY: `XArray` has no shared mutable state so it is `Send` iff `T`= is `Send`. --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E0B2E37BE78; Mon, 9 Feb 2026 14:39:47 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647988; cv=none; b=B7OnlfKHujulMtLw6mDpE51Z+HVNkT/M6iVK7A6wXheNVfRSUyf4tKeVwIkVGwoBo0iIHFvLCc7ZjaM5Pge38PhNsSQ4HfrKO2fidna0ywUOXKUWUV9309unqWidfFtoXvlQogSBeUHvOml4p1G1eifh7lH1FJ6dIC7v1dEWLEY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647988; c=relaxed/simple; bh=SUnEj4f1l9mHuLlzJWz0dNzh+/snH5Ql51wel6NI7LE=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=ZVryf4lUm8agEcCnraR/5VVtubHu+mT/vvbxY/4uY+3RZN3MVct8JWxBi7MHPKmh77sVVBrqRiU5vSjpq6rU2PW3J04TS68N/WP/ZPn4kmMR5nrUzi49CoiGTYhLKnWnpix8Z1v5NBK5YMEPHmrWNW0ZdEdJZtUeDLfxZIkz190= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=GNMvntLz; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="GNMvntLz" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 74A21C19422; Mon, 9 Feb 2026 14:39:42 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647987; bh=SUnEj4f1l9mHuLlzJWz0dNzh+/snH5Ql51wel6NI7LE=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=GNMvntLzZ2YxBuqe49g3RUNZCw6QPe0fv8HqUkyAajaigNKTo50KAkjyYvbdCbgPC Ow0bOv8NCQOrPKJUiXzlg6w+tFmXrHx9MGDCtGalADkaZk3jX5QpGXqxlfnCSpfWLq u43MgZRFVnV3M2AcH5AbrYFFU4wPIoo7VlGwR0LjITIk5J4+zlAkAmuQxsxz9bhsyO wKyvORoVhnS6q5TAy+J3W3DF1FrVPTD09nceFC0kTYUien2RPzfa1BXJ0PqNighoeG 6DjJJcGVCtqV7xiKpKEf85eE/qzvawVTXt/AP3bFdMSysLgGEYtt7iUGGkQxlAtd56 kLDDvwXNSJUQg== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:11 +0100 Subject: [PATCH v3 06/12] rust: xarray: simplify `Guard::load` Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-6-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=2076; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=SUnEj4f1l9mHuLlzJWz0dNzh+/snH5Ql51wel6NI7LE=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFaKdOChhRuevCXo/AyFuqJpjt9pI2F9DYSY GB02zlFWYqJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxWgAKCRDhuBo+eShj dzDZD/92MQDGevDXbOVb/99y5TakQLCirDQL22LaLohFi7zXhh3Sk30nKFNQWW2EI02c/GE3R1V Z6EDG5Vp5Me0WyCQecPlnATdKXOdn2r34nc0rhIO20n1Jn89B4QAUB++YtWLTwT4vESv4B6r6qU Ms+5lXL2uAdKHD2EZc9F+I89JzMRJ6CGdhn48XsdHTXufxLGo2gmtKGVE0hTfGRAW8nyhlBbt3l dgZSoEmEOZFV5EwStKxhoNh4K1DZ3xGgjbJK+BPVBNRTU+BEC3z4T5lQOOglFuegkiu2BIBwQ3O 5w/MQWZlBbYrrcQd/CGdSLmTmhOoZouxKvyJc/OK9OOuXmacPuaZZjAdPL48sVDBeEF2FimAU1l NUpA0UKfsNRTWFRlGTvl6keLMEPRDA0UHNZhwntuZI7efvLcWDFAwgikoNsZPeW69NV5Ug7a8W8 YIWjGbfYE1GwF5Ewsl56Fxjo9NaEP8X2EbDEtNcEvpLmXSfE2NOKwsr4EiwIoSBbUqUi5fyQtRg FDQJmqz7iGKIkYEh3ufL0aB/XjVrf9Mi1xhn8pbYlT2YqScSYAZTHQnLlsc3b79IMc4M+xn9qrF UK9jaydcY0Z2kUTpBMERJPQew4QWcfB9ZmFK+KcyMk5RfEE3CQ1BcrZHUhYqK0ZgClNrm64x0Vp L6BjmJfPZxCL7uQ== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Simplify the implementation by removing the closure-based API from `Guard::load` in favor of returning `Option>` directly. Signed-off-by: Andreas Hindborg --- rust/kernel/xarray.rs | 23 +++++++++-------------- 1 file changed, 9 insertions(+), 14 deletions(-) diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index eadddafb180ec..e654bf56dc97c 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -211,12 +211,8 @@ fn from(value: StoreError) -> Self { } =20 impl<'a, T: ForeignOwnable> Guard<'a, T> { - fn load(&self, index: usize, f: F) -> Option - where - F: FnOnce(NonNull) -> U, - { - let mut state =3D XArrayState::new(self, index); - Some(f(state.load()?)) + fn load(&self, index: usize) -> Option> { + XArrayState::new(self, index).load() } =20 /// Checks if the XArray contains an element at the specified index. @@ -242,18 +238,17 @@ pub fn contains_index(&self, index: usize) -> bool { =20 /// Provides a reference to the element at the given index. pub fn get(&self, index: usize) -> Option> { - self.load(index, |ptr| { - // SAFETY: `ptr` came from `T::into_foreign`. - unsafe { T::borrow(ptr.as_ptr()) } - }) + let ptr =3D self.load(index)?; + // SAFETY: `ptr` came from `T::into_foreign`. + Some(unsafe { T::borrow(ptr.as_ptr()) }) } =20 /// Provides a mutable reference to the element at the given index. pub fn get_mut(&mut self, index: usize) -> Option> { - self.load(index, |ptr| { - // SAFETY: `ptr` came from `T::into_foreign`. - unsafe { T::borrow_mut(ptr.as_ptr()) } - }) + let ptr =3D self.load(index)?; + + // SAFETY: `ptr` came from `T::into_foreign`. + Some(unsafe { T::borrow_mut(ptr.as_ptr()) }) } =20 /// Removes and returns the element at the given index. 
--=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1A464379990; Mon, 9 Feb 2026 14:39:53 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647994; cv=none; b=O/xtcZNzaSpCfT6n6yV4+S/f1iPbwMtRLE4899cumBNZsZgCPJI7tsF9tf5rKaecbrFCWqdpw4kG8VUp4eFW86sMlYqJ5GErXCRH40JhJT3IMgKPoFBZPCjQlJmEu/aOoLgq/DNXEGFhWYtW763VjlEtL/K5JIbrCdBYHvmjHeg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647994; c=relaxed/simple; bh=ziAsziTOC+GzNf139Ut4jKaT94DhutbUW+Q40shocZ0=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=BAeVmXbUg9aK7+h79E0qrXNYz/iWPvJMHIbGZIf7aWzaYYWRjEg2KRMZyyO18a622igMf3895V9ZHml/mn2k/KkBS2iOiGfbVB5L0V0iEVkmZNxYDEqU4MVlq7IUILlPkjkQTxY84hxuiMMzE/V0BvHlerxoqeonR7WMTC0ZVKM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=W0LbIhu3; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="W0LbIhu3" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 72255C19422; Mon, 9 Feb 2026 14:39:48 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647993; bh=ziAsziTOC+GzNf139Ut4jKaT94DhutbUW+Q40shocZ0=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=W0LbIhu3yXflRjlR51USLzE6TCZblrZPirDrUOTQtPBBtw8E+4tXEVs83O6SNUJ1b yp6nZLa99Zuka8FviQlmEioGfKOP9SvbsquuhAHvTaT30J2/EpWzwX3vvFtiPwTBRT 0svmi+1DGm/1KkGXe2Nt8+K0GQemFVPx4VZVdsE4uTOTrFH0A1LHLT04fOmHqXUYMb SdeLtezbZXJFSm/vhX4WVgZajI8YGyffgd3+mmlTfM0U8zDH2lk8bXrxjYvhJISkKM Y/vETLiIsKg9eEjO78ENWxTiqprHdyGoexU7OIT0HH1yV9z2NIbtb5hEy9tK1LVxmX whdxZYQjQJgjQ== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:12 +0100 Subject: [PATCH v3 07/12] rust: xarray: add `find_next` and `find_next_mut` Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-7-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=4023; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=ziAsziTOC+GzNf139Ut4jKaT94DhutbUW+Q40shocZ0=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFb+WmuAcVCQgvEERtgQ/qOeo8uSZnzezgaA +8xw4pzCZmJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxWwAKCRDhuBo+eShj d4DhD/9wCbNkHivRJZdlO3SafsRrM1Jz/9FULnqXMomuVFOyx4R8DA04Vr1ryCJ05XWJyhekl4Z G5vVUO8k7DN6sWkUJEoaVgwaA+cAdqR6vETzYtNa8CNUYFrBH7F7IuNhOehoVVMRvTViVrfBiHn Omr9wEVK9MuXNHJWTwnGsLCVyGGgpqzlbwlL0r9HilBKQ2sJM4Ms/9G2LmyjDh8PExLTNxDxl/f n6TZWTg20i8MW5Ah6w0oDUrXSzduI0NuQTki4WxhKTtaRzvofUZuAztzg0lyT49muXqhhqMLBas JGmniyaN9E+oneAYw1iXPAZGQ6BSioVn5m6JPVB/FdUc3w7+4Yh6WG//E9j6bJHQn+Nwh0Csc/W OOVLjAamz/vGabbs7W8G+tWP3rJhq01jdtXaAONXcE/S2CT5B3exP5+ymlnemY9MqAVktGE2mvg 5OTbekmZVE37vqI9e9/9CjVsFw6dyC0espFckCrEbgE0zh6CpPt5IEaSGPF4sBhm9WyLmQsZCpw GdbWMoq0K9av8QM75C7DvHme4QBQJWpvAlBhaRNX4F+G2MzpBZZTMGes/bcWRSW+igfHxyum7j5 /uriDRU6pIaS6gsbLqj+fjWHKwrsxdFBlP9Ix+nyDnMigT+SRQZm2PlAuEwSok+Ws/jmFTFBW/m GYLsZBX4KKU+Zrg== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add methods to find the next element in an XArray starting from a given index. The methods return a tuple containing the index where the element was found and a reference to the element. The implementation uses the XArray state API via `xas_find` to avoid taking the rcu lock as an exclusive lock is already held by `Guard`. Signed-off-by: Andreas Hindborg --- rust/kernel/xarray.rs | 68 +++++++++++++++++++++++++++++++++++++++++++++++= ++++ 1 file changed, 68 insertions(+) diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index e654bf56dc97c..656ec897a0c41 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -251,6 +251,67 @@ pub fn get_mut(&mut self, index: usize) -> Option> { Some(unsafe { T::borrow_mut(ptr.as_ptr()) }) } =20 + fn load_next(&self, index: usize) -> Option<(usize, NonNull)> { + XArrayState::new(self, index).load_next() + } + + /// Finds the next element starting from the given index. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(10, KBox::new(10u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// guard.store(20, KBox::new(20u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Some((found_index, value)) =3D guard.find_next(11) { + /// assert_eq!(found_index, 20); + /// assert_eq!(*value, 20); + /// } + /// + /// if let Some((found_index, value)) =3D guard.find_next(5) { + /// assert_eq!(found_index, 10); + /// assert_eq!(*value, 10); + /// } + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn find_next(&self, index: usize) -> Option<(usize, T::Borrowed<'_= >)> { + self.load_next(index) + // SAFETY: `ptr` came from `T::into_foreign`. + .map(|(index, ptr)| (index, unsafe { T::borrow(ptr.as_ptr()) }= )) + } + + /// Finds the next element starting from the given index, returning a = mutable reference. 
+ /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(10, KBox::new(10u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// guard.store(20, KBox::new(20u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Some((found_index, mut_value)) =3D guard.find_next_mut(5) { + /// assert_eq!(found_index, 10); + /// *mut_value =3D 0x99; + /// } + /// + /// assert_eq!(guard.get(10).copied(), Some(0x99)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn find_next_mut(&mut self, index: usize) -> Option<(usize, T::Bor= rowedMut<'_>)> { + self.load_next(index) + // SAFETY: `ptr` came from `T::into_foreign`. + .map(move |(index, ptr)| (index, unsafe { T::borrow_mut(ptr.as= _ptr()) })) + } + /// Removes and returns the element at the given index. pub fn remove(&mut self, index: usize) -> Option { // SAFETY: @@ -354,6 +415,13 @@ fn load(&mut self) -> Option> { let ptr =3D unsafe { bindings::xas_load(&raw mut self.state) }; NonNull::new(ptr.cast()) } + + fn load_next(&mut self) -> Option<(usize, NonNull)> { + // SAFETY: `self.state` is always valid by the type invariant of + // `XArrayState` and the we hold the xarray lock. + let ptr =3D unsafe { bindings::xas_find(&raw mut self.state, usize= ::MAX) }; + NonNull::new(ptr).map(|ptr| (self.state.xa_index, ptr)) + } } =20 // SAFETY: `XArray` has no shared mutable state so it is `Send` iff `T`= is `Send`. --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E741F37AA71; Mon, 9 Feb 2026 14:38:40 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647921; cv=none; b=DYudFTjajOhuajqgilbvMpx5h9HGFNwp9lXHiziJ/l7+R3593HvQ4fvpoWcKPpzrnbhuwTZDD6VBkbrDY6NuPaFagWgk4JOo9UYPQafCZntwS5J8IAxeNZPusSIA9FC4mtItJB2ctjEoOWB7161ocACVs2lOC+XFRfzqMrPW2Kc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647921; c=relaxed/simple; bh=tGpkTbkQCs2yz/lXZr74M2PLP6TLYGnHySlF4MFb55o=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=NwW8kPUKPEXPNcA2If5PmztgbVl32qrwQkPxOcLhn73MOE6IE+FqZTAW3s/U3mIEhEpi+0/CQfvV9fSG8LxKJsqhaa5Po+nAqoiqxxutamh5pqw2x3lACuBgfDN/vtclmRu/+iDNwNMwyzP8J8CgG6Ocd7zJjLWDbJlBTDq7QT8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=tllYsx4L; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="tllYsx4L" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 7B77DC116C6; Mon, 9 Feb 2026 14:38:35 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647920; bh=tGpkTbkQCs2yz/lXZr74M2PLP6TLYGnHySlF4MFb55o=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=tllYsx4L+h2OpJAeRmCRGiw+Rqo9DtkPIvy4dh4tbf/TitOu0/y3XyRxyrNOKy7/S 1skumPTPR37FhhnRawV/FKbE/GydHvbq1ZcKD79S4sV8kCbUobRQVyLaZw7u8hpmH/ 3GTK5YPpq+7vc24Cea0df/feEBLEH3WrxL9TRPLhPaZp81ZaeOgvF6WL/2VU/TO2to 
9LAQnBPM8rmKVEYlCvDAjyFW1Dwv5HfVPaFC6/j7uaGKSNgr+2LuSJDdS/PR5scyRb 6IWAaS9+BniIZbyoNbBN0NqmIZA8odCYz5lxFtzZakAxXhzRTCIGtaXqWlihoPZUuZ jBoOryj45quWg== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:13 +0100 Subject: [PATCH v3 08/12] rust: xarray: add entry API Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-8-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=21192; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=tGpkTbkQCs2yz/lXZr74M2PLP6TLYGnHySlF4MFb55o=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFcuCZPpcqewn9lY4/88YakUk04i8FdTR0aX STwEMm7OyiJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxXAAKCRDhuBo+eShj dzw6D/90Pktt9jbEw/+eRAhBBEh56lj3lam6VH3pakIyIZCpZczI5NXyGbLGg2fd1y60d3to8Z7 8fgC3DgpnYilVxYF/FU41/xiv/SJMcmEN29DSFlXz6pP0JHER0BzHfYuJWSu2oO8oHfsa9QYWZq eR8ZIqjnYbcjbqt8EmU9fqAvXFYYR4C+246xa32dWt3IJb8iS39GSe60bHIv1CVeZJ3vF8O/gxe vbuIFYU7+g8UUH8tKJ5oFPv9iHRbB8GOA60/9q99mW6EBjILxQOJswC3pRtN4PEPQW+0C6Od4Rm Hbaz1kOeyh+plrR6hoF9LgN0XFbJePd+cOIbbH0p9Wdx761QlLq+v2/Swq8QMlXkbbcTQy3sNbL ddmPePo5VCasM5w37IhOSXN4Zv+WGcoupVmBhUENSG6ZftxM5NVgEzdo26IcnR42EJ6KZw21R0p iUQ5KN1YtBPafFIsNT/ayUPX/gi9yCnsSjV6XMSqmZvhD+N29lCAXiVmthvdIFw4Ma6o/pzDwk6 tyWWJWJ6bBK6NRDYuI3OykGWYAlLg20+1phUpY+Ir23PbdLDytNGTz+hKNauC5ROCOJtLNgtRdJ KpWZBvlZSC+5D1eQ5OW8jiMrcLZLMDRcbmijl/Cn8Jj/YXP1JYg4ETGK7T+DnzAcV7Xc0lgfYGb oaXxQVjJ+L54qJw== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add an Entry API for XArray that provides ergonomic access to array slots that may be vacant or occupied. The API follows the pattern of Rust's standard library HashMap entry API, allowing efficient conditional insertion and modification of entries. The implementation uses the XArray state API (`xas_*` functions) for efficient operations without requiring multiple lookups. Helper functions are added to rust/helpers/xarray.c to wrap C macros that are not directly accessible from Rust. Also update MAINTAINERS to cover the new rust files. 
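For example, conditional insertion and in-place replacement with the new API look like this (a condensed sketch of the doctests added below; the `KBox<u32>` element type, `AllocKind::Alloc` and the index 42 are simply the values those doctests use):

    let xa = KBox::pin_init(XArray::<KBox<u32>>::new(AllocKind::Alloc), GFP_KERNEL)?;
    let mut guard = xa.lock();

    // Insert only if the slot is currently vacant.
    match guard.entry(42) {
        Entry::Vacant(entry) => {
            entry.insert(KBox::new(0x1337u32, GFP_KERNEL)?)?;
        }
        Entry::Occupied(_) => {}
    }

    // Reuse the located slot: swap in a new value without a second lookup.
    if let Entry::Occupied(mut entry) = guard.entry(42) {
        let old = entry.insert(KBox::new(0x9999u32, GFP_KERNEL)?);
        assert_eq!(*old, 0x1337);
    }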
Signed-off-by: Andreas Hindborg --- MAINTAINERS | 1 + rust/helpers/xarray.c | 17 ++ rust/kernel/xarray.rs | 123 +++++++++++++++ rust/kernel/xarray/entry.rs | 367 ++++++++++++++++++++++++++++++++++++++++= ++++ 4 files changed, 508 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 0efa8cc6775b7..8202515c6065b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -28361,6 +28361,7 @@ B: https://github.com/Rust-for-Linux/linux/issues C: https://rust-for-linux.zulipchat.com T: git https://github.com/Rust-for-Linux/linux.git xarray-next F: rust/kernel/xarray.rs +F: rust/kernel/xarray/ =20 XBOX DVD IR REMOTE M: Benjamin Valentin diff --git a/rust/helpers/xarray.c b/rust/helpers/xarray.c index 60b299f11451d..425a6cc494734 100644 --- a/rust/helpers/xarray.c +++ b/rust/helpers/xarray.c @@ -26,3 +26,20 @@ void rust_helper_xa_unlock(struct xarray *xa) { return xa_unlock(xa); } + +void *rust_helper_xas_result(struct xa_state *xas, void *curr) +{ + if (xa_err(xas->xa_node)) + curr =3D xas->xa_node; + return curr; +} + +void *rust_helper_xa_zero_to_null(void *entry) +{ + return xa_is_zero(entry) ? NULL : entry; +} + +int rust_helper_xas_error(const struct xa_state *xas) +{ + return xas_error(xas); +} diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index 656ec897a0c41..8c10e8fd76f15 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -13,11 +13,17 @@ NonNull, // }, }; +pub use entry::{ + Entry, + OccupiedEntry, + VacantEntry, // +}; use kernel::{ alloc, bindings, build_assert, // error::{ + to_result, Error, Result, // }, @@ -251,6 +257,35 @@ pub fn get_mut(&mut self, index: usize) -> Option> { Some(unsafe { T::borrow_mut(ptr.as_ptr()) }) } =20 + /// Gets an entry for the specified index, which can be vacant or occu= pied. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// assert_eq!(guard.contains_index(42), false); + /// + /// match guard.entry(42) { + /// Entry::Vacant(entry) =3D> { + /// entry.insert(KBox::new(0x1337u32, GFP_KERNEL)?)?; + /// } + /// Entry::Occupied(_) =3D> unreachable!("We did not insert an ent= ry yet"), + /// } + /// + /// assert_eq!(guard.get(42), Some(&0x1337)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn entry<'b>(&'b mut self, index: usize) -> Entry<'a, 'b, T> { + match self.load(index) { + None =3D> Entry::Vacant(VacantEntry::new(self, index)), + Some(ptr) =3D> Entry::Occupied(OccupiedEntry::new(self, index,= ptr)), + } + } + fn load_next(&self, index: usize) -> Option<(usize, NonNull)> { XArrayState::new(self, index).load_next() } @@ -312,6 +347,72 @@ pub fn find_next_mut(&mut self, index: usize) -> Optio= n<(usize, T::BorrowedMut<' .map(move |(index, ptr)| (index, unsafe { T::borrow_mut(ptr.as= _ptr()) })) } =20 + /// Finds the next occupied entry starting from the given index. 
+ /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(10, KBox::new(10u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// guard.store(20, KBox::new(20u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Some(entry) =3D guard.find_next_entry(5) { + /// assert_eq!(entry.index(), 10); + /// let value =3D entry.remove(); + /// assert_eq!(*value, 10); + /// } + /// + /// assert_eq!(guard.get(10), None); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn find_next_entry<'b>(&'b mut self, index: usize) -> Option> { + let mut state =3D XArrayState::new(self, index); + let (_, ptr) =3D state.load_next()?; + Some(OccupiedEntry { state, ptr }) + } + + /// Finds the next occupied entry starting at the given index, wrappin= g around. + /// + /// Searches for an entry starting at `index` up to the maximum index.= If no entry + /// is found, wraps around and searches from index 0 up to `index`. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(100, KBox::new(42u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// let entry =3D guard.find_next_entry_circular(101); + /// assert_eq!(entry.map(|e| e.index()), Some(100)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn find_next_entry_circular<'b>( + &'b mut self, + index: usize, + ) -> Option> { + let mut state =3D XArrayState::new(self, index); + + // SAFETY: `state.state` is properly initialized by XArrayState::n= ew and the caller holds + // the lock. + let ptr =3D NonNull::new(unsafe { bindings::xas_find(&mut state.st= ate, usize::MAX) }) + .or_else(|| { + state.state.xa_node =3D bindings::XAS_RESTART as *mut bind= ings::xa_node; + state.state.xa_index =3D 0; + // SAFETY: `state.state` is properly initialized and by ty= pe invariant, we hold the + // xarray lock. + NonNull::new(unsafe { bindings::xas_find(&mut state.state,= index) }) + })?; + + Some(OccupiedEntry { state, ptr }) + } + /// Removes and returns the element at the given index. pub fn remove(&mut self, index: usize) -> Option { // SAFETY: @@ -422,8 +523,30 @@ fn load_next(&mut self) -> Option<(usize, NonNull)> { let ptr =3D unsafe { bindings::xas_find(&raw mut self.state, usize= ::MAX) }; NonNull::new(ptr).map(|ptr| (self.state.xa_index, ptr)) } + + fn status(&self) -> Result { + // SAFETY: `self.state` is properly initialized and valid. + to_result(unsafe { bindings::xas_error(&self.state) }) + } + + fn insert(&mut self, value: T) -> Result<*mut c_void, StoreError> { + let new =3D T::into_foreign(value).cast(); + + // SAFETY: `self.state.state` is properly initialized and `new` ca= me from `T::into_foreign`. + // We hold the xarray lock. + unsafe { bindings::xas_store(&mut self.state, new) }; + + self.status().map(|()| new).map_err(|error| { + // SAFETY: `new` came from `T::into_foreign` and `xas_store` d= oes not take ownership of + // the value on error. + let value =3D unsafe { T::from_foreign(new) }; + StoreError { value, error } + }) + } } =20 +mod entry; + // SAFETY: `XArray` has no shared mutable state so it is `Send` iff `T`= is `Send`. 
unsafe impl Send for XArray {} =20 diff --git a/rust/kernel/xarray/entry.rs b/rust/kernel/xarray/entry.rs new file mode 100644 index 0000000000000..1b1c21bed7022 --- /dev/null +++ b/rust/kernel/xarray/entry.rs @@ -0,0 +1,367 @@ +// SPDX-License-Identifier: GPL-2.0 + +use super::{ + Guard, + StoreError, + XArrayState, // +}; +use core::ptr::NonNull; +use kernel::{ + prelude::*, + types::ForeignOwnable, // +}; + +/// Represents either a vacant or occupied entry in an XArray. +pub enum Entry<'a, 'b, T: ForeignOwnable> { + /// A vacant entry that can have a value inserted. + Vacant(VacantEntry<'a, 'b, T>), + /// An occupied entry containing a value. + Occupied(OccupiedEntry<'a, 'b, T>), +} + +impl Entry<'_, '_, T> { + /// Returns true if this entry is occupied. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// + /// let entry =3D guard.entry(42); + /// assert_eq!(entry.is_occupied(), false); + /// + /// guard.store(42, KBox::new(0x1337u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// let entry =3D guard.entry(42); + /// assert_eq!(entry.is_occupied(), true); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn is_occupied(&self) -> bool { + matches!(self, Entry::Occupied(_)) + } +} + +/// A view into a vacant entry in an XArray. +pub struct VacantEntry<'a, 'b, T: ForeignOwnable> { + state: XArrayState<'a, 'b, T>, +} + +impl<'a, 'b, T> VacantEntry<'a, 'b, T> +where + T: ForeignOwnable, +{ + pub(crate) fn new(guard: &'b mut Guard<'a, T>, index: usize) -> Self { + Self { + state: XArrayState::new(guard, index), + } + } + + /// Inserts a value into this vacant entry. + /// + /// Returns a reference to the newly inserted value. + /// + /// - This method will fail if the nodes on the path to the index + /// represented by this entry are not present in the XArray. + /// - This method will not drop the XArray lock. + /// + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// assert_eq!(guard.get(42), None); + /// + /// if let Entry::Vacant(entry) =3D guard.entry(42) { + /// let value =3D KBox::new(0x1337u32, GFP_KERNEL)?; + /// let borrowed =3D entry.insert(value)?; + /// assert_eq!(*borrowed, 0x1337); + /// } + /// + /// assert_eq!(guard.get(42).copied(), Some(0x1337)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn insert(mut self, value: T) -> Result, StoreE= rror> { + let new =3D self.state.insert(value)?; + + // SAFETY: `new` came from `T::into_foreign`. The entry has exclus= ive + // ownership of `new` as it holds a mutable reference to `Guard`. + Ok(unsafe { T::borrow_mut(new) }) + } + + /// Inserts a value and returns an occupied entry representing the new= ly inserted value. + /// + /// - This method will fail if the nodes on the path to the index + /// represented by this entry are not present in the XArray. + /// - This method will not drop the XArray lock. 
+ /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// assert_eq!(guard.get(42), None); + /// + /// if let Entry::Vacant(entry) =3D guard.entry(42) { + /// let value =3D KBox::new(0x1337u32, GFP_KERNEL)?; + /// let occupied =3D entry.insert_entry(value)?; + /// assert_eq!(occupied.index(), 42); + /// } + /// + /// assert_eq!(guard.get(42).copied(), Some(0x1337)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn insert_entry(mut self, value: T) -> Result, StoreError> { + let new =3D self.state.insert(value)?; + + Ok(OccupiedEntry::<'a, 'b, T> { + state: self.state, + // SAFETY: `new` came from `T::into_foreign` and is guaranteed= non-null. + ptr: unsafe { core::ptr::NonNull::new_unchecked(new) }, + }) + } + + /// Returns the index of this vacant entry. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// assert_eq!(guard.get(42), None); + /// + /// if let Entry::Vacant(entry) =3D guard.entry(42) { + /// assert_eq!(entry.index(), 42); + /// } + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn index(&self) -> usize { + self.state.state.xa_index + } +} + +/// A view into an occupied entry in an XArray. +pub struct OccupiedEntry<'a, 'b, T: ForeignOwnable> { + pub(crate) state: XArrayState<'a, 'b, T>, + pub(crate) ptr: NonNull, +} + +impl<'a, 'b, T> OccupiedEntry<'a, 'b, T> +where + T: ForeignOwnable, +{ + pub(crate) fn new(guard: &'b mut Guard<'a, T>, index: usize, ptr: NonN= ull) -> Self { + Self { + state: XArrayState::new(guard, index), + ptr, + } + } + + /// Removes the value from this occupied entry and returns it, consumi= ng the entry. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(42, KBox::new(0x1337u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// assert_eq!(guard.get(42).copied(), Some(0x1337)); + /// + /// if let Entry::Occupied(entry) =3D guard.entry(42) { + /// let value =3D entry.remove(); + /// assert_eq!(*value, 0x1337); + /// } + /// + /// assert_eq!(guard.get(42), None); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn remove(mut self) -> T { + // SAFETY: `self.state.state` is properly initialized and valid fo= r XAS operations. + let ptr =3D unsafe { + bindings::xas_result( + &mut self.state.state, + bindings::xa_zero_to_null(bindings::xas_store( + &mut self.state.state, + core::ptr::null_mut(), + )), + ) + }; + + // SAFETY: `ptr` is a valid return value from xas_result. + let errno =3D unsafe { bindings::xa_err(ptr) }; + + // NOTE: Storing NULL to an occupied slot never fails. This is by = design + // of the xarray data structure. If a slot is occupied, a store is= a + // simple pointer swap. + debug_assert!(errno =3D=3D 0); + + // SAFETY: + // - `ptr` came from `T::into_foreign`. + // - As this method takes self by value, the lifetimes of any [`T:= :Borrowed`] and + // [`T::BorrowedMut`] we have created must have ended. + unsafe { T::from_foreign(ptr.cast()) } + } + + /// Returns the index of this occupied entry. 
+ /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(42, KBox::new(0x1337u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Entry::Occupied(entry) =3D guard.entry(42) { + /// assert_eq!(entry.index(), 42); + /// } + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn index(&self) -> usize { + self.state.state.xa_index + } + + /// Replaces the value in this occupied entry and returns the old valu= e. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(42, KBox::new(0x1337u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Entry::Occupied(mut entry) =3D guard.entry(42) { + /// let new_value =3D KBox::new(0x9999u32, GFP_KERNEL)?; + /// let old_value =3D entry.insert(new_value); + /// assert_eq!(*old_value, 0x1337); + /// } + /// + /// assert_eq!(guard.get(42).copied(), Some(0x9999)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn insert(&mut self, value: T) -> T { + let new =3D T::into_foreign(value).cast(); + // SAFETY: `new` came from `T::into_foreign` and is guaranteed non= -null. + self.ptr =3D unsafe { NonNull::new_unchecked(new) }; + + // SAFETY: `self.state.state` is properly initialized and valid fo= r XAS operations. + let old =3D unsafe { + bindings::xas_result( + &mut self.state.state, + bindings::xa_zero_to_null(bindings::xas_store(&mut self.st= ate.state, new)), + ) + }; + + // SAFETY: `old` is a valid return value from xas_result. + let errno =3D unsafe { bindings::xa_err(old) }; + + // NOTE: Storing NULL to an occupied slot never fails. This is by = design + // of the xarray data structure. If a slot is occupied, a store is= a + // simple pointer swap. + debug_assert!(errno =3D=3D 0); + + // SAFETY: + // - `ptr` came from `T::into_foreign`. + // - As this method takes self by value, the lifetimes of any [`T:= :Borrowed`] and + // [`T::BorrowedMut`] we have created must have ended. + unsafe { T::from_foreign(old) } + } + + /// Converts this occupied entry into a mutable reference to the value= in the slot represented + /// by the entry. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(42, KBox::new(0x1337u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Entry::Occupied(entry) =3D guard.entry(42) { + /// let value_ref =3D entry.into_mut(); + /// *value_ref =3D 0x9999; + /// } + /// + /// assert_eq!(guard.get(42).copied(), Some(0x9999)); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn into_mut(self) -> T::BorrowedMut<'b> { + // SAFETY: `ptr` came from `T::into_foreign`. + unsafe { T::borrow_mut(self.ptr.as_ptr()) } + } + + /// Swaps the value in this entry with the provided value. + /// + /// Returns the old value that was in the entry. 
+ /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray, Entry}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// guard.store(42, KBox::new(100u32, GFP_KERNEL)?, GFP_KERNEL)?; + /// + /// if let Entry::Occupied(mut entry) =3D guard.entry(42) { + /// let mut other =3D 200u32; + /// entry.swap(&mut other); + /// assert_eq!(other, 100); + /// assert_eq!(*entry, 200); + /// } + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn swap(&mut self, other: &mut U) + where + T: for<'c> ForeignOwnable =3D &'c U, BorrowedMut<'c> = =3D &'c mut U>, + { + use core::ops::DerefMut; + core::mem::swap(self.deref_mut(), other); + } +} + +impl core::ops::Deref for OccupiedEntry<'_, '_, T> +where + T: for<'a> ForeignOwnable =3D &'a U, BorrowedMut<'a> =3D = &'a mut U>, +{ + type Target =3D U; + + fn deref(&self) -> &Self::Target { + // SAFETY: `ptr` came from `T::into_foreign`. + unsafe { T::borrow(self.ptr.as_ptr()) } + } +} + +impl core::ops::DerefMut for OccupiedEntry<'_, '_, T> +where + T: for<'a> ForeignOwnable =3D &'a U, BorrowedMut<'a> =3D = &'a mut U>, +{ + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: `ptr` came from `T::into_foreign`. + unsafe { T::borrow_mut(self.ptr.as_ptr()) } + } +} --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 032DA2222B2; Mon, 9 Feb 2026 14:39:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647957; cv=none; b=sm4uEw4JzL0FCyEvimiECPFIYQ9HsXG4w0skYP+Y4TCJKQG4Uue+bQqXNeuTyBvsQP6I+zTHm8hdY4LQWrcFhS4qpx1j9UKdExQHtLFc0HglB3w6GYn3cEv9XksLfJB9AfQA0/iT7aYBFD/vx6iAGMQghP80Z0g1VmF39/w635c= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647957; c=relaxed/simple; bh=zWHl+04q+Q7aOEAbamA9+ysRpObfDy8AGvyFnLTXyU4=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=VVanLoOZ9kXSYHSQ26B1cQyLUdivgItHm7II60xxctM3A8dsJSbQ+EuZFqTBaEkx6/Pf86wpai9Fe3sg6vfYdJLgdXw7qDSjpuGCwyFJ9Xc8jBvWQF7Sy9CPQ/I3xNap+fdeQZxcgPioYEgp/0b/cTzQz/KinQPJQhV4a6ARHvw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=iSvHcaO6; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="iSvHcaO6" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 3A061C16AAE; Mon, 9 Feb 2026 14:39:11 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647956; bh=zWHl+04q+Q7aOEAbamA9+ysRpObfDy8AGvyFnLTXyU4=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=iSvHcaO6nOFEO+/hH5VUA/iXZEfVSAlND1h5WXVX4EOCpphGJONC32zFGes6UaH2M DEyR45XMS7NENe8PGHYLjvT7xP3oYY0qlhaB/o7KyBy6DNSu5zwZEPWPScg9to+MCG tgTPhfoZQqVuq5OqfZr0w1smuhrdfkJcVi0u8lJiLMqxlUxVzm6O//O7elBwjv42Im flGrcsIdjWgjw6h03IsmcVsXFfBL0ywAccBAwV8jxXHbE5guXJTkL/W4L+Gzyy64Zi VWEEIwrHDTRQ+6KuepBrK1C75fIf6C91mIXq6lxp7PYkr7kWYkIdUrG02qvKGHux7h s4ZjDGb7CjQ4A== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:14 +0100 Subject: [PATCH v3 09/12] rust: mm: add 
abstractions for allocating from a `sheaf` Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-9-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg , "Matthew Wilcox (Oracle)" X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=16737; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=zWHl+04q+Q7aOEAbamA9+ysRpObfDy8AGvyFnLTXyU4=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFcVepVTEYkwpOzWlqjoF2ly/X0G2TUoK1rr tAoXoLfavOJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxXAAKCRDhuBo+eShj d49MD/9e1TKVqfiU351/eXktlb+bV8ydHFdGIZBDapOfelWzGSl+zZ5KcPEyckk7AjiBagrZi50 1LZVJxgL7DL0WAHrl0xkGS//I+l5AEca10X/Q0gh1JF4t6cqoAQ2xeXa8t00zz4EEwzoPDkVr5G XVNKCfff9+fCkkmdIjEwbS4J1A04L4tkBgJ6nuhWgi0lCG5XuD8MAM71Wdt5J5XM6fU1PZ9frJG l5P+bs3XfhugoktF3lf3pBwTZ0m/xXhdhZgkPbSVJHkU3YD74UrwsYpncxEe9ooTFVIFHww6Sl3 U27ZbCY7e+CEVO1LT4QJQZ3Qn8Bp8/8tVy971mCCu5UtJi7zkEU1m6ajuJ/PbuverlUmi70gisS 16GMKWGr0O34TTKFpaYijecjz0U6pTSEYLF6uvPMOxuJZFG7KfsruK1C4bWKtN3p4QtHTelMM0B Z1KiLghmVacEjDsHqxktPVzJ86ej0hZHdUu++vVh+IWhXBlvUPFA13gksa1vt+y9PH1RPAPdLbx FU1VtfiavI2slGfX6PCd5l6IyEx5dWOCNY5KDqb6GVRW5M1nmyKzvC1b75pmHRNo38DDrdNBOXO b/pATT0cXz2SLykFRqj+ATSmbzxYjdYRQwKuM4j30YsLg3eShuy1oxqxndfVasg92pEn2Aa5E99 uqR0iAtpexRIJMw== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add Rust APIs for allocating objects from a `sheaf`. Introduce a reduced abstraction `KMemCacheInit` for `struct kmem_cache` to support management of the `Sheaf`s. Initialize objects using in-place initialization when objects are allocated from a `Sheaf`. This is different from C which tends to do some initialization when the cache is filled. This approach is chosen because there is no destructor/drop capability in `struct kmem_cache` that can be invoked when the cache is dropped. Cc: Vlastimil Babka Cc: "Liam R. Howlett" Cc: "Matthew Wilcox (Oracle)" Cc: Lorenzo Stoakes Cc: linux-mm@kvack.org Signed-off-by: Andreas Hindborg --- rust/kernel/mm.rs | 1 + rust/kernel/mm/sheaf.rs | 407 ++++++++++++++++++++++++++++++++++++++++++++= ++++ 2 files changed, 408 insertions(+) diff --git a/rust/kernel/mm.rs b/rust/kernel/mm.rs index 4764d7b68f2a7..1aa44424b0d53 100644 --- a/rust/kernel/mm.rs +++ b/rust/kernel/mm.rs @@ -18,6 +18,7 @@ }; use core::{ops::Deref, ptr::NonNull}; =20 +pub mod sheaf; pub mod virt; use virt::VmaRef; =20 diff --git a/rust/kernel/mm/sheaf.rs b/rust/kernel/mm/sheaf.rs new file mode 100644 index 0000000000000..b8fd321335ace --- /dev/null +++ b/rust/kernel/mm/sheaf.rs @@ -0,0 +1,407 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Slub allocator sheaf abstraction. +//! +//! Sheaves are percpu array-based caching layers for the slub allocator. +//! They provide a mechanism for pre-allocating objects that can later +//! 
be retrieved without risking allocation failure, making them useful in +//! contexts where memory allocation must be guaranteed to succeed. +//! +//! The term "sheaf" is the english word for a bundle of straw. In this co= ntext +//! it means a bundle of pre-allocated objects. A per-NUMA-node cache of s= heaves +//! is called a "barn". Because you store your sheafs in barns. +//! +//! # Use cases +//! +//! Sheaves are particularly useful when: +//! +//! - Allocations must be guaranteed to succeed in a restricted context (e= .g., +//! while holding locks or in atomic context). +//! - Multiple allocations need to be performed as a batch operation. +//! - Fast-path allocation performance is critical, as sheaf allocations a= void +//! atomic operations by using local locks with preemption disabled. +//! +//! # Architecture +//! +//! The sheaf system consists of three main components: +//! +//! - [`KMemCache`]: A slab cache configured with sheaf support. +//! - [`Sheaf`]: A pre-filled container of objects from a specific cache. +//! - [`SBox`]: An owned allocation from a sheaf, similar to a `Box`. +//! +//! # Example +//! +//! ``` +//! use kernel::c_str; +//! use kernel::mm::sheaf::{KMemCache, KMemCacheInit, Sheaf, SBox}; +//! use kernel::prelude::*; +//! +//! struct MyObject { +//! value: u32, +//! } +//! +//! impl KMemCacheInit for MyObject { +//! fn init() -> impl Init { +//! init!(MyObject { value: 0 }) +//! } +//! } +//! +//! // Create a cache with sheaf capacity of 16 objects. +//! let cache =3D KMemCache::::new(c_str!("my_cache"), 16)?; +//! +//! // Pre-fill a sheaf with 8 objects. +//! let mut sheaf =3D cache.as_arc_borrow().sheaf(8, GFP_KERNEL)?; +//! +//! // Allocations from the sheaf are guaranteed to succeed until empty. +//! let obj =3D sheaf.alloc().unwrap(); +//! +//! // Return the sheaf when done, attempting to refill it. +//! sheaf.return_refill(GFP_KERNEL); +//! # Ok::<(), Error>(()) +//! ``` +//! +//! # Constraints +//! +//! - Sheaves are slower when `CONFIG_SLUB_TINY` or `CONFIG_SLUB_DEBUG` is +//! enabled due to cpu sheaves being disabled. All prefilled sheaves bec= ome +//! "oversize" and go through a slower allocation path. +//! - The sheaf capacity is fixed at cache creation time. + +use core::{ + convert::Infallible, + marker::PhantomData, + ops::{Deref, DerefMut}, + ptr::NonNull, +}; + +use kernel::prelude::*; + +use crate::sync::{Arc, ArcBorrow}; + +/// A slab cache with sheaf support. +/// +/// This type wraps a kernel `kmem_cache` configured with a sheaf capacity, +/// enabling pre-allocation of objects via [`Sheaf`]. +/// +/// For now, this type only exists for sheaf management. +/// +/// # Type parameter +/// +/// - `T`: The type of objects managed by this cache. Must implement +/// [`KMemCacheInit`] to provide initialization logic for new allocation= s. +/// +/// # Invariants +/// +/// - `cache` is a valid pointer to a `kmem_cache` created with +/// `__kmem_cache_create_args`. +/// - The cache is valid for the lifetime of this struct. +pub struct KMemCache> { + cache: NonNull, + _p: PhantomData, +} + +impl> KMemCache { + /// Creates a new slab cache with sheaf support. + /// + /// Creates a kernel slab cache for objects of type `T` with the speci= fied + /// sheaf capacity. The cache uses the provided `name` for identificat= ion + /// in `/sys/kernel/slab/` and debugging output. + /// + /// # Arguments + /// + /// - `name`: A string identifying the cache. This name appears in sys= fs and + /// debugging output. 
+ /// - `sheaf_capacity`: The maximum number of objects a sheaf from this + /// cache can hold. A capacity of zero disables sheaf support. + /// + /// # Errors + /// + /// Returns an error if: + /// + /// - The cache could not be created due to memory pressure. + /// - The size of `T` cannot be represented as a `c_uint`. + pub fn new(name: &CStr, sheaf_capacity: u32) -> Result> + where + T: KMemCacheInit, + { + let flags =3D 0; + let mut args: bindings::kmem_cache_args =3D pin_init::zeroed(); + args.sheaf_capacity =3D sheaf_capacity; + + // NOTE: We are not initializing at object allocation time, because + // there is no matching teardown function on the C side machinery. + args.ctor =3D None; + + // SAFETY: `name` is a valid C string, `args` is properly initiali= zed, + // and the size of `T` has been validated to fit in a `c_uint`. + let ptr =3D unsafe { + bindings::__kmem_cache_create_args( + name.as_ptr().cast::(), + core::mem::size_of::().try_into()?, + &mut args, + flags, + ) + }; + + // INVARIANT: `ptr` was returned by `__kmem_cache_create_args` and= is + // non-null (checked below). The cache is valid until + // `kmem_cache_destroy` is called in `Drop`. + Ok(Arc::new( + Self { + cache: NonNull::new(ptr).ok_or(ENOMEM)?, + _p: PhantomData, + }, + GFP_KERNEL, + )?) + } + + /// Creates a pre-filled sheaf from this cache. + /// + /// Allocates a sheaf and pre-fills it with `size` objects. Once creat= ed, + /// allocations from the sheaf via [`Sheaf::alloc`] are guaranteed to + /// succeed until the sheaf is depleted. + /// + /// # Arguments + /// + /// - `size`: The number of objects to pre-allocate. Must not exceed t= he + /// cache's `sheaf_capacity`. + /// - `gfp`: Allocation flags controlling how memory is obtained. Use + /// [`GFP_KERNEL`] for normal allocations that may sleep, or + /// [`GFP_NOWAIT`] for non-blocking allocations. + /// + /// # Errors + /// + /// Returns [`ENOMEM`] if the sheaf or its objects could not be alloca= ted. + /// + /// # Warnings + /// + /// The kernel will warn if `size` exceeds `sheaf_capacity`. + pub fn sheaf( + self: ArcBorrow<'_, Self>, + size: usize, + gfp: kernel::alloc::Flags, + ) -> Result> { + // SAFETY: `self.as_raw()` returns a valid cache pointer, and `siz= e` + // has been validated to fit in a `c_uint`. + let ptr =3D unsafe { + bindings::kmem_cache_prefill_sheaf(self.as_raw(), gfp.as_raw()= , size.try_into()?) + }; + + // INVARIANT: `ptr` was returned by `kmem_cache_prefill_sheaf` and= is + // non-null (checked below). `cache` is the cache from which this = sheaf + // was created. `dropped` is false since the sheaf has not been re= turned. + Ok(Sheaf { + sheaf: NonNull::new(ptr).ok_or(ENOMEM)?, + cache: self.into(), + dropped: false, + }) + } + + fn as_raw(&self) -> *mut bindings::kmem_cache { + self.cache.as_ptr() + } +} + +impl> Drop for KMemCache { + fn drop(&mut self) { + // SAFETY: `self.as_raw()` returns a valid cache pointer that was + // created by `__kmem_cache_create_args`. As all objects allocated= from + // this hold a reference on `self`, they must have been dropped fo= r this + // `drop` method to execute. + unsafe { bindings::kmem_cache_destroy(self.as_raw()) }; + } +} + +/// Trait for types that can be initialized in a slab cache. +/// +/// This trait provides the initialization logic for objects allocated fro= m a +/// [`KMemCache`]. When the slab allocator creates new objects, it invokes= the +/// constructor to ensure objects are in a valid initial state. 
+/// +/// # Implementation +/// +/// Implementors must provide [`init`](KMemCacheInit::init), which returns +/// a in-place initializer for the type. +/// +/// # Example +/// +/// ``` +/// use kernel::mm::sheaf::KMemCacheInit; +/// use kernel::prelude::*; +/// +/// struct MyData { +/// counter: u32, +/// name: [u8; 16], +/// } +/// +/// impl KMemCacheInit for MyData { +/// fn init() -> impl Init { +/// init!(MyData { +/// counter: 0, +/// name: [0; 16], +/// }) +/// } +/// } +/// ``` +pub trait KMemCacheInit { + /// Returns an initializer for creating new objects of type `T`. + /// + /// This method is called by the allocator's constructor to initialize= newly + /// allocated objects. The initializer should set all fields to their + /// default or initial values. + fn init() -> impl Init; +} + +/// A pre-filled container of slab objects. +/// +/// A sheaf holds a set of pre-allocated objects from a [`KMemCache`]. +/// Allocations from a sheaf are guaranteed to succeed until the sheaf is +/// depleted, making sheaves useful in contexts where allocation failure is +/// not acceptable. +/// +/// Sheaves provide faster allocation than direct allocation because they = use +/// local locks with preemption disabled rather than atomic operations. +/// +/// # Lifecycle +/// +/// Sheaves are created via [`KMemCache::sheaf`] and should be returned to= the +/// allocator when no longer needed via [`Sheaf::return_refill`]. If a she= af is +/// simply dropped, it is returned with `GFP_NOWAIT` flags, which may resu= lt in +/// the sheaf being flushed and freed rather than being cached for reuse. +/// +/// # Invariants +/// +/// - `sheaf` is a valid pointer to a `slab_sheaf` obtained from +/// `kmem_cache_prefill_sheaf`. +/// - `cache` is the cache from which this sheaf was created. +/// - `dropped` tracks whether the sheaf has been explicitly returned. +pub struct Sheaf> { + sheaf: NonNull, + cache: Arc>, + dropped: bool, +} + +impl> Sheaf { + fn as_raw(&self) -> *mut bindings::slab_sheaf { + self.sheaf.as_ptr() + } + + /// Return the sheaf and try to refill using `flags`. + /// + /// If the sheaf cannot simply become the percpu spare sheaf, but ther= e's + /// space for a full sheaf in the barn, we try to refill the sheaf bac= k to + /// the cache's sheaf_capacity to avoid handling partially full sheave= s. + /// + /// If the refill fails because gfp is e.g. GFP_NOWAIT, or the barn is= full, + /// the sheaf is instead flushed and freed. + pub fn return_refill(mut self, flags: kernel::alloc::Flags) { + self.dropped =3D true; + // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return valid + // pointers to the cache and sheaf respectively. + unsafe { + bindings::kmem_cache_return_sheaf(self.cache.as_raw(), flags.a= s_raw(), self.as_raw()) + }; + drop(self); + } + + /// Allocates an object from the sheaf. + /// + /// Returns a new [`SBox`] containing an initialized object, or [`None= `] + /// if the sheaf is depleted. Allocations are guaranteed to succeed as + /// long as the sheaf contains pre-allocated objects. + /// + /// The `gfp` flags passed to `kmem_cache_alloc_from_sheaf` are set to= zero, + /// meaning no additional flags like `__GFP_ZERO` or `__GFP_ACCOUNT` a= re + /// applied. + /// + /// The returned `T` is initialized as part of this function. + pub fn alloc(&mut self) -> Option> { + // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return valid + // pointers. The function returns NULL when the sheaf is empty. 
+ let ptr =3D unsafe { + bindings::kmem_cache_alloc_from_sheaf_noprof(self.cache.as_raw= (), 0, self.as_raw()) + }; + + // SAFETY: + // - `ptr` is a valid pointer as it was just returned by the cache. + // - The initializer is infallible, so an error is never returned. + unsafe { T::init().__init(ptr.cast()) }.expect("Initializer is inf= allible"); + + let ptr =3D NonNull::new(ptr.cast::())?; + + // INVARIANT: `ptr` was returned by `kmem_cache_alloc_from_sheaf_n= oprof` + // and initialized above. `cache` is the cache from which this obj= ect + // was allocated. The object remains valid until freed in `Drop`. + Some(SBox { + ptr, + cache: self.cache.clone(), + }) + } +} + +impl> Drop for Sheaf { + fn drop(&mut self) { + if !self.dropped { + // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return va= lid + // pointers. Using `GFP_NOWAIT` because the drop may occur in a + // context where sleeping is not permitted. + unsafe { + bindings::kmem_cache_return_sheaf( + self.cache.as_raw(), + GFP_NOWAIT.as_raw(), + self.as_raw(), + ) + }; + } + } +} + +/// An owned allocation from a cache sheaf. +/// +/// `SBox` is similar to `Box` but is backed by a slab cache allocation ob= tained +/// through a [`Sheaf`]. It provides owned access to an initialized object= and +/// ensures the object is properly freed back to the cache when dropped. +/// +/// The contained `T` is initialized when the `SBox` is returned from allo= c and +/// dropped when the `SBox` is dropped. +/// +/// # Invariants +/// +/// - `ptr` points to a valid, initialized object of type `T`. +/// - `cache` is the cache from which this object was allocated. +/// - The object remains valid for the lifetime of the `SBox`. +pub struct SBox> { + ptr: NonNull, + cache: Arc>, +} + +impl> Deref for SBox { + type Target =3D T; + + fn deref(&self) -> &Self::Target { + // SAFETY: `ptr` is valid and properly aligned per the type invari= ants. + unsafe { self.ptr.as_ref() } + } +} + +impl> DerefMut for SBox { + fn deref_mut(&mut self) -> &mut Self::Target { + // SAFETY: `ptr` is valid and properly aligned per the type invari= ants, + // and we have exclusive access via `&mut self`. + unsafe { self.ptr.as_mut() } + } +} + +impl> Drop for SBox { + fn drop(&mut self) { + // SAFETY: By type invariant, `ptr` points to a valid and initiali= zed + // object. We do not touch `ptr` after returning it to the cache. + unsafe { core::ptr::drop_in_place(self.ptr.as_ptr()) }; + + // SAFETY: `self.ptr` was allocated from `self.cache` via + // `kmem_cache_alloc_from_sheaf_noprof` and is valid. 
+ unsafe { + bindings::kmem_cache_free(self.cache.as_raw(), self.ptr.as_ptr= ().cast()); + } + } +} --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 16CCB3783A1; Mon, 9 Feb 2026 14:38:59 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647939; cv=none; b=KPoRmPJl/f6jQ7vCkCfTVbVFb5aSXw8r5wZQQ9r+tkhgz01C1ReEPh2R11fz2WWtzUkJDdQF0gJ3VVKEbceviO1pHL97qkDbm8id6sDrYRRnwK1R3ixONAItSruSlAQ4LHZaKd+rnp8il5zS4rP+YwTtM6To/Rpakd/RoQUKBeo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647939; c=relaxed/simple; bh=9Rord1GjbuU/nDocLnmD0XDpfMF4CDFshxEJfFsjWOY=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=TrtDUY5AVErHEMQeTLVQ0LgAninHJ7v/pyhWjNCQoEn0IVJgQB9qMJNOBcqkRKswqzLo5O7JLrxScvoB/3zG2JV8ZIS3W6ruMmGJrfOPoxCisIUW9PzIhaYvd/coy6MO877R2MH+obf5q0DCbXK6LsINOiXP6bzU2/y8csF03Ps= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=OF7VSIeE; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="OF7VSIeE" Received: by smtp.kernel.org (Postfix) with ESMTPSA id A1543C19425; Mon, 9 Feb 2026 14:38:53 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647938; bh=9Rord1GjbuU/nDocLnmD0XDpfMF4CDFshxEJfFsjWOY=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=OF7VSIeEYh7mdVHPfRPc/LxMX4EC9ck4OvNXtBDMmZ9L9xTQc/k0YhYIbZivbxN/N +UpFnXKZqqMkYil9ULay1uz65+sSKXvkkGOrsTWDzcsU25bHl+PS0zQ6+MSrY6SoY2 v21z3jFnNSsftxxtxI4qQU4g0s27bXNGLTCRVOKK6/as5j8Pn7eXZt1Ys6YgOaVLr4 dNZQCBb3Ip7w/CKVQ2KHn8myRwz4DEXkZb4fwstGwM6kseim83wm+AU3MuqhlhTUCw r+2ma5zpXpgEaUmn7XLdP3uHYxvi/lCeslqsWLwwj/XbdMBA8e7/V6WzyQIzYnHtnT UFzOIVK8azfyA== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:15 +0100 Subject: [PATCH v3 10/12] rust: mm: sheaf: allow use of C initialized static caches Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-10-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg , "Matthew Wilcox (Oracle)" X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=21503; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=9Rord1GjbuU/nDocLnmD0XDpfMF4CDFshxEJfFsjWOY=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFdVEWC4I4vcBuU02CNL1nEBRPJm+FihuZCB jRr8v1/g16JAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxXQAKCRDhuBo+eShj d8kWD/9KcMP4auGiGg0vTqMX6qBzXoTikq1cmnuNMJmHP3NfkHrVoynDyCQ3ZPBkkfVOr62U5hi UUOOT2RVy+e6iFS0C0u1X8ZqLp4vCOQ49KUOezep46MCtFMZsVO49FvuraxgPzneswB8Vy6apt6 bOpc5i0ywtIykqvLExkC96wkQPKf3nA9QYV1iJBuKPVMhCM/zShtePxpWdMshyiB0cUhgoX2xDJ qnmRGsl1xkiV7uHOIuHy4HoFlh+6dR62WWxW6LYjgytVXMr2UTSQOdemKAdE+JEnaILWkg8Oyro 9gVdnIJA/rSvFVoV7zR3Jd7yk78DNBepIUE2o1L0fzTH/NYi411CvMEWrE5yZZEp4MtOyK8j+OY n6VR7N9ShPHtbWQNEuHceGU0Y+0QfFRUB0fXoxoz7nIdcME2HiWFfhLLhlafScYJ8hZjgESvRNl 3vShvuuUrbSz/45ru0aRoShZ7Se+FHac6h7MS5tsXEIR8zIGGVtdZ7F8oKSLxhVOl33BOaoarmK tfOZri6WFKbz8RCgYRr+dLPZXeDlguznDiDwzxhL2WqoCc5RTNrabIiGtq0A3ci0VtB34rLmw01 39UaUctRFMWrQWqDJ6wYXmTqiWtjTJwuQORB+KCMpW7dWnCRFDHKoX2XJpGALO3JxNYES5Fe98x XrHHLbD+2fSctHw== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Extend the sheaf abstraction to support caches initialized by C at kernel boot time, in addition to dynamically created Rust caches. Introduce `KMemCache` as a transparent wrapper around `kmem_cache` for static caches with `'static` lifetime. Rename the previous `KMemCache` to `KMemCacheHandle` to represent dynamically created, reference-counted caches. Add `Static` and `Dynamic` marker types along with `StaticSheaf` and `DynamicSheaf` type aliases to distinguish sheaves from each cache type. The `Sheaf` type now carries lifetime and allocation mode type parameters. Add `SBox::into_ptr()` and `SBox::static_from_ptr()` methods for passing allocations through C code via raw pointers. Add `KMemCache::from_raw()` for wrapping C-initialized static caches and `Sheaf::refill()` for replenishing a sheaf to a minimum size. Export `kmem_cache_prefill_sheaf`, `kmem_cache_return_sheaf`, `kmem_cache_refill_sheaf`, and `kmem_cache_alloc_from_sheaf_noprof` to allow Rust module code to use the sheaf API. Cc: Vlastimil Babka Cc: "Liam R. 
Howlett" Cc: "Matthew Wilcox (Oracle)" Cc: Lorenzo Stoakes Cc: linux-mm@kvack.org Signed-off-by: Andreas Hindborg --- mm/slub.c | 4 + rust/kernel/mm/sheaf.rs | 343 +++++++++++++++++++++++++++++++++++++++++++-= ---- 2 files changed, 317 insertions(+), 30 deletions(-) diff --git a/mm/slub.c b/mm/slub.c index f77b7407c51bc..7c6b1d28778d0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -5428,6 +5428,7 @@ kmem_cache_prefill_sheaf(struct kmem_cache *s, gfp_t = gfp, unsigned int size) =20 return sheaf; } +EXPORT_SYMBOL(kmem_cache_prefill_sheaf); =20 /* * Use this to return a sheaf obtained by kmem_cache_prefill_sheaf() @@ -5483,6 +5484,7 @@ void kmem_cache_return_sheaf(struct kmem_cache *s, gf= p_t gfp, barn_put_full_sheaf(barn, sheaf); stat(s, BARN_PUT); } +EXPORT_SYMBOL(kmem_cache_return_sheaf); =20 /* * refill a sheaf previously returned by kmem_cache_prefill_sheaf to at le= ast @@ -5536,6 +5538,7 @@ int kmem_cache_refill_sheaf(struct kmem_cache *s, gfp= _t gfp, *sheafp =3D sheaf; return 0; } +EXPORT_SYMBOL(kmem_cache_refill_sheaf); =20 /* * Allocate from a sheaf obtained by kmem_cache_prefill_sheaf() @@ -5573,6 +5576,7 @@ kmem_cache_alloc_from_sheaf_noprof(struct kmem_cache = *s, gfp_t gfp, =20 return ret; } +EXPORT_SYMBOL(kmem_cache_alloc_from_sheaf_noprof); =20 unsigned int kmem_cache_sheaf_size(struct slab_sheaf *sheaf) { diff --git a/rust/kernel/mm/sheaf.rs b/rust/kernel/mm/sheaf.rs index b8fd321335ace..e98879f9881c3 100644 --- a/rust/kernel/mm/sheaf.rs +++ b/rust/kernel/mm/sheaf.rs @@ -23,17 +23,26 @@ //! //! # Architecture //! -//! The sheaf system consists of three main components: +//! The sheaf system supports two modes of operation: +//! +//! - **Static caches**: [`KMemCache`] represents a cache initialized by C= code at +//! kernel boot time. These have `'static` lifetime and produce [`Static= Sheaf`] +//! instances. +//! - **Dynamic caches**: [`KMemCacheHandle`] wraps a cache created at run= time by +//! Rust code. These are reference-counted and produce [`DynamicSheaf`] = instances. +//! +//! Both modes use the same core types: //! -//! - [`KMemCache`]: A slab cache configured with sheaf support. //! - [`Sheaf`]: A pre-filled container of objects from a specific cache. //! - [`SBox`]: An owned allocation from a sheaf, similar to a `Box`. //! //! # Example //! +//! Using a dynamically created cache: +//! //! ``` //! use kernel::c_str; -//! use kernel::mm::sheaf::{KMemCache, KMemCacheInit, Sheaf, SBox}; +//! use kernel::mm::sheaf::{KMemCacheHandle, KMemCacheInit, Sheaf, SBox}; //! use kernel::prelude::*; //! //! struct MyObject { @@ -47,7 +56,7 @@ //! } //! //! // Create a cache with sheaf capacity of 16 objects. -//! let cache =3D KMemCache::::new(c_str!("my_cache"), 16)?; +//! let cache =3D KMemCacheHandle::::new(c_str!("my_cache"), 16)= ?; //! //! // Pre-fill a sheaf with 8 objects. //! let mut sheaf =3D cache.as_arc_borrow().sheaf(8, GFP_KERNEL)?; @@ -76,7 +85,102 @@ =20 use kernel::prelude::*; =20 -use crate::sync::{Arc, ArcBorrow}; +use crate::{ + sync::{Arc, ArcBorrow}, + types::Opaque, +}; + +/// A slab cache with sheaf support. +/// +/// This type is a transparent wrapper around a kernel `kmem_cache`. It ca= n be +/// used with caches created either by C code or via [`KMemCacheHandle`]. +/// +/// When a reference to this type has `'static` lifetime (i.e., `&'static +/// KMemCache`), it typically represents a cache initialized by C at bo= ot +/// time. Such references produce [`StaticSheaf`] instances via [`sheaf`]. 
+/// +/// [`sheaf`]: KMemCache::sheaf +/// +/// # Type parameter +/// +/// - `T`: The type of objects managed by this cache. Must implement +/// [`KMemCacheInit`] to provide initialization logic for allocations. +#[repr(transparent)] +pub struct KMemCache> { + inner: Opaque, + _p: PhantomData, +} + +impl> KMemCache { + /// Creates a pre-filled sheaf from this cache. + /// + /// Allocates a sheaf and pre-fills it with `size` objects. Once creat= ed, + /// allocations from the sheaf via [`Sheaf::alloc`] are guaranteed to + /// succeed until the sheaf is depleted. + /// + /// # Arguments + /// + /// - `size`: The number of objects to pre-allocate. Must not exceed t= he + /// cache's `sheaf_capacity`. + /// - `gfp`: Allocation flags controlling how memory is obtained. Use + /// [`GFP_KERNEL`] for normal allocations that may sleep, or + /// [`GFP_NOWAIT`] for non-blocking allocations. + /// + /// # Errors + /// + /// Returns [`ENOMEM`] if the sheaf or its objects could not be alloca= ted. + /// + /// # Warnings + /// + /// The kernel will warn if `size` exceeds `sheaf_capacity`. + pub fn sheaf( + &'static self, + size: usize, + gfp: kernel::alloc::Flags, + ) -> Result> { + // SAFETY: `self.as_raw()` returns a valid cache pointer, and `siz= e` + // has been validated to fit in a `c_uint`. + let ptr =3D unsafe { + bindings::kmem_cache_prefill_sheaf(self.inner.get(), gfp.as_ra= w(), size.try_into()?) + }; + + // INVARIANT: `ptr` was returned by `kmem_cache_prefill_sheaf` and= is + // non-null (checked below). `cache` is the cache from which this = sheaf + // was created. `dropped` is false since the sheaf has not been re= turned. + Ok(Sheaf { + sheaf: NonNull::new(ptr).ok_or(ENOMEM)?, + // SAFETY: `self` is a valid reference, so the pointer is non-= null. + cache: CacheRef::Static(unsafe { + NonNull::new_unchecked((&raw const *self).cast_mut()) + }), + dropped: false, + _p: PhantomData, + }) + } + + fn as_raw(&self) -> *mut bindings::kmem_cache { + self.inner.get() + } + + /// Creates a reference to a [`KMemCache`] from a raw pointer. + /// + /// This is useful for wrapping a C-initialized static `kmem_cache`, s= uch as + /// the global `radix_tree_node_cachep` used by XArrays. + /// + /// # Safety + /// + /// - `ptr` must be a valid pointer to a `kmem_cache` that was created= for + /// objects of type `T`. + /// - The cache must remain valid for the lifetime `'a`. + /// - The caller must ensure that the cache was configured appropriate= ly for + /// the type `T`, including proper size and alignment. + pub unsafe fn from_raw<'a>(ptr: *mut bindings::kmem_cache) -> &'a Self= { + // SAFETY: The caller guarantees that `ptr` is a valid pointer to a + // `kmem_cache` created for objects of type `T`, that it remains v= alid + // for lifetime `'a`, and that the cache is properly configured fo= r `T`. + unsafe { &*ptr.cast::() } + } +} =20 /// A slab cache with sheaf support. /// @@ -95,12 +199,12 @@ /// - `cache` is a valid pointer to a `kmem_cache` created with /// `__kmem_cache_create_args`. /// - The cache is valid for the lifetime of this struct. -pub struct KMemCache> { - cache: NonNull, - _p: PhantomData, +#[repr(transparent)] +pub struct KMemCacheHandle> { + cache: NonNull>, } =20 -impl> KMemCache { +impl> KMemCacheHandle { /// Creates a new slab cache with sheaf support. /// /// Creates a kernel slab cache for objects of type `T` with the speci= fied @@ -148,8 +252,7 @@ pub fn new(name: &CStr, sheaf_capacity: u32) -> Result<= Arc> // `kmem_cache_destroy` is called in `Drop`. 
Ok(Arc::new( Self { - cache: NonNull::new(ptr).ok_or(ENOMEM)?, - _p: PhantomData, + cache: NonNull::new(ptr.cast()).ok_or(ENOMEM)?, }, GFP_KERNEL, )?) @@ -176,11 +279,11 @@ pub fn new(name: &CStr, sheaf_capacity: u32) -> Resul= t> /// # Warnings /// /// The kernel will warn if `size` exceeds `sheaf_capacity`. - pub fn sheaf( - self: ArcBorrow<'_, Self>, + pub fn sheaf<'a>( + self: ArcBorrow<'a, Self>, size: usize, gfp: kernel::alloc::Flags, - ) -> Result> { + ) -> Result> { // SAFETY: `self.as_raw()` returns a valid cache pointer, and `siz= e` // has been validated to fit in a `c_uint`. let ptr =3D unsafe { @@ -192,17 +295,18 @@ pub fn sheaf( // was created. `dropped` is false since the sheaf has not been re= turned. Ok(Sheaf { sheaf: NonNull::new(ptr).ok_or(ENOMEM)?, - cache: self.into(), + cache: CacheRef::Arc(self.into()), dropped: false, + _p: PhantomData, }) } =20 fn as_raw(&self) -> *mut bindings::kmem_cache { - self.cache.as_ptr() + self.cache.as_ptr().cast() } } =20 -impl> Drop for KMemCache { +impl> Drop for KMemCacheHandle { fn drop(&mut self) { // SAFETY: `self.as_raw()` returns a valid cache pointer that was // created by `__kmem_cache_create_args`. As all objects allocated= from @@ -215,13 +319,13 @@ fn drop(&mut self) { /// Trait for types that can be initialized in a slab cache. /// /// This trait provides the initialization logic for objects allocated fro= m a -/// [`KMemCache`]. When the slab allocator creates new objects, it invokes= the -/// constructor to ensure objects are in a valid initial state. +/// [`KMemCache`]. The initializer is called when objects are allocated fr= om a +/// sheaf via [`Sheaf::alloc`]. /// /// # Implementation /// -/// Implementors must provide [`init`](KMemCacheInit::init), which returns -/// a in-place initializer for the type. +/// Implementors must provide [`init`](KMemCacheInit::init), which returns= an +/// infallible initializer for the type. /// /// # Example /// @@ -252,6 +356,28 @@ pub trait KMemCacheInit { fn init() -> impl Init; } =20 +/// Marker type for sheaves from static caches. +/// +/// Used as a type parameter for [`Sheaf`] to indicate the sheaf was creat= ed +/// from a `&'static KMemCache`. +pub enum Static {} + +/// Marker type for sheaves from dynamic caches. +/// +/// Used as a type parameter for [`Sheaf`] to indicate the sheaf was creat= ed +/// from a [`KMemCacheHandle`] via [`ArcBorrow`]. +pub enum Dynamic {} + +/// A sheaf from a static cache. +/// +/// This is a [`Sheaf`] backed by a `&'static KMemCache`. +pub type StaticSheaf<'a, T> =3D Sheaf<'a, T, Static>; + +/// A sheaf from a dynamic cache. +/// +/// This is a [`Sheaf`] backed by a reference-counted [`KMemCacheHandle`]. +pub type DynamicSheaf<'a, T> =3D Sheaf<'a, T, Dynamic>; + /// A pre-filled container of slab objects. /// /// A sheaf holds a set of pre-allocated objects from a [`KMemCache`]. @@ -262,12 +388,23 @@ pub trait KMemCacheInit { /// Sheaves provide faster allocation than direct allocation because they = use /// local locks with preemption disabled rather than atomic operations. /// +/// # Type parameters +/// +/// - `'a`: The lifetime of the cache reference. +/// - `T`: The type of objects in this sheaf. +/// - `A`: Either [`Static`] or [`Dynamic`], indicating whether the backing +/// cache is a static reference or a reference-counted handle. +/// +/// For convenience, [`StaticSheaf`] and [`DynamicSheaf`] type aliases are +/// provided. 
+/// /// # Lifecycle /// -/// Sheaves are created via [`KMemCache::sheaf`] and should be returned to= the -/// allocator when no longer needed via [`Sheaf::return_refill`]. If a she= af is -/// simply dropped, it is returned with `GFP_NOWAIT` flags, which may resu= lt in -/// the sheaf being flushed and freed rather than being cached for reuse. +/// Sheaves are created via [`KMemCache::sheaf`] or [`KMemCacheHandle::she= af`] +/// and should be returned to the allocator when no longer needed via +/// [`Sheaf::return_refill`]. If a sheaf is simply dropped, it is returned= with +/// `GFP_NOWAIT` flags, which may result in the sheaf being flushed and fr= eed +/// rather than being cached for reuse. /// /// # Invariants /// @@ -275,13 +412,14 @@ pub trait KMemCacheInit { /// `kmem_cache_prefill_sheaf`. /// - `cache` is the cache from which this sheaf was created. /// - `dropped` tracks whether the sheaf has been explicitly returned. -pub struct Sheaf> { +pub struct Sheaf<'a, T: KMemCacheInit, A> { sheaf: NonNull, - cache: Arc>, + cache: CacheRef, dropped: bool, + _p: PhantomData<(&'a KMemCache, A)>, } =20 -impl> Sheaf { +impl<'a, T: KMemCacheInit, A> Sheaf<'a, T, A> { fn as_raw(&self) -> *mut bindings::slab_sheaf { self.sheaf.as_ptr() } @@ -304,6 +442,75 @@ pub fn return_refill(mut self, flags: kernel::alloc::F= lags) { drop(self); } =20 + /// Refills the sheaf to at least the specified size. + /// + /// Replenishes the sheaf by preallocating objects until it contains at + /// least `size` objects. If the sheaf already contains `size` or more + /// objects, this is a no-op. In practice, the sheaf is refilled to its + /// full capacity. + /// + /// # Arguments + /// + /// - `flags`: Allocation flags controlling how memory is obtained. + /// - `size`: The minimum number of objects the sheaf should contain a= fter + /// refilling. If `size` exceeds the cache's `sheaf_capacity`, the s= heaf + /// may be replaced with a larger one. + /// + /// # Errors + /// + /// Returns an error if the objects could not be allocated. If refilli= ng + /// fails, the existing sheaf is left intact. + pub fn refill(&mut self, flags: kernel::alloc::Flags, size: usize) -> = Result { + // SAFETY: `self.cache.as_raw()` returns a valid cache pointer and + // `&raw mut self.sheaf` points to a valid sheaf per the type inva= riants. + kernel::error::to_result(unsafe { + bindings::kmem_cache_refill_sheaf( + self.cache.as_raw(), + flags.as_raw(), + (&raw mut (self.sheaf)).cast(), + size.try_into()?, + ) + }) + } +} + +impl<'a, T: KMemCacheInit> Sheaf<'a, T, Static> { + /// Allocates an object from the sheaf. + /// + /// Returns a new [`SBox`] containing an initialized object, or [`None= `] + /// if the sheaf is depleted. Allocations are guaranteed to succeed as + /// long as the sheaf contains pre-allocated objects. + /// + /// The `gfp` flags passed to `kmem_cache_alloc_from_sheaf` are set to= zero, + /// meaning no additional flags like `__GFP_ZERO` or `__GFP_ACCOUNT` a= re + /// applied. + /// + /// The returned `T` is initialized as part of this function. + pub fn alloc(&mut self) -> Option> { + // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return valid + // pointers. The function returns NULL when the sheaf is empty. + let ptr =3D unsafe { + bindings::kmem_cache_alloc_from_sheaf_noprof(self.cache.as_raw= (), 0, self.as_raw()) + }; + + // SAFETY: + // - `ptr` is a valid pointer as it was just returned by the cache. + // - The initializer is infallible, so an error is never returned. 
+ unsafe { T::init().__init(ptr.cast()) }.expect("Initializer is inf= allible"); + + let ptr =3D NonNull::new(ptr.cast::())?; + + // INVARIANT: `ptr` was returned by `kmem_cache_alloc_from_sheaf_n= oprof` + // and initialized above. `cache` is the cache from which this obj= ect + // was allocated. The object remains valid until freed in `Drop`. + Some(SBox { + ptr, + cache: self.cache.clone(), + }) + } +} + +impl<'a, T: KMemCacheInit> Sheaf<'a, T, Dynamic> { /// Allocates an object from the sheaf. /// /// Returns a new [`SBox`] containing an initialized object, or [`None= `] @@ -339,7 +546,7 @@ pub fn alloc(&mut self) -> Option> { } } =20 -impl> Drop for Sheaf { +impl<'a, T: KMemCacheInit, A> Drop for Sheaf<'a, T, A> { fn drop(&mut self) { if !self.dropped { // SAFETY: `self.cache.as_raw()` and `self.as_raw()` return va= lid @@ -356,6 +563,39 @@ fn drop(&mut self) { } } =20 +/// Internal reference to a cache, either static or reference-counted. +/// +/// # Invariants +/// +/// - For `CacheRef::Static`: the `NonNull` points to a valid `KMemCache` +/// with `'static` lifetime, derived from a `&'static KMemCache` refe= rence. +enum CacheRef> { + /// A reference-counted handle to a dynamically created cache. + Arc(Arc>), + /// A pointer to a static lifetime cache. + Static(NonNull>), +} + +impl> Clone for CacheRef { + fn clone(&self) -> Self { + match self { + Self::Arc(arg0) =3D> Self::Arc(arg0.clone()), + Self::Static(arg0) =3D> Self::Static(*arg0), + } + } +} + +impl> CacheRef { + fn as_raw(&self) -> *mut bindings::kmem_cache { + match self { + CacheRef::Arc(handle) =3D> handle.as_raw(), + // SAFETY: By type invariant, `ptr` points to a valid `KMemCac= he` + // with `'static` lifetime. + CacheRef::Static(ptr) =3D> unsafe { ptr.as_ref() }.as_raw(), + } + } +} + /// An owned allocation from a cache sheaf. /// /// `SBox` is similar to `Box` but is backed by a slab cache allocation ob= tained @@ -372,7 +612,50 @@ fn drop(&mut self) { /// - The object remains valid for the lifetime of the `SBox`. pub struct SBox> { ptr: NonNull, - cache: Arc>, + cache: CacheRef, +} + +impl> SBox { + /// Consumes the `SBox` and returns the raw pointer to the contained v= alue. + /// + /// The caller becomes responsible for freeing the memory. The object = is not + /// dropped and remains initialized. Use [`static_from_ptr`] to recons= truct + /// an `SBox` from the pointer. + /// + /// [`static_from_ptr`]: SBox::static_from_ptr + pub fn into_ptr(self) -> *mut T { + let ptr =3D self.ptr.as_ptr(); + core::mem::forget(self); + ptr + } + + /// Reconstructs an `SBox` from a raw pointer and cache. + /// + /// This is intended for use with objects that were previously convert= ed to + /// raw pointers via [`into_ptr`], typically for passing through C cod= e. + /// + /// [`into_ptr`]: SBox::into_ptr + /// + /// # Safety + /// + /// - `cache` must be a valid pointer to the `kmem_cache` from which `= value` + /// was allocated. + /// - `value` must be a valid pointer to an initialized `T` that was + /// allocated from `cache`. + /// - The caller must ensure that no other `SBox` or reference exists = for + /// `value`. + pub unsafe fn static_from_ptr(cache: *mut bindings::kmem_cache, value:= *mut T) -> Self { + // INVARIANT: The caller guarantees `value` points to a valid, + // initialized `T` allocated from `cache`. + Self { + // SAFETY: By function safety requirements, `value` is not nul= l. 
+ ptr: unsafe { NonNull::new_unchecked(value) }, + cache: CacheRef::Static( + // SAFETY: By function safety requirements, `cache` is not= null. + unsafe { NonNull::new_unchecked(cache.cast()) }, + ), + } + } } =20 impl> Deref for SBox { --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id AF19D37BE78; Mon, 9 Feb 2026 14:39:41 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647981; cv=none; b=Q7vaplIsI7LV1R2BL6THV+PHeBE72a5ZgAe56hMX5IiGF+YrVPUfKt0DA0/7OcTEvk6D5r2L/yPkr31csqZy2awFuj2BwE73aXQYpOA0sLddCDcODVjbYyL/5NuuR4bb4WOYy06g4j9/DVFw76oBwsyMwxWJeWcT6EkOSKajAS0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647981; c=relaxed/simple; bh=VslDN1mPgFIPh4fOyBZhDN58qjpQH9gwwiiN+krVdpc=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; b=Ga0olbyWDmbrE25PO+c3WZrZ7sXsyvEbG3VYXkPgoC6PVcc9rN0aEyOxIYD4dXzJd/WxbMgWQMSW709YevUdYKDDSHt2xLjZpE1ZK4TJAPF9gg82GP6FbBM8LsgBHGVyKxp53mh/DmeKs2eT/Onz51lY8BA2H/Y5HclCRivaOE8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=i41dtz0x; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="i41dtz0x" Received: by smtp.kernel.org (Postfix) with ESMTPSA id ECDF3C19425; Mon, 9 Feb 2026 14:39:35 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647981; bh=VslDN1mPgFIPh4fOyBZhDN58qjpQH9gwwiiN+krVdpc=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=i41dtz0xskrUok+uzr6C0WdKfzIZ4fXVrLBlLxCOk6zIViA9IMUrZWbY4l1jTHyiH 8q39lKA1o3D7s+vHLG0Y0d55CSJg07xGLYt2EcioMSRA26dHW4F2IpGpyZF3YbfTJ4 IatdZL1SyBG9AcRmkDKbpQ4Vk1r5jImqLCmctbbD/RjqXMg/ZALLuDtp9oXJ+/ET8B diWzf5VRTxiBdln3n3NORvW5jFAxjCw9DzAj5uhdNlAh8GPJ8VQ2TFstZ4QbYZ72LL 8n1pq7Sd4Tu4Ru6x8Lqf+X3j6sD6mG11ePNDnEAZAYar+jyK0vGujiBqYt/t9d4En0 77w3TEOXo83XQ== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:16 +0100 Subject: [PATCH v3 11/12] xarray, radix-tree: enable sheaf support for kmem_cache Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-11-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. 
Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg , "Matthew Wilcox (Oracle)" X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=1243; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=VslDN1mPgFIPh4fOyBZhDN58qjpQH9gwwiiN+krVdpc=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFex2+rby13jaJSZme2/vU05ZzvMRP5UT6SK VJQYMHYp8qJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxXgAKCRDhuBo+eShj d7xHD/48WEALFQfX+iMYXsiR/Mvn723D+YBjt2TOR/yfD7Sul4d8Iv02lIm3LGfCJsyUqsx0R/m Qk2ZkCueXt8bvCuSssHdX0jFMrBRDz9DFgMbk5SHMC2hLN/W0x/AY4rHDH4HxWqA/09V2h9kX9l by7veoXmADET0BEbdp15OIZLrP4+TbK6Md7Y98gcZZZZgKfyOgrWD9O+wZSe1jT4JSNrdw0pHhK WEiy/1R+bNnLAmMOP7AjVxTRKZ5SQafFzXZwe1+F6+mZe2saG8I5hrDR+VxNKisA7vQl9/z3gF1 XuYWSv6bLte0vcgMNpzi5111T1Tky95jqKotdLtHq7sO+hUg0IAm1sGAzEr848kitJXmQWyyp8z RtkHcd0kwQ/15cx2EZjuMGm32TRs79hH6cCTfvmf4CweWaQmqV+mC5uohtBCP7piEWMy/RrA1lx Lk8TicZ76wcecqeZ8TeDdarQAI/oVX1NO6bP4HKm3wcdjn2HDq2AW5m3Ss2RyH1ZoffhLt7eyAY HG8F4YbEif5r2ab7bGc75ROxcp9b9NTzghH7dtSdy8YrkyjeZNU9Uc1uq1KyyHMibe/jKSjYmPP debQlj5Vy7aICDbaFtI+SvG1jp8hcOSvnmatndAdy79kxiL25JYhXQRUaqy4lc6Xcs8Um0+KbQA EVG3kWV3B6ysnbw== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 The rust null block driver plans to rely on preloading xarray nodes from the radix_tree_node_cachep kmem_cache. Cc: "Matthew Wilcox (Oracle)" Signed-off-by: Andreas Hindborg --- lib/radix-tree.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 976b9bd02a1b5..1cf0012b15ade 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -1598,10 +1598,16 @@ void __init radix_tree_init(void) BUILD_BUG_ON(RADIX_TREE_MAX_TAGS + __GFP_BITS_SHIFT > 32); BUILD_BUG_ON(ROOT_IS_IDR & ~GFP_ZONEMASK); BUILD_BUG_ON(XA_CHUNK_SIZE > 255); - radix_tree_node_cachep =3D kmem_cache_create("radix_tree_node", - sizeof(struct radix_tree_node), 0, - SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, - radix_tree_node_ctor); + + struct kmem_cache_args args =3D { + .ctor =3D radix_tree_node_ctor, + .sheaf_capacity =3D 64, + }; + + radix_tree_node_cachep =3D kmem_cache_create( + "radix_tree_node", sizeof(struct radix_tree_node), &args, + SLAB_PANIC | SLAB_RECLAIM_ACCOUNT); + ret =3D cpuhp_setup_state_nocalls(CPUHP_RADIX_DEAD, "lib/radix:dead", NULL, radix_tree_cpu_dead); WARN_ON(ret < 0); --=20 2.51.2 From nobody Tue Feb 10 16:19:01 2026 Received: from smtp.kernel.org (aws-us-west-2-korg-mail-1.web.codeaurora.org [10.30.226.201]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 2AA4B2222B2; Mon, 9 Feb 2026 14:39:29 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=10.30.226.201 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647969; cv=none; b=bBY/991OMeuS3msuB/j+wSVzZpzLQWN5MlD8rMUUoP5JF5K2agHPHeKtJyvHl/7N1GM+t3sezV/VdqN2BhCsHIKw9XsMDxHCkCl2Wzh72ALnQ84jJLa7M6395qF5pW6hHSpm0klu0pYFdQlw61BYP1Npxm7Fv4C1RFxRg31JXRs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1770647969; c=relaxed/simple; bh=rKGd883t0mW5F1sVxQvZylDp/AAHhdrL4UTVDJdkRgM=; h=From:Date:Subject:MIME-Version:Content-Type:Message-Id:References: In-Reply-To:To:Cc; 
b=qePKLT6Grzb0Ey9tRkuq8I8tIKGIUvDRmOdgEwrDkqwUMMzD611T1yv51LXbbffVsMm+IisTEnbxDQnPZVTK9fMYyvuDSxyKbO0tzmN1EYb6J8orGYvVewGjs3zmA6go5LDbFL80R1IzUuLHLUrkhpvmjypZ/la8ZJdWcol3hWc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b=lCpPM2ND; arc=none smtp.client-ip=10.30.226.201 Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=kernel.org header.i=@kernel.org header.b="lCpPM2ND" Received: by smtp.kernel.org (Postfix) with ESMTPSA id 38C4BC116C6; Mon, 9 Feb 2026 14:39:23 +0000 (UTC) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=kernel.org; s=k20201202; t=1770647969; bh=rKGd883t0mW5F1sVxQvZylDp/AAHhdrL4UTVDJdkRgM=; h=From:Date:Subject:References:In-Reply-To:To:Cc:From; b=lCpPM2ND9onyRZvKor9ARgLCOMktp2aQHrqH1n8BQrGgfgOsTu/VQ4GBiOMRD5L66 FASf74NjN3AvypbOawbaq+LllM5T0b0vLC1lPJ1pU/9ff4JmKKW7S2i7ku9qBes//Y seduQY1Vveo2bhfhmgvdt8ci3k9K2qKUp7zJhkXm1wNrcv4cstX/Wb0OsLm7+I/F9c 57HOH/6cKUwqGt9rAJQ+AiwMgcuS83umxfaM9YyxcGWg++q7Nw9+YojpPJM2tnjfnK Q1DiUBphb1qWhFxzveQ8n4z+Ih+7GlEnVvKr/q0sjuhtWIaIhs7bT9WBrWzX5atEJh hsudSZQ/b8jQA== From: Andreas Hindborg Date: Mon, 09 Feb 2026 15:38:17 +0100 Subject: [PATCH v3 12/12] rust: xarray: add preload API Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: quoted-printable Message-Id: <20260209-xarray-entry-send-v3-12-f777c65b8ae2@kernel.org> References: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> In-Reply-To: <20260209-xarray-entry-send-v3-0-f777c65b8ae2@kernel.org> To: Tamir Duberstein , Miguel Ojeda , Alex Gaynor , Boqun Feng , Gary Guo , =?utf-8?q?Bj=C3=B6rn_Roy_Baron?= , Benno Lossin , Alice Ryhl , Trevor Gross , Danilo Krummrich , Lorenzo Stoakes , "Liam R. Howlett" , Vlastimil Babka , Andrew Morton , Christoph Lameter , David Rientjes , Roman Gushchin , Harry Yoo Cc: Daniel Gomez , rust-for-linux@vger.kernel.org, linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andreas Hindborg , "Matthew Wilcox (Oracle)" X-Mailer: b4 0.15-dev X-Developer-Signature: v=1; a=openpgp-sha256; l=16827; i=a.hindborg@kernel.org; h=from:subject:message-id; bh=rKGd883t0mW5F1sVxQvZylDp/AAHhdrL4UTVDJdkRgM=; b=owEBbQKS/ZANAwAKAeG4Gj55KGN3AcsmYgBpifFf6NffzuXerL7xmFyC9b4KK7WEKpBgEWDYV NTIqGYovkSJAjMEAAEKAB0WIQQSwflHVr98KhXWwBLhuBo+eShjdwUCaYnxXwAKCRDhuBo+eShj dy44EACNDxm6p+yj/FsUGdp4Qtxt98PxnZF1Fv09Kj81o8fMoLkBXYm72Ty5SLNtxRCu+bAb8jy 71lkuOHueV1wmQ/Yxy2vmQW3rqhLOSQEYsI7IiieomAjg4fj1hUHkr50Ltw0zG1OdrtfZV5vR3E Un0nV0ocP15tv4Qk2g9ajNXevmDzutOqc6qJWyJlYofzCTiQ7+5V/yQzyre2EfXTuKgQdLSk6+C /gQst7h1JIQ/nEPuVAWVfYidFAvkg8v/xSIHMfYkiF4+yfwMW+L3XwKY6rFUgB5jfRljI6YiiGe OYNmnsQACXRUoDLnpQ0ly6UR0KaZpLgbXLf+zjIKRQhr6/P9EpW0f5q4LTsduZWa2b2y8ITuMg8 ENP6Gkk/GlbYc+TMxjLMZeOpqgNQg36NQzRDjWabQr8lPrQpiWoijt0S/MRYA/tjz96DJS7mQW2 D5z1C3q2PDvSP/U/uE8LFDhtwn4hB67lzPPLwsjEtM1vNOxc3zJMcvmLOayJh+K7t0VvtX0gWcW ZkB6BUY9vMSmG6M8fXbdYPVFXEP5iEGqb1BxexJqLHiRUunLVZ5c/VCCx+Cxfp7bgsBjDO9+GzZ BZJGUkl+6aQrhj2QJbaJ+2R3RTLadNSUW8tpdZj4QvT9mp2m5uVVdlc6FxEbj636TMEw59r0Qn1 BXzwDdR4akoRAXg== X-Developer-Key: i=a.hindborg@kernel.org; a=openpgp; fpr=3108C10F46872E248D1FB221376EB100563EF7A7 Add a preload API that allows preallocating memory for XArray insertions. This enables insertions to proceed without allocation failures in contexts where memory allocation is not desirable, such as in atomic contexts. 
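For illustration, a rough usage sketch follows (not part of the diff below). The shape of `Guard::insert_entry` and of the optional sheaf argument are assumptions inferred from the description in this message, so the real API may differ:

    // Rough sketch only; `Guard::insert_entry` and the `Option<&mut XArraySheaf>`
    // parameter are assumed from this patch description and may not match the
    // final API exactly.
    use kernel::prelude::*;
    use kernel::xarray::{xarray_kmem_cache, AllocKind, XArray, XArraySheaf};

    fn preload_and_insert() -> Result {
        // Prefill a sheaf of XArray nodes while sleeping allocations are
        // still permitted.
        let mut sheaf: XArraySheaf<'static> = xarray_kmem_cache().sheaf(4, GFP_KERNEL)?;

        let xa = KBox::pin_init(XArray::<KBox<u32>>::new(AllocKind::Alloc), GFP_KERNEL)?;
        let mut guard = xa.lock();

        // If the internal store hits ENOMEM, a preallocated node is consumed
        // from the sheaf instead of failing (assumed argument shape).
        guard.insert_entry(10, KBox::new(10u32, GFP_KERNEL)?, Some(&mut sheaf))?;

        // Return the sheaf, attempting to refill it for later use.
        sheaf.return_refill(GFP_KERNEL);
        Ok(())
    }

The point of the sketch is the split in time: the sheaf is filled up front while allocation may sleep, and the later insertion only consumes already-allocated nodes.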
The implementation introduces `XArrayNode` representing a single XArray node and `XArraySheaf` as a type alias for a sheaf of preallocated nodes. Add the function `xarray_kmem_cache` to provide access to the global XArray node cache for creating sheaves. Update `VacantEntry::insert` and `VacantEntry::insert_entry` to accept an optional sheaf argument for preloaded memory. Add a new `Guard::insert_entry` method for inserting with preload support. When an insertion would fail due to ENOMEM, the XArray state API automatically consumes a preallocated node from the sheaf if available. Export `radix_tree_node_ctor` and `radix_tree_node_cachep` from C to enable Rust code to work with the radix tree node cache. Cc: "Liam R. Howlett" Cc: "Matthew Wilcox (Oracle)" Signed-off-by: Andreas Hindborg --- include/linux/radix-tree.h | 3 + lib/radix-tree.c | 5 +- rust/bindings/bindings_helper.h | 3 + rust/kernel/xarray.rs | 172 +++++++++++++++++++++++++++++++++++-= ---- rust/kernel/xarray/entry.rs | 29 ++++--- 5 files changed, 182 insertions(+), 30 deletions(-) diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index eae67015ce51a..c3699f12b070c 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -469,4 +469,7 @@ static __always_inline void __rcu **radix_tree_next_slo= t(void __rcu **slot, slot =3D radix_tree_next_slot(slot, iter, \ RADIX_TREE_ITER_TAGGED | tag)) =20 + +void radix_tree_node_ctor(void *arg); + #endif /* _LINUX_RADIX_TREE_H */ diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 1cf0012b15ade..ddd67ce672f5c 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -33,6 +33,7 @@ * Radix tree node cache. */ struct kmem_cache *radix_tree_node_cachep; +EXPORT_SYMBOL(radix_tree_node_cachep); =20 /* * The radix tree is variable-height, so an insert operation not only has @@ -1566,14 +1567,14 @@ void idr_destroy(struct idr *idr) } EXPORT_SYMBOL(idr_destroy); =20 -static void -radix_tree_node_ctor(void *arg) +void radix_tree_node_ctor(void *arg) { struct radix_tree_node *node =3D arg; =20 memset(node, 0, sizeof(*node)); INIT_LIST_HEAD(&node->private_list); } +EXPORT_SYMBOL(radix_tree_node_ctor); =20 static int radix_tree_cpu_dead(unsigned int cpu) { diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helpe= r.h index 58605c32e8102..652f08ad888cd 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -118,6 +118,9 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRESENT =3D XA_PRE= SENT; const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC =3D XA_FLAGS_ALLOC; const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 =3D XA_FLAGS_ALLOC1; const size_t RUST_CONST_HELPER_XAS_RESTART =3D (size_t)XAS_RESTART; +const size_t RUST_CONST_HELPER_XA_CHUNK_SHIFT =3D XA_CHUNK_SHIFT; +const size_t RUST_CONST_HELPER_XA_CHUNK_SIZE =3D XA_CHUNK_SIZE; +extern struct kmem_cache *radix_tree_node_cachep; =20 const vm_flags_t RUST_CONST_HELPER_VM_MERGEABLE =3D VM_MERGEABLE; const vm_flags_t RUST_CONST_HELPER_VM_READ =3D VM_READ; diff --git a/rust/kernel/xarray.rs b/rust/kernel/xarray.rs index 8c10e8fd76f15..89bf531308c88 100644 --- a/rust/kernel/xarray.rs +++ b/rust/kernel/xarray.rs @@ -5,6 +5,7 @@ //! 
C header: [`include/linux/xarray.h`](srctree/include/linux/xarray.h) =20 use core::{ + convert::Infallible, iter, marker::PhantomData, pin::Pin, @@ -23,11 +24,17 @@ bindings, build_assert, // error::{ + code::*, to_result, Error, Result, // }, ffi::c_void, + mm::sheaf::{ + KMemCache, + SBox, + StaticSheaf, // + }, types::{ ForeignOwnable, NotThreadSafe, @@ -35,12 +42,54 @@ }, }; use pin_init::{ + init, pin_data, pin_init, pinned_drop, + Init, PinInit, // }; =20 +/// Sheaf of preallocated [`XArray`] nodes. +pub type XArraySheaf<'a> =3D StaticSheaf<'a, XArrayNode>; + +/// Returns a reference to the global XArray node cache. +/// +/// This provides access to the kernel's `radix_tree_node_cachep`, which i= s the +/// slab cache used for allocating internal XArray nodes. This cache can b= e used +/// to create sheaves for preallocating XArray nodes. +pub fn xarray_kmem_cache() -> &'static KMemCache { + // SAFETY: `radix_tree_node_cachep` is a valid, statically initialized + // kmem_cache that remains valid for the lifetime of the kernel. The c= ache + // is configured for `xa_node` objects which match our `XArrayNode` ty= pe. + unsafe { KMemCache::from_raw(bindings::radix_tree_node_cachep) } +} + +/// A preallocated XArray node. +/// +/// This represents a single preallocated internal node for an XArray. +pub struct XArrayNode { + node: Opaque, +} + +impl kernel::mm::sheaf::KMemCacheInit for XArrayNode { + fn init() -> impl Init { + init!(Self { + // SAFETY: + // - This initialization cannot fail and will never return `Er= r`. + // - The xa_node does not move during initialization. + node <- unsafe { + pin_init::init_from_closure( + |place: *mut Opaque| -> Result<(), = Infallible> { + bindings::radix_tree_node_ctor(place.cast::()); + Ok(()) + }, + ) + } + }) + } +} + /// An array which efficiently maps sparse integer indices to owned object= s. /// /// This is similar to a [`crate::alloc::kvec::Vec>`], but more = efficient when there are @@ -137,15 +186,22 @@ fn iter(&self) -> impl Iterator> + '_ { let mut index =3D 0; =20 // SAFETY: `self.xa` is always valid by the type invariant. - iter::once(unsafe { - bindings::xa_find(self.xa.get(), &mut index, usize::MAX, bindi= ngs::XA_PRESENT) - }) - .chain(iter::from_fn(move || { - // SAFETY: `self.xa` is always valid by the type invariant. - Some(unsafe { - bindings::xa_find_after(self.xa.get(), &mut index, usize::= MAX, bindings::XA_PRESENT) - }) - })) + Iterator::chain( + iter::once(unsafe { + bindings::xa_find(self.xa.get(), &mut index, usize::MAX, b= indings::XA_PRESENT) + }), + iter::from_fn(move || { + // SAFETY: `self.xa` is always valid by the type invariant. + Some(unsafe { + bindings::xa_find_after( + self.xa.get(), + &mut index, + usize::MAX, + bindings::XA_PRESENT, + ) + }) + }), + ) .map_while(|ptr| NonNull::new(ptr.cast())) } =20 @@ -166,7 +222,6 @@ pub fn try_lock(&self) -> Option> { pub fn lock(&self) -> Guard<'_, T> { // SAFETY: `self.xa` is always valid by the type invariant.
unsafe { bindings::xa_lock(self.xa.get()) }; - Guard { xa: self, _not_send: NotThreadSafe, @@ -270,7 +325,7 @@ pub fn get_mut(&mut self, index: usize) -> Option> { /// /// match guard.entry(42) { /// Entry::Vacant(entry) =3D> { - /// entry.insert(KBox::new(0x1337u32, GFP_KERNEL)?)?; + /// entry.insert(KBox::new(0x1337u32, GFP_KERNEL)?, None)?; /// } /// Entry::Occupied(_) =3D> unreachable!("We did not insert an ent= ry yet"), /// } @@ -475,6 +530,45 @@ pub fn store( Ok(unsafe { T::try_from_foreign(old) }) } } + + /// Inserts a value and returns an occupied entry for further operatio= ns. + /// + /// If a value is already present, the operation fails. + /// + /// This method will not drop the XArray lock. If memory allocation is + /// required for the operation to succeed, the user should supply memo= ry + /// through the `preload` argument. + /// + /// # Examples + /// + /// ``` + /// # use kernel::{prelude::*, xarray::{AllocKind, XArray}}; + /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; + /// let mut guard =3D xa.lock(); + /// + /// assert_eq!(guard.get(42), None); + /// + /// let value =3D KBox::new(0x1337u32, GFP_KERNEL)?; + /// let entry =3D guard.insert_entry(42, value, None)?; + /// let borrowed =3D entry.into_mut(); + /// assert_eq!(borrowed, &0x1337); + /// + /// # Ok::<(), kernel::error::Error>(()) + /// ``` + pub fn insert_entry<'b>( + &'b mut self, + index: usize, + value: T, + preload: Option<&mut XArraySheaf<'_>>, + ) -> Result, StoreError> { + match self.entry(index) { + Entry::Vacant(entry) =3D> entry.insert_entry(value, preload), + Entry::Occupied(_) =3D> Err(StoreError { + error: EBUSY, + value, + }), + } + } } =20 /// Internal state for XArray iteration and entry operations. @@ -489,6 +583,25 @@ pub(crate) struct XArrayState<'a, 'b, T: ForeignOwnabl= e> { state: bindings::xa_state, } =20 +impl<'a, 'b, T: ForeignOwnable> Drop for XArrayState<'a, 'b, T> { + fn drop(&mut self) { + if !self.state.xa_alloc.is_null() { + // SAFETY: + // - `xa_alloc` is only set via `SBox::into_ptr()` in `insert(= )` where + // the node comes from an `XArraySheaf` backed by `radix_tre= e_node_cachep`. + // - `xa_alloc` points to a valid, initialized `XArrayNode`. + // - `XArrayState` has exclusive ownership of `xa_alloc`, and = no other + // `SBox` or reference exists for this value. + drop(unsafe { + SBox::::static_from_ptr( + bindings::radix_tree_node_cachep, + self.state.xa_alloc.cast(), + ) + }) + } + } +} + impl<'a, 'b, T: ForeignOwnable> XArrayState<'a, 'b, T> { fn new(access: &'b Guard<'a, T>, index: usize) -> Self { let ptr =3D access.xa.xa.get(); @@ -529,16 +642,37 @@ fn status(&self) -> Result { to_result(unsafe { bindings::xas_error(&self.state) }) } =20 - fn insert(&mut self, value: T) -> Result<*mut c_void, StoreError> { + fn insert( + &mut self, + value: T, + mut preload: Option<&mut XArraySheaf<'_>>, + ) -> Result<*mut c_void, StoreError> { let new =3D T::into_foreign(value).cast(); =20 - // SAFETY: `self.state.state` is properly initialized and `new` ca= me from `T::into_foreign`. - // We hold the xarray lock. - unsafe { bindings::xas_store(&mut self.state, new) }; - - self.status().map(|()| new).map_err(|error| { - // SAFETY: `new` came from `T::into_foreign` and `xas_store` d= oes not take ownership of - // the value on error. + loop { + // SAFETY: `self.state` is properly initialized and `new` came= from + // `T::into_foreign`. We hold the xarray lock. 
+ unsafe { bindings::xas_store(&mut self.state, new) }; + + match self.status() { + Ok(()) =3D> break Ok(new), + Err(ENOMEM) =3D> { + debug_assert!(self.state.xa_alloc.is_null()); + let node =3D match preload.as_mut().map(|sheaf| sheaf.= alloc().ok_or(ENOMEM)) { + None =3D> break Err(ENOMEM), + Some(Err(e)) =3D> break Err(e), + Some(Ok(node)) =3D> node, + }; + + self.state.xa_alloc =3D node.into_ptr().cast(); + continue; + } + Err(e) =3D> break Err(e), + } + } + .map_err(|error| { + // SAFETY: `new` came from `T::into_foreign` and `xas_store` d= oes not take + // ownership of the value on error. let value =3D unsafe { T::from_foreign(new) }; StoreError { value, error } }) diff --git a/rust/kernel/xarray/entry.rs b/rust/kernel/xarray/entry.rs index 1b1c21bed7022..ff500be3832b7 100644 --- a/rust/kernel/xarray/entry.rs +++ b/rust/kernel/xarray/entry.rs @@ -3,6 +3,7 @@ use super::{ Guard, StoreError, + XArraySheaf, XArrayState, // }; use core::ptr::NonNull; @@ -29,9 +30,9 @@ impl Entry<'_, '_, T> { /// let mut xa =3D KBox::pin_init(XArray::>::new(AllocKind::= Alloc), GFP_KERNEL)?; /// let mut guard =3D xa.lock(); /// - /// /// let entry =3D guard.entry(42); /// assert_eq!(entry.is_occupied(), false); + /// drop(entry); /// /// guard.store(42, KBox::new(0x1337u32, GFP_KERNEL)?, GFP_KERNEL)?; /// let entry =3D guard.entry(42); @@ -64,7 +65,8 @@ pub(crate) fn new(guard: &'b mut Guard<'a, T>, index: usi= ze) -> Self { /// Returns a reference to the newly inserted value. /// /// - This method will fail if the nodes on the path to the index - /// represented by this entry are not present in the XArray. + /// represented by this entry are not present in the XArray and no m= emory + /// is available via the `preload` argument. /// - This method will not drop the XArray lock. /// /// @@ -79,7 +81,7 @@ pub(crate) fn new(guard: &'b mut Guard<'a, T>, index: usi= ze) -> Self { /// /// if let Entry::Vacant(entry) =3D guard.entry(42) { /// let value =3D KBox::new(0x1337u32, GFP_KERNEL)?; - /// let borrowed =3D entry.insert(value)?; + /// let borrowed =3D entry.insert(value, None)?; /// assert_eq!(*borrowed, 0x1337); /// } /// @@ -87,8 +89,12 @@ pub(crate) fn new(guard: &'b mut Guard<'a, T>, index: us= ize) -> Self { /// /// # Ok::<(), kernel::error::Error>(()) /// ``` - pub fn insert(mut self, value: T) -> Result, StoreE= rror> { - let new =3D self.state.insert(value)?; + pub fn insert( + mut self, + value: T, + preload: Option<&mut XArraySheaf<'_>>, + ) -> Result, StoreError> { + let new =3D self.state.insert(value, preload)?; =20 // SAFETY: `new` came from `T::into_foreign`. The entry has exclus= ive // ownership of `new` as it holds a mutable reference to `Guard`. @@ -98,7 +104,8 @@ pub fn insert(mut self, value: T) -> Result, StoreError> { /// Inserts a value and returns an occupied entry representing the new= ly inserted value. /// /// - This method will fail if the nodes on the path to the index - /// represented by this entry are not present in the XArray. + /// represented by this entry are not present in the XArray and no m= emory + /// is available via the `preload` argument. /// - This method will not drop the XArray lock. 
/// /// # Examples @@ -112,7 +119,7 @@ pub fn insert(mut self, value: T) -> Result, StoreError> { /// /// if let Entry::Vacant(entry) =3D guard.entry(42) { /// let value =3D KBox::new(0x1337u32, GFP_KERNEL)?; - /// let occupied =3D entry.insert_entry(value)?; + /// let occupied =3D entry.insert_entry(value, None)?; /// assert_eq!(occupied.index(), 42); /// } /// @@ -120,8 +127,12 @@ pub fn insert(mut self, value: T) -> Result, StoreError> { /// /// # Ok::<(), kernel::error::Error>(()) /// ``` - pub fn insert_entry(mut self, value: T) -> Result, StoreError> { - let new =3D self.state.insert(value)?; + pub fn insert_entry( + mut self, + value: T, + preload: Option<&mut XArraySheaf<'_>>, + ) -> Result, StoreError> { + let new =3D self.state.insert(value, preload)?; =20 Ok(OccupiedEntry::<'a, 'b, T> { state: self.state, --=20 2.51.2