From: jongan.kim@lge.com
To: aliceryhl@google.com, a.hindborg@kernel.org, arve@android.com, bjorn3_gh@protonmail.com, boqun.feng@gmail.com, brauner@kernel.org, cmllamas@google.com, dakr@kernel.org, daniel.almeida@collabora.com, gary@garyguo.net, gregkh@linuxfoundation.org, tamird@gmail.com, tkjos@android.com, tmgross@umich.edu, viresh.kumar@linaro.org, vitaly.wool@konsulko.se, yury.norov@gmail.com, ojeda@kernel.org, lossin@kernel.org
Cc: heesu0025.kim@lge.com, ht.hong@lge.com, jongan.kim@lge.com, jungsu.hwang@lge.com, kernel-team@android.com, linux-kernel@vger.kernel.org, rust-for-linux@vger.kernel.org, sanghun.lee@lge.com, seulgi.lee@lge.com, sunghoon.kim@lge.com
Subject: [PATCH v4 3/3] rust_binder: fix PID namespace collision for freeze operation
Date: Fri, 6 Feb 2026 17:53:36 +0900
Message-Id: <20260206085336.32819-4-jongan.kim@lge.com>
In-Reply-To: <20260206085336.32819-1-jongan.kim@lge.com>
References: <20260206085336.32819-1-jongan.kim@lge.com>
MIME-Version: 1.0
Content-Type: text/plain; charset="utf-8"

From: HeeSu Kim <heesu0025.kim@lge.com>

Port the PID namespace conversion logic from the C binder driver to the
Rust implementation. Without this conversion, a freeze operation issued
from a non-init PID namespace can match the wrong process when PID
numbers collide across namespaces. Add the proper conversion so that
freeze operations always target the intended process.

Fix the issue by:
- Adding get_task_from_vpid() to translate a VPID into a Task reference
- Adding Context::get_procs_with_task() for Task-based process lookup,
  taking a &mut KVec parameter to avoid intermediate allocations
- Updating get_frozen_status() and ioctl_freeze() to compare Task
  references instead of PID numbers

Suggested-by: Gary Guo <gary@garyguo.net>
Link: https://lore.kernel.org/rust-for-linux/DG5CFX3ML5YL.2FE913F20LNPT@garyguo.net/
Suggested-by: Alice Ryhl <aliceryhl@google.com>
Link: https://lore.kernel.org/lkml/aXs5Y3xAFKyZr6nd@google.com/
Signed-off-by: Heesu Kim <heesu0025.kim@lge.com>
---
v3 -> v4:
- Reword the subject line to describe the fix more clearly
- Use Task pointer comparison instead of PID number comparison
- Remove the PidNamespace dependency entirely
- Use a &mut KVec parameter to avoid an intermediate allocation
- Merge the context.rs and process.rs changes into a single patch

v2 -> v3:
- Use the task::Pid typedef instead of u32/i32
- Use PidNamespace::init_ns() instead of init_pid_ns()
- Compare PidNamespace directly with == instead of via raw pointers
- Use Pid::find_vpid() and pid.pid_task() (dropped the _with_guard suffix)
- Fix rustfmt import ordering (rcu before Arc)
- Rename the TaskPid alias to PidT to indicate the pid_t type more clearly
- Use task.group_leader().pid() instead of tgid_nr_ns() for consistency with C

 drivers/android/binder/context.rs | 16 +++++++++++++++-
 drivers/android/binder/process.rs | 25 +++++++++++++++++++------
 2 files changed, 34 insertions(+), 7 deletions(-)

diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
index 3d135ec03ca7..1fc779e4d9ce 100644
--- a/drivers/android/binder/context.rs
+++ b/drivers/android/binder/context.rs
@@ -9,7 +9,7 @@
     security,
     str::{CStr, CString},
     sync::{Arc, Mutex},
-    task::Kuid,
+    task::{Kuid, Task},
 };
 
 use crate::{error::BinderError, node::NodeRef, process::Process};
@@ -177,4 +177,18 @@ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>>
         }
         Ok(backing)
     }
+
+    pub(crate) fn get_procs_with_task(
+        &self,
+        target: &Task,
+        out: &mut KVec<Arc<Process>>,
+    ) -> Result {
+        let lock = self.manager.lock();
+        for proc in &lock.all_procs {
+            if core::ptr::eq(&*proc.task, target) {
+                out.push(Arc::from(proc), GFP_KERNEL)?;
+            }
+        }
+        Ok(())
+    }
 }
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index 132055b4790f..58e816f8873f 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -22,6 +22,7 @@
     id_pool::IdPool,
     list::{List, ListArc, ListArcField, ListLinks},
     mm,
+    pid::Pid,
     prelude::*,
     rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
     seq_file::SeqFile,
@@ -29,9 +30,9 @@
     sync::poll::PollTable,
     sync::{
         lock::{spinlock::SpinLockBackend, Guard},
-        Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+        rcu, Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
     },
-    task::Task,
+    task::{Pid as PidT, Task},
     types::ARef,
     uaccess::{UserSlice, UserSliceReader},
     uapi,
@@ -1498,17 +1499,29 @@ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
     }
 }
 
+/// Get Task reference from VPID with refcount increment.
+fn get_task_from_vpid(pid: PidT) -> Result<ARef<Task>> {
+    let rcu_guard = rcu::read_lock();
+    let pid_struct = Pid::find_vpid(pid, &rcu_guard).ok_or(ESRCH)?;
+    let task = pid_struct.pid_task(&rcu_guard).ok_or(ESRCH)?;
+
+    Ok(ARef::from(task))
+}
+
 fn get_frozen_status(data: UserSlice) -> Result {
     let (mut reader, mut writer) = data.reader_writer();
 
     let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+
+    let target_task = get_task_from_vpid(info.pid as PidT)?;
+
     info.sync_recv = 0;
     info.async_recv = 0;
     let mut found = false;
 
     for ctx in crate::context::get_all_contexts()? {
         ctx.for_each_proc(|proc| {
-            if proc.task.pid() == info.pid as _ {
+            if core::ptr::eq(&*proc.task, &*target_task) {
                 found = true;
                 let inner = proc.inner.lock();
                 let txns_pending = inner.txns_pending_locked();
@@ -1530,15 +1543,15 @@ fn get_frozen_status(data: UserSlice) -> Result {
 fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
     let info = reader.read::<BinderFreezeInfo>()?;
 
+    let target_task = get_task_from_vpid(info.pid as PidT)?;
+
     // Very unlikely for there to be more than 3, since a process normally uses at most binder and
     // hwbinder.
     let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
 
     let ctxs = crate::context::get_all_contexts()?;
     for ctx in ctxs {
-        for proc in ctx.get_procs_with_pid(info.pid as i32)? {
-            procs.push(proc, GFP_KERNEL)?;
-        }
+        ctx.get_procs_with_task(&target_task, &mut procs)?;
     }
 
     for proc in procs {
-- 
2.25.1
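
For reviewers who have not followed patches 1-2 of this series, the pattern the
diff relies on boils down to the standalone sketch below. It is a simplified
illustration, not the driver code itself; it assumes the kernel::pid::Pid
bindings (Pid::find_vpid(), pid_task()) introduced earlier in this series, and
the resolve_target()/is_target() names are made up for the example:

use kernel::{
    pid::Pid,
    prelude::*,
    sync::rcu,
    task::{Pid as PidT, Task},
    types::ARef,
};

/// Resolve a userspace-supplied VPID, interpreted in the caller's PID
/// namespace, into a refcounted Task (mirrors get_task_from_vpid() above).
fn resolve_target(vpid: PidT) -> Result<ARef<Task>> {
    // The pid -> task walk must happen under the RCU read lock.
    let rcu_guard = rcu::read_lock();
    let pid_struct = Pid::find_vpid(vpid, &rcu_guard).ok_or(ESRCH)?;
    let task = pid_struct.pid_task(&rcu_guard).ok_or(ESRCH)?;
    // ARef::from() takes a refcount on the task, so the reference stays
    // valid after the RCU guard is dropped.
    Ok(ARef::from(task))
}

/// Match a binder process against the resolved target by task identity
/// rather than by PID number, so a numerically identical PID from another
/// namespace can never select the wrong process.
fn is_target(proc_task: &Task, target: &ARef<Task>) -> bool {
    core::ptr::eq(proc_task, &**target)
}

The key point is that the PID number is converted to a task reference exactly
once, at the ioctl entry point, and every subsequent comparison uses the
task's identity, which is independent of PID namespaces.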