From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 27A5B33A033; Tue, 24 Mar 2026 00:45:56 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313157; cv=none; b=Jsu0BmMZm+Ni2mGvwe4+QANTt4E0rl0K6OeogTl4uElxnCKGb9fsIw0r7u430JgmWehdESKXZmD+N4DgRWj5qD6rsKgiq5plsWgkxrkgDNtFICJ0RILe7c1eMy5MDmFQ6g6RWz8s8VcGwYYEF20h20ywi+afygJlqE1vXcYkq8Q= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313157; c=relaxed/simple; bh=ucyDrz+YIvqZS8qQMRZNMN6O3iLyvvXdEPYmMh/DcuE=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=F1RYrPTWdgEs01EjdtdGTwBsr8fqjdmdAajwwQD2reKsZqUB4BkXi+mMddISCo8OSzVwydRj0xLrCHeR2p0ZbjeEhWAu6RBHdbmk+9vjf9aT54T4Wbi7BxbUaY9yD0d9Osu/mwFYg8ddOcC9+ErmmRk5O0/Wh1gHqMjsWoWv7TM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=CsNnSPfX; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="CsNnSPfX" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313156; x=1805849156; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=ucyDrz+YIvqZS8qQMRZNMN6O3iLyvvXdEPYmMh/DcuE=; 
b=CsNnSPfXCSUItxho7MXjdp9jBniyS84Ch1S9o3NkYvA833QkOxEzSKB4 xao7DsUyVduv3uk8iAR/8hRQeLsNq0Xc/1JOq8V8olA7PYyPMEwMbxw6q PWt6TpwoiAb6hz1Gf/QgJcCa5J8Ynn2onsiCA74lmG/2ajP9tmYOAq00t 0EF/1XCZ4ro6q1ZhM+MhGmbQi5ys0XMd3nn49lYfX3yRe8usQDHLCrtX7 VwnckFg+Jv1ipGYY1fvEbAB+/vCgDoY4NwH21y/M+FDicTOs+ZGFpY87R 5NfOnAi1ZT+uMx+TOyppRCSFK3J36eiJaoQTcCYbD/XQvdZUE14LmmS3j A==; X-CSE-ConnectionGUID: 8P8lAEMgQJ6+NQ/GHCIlbg== X-CSE-MsgGUID: h1FsJzo6Q+GEomTTsGcFDg== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86396932" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86396932" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:45:56 -0700 X-CSE-ConnectionGUID: NU8Cgf+dSLGYuYeb1NXdHQ== X-CSE-MsgGUID: dFxChUNjRWuqC8LEfI37sw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322564" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:45:51 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 01/24] perf/x86: Move hybrid PMU initialization before x86_pmu_starting_cpu() Date: Tue, 24 Mar 2026 08:40:55 +0800 Message-Id: <20260324004118.3772171-2-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" 
The current approach initializes hybrid PMU structures immediately before registering them. This is risky as it can lead to key fields, such as 'capabilities', being inadvertently overwritten. Although no issues have arisen so far, this method is not ideal. It makes the PMU structure fields susceptible to being overwritten, especially with future changes that might initialize fields like 'capabilities' within init_hybrid_pmu() called by x86_pmu_starting_cpu(). To mitigate this potential problem, move the default hybrid structure initialization before calling x86_pmu_starting_cpu(). Signed-off-by: Dapeng Mi --- V7: new patch. arch/x86/events/core.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 03ce1bc7ef2e..67883cf1d675 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2189,8 +2189,20 @@ static int __init init_hw_perf_events(void) =20 pmu.attr_update =3D x86_pmu.attr_update; =20 - if (!is_hybrid()) + if (!is_hybrid()) { x86_pmu_show_pmu_cap(NULL); + } else { + int i; + + /* + * Init default ops. + * Must be called before registering x86_pmu_starting_cpu(), + * otherwise some key PMU fields, e.g., capabilities + * initialized in x86_pmu_starting_cpu(), would be overwritten. 
+ */ + for (i =3D 0; i < x86_pmu.num_hybrid_pmus; i++) + x86_pmu.hybrid_pmu[i].pmu =3D pmu; + } =20 if (!x86_pmu.read) x86_pmu.read =3D _x86_pmu_read; @@ -2237,7 +2249,6 @@ static int __init init_hw_perf_events(void) for (i =3D 0; i < x86_pmu.num_hybrid_pmus; i++) { hybrid_pmu =3D &x86_pmu.hybrid_pmu[i]; =20 - hybrid_pmu->pmu =3D pmu; hybrid_pmu->pmu.type =3D -1; hybrid_pmu->pmu.attr_update =3D x86_pmu.attr_update; hybrid_pmu->pmu.capabilities |=3D PERF_PMU_CAP_EXTENDED_HW_TYPE; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C821530B51D; Tue, 24 Mar 2026 00:46:00 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313162; cv=none; b=t/zRuz/pkDhKoAXq4WQnm6f1OMdhfJNRgP59VbjcfzbhxZ2+ezPGMPVb9RiRmefT+OMudxb4YSkElbG+rcOpWUMrb9orjVRbGgoVd8T/D8G+kxV0c/wpuMbx4DJyKdJy349sZiuB1J4jWD28TWsE3w9GkG0DyPOGXQkmFil7kP0= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313162; c=relaxed/simple; bh=nZdptApk2egtKxE0jTlQ4OhGVZIz8u+Eemv+BFkILIo=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=Wcvof6jnQoKkDKdpvX7TndARZ9qdquA7X8hvTvWi7S+eidDzEJ0Ipj67PHpnJC+dPvr3vouTIYV6n9+D5SvsRiQmycbaobuZ2Ggx5iYn+WDNm7ngdgIqOCymC9YU5SeW17T/pAzcVVMiLOVccv0zZsUpgQ1Wj+391rryk/BuyLA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=WQp28z9m; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: 
smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="WQp28z9m" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313161; x=1805849161; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=nZdptApk2egtKxE0jTlQ4OhGVZIz8u+Eemv+BFkILIo=; b=WQp28z9mHWYjEAJdB/uA6zU/UNDI3N9EM+s7v0Yj1z4IY5ePoObQAaN+ OmAz5qNheOLX6gH1NjY+ljnS5QtX1cAd6E7aUO65PELYD50UhEK2bBKQW Z9V98CqUrmzHqFUFDDI4UEEbwsX6CM6NwKgc/K9qAOdJA/CzV4NlOrxzx cWv6VvK4gKrIIaPrXe4hQpBFRHFSS1sI4KhwBDuFmu7lnaej5N0VNaSuQ k09K45cX1nZ3IhOary3ljEt/oPViClORxX/EKISesJQLw+SKnWN23qwKk 07usilet8jrtgKilNRrih+x8Don9DRwFucPPmceBicMFvpNFBQxLzQtOP A==; X-CSE-ConnectionGUID: Y+TyXdzST3GT8HcRUtATGw== X-CSE-MsgGUID: kNs6lyG4R5+zPIxtL/T/1w== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86396953" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86396953" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:00 -0700 X-CSE-ConnectionGUID: EEgbxxN/SxacT6MBoQsc+A== X-CSE-MsgGUID: IZA98ioZTEKLPaqdBs1/YA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322582" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:45:56 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi , Yi Lai Subject: [Patch v7 02/24] perf/x86/intel: Avoid PEBS event on fixed counters without 
extended PEBS Date: Tue, 24 Mar 2026 08:40:56 +0800 Message-Id: <20260324004118.3772171-3-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Before the introduction of extended PEBS, PEBS supported only general-purpose (GP) counters. In a virtual machine (VM) environment, the PEBS_BASELINE bit in PERF_CAPABILITIES may not be set, but the PEBS format could be indicated as 4 or higher. In such cases, PEBS events might be scheduled to fixed counters, and writing the corresponding bits into the PEBS_ENABLE MSR could cause a #GP fault. To fix this issue, enhance intel_pebs_constraints() to avoid scheduling PEBS events on fixed counters if extended PEBS is not supported. Reported-by: Yi Lai Signed-off-by: Dapeng Mi --- V2: Restrict PEBS events work on only GP counters if no PEBS-baseline suggested instead of limiting cpuc->pebs_enabled to PEBS capable counters in v1. arch/x86/events/intel/ds.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 5027afc97b65..49af127bff68 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1557,6 +1557,14 @@ struct event_constraint *intel_pebs_constraints(stru= ct perf_event *event) if (pebs_constraints) { for_each_event_constraint(c, pebs_constraints) { if (constraint_match(c, event->hw.config)) { + /* + * If fixed counters are suggested in the constraints, + * but extended PEBS is not supported, empty constraint + * should be returned. 
+ */ + if ((c->idxmsk64 & ~PEBS_COUNTER_MASK) && + !(x86_pmu.flags & PMU_FL_PEBS_ALL)) + break; event->hw.flags |=3D c->flags; return c; } --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 657FF2F5485; Tue, 24 Mar 2026 00:46:05 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313166; cv=none; b=oN/q3rma7pTNQw4FjVzZdrqYaAHSXWhKH6URpRASzlHtVzwIKeipqavcAB98mmW4Q5BZFVpjveehJJX10AxSqP/XZYkQBPo1Vw7Rpcvr+yWxnF5R/3ZYze9GNvqThDXikuHbtLLTfyTWW2FXfA/+kimpQIwtOl/sSfOcpcF+CBY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313166; c=relaxed/simple; bh=shyme+Ceqr9QBtXLxUZr4ZksB/qjm5QkTlR85zNhOig=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=EQUDHzJB6mOAlZXmZlvPZiaMGjw1tS0//hAOUjwC43ornX7pcCLKcNzp1GEU6Fu+0wDa+9PO/DbFBzf2k1rsWyav4xua9KVeHm+MPf1HO1v+jSg+v0C/Wad8gTOy8IgpVPbEiw8AnLcJu+ImRpYx8/RCkUOpRf3F+kfOyt47Qpk= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=adjkDANZ; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="adjkDANZ" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313165; x=1805849165; 
h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=shyme+Ceqr9QBtXLxUZr4ZksB/qjm5QkTlR85zNhOig=; b=adjkDANZrcP2WZM5kfEXGR4fFwnO+qoOJvU1h91gPTYfDq1qZShS12Ui 73qsbhK280CWRgqRP8PrsknzTa33FYi2EECjqiSgphDfQCDrqHjVE+gM0 J8uTSA0YowFJWqIjQncP5GX68S+x4iVG5Afd1XdFvlz9ipQ+U8ySex18L 3HKaEbbcwwpM0Wia6+bEQFyTRfUN7t7HLwCh+/eWqWD+ZvMyaeSczDYTG LhUpneHGnnpdTQ3mEnhf3OksSsv+NWA6LuDdFyR1a0z9vWUJLmGeamA1S Ln0Zn6Yi4jKUPevrIWTfuXXyDx+zsZK2QvNxRe30194zqO/thFjsPCadB g==; X-CSE-ConnectionGUID: NvCHcy47QcmXA6bfY5D2kQ== X-CSE-MsgGUID: JJjV710dSuC66+Bo1Z+uKg== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86396973" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86396973" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:05 -0700 X-CSE-ConnectionGUID: npP+7K6hQYeDwDdBZ6Xj6w== X-CSE-MsgGUID: YVQuJWRsTJu8Xu2nDuD+jg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322596" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:01 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 03/24] perf/x86/intel: Enable large PEBS sampling for XMMs Date: Tue, 24 Mar 2026 08:40:57 +0800 Message-Id: <20260324004118.3772171-4-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: 
List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Modern PEBS hardware supports directly sampling XMM registers, then large PEBS can be enabled for XMM registers just like other GPRs. Reported-by: Xudong Hao Signed-off-by: Dapeng Mi --- arch/x86/events/intel/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 4768236c054b..5a2b1503b6a5 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4423,7 +4423,8 @@ static unsigned long intel_pmu_large_pebs_flags(struc= t perf_event *event) flags &=3D ~PERF_SAMPLE_REGS_USER; if (event->attr.sample_regs_user & ~PEBS_GP_REGS) flags &=3D ~PERF_SAMPLE_REGS_USER; - if (event->attr.sample_regs_intr & ~PEBS_GP_REGS) + if (event->attr.sample_regs_intr & + ~(PEBS_GP_REGS | PERF_REG_EXTENDED_MASK)) flags &=3D ~PERF_SAMPLE_REGS_INTR; return flags; } --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 587F834CFD7; Tue, 24 Mar 2026 00:46:10 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313171; cv=none; b=QzePs+1sLLILd0Zh06BoYznASCn1K5P4I/Ra+AqhNLzX+qUFuzX5846DIoVaIS3U2fn+lwBceK2sUZmbMLbluAG7KxohLccG82tHl10rm0VvMKkU40/PEHBm8MYECYLTmTPKs8UqPNloPeVf4Jw0h1STzYZKxXG2IsPpdf30ocA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313171; c=relaxed/simple; bh=F6zSDvY2cAEEmHpKsROXdQb5nFcmwiqGgFuqWyyAeQM=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; 
b=H1wpvRrCUYYh36PpyBYIiIKdh+zpI/hT73w1rY8ljGIaxxn5sSXmIdY/96hWtTBDi7ixAZgGd+Hr6zsrg7oD3PCiq1iJOsxvjDMcgnVGx/CL2m2qVk92ec+Hb+p3bvIHa2M+QKJ2AL8iEul99Ty/d9Cj+Cr7ickn67mYf/MM6jw= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=ATLkMY37; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="ATLkMY37" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313170; x=1805849170; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=F6zSDvY2cAEEmHpKsROXdQb5nFcmwiqGgFuqWyyAeQM=; b=ATLkMY37z+Lab6CCz8NFPafKcIK/1mx7ObYTsDXuPdvqo6TFZCHwo8qv 5jCki+lLc+uylfM3u+Zn+I9ikh2Al0K6qaK8Rj22YdkAohtcVwZDlKUuJ 3surpbTN8h9moT3pvtuZI7BoPk01ZRC0XRuDkPQj3sQSZIc75Ldt6T1C8 /kjNCwtH0BxRz2TgU94MiTQM6sLFlyNQ24XEK2VVJcIWfK5qnd7YNEB2J EMcppgWbVxLT6nNH7L2XXyKQjpj3aGuvIxpkWK1Vzl7V/n79b00ERHIj0 poZeAYxLHtVZals2t2XYNThqQB1Fhi0mnRQUrs8O4UzfW19/4k62TdXFa Q==; X-CSE-ConnectionGUID: T/ok0zVJTDS8tBbVEKRlyg== X-CSE-MsgGUID: 2dUyC+s6RwWf2cBTwTf2LA== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397007" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397007" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:10 -0700 X-CSE-ConnectionGUID: r5xmhYw/T32/l1gGkLNmNA== X-CSE-MsgGUID: +FpuexhIRe+8sS8y2jQwZA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322627" Received: from spr.sh.intel.com 
([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:05 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 04/24] perf/x86/intel: Convert x86_perf_regs to per-cpu variables Date: Tue, 24 Mar 2026 08:40:58 +0800 Message-Id: <20260324004118.3772171-5-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Currently, the intel_pmu_drain_pebs_icl() and intel_pmu_drain_arch_pebs() helpers define many temporary variables. Upcoming patches will add new fields like *ymm_regs and *zmm_regs to the x86_perf_regs structure to support sampling for these SIMD registers. This would increase the stack size consumed by these helpers, potentially triggering the warning: "the frame size of 1048 bytes is larger than 1024 bytes [-Wframe-larger-than=3D]". To eliminate this warning, convert x86_perf_regs to per-cpu variables. No functional changes are intended. 
Signed-off-by: Dapeng Mi --- arch/x86/events/intel/ds.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 49af127bff68..52eb6eac5df3 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -3179,14 +3179,16 @@ __intel_pmu_handle_last_pebs_record(struct pt_regs = *iregs, =20 } =20 +static DEFINE_PER_CPU(struct x86_perf_regs, x86_pebs_regs); + static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sa= mple_data *data) { short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] =3D {}; void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS]; struct cpu_hw_events *cpuc =3D this_cpu_ptr(&cpu_hw_events); struct debug_store *ds =3D cpuc->ds; - struct x86_perf_regs perf_regs; - struct pt_regs *regs =3D &perf_regs.regs; + struct x86_perf_regs *perf_regs =3D this_cpu_ptr(&x86_pebs_regs); + struct pt_regs *regs =3D &perf_regs->regs; struct pebs_basic *basic; void *base, *at, *top; u64 mask; @@ -3236,8 +3238,8 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs = *iregs, void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS]; struct cpu_hw_events *cpuc =3D this_cpu_ptr(&cpu_hw_events); union arch_pebs_index index; - struct x86_perf_regs perf_regs; - struct pt_regs *regs =3D &perf_regs.regs; + struct x86_perf_regs *perf_regs =3D this_cpu_ptr(&x86_pebs_regs); + struct pt_regs *regs =3D &perf_regs->regs; void *base, *at, *top; u64 mask; =20 --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id DF85533A6E9; Tue, 24 Mar 2026 00:46:14 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313176; cv=none; 
b=Wnf9QaQBWkNQsgYE1MEg2SmVOFZkeF6zZZ3Su4coWtlJTS4UWbViuQ/7Mxol7aC3R9CYqpya9c5AfbUHndcxFJegQIgnmty5AJU3vQgVkYRGFNI5zzBPIw/jR0AKFktAES+TP2CvFBMkG77Q9C0VB42pQXCl3ka3BIsUHHx8Opc= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313176; c=relaxed/simple; bh=zodwL5+xJi/IO70cB6B5L6mqs+2tijSFx7bH9iRFOXs=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=aovmFKJZZ82/8ff1jK3PQg0mME+KyYrS4l/yId9r/eQAF/FuPf2Er6DFkqfaO6EaydIE9Fo6Dp3i/uWJ4iMx6UIPunxZ4MJondKjCLDxhHr7T1BskIDb8PTaaoM92tP4bzORIZpcE9YVFonSi79zXJtZm9N2EUpunB9AJhJKlFc= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=S/+TLhlB; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="S/+TLhlB" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313175; x=1805849175; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=zodwL5+xJi/IO70cB6B5L6mqs+2tijSFx7bH9iRFOXs=; b=S/+TLhlBJxTSRtmblXGUGanpT1btzeASWlXx4P6QKOSQR+6G6Ggr5khM 9sZIBFwdroZq2v3wXC5pfWDgjSA9+Kn6oudo/lP9duuC326P5Si7zKvK0 q3MK0LdVkH4o2BJJ6tkFi8IrCGbhfRizLlQ1iMSIR/WfnSCNHiaNpgb/o T88ZlnjZB7bW/C+LhpWB6pds1mYnDGL8t33f0CNZpGbIpN5EwZU48eBdk pExddFwslSwHuRM5o08eKBjKfu8A7Imzdq4RhhCzNrWA6O+kGn2wsE7UR NqB1I0t99dNCZzVWupNn9Me7RzZlACRAMYSkbWcJqS/F+syKsVG9jWy7B A==; X-CSE-ConnectionGUID: NdKBAkNESWeGClejHzKGPg== X-CSE-MsgGUID: 1kZthIy2SU+xJoQ+lwiJuw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397034" X-IronPort-AV: 
E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397034" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:14 -0700 X-CSE-ConnectionGUID: xf8DvQtxR1WNQ5ZVm/1GcQ== X-CSE-MsgGUID: zsGQ1Up0TLCC44xv6PaFYg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322659" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:10 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 05/24] perf: Eliminate duplicate arch-specific function definitions Date: Tue, 24 Mar 2026 08:40:59 +0800 Message-Id: <20260324004118.3772171-6-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Define default common __weak functions for perf_reg_value(), perf_reg_validate(), perf_reg_abi() and perf_get_regs_user(). This helps to eliminate the duplicated arch-specific definitions. No functional changes intended. 
Signed-off-by: Dapeng Mi --- arch/arm/kernel/perf_regs.c | 6 ------ arch/arm64/kernel/perf_regs.c | 6 ------ arch/csky/kernel/perf_regs.c | 6 ------ arch/loongarch/kernel/perf_regs.c | 6 ------ arch/mips/kernel/perf_regs.c | 6 ------ arch/parisc/kernel/perf_regs.c | 6 ------ arch/riscv/kernel/perf_regs.c | 6 ------ arch/x86/kernel/perf_regs.c | 6 ------ include/linux/perf_regs.h | 32 ++++++------------------------- kernel/events/core.c | 22 +++++++++++++++++++++ 10 files changed, 28 insertions(+), 74 deletions(-) diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c index 0529f90395c9..d575a4c3ca56 100644 --- a/arch/arm/kernel/perf_regs.c +++ b/arch/arm/kernel/perf_regs.c @@ -31,9 +31,3 @@ u64 perf_reg_abi(struct task_struct *task) return PERF_SAMPLE_REGS_ABI_32; } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index b4eece3eb17d..70e2f13f587f 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -98,9 +98,3 @@ u64 perf_reg_abi(struct task_struct *task) return PERF_SAMPLE_REGS_ABI_64; } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c index 09b7f88a2d6a..94601f37b596 100644 --- a/arch/csky/kernel/perf_regs.c +++ b/arch/csky/kernel/perf_regs.c @@ -31,9 +31,3 @@ u64 perf_reg_abi(struct task_struct *task) return PERF_SAMPLE_REGS_ABI_32; } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf= _regs.c index 
263ac4ab5af6..8dd604f01745 100644 --- a/arch/loongarch/kernel/perf_regs.c +++ b/arch/loongarch/kernel/perf_regs.c @@ -45,9 +45,3 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return regs->regs[idx]; } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/mips/kernel/perf_regs.c b/arch/mips/kernel/perf_regs.c index e686780d1647..7736d3c5ebd2 100644 --- a/arch/mips/kernel/perf_regs.c +++ b/arch/mips/kernel/perf_regs.c @@ -60,9 +60,3 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return (s64)v; /* Sign extend if 32-bit. */ } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/parisc/kernel/perf_regs.c b/arch/parisc/kernel/perf_regs.c index 10a1a5f06a18..b9fe1f2fcb9b 100644 --- a/arch/parisc/kernel/perf_regs.c +++ b/arch/parisc/kernel/perf_regs.c @@ -53,9 +53,3 @@ u64 perf_reg_abi(struct task_struct *task) return PERF_SAMPLE_REGS_ABI_64; } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/riscv/kernel/perf_regs.c b/arch/riscv/kernel/perf_regs.c index fd304a248de6..3bba8deababb 100644 --- a/arch/riscv/kernel/perf_regs.c +++ b/arch/riscv/kernel/perf_regs.c @@ -35,9 +35,3 @@ u64 perf_reg_abi(struct task_struct *task) #endif } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 624703af80a1..81204cb7f723 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -100,12 +100,6 @@ u64 perf_reg_abi(struct task_struct *task) 
return PERF_SAMPLE_REGS_ABI_32; } =20 -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} #else /* CONFIG_X86_64 */ #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ (1ULL << PERF_REG_X86_ES) | \ diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index f632c5725f16..144bcc3ff19f 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -9,6 +9,12 @@ struct perf_regs { struct pt_regs *regs; }; =20 +u64 perf_reg_value(struct pt_regs *regs, int idx); +int perf_reg_validate(u64 mask); +u64 perf_reg_abi(struct task_struct *task); +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs); + #ifdef CONFIG_HAVE_PERF_REGS #include =20 @@ -16,35 +22,9 @@ struct perf_regs { #define PERF_REG_EXTENDED_MASK 0 #endif =20 -u64 perf_reg_value(struct pt_regs *regs, int idx); -int perf_reg_validate(u64 mask); -u64 perf_reg_abi(struct task_struct *task); -void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs); #else =20 #define PERF_REG_EXTENDED_MASK 0 =20 -static inline u64 perf_reg_value(struct pt_regs *regs, int idx) -{ - return 0; -} - -static inline int perf_reg_validate(u64 mask) -{ - return mask ? 
-ENOSYS : 0; -} - -static inline u64 perf_reg_abi(struct task_struct *task) -{ - return PERF_SAMPLE_REGS_ABI_NONE; -} - -static inline void perf_get_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) -{ - regs_user->regs =3D task_pt_regs(current); - regs_user->abi =3D perf_reg_abi(current); -} #endif /* CONFIG_HAVE_PERF_REGS */ #endif /* _LINUX_PERF_REGS_H */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 5eeae8636996..eb1dea2b1b0e 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7731,6 +7731,28 @@ unsigned long perf_instruction_pointer(struct perf_e= vent *event, return perf_arch_instruction_pointer(regs); } =20 +u64 __weak perf_reg_value(struct pt_regs *regs, int idx) +{ + return 0; +} + +int __weak perf_reg_validate(u64 mask) +{ + return mask ? -ENOSYS : 0; +} + +u64 __weak perf_reg_abi(struct task_struct *task) +{ + return PERF_SAMPLE_REGS_ABI_NONE; +} + +void __weak perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs) +{ + regs_user->regs =3D task_pt_regs(current); + regs_user->abi =3D perf_reg_abi(current); +} + static void perf_output_sample_regs(struct perf_output_handle *handle, struct pt_regs *regs, u64 mask) --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id B8B773368A9; Tue, 24 Mar 2026 00:46:19 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313180; cv=none; b=jhmuqhXqYapkYCM0uXAeyX3Z+lJ9Ku8v1L/Z7iKA4B0qjS7XVkiQCHJ5tdSCyEecPHdhQ+Qa4cMejTa9nKMJJ3ppMLrDOpZSANiXNXrglsUKAyURAN8X0gr7kpgZh+7VmyjRKhvsK9FabEj5dZKazCwKHf/3r///Iw40XJtFOoU= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313180; c=relaxed/simple; 
bh=YPwdksZY9flUYU+BXIQ78U8LxMq1uu73KMPaOrlJlDE=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=dxMpyiduAgSfwCQkO77e6Jbha97U/6U4lBU78nIUJU8XfPUbpt8W74+XlJqsDpeL0I88YapgHqvE+11hZKi5TqjJHmWp27U4nfLKc3wb78z+4AC5PU3fEhkGAdhvyTBFQTB6ArGxDsaBeRINENus2jSj0109v9G9vyCVBOt/aqU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=jsZIjjPq; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="jsZIjjPq" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313180; x=1805849180; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=YPwdksZY9flUYU+BXIQ78U8LxMq1uu73KMPaOrlJlDE=; b=jsZIjjPqfT6R9n6675RcPiafeCS7FdQCYIfddTfz7ApFjqpX7+qFqPNI 6xKhUOky6IxFiPB7XI4Fg+WDpDQdi7rsfeawGvsYHucou05uJ5Q2gWjeM 5PoN6aMFVNUXaV/rL9FNbN1y9tnZBKv0fkavnWZgB40eowtKEH0qLmFDr NwTN8picqGIcTbiAQbTQTU6uHC3M4BbR5NB58f+9/BnaiDjeno6XAGJXk +QRoBwupPaf5nkYHx4dMCygvUn0r+kEdSLBoRnQG4om76AXguLBqh7ITF s4J33lDjJv8n9uFf/519WC5KSJ6SanP/qTEzbWdZWNJbN7dzxoXkebR3j Q==; X-CSE-ConnectionGUID: 89ZWDUocS2KJi7KFyqSnUQ== X-CSE-MsgGUID: FfwFUtnfSsKxHpxEL2Zrtw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397054" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397054" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:19 -0700 X-CSE-ConnectionGUID: vorp4ajcQySGM1M1Frrl4A== X-CSE-MsgGUID: 
muSkuVMvSVC2hSm+hgElfw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322684" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:15 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 06/24] perf/x86: Use x86_perf_regs in the x86 nmi handler Date: Tue, 24 Mar 2026 08:41:00 +0800 Message-Id: <20260324004118.3772171-7-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang More and more regs will be supported in the overflow, e.g., more vector registers, SSP, etc. The generic pt_regs struct cannot store all of them. Use a X86 specific x86_perf_regs instead. The struct pt_regs *regs is still passed to x86_pmu_handle_irq(). There is no functional change for the existing code. AMD IBS's NMI handler doesn't utilize the static call x86_pmu_handle_irq(). The x86_perf_regs struct doesn't apply to the AMD IBS. It can be added separately later when AMD IBS supports more regs. Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- V7: use per-cpu x86_intr_regs to replace temporary variable in v6. 
arch/x86/events/core.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 67883cf1d675..ad6cbc19592d 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1782,9 +1782,11 @@ void perf_put_guest_lvtpc(void) EXPORT_SYMBOL_FOR_KVM(perf_put_guest_lvtpc); #endif /* CONFIG_PERF_GUEST_MEDIATED_PMU */ =20 +static DEFINE_PER_CPU(struct x86_perf_regs, x86_intr_regs); static int perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) { + struct x86_perf_regs *x86_regs =3D this_cpu_ptr(&x86_intr_regs); u64 start_clock; u64 finish_clock; int ret; @@ -1808,7 +1810,8 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_re= gs *regs) return NMI_DONE; =20 start_clock =3D sched_clock(); - ret =3D static_call(x86_pmu_handle_irq)(regs); + x86_regs->regs =3D *regs; + ret =3D static_call(x86_pmu_handle_irq)(&x86_regs->regs); finish_clock =3D sched_clock(); =20 perf_sample_event_took(finish_clock - start_clock); --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 6D4F233B6F9; Tue, 24 Mar 2026 00:46:24 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313185; cv=none; b=t5pJDxmsmh7AIz31h7isgySnXvDp66SYa9cGObB614UFQ67f5b6DttYOuYYW6f6mTcE0KV2IyqTnAR0is2bRfj33Jw/fnnW17FgBk+dbyLgoBsIMfVyqa9kb8rbxVji8DEy66TpkUvEmRDN4hz2hueGaOfd+9+QQw418ADbdQRs= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313185; c=relaxed/simple; bh=H6ySoMCaHGo87/RTioxATtj06rUnRQVpMFCMIsPks/4=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; 
b=WTYtNU9QlDQVpCXMFwtD3R7CHHhNJFpR2o5Z78ks28Y5DSo0lzTHUeoIb+Kak2fe/aQwdimYYwVNVpdynYNXKAX+ylG6s9tHYueB0pvnP2Fl4KXZDH64lahRd1eExmU5rpqVB4bc3d/AOAb7VLqCtDVVcyt9EimLfIflbv0Lx24= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=EETBZ/z8; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="EETBZ/z8" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313184; x=1805849184; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=H6ySoMCaHGo87/RTioxATtj06rUnRQVpMFCMIsPks/4=; b=EETBZ/z8ADXxY3Yan5Tu1YqPgW/qarU3Yr6DhqIAKE70wvCP82QQqYVP ntiXgMeRIw7lZPCOke9DaHXNCrkx4pWxWPurIwWaRI5LW4Vy/AU9X1VRe 2yTIeRj1YfxKSEJJPHAOIVXN9CyByX3kcU3OsG0ZWPLlviZ01nQSXJmPW 4pNdMCbghi5tz3/JY4tP+IOtzSJ+uk9/Yh6DWqB43U8l3I582cuBu82nB 06Yv6zoekXIuLOVMVRrCwoJw6bhr8zTt0EbhOybHbEBx3j4xOkiHd2wWE o58QasapC8r1oPGztF2ptjjQ1N8sSUTAz3LrqJstihIMA6+RoPdXt8h+o A==; X-CSE-ConnectionGUID: pWSPB/kvTqqp5yCBekQiVQ== X-CSE-MsgGUID: QIvWe3oXTi+3DWvOO9H9Mg== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397065" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397065" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:24 -0700 X-CSE-ConnectionGUID: VHOkmS2VSeaLWRTPWqeJMQ== X-CSE-MsgGUID: w0ehnexnRmuS22T/lGhTIw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322700" Received: from spr.sh.intel.com 
([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:20 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 07/24] perf/x86: Introduce x86-specific x86_pmu_setup_regs_data() Date: Tue, 24 Mar 2026 08:41:01 +0800 Message-Id: <20260324004118.3772171-8-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang The current perf/x86 implementation uses the generic functions perf_sample_regs_user() and perf_sample_regs_intr() to set up registers data for sampling records. While this approach works for general registers, it falls short when adding sampling support for SIMD and APX eGPRs registers on x86 platforms. To address this, we introduce the x86-specific function x86_pmu_setup_regs_data() for setting up register data on x86 platforms. At present, x86_pmu_setup_regs_data() mirrors the logic of the generic functions perf_sample_regs_user() and perf_sample_regs_intr(). Subsequent patches will introduce x86-specific enhancements. 
Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 33 +++++++++++++++++++++++++++++++++ arch/x86/events/intel/ds.c | 9 ++++++--- arch/x86/events/perf_event.h | 4 ++++ 3 files changed, 43 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index ad6cbc19592d..0a6c51e86e9b 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1699,6 +1699,39 @@ static void x86_pmu_del(struct perf_event *event, in= t flags) static_call_cond(x86_pmu_del)(event); } =20 +void x86_pmu_setup_regs_data(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct perf_event_attr *attr =3D &event->attr; + u64 sample_type =3D attr->sample_type; + + if (sample_type & PERF_SAMPLE_REGS_USER) { + if (user_mode(regs)) { + data->regs_user.abi =3D perf_reg_abi(current); + data->regs_user.regs =3D regs; + } else if (!(current->flags & PF_KTHREAD)) { + perf_get_regs_user(&data->regs_user, regs); + } else { + data->regs_user.abi =3D PERF_SAMPLE_REGS_ABI_NONE; + data->regs_user.regs =3D NULL; + } + data->dyn_size +=3D sizeof(u64); + if (data->regs_user.regs) + data->dyn_size +=3D hweight64(attr->sample_regs_user) * sizeof(u64); + data->sample_flags |=3D PERF_SAMPLE_REGS_USER; + } + + if (sample_type & PERF_SAMPLE_REGS_INTR) { + data->regs_intr.regs =3D regs; + data->regs_intr.abi =3D perf_reg_abi(current); + data->dyn_size +=3D sizeof(u64); + if (data->regs_intr.regs) + data->dyn_size +=3D hweight64(attr->sample_regs_intr) * sizeof(u64); + data->sample_flags |=3D PERF_SAMPLE_REGS_INTR; + } +} + int x86_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 52eb6eac5df3..b045297c02d0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2450,6 +2450,7 @@ static inline void __setup_pebs_basic_group(struct pe= rf_event *event, } =20 static inline void __setup_pebs_gpr_group(struct 
perf_event *event, + struct perf_sample_data *data, struct pt_regs *regs, struct pebs_gprs *gprs, u64 sample_type) @@ -2459,8 +2460,10 @@ static inline void __setup_pebs_gpr_group(struct per= f_event *event, regs->flags &=3D ~PERF_EFLAGS_EXACT; } =20 - if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) + if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) { adaptive_pebs_save_regs(regs, gprs); + x86_pmu_setup_regs_data(event, data, regs); + } } =20 static inline void __setup_pebs_meminfo_group(struct perf_event *event, @@ -2553,7 +2556,7 @@ static void setup_pebs_adaptive_sample_data(struct pe= rf_event *event, gprs =3D next_record; next_record =3D gprs + 1; =20 - __setup_pebs_gpr_group(event, regs, gprs, sample_type); + __setup_pebs_gpr_group(event, data, regs, gprs, sample_type); } =20 if (format_group & PEBS_DATACFG_MEMINFO) { @@ -2677,7 +2680,7 @@ static void setup_arch_pebs_sample_data(struct perf_e= vent *event, gprs =3D next_record; next_record =3D gprs + 1; =20 - __setup_pebs_gpr_group(event, regs, + __setup_pebs_gpr_group(event, data, regs, (struct pebs_gprs *)gprs, sample_type); } diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index fad87d3c8b2c..39c41947c70d 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1306,6 +1306,10 @@ void x86_pmu_enable_event(struct perf_event *event); =20 int x86_pmu_handle_irq(struct pt_regs *regs); =20 +void x86_pmu_setup_regs_data(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs); + void x86_pmu_show_pmu_cap(struct pmu *pmu); =20 static inline int x86_pmu_num_counters(struct pmu *pmu) --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 4585C30B51D; Tue, 24 Mar 2026 00:46:29 +0000 (UTC) 
Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313190; cv=none; b=NiV/cwQijsSd7i9ER29RLYQS7y8GFX/z/Zx57KMd9epTWlWZpOGEdUYetQcaAZoRCQuRACP53oEtRoI8zxhn33sqPPP+P2smQVRC+Z/6SAZ0LzuHE2u7WSy+eHNH6PU2P/1lg2KdX94WIgUpMAKbrn6oALDjoeWMbinxrhplLlM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313190; c=relaxed/simple; bh=6xvz8ppUyFqbHOKlRedXt1nvKmP3kmo26rzwqLv2/Dw=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=esuCd4w7ZhhExvVLGGNeWKMcrzdeAAWIqHTaahG7IOYAilXOsS2CUUGYmf63lMkW77a3z/RjId/0xapH8oYBmSiF0t5WDYta/BcI2gz0FRBl55NcdJvf2SnPOxK5Pb3DgmXPRksidQ58FQ0gB+I0+MMtnKXkDuvBeIrnvp0wEno= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=H4aSEHJY; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="H4aSEHJY" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313189; x=1805849189; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=6xvz8ppUyFqbHOKlRedXt1nvKmP3kmo26rzwqLv2/Dw=; b=H4aSEHJYpJfU7ssi/KTU//kVvllkPMM29mf/MswRdrU0/ap39IY41pCr LIHueKDPydnZnK30PfaNFEH9y1w3bOsgSEnmwAh8dUnD3Y9+Pl3wgpJdb Jrv+ssVNIKDvhLHiHWyysEr4pUYPl4ep32xyV8HLVnFUhJAuFBJqM7LMr /nJf8dQa3s4AZkrmeXqbnvwuzxwQc1tzcJFmSo6iOWxukQH3GEgZwKhZR neWC+YY0JBlTs1wqehxN8KCz9SGtWw+ReQC9DAqvk27Ogk4pUGGNWgPOc 
APg2KAkGMEm0kyXCzyMQvEx8AN4zQiPLtBtzEUHcsXmb9DmtTkiqAxkWP A==; X-CSE-ConnectionGUID: mvtxNKphSZemYDE9GHCzKQ== X-CSE-MsgGUID: v++RZfC7SHaJ1RWvXuYXxw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397085" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397085" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:29 -0700 X-CSE-ConnectionGUID: SN+qMK9CQ2+8D1HxRucQ5A== X-CSE-MsgGUID: ufJXQwtSRLKdsSddOR1LRA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322720" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:24 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 08/24] x86/fpu/xstate: Add xsaves_nmi() helper Date: Tue, 24 Mar 2026 08:41:02 +0800 Message-Id: <20260324004118.3772171-9-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang Add xsaves_nmi() to save supported xsave states in NMI handler. This function is similar to xsaves(), but should only be called within a NMI handler. This function returns the actual register contents at the moment the NMI occurs. Currently the perf subsystem is the sole user of this helper. 
It uses this function to snapshot SIMD (XMM/YMM/ZMM) and APX eGPRs registers which would be added in subsequent patches. Suggested-by: Dave Hansen Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- arch/x86/include/asm/fpu/xstate.h | 1 + arch/x86/kernel/fpu/xstate.c | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/x= state.h index 7a7dc9d56027..38fa8ff26559 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -110,6 +110,7 @@ int xfeature_size(int xfeature_nr); =20 void xsaves(struct xregs_state *xsave, u64 mask); void xrstors(struct xregs_state *xsave, u64 mask); +void xsaves_nmi(struct xregs_state *xsave, u64 mask); =20 int xfd_enable_feature(u64 xfd_err); =20 diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 76153dfb58c9..39e5f9e79a4c 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -1475,6 +1475,29 @@ void xrstors(struct xregs_state *xstate, u64 mask) WARN_ON_ONCE(err); } =20 +/** + * xsaves_nmi - Save selected components to a kernel xstate buffer in NMI + * @xstate: Pointer to the buffer + * @mask: Feature mask to select the components to save + * + * This function is similar to xsaves(), but should only be called within + * a NMI handler. This function returns the actual register contents at + * the moment the NMI occurs. + * + * Currently, the perf subsystem is the sole user of this helper. It uses + * the function to snapshot SIMD (XMM/YMM/ZMM) and APX eGPRs registers. 
+ */ +void xsaves_nmi(struct xregs_state *xstate, u64 mask) +{ + int err; + + if (!in_nmi()) + return; + + XSTATE_OP(XSAVES, xstate, (u32)mask, (u32)(mask >> 32), err); + WARN_ON_ONCE(err); +} + #if IS_ENABLED(CONFIG_KVM) void fpstate_clear_xstate_component(struct fpstate *fpstate, unsigned int = xfeature) { --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A6CA934E766; Tue, 24 Mar 2026 00:46:34 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313195; cv=none; b=QS/6Mdzivpf9k78gQantULj21auHnPVfqwYFhmf2myVHOwFFWS8tZxfPCsZwBF5WLWdQfhcZ8S5IPXf0m+K1OzefenE3Ef6cLV+t/QAN/qfumLaLXZ6HYkQgXLeoSwSBKXWFucV6EVkRZdZxUZP42rxafSCqk8sDZvLiAjdWCmY= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313195; c=relaxed/simple; bh=Ok6beftmWm0pWdV2KRZB73jdWdPdriAvjeD78kI5/0k=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=KEgaGf7zyQ6fdZStiL/Q8qyRtc8yoC36ws2E9GSDoBURZ5fHj43I3kKNwTSfOg1hbWPJ0V7TWQ6XbaxA832ZcDxtGH4J6XhoIG9ucQa/JeF+CnAJTFZC904PTAFwiSj2fdXQ9cOgkG1LFAc/NBc+wmocJDLAe4PFiQXrQYXRtRE= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=CXrssQ3n; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com 
header.b="CXrssQ3n" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313194; x=1805849194; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Ok6beftmWm0pWdV2KRZB73jdWdPdriAvjeD78kI5/0k=; b=CXrssQ3ntJ1ZkeGLAXLtXipKCjl6uNqdLyT3rtbaGYV4Bq2lIS/XXKCa bzQoKqT5GhSSbUfAFbo7gfy9fvu1YuBGaV1famjTLDueQnyhpHg97pFu6 PJpADqdnw/0yIPWaRDS2wlm+wHg6cWcrwaF20LsW2TcZGyR7DcsQ2edNu 9GBU6IApkjxr9ZW7vwTgb+W7FQsuGYxdvRzZlCJd30amX40E7dg3Rk+LK GVRvu4TsLYoDUo39uIV9+SUady0W8YiExEZH7YDEiwJJBPwimwL6zFdYn bQ0OqJRUMeLBFFjqAnQUtbreUy9rbCo9IgwyAVA4jBnDsAhhdohDBVqy/ g==; X-CSE-ConnectionGUID: tFHbh24/TiazHWQ342a7AQ== X-CSE-MsgGUID: bbk/CeAHQdGgxVs0F7gNKw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397098" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397098" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:34 -0700 X-CSE-ConnectionGUID: 3qeI3Ps7SiKYn3GH/aJ4sA== X-CSE-MsgGUID: /S573n1vTFSvN7hjHdZblg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322746" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:29 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 09/24] x86/fpu: Ensure TIF_NEED_FPU_LOAD is set after saving FPU state Date: Tue, 24 Mar 2026 08:41:03 +0800 Message-Id: <20260324004118.3772171-10-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: 
<20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Following Peter and Dave's suggestion, Ensure that the TIF_NEED_FPU_LOAD flag is always set after saving the FPU state. This guarantees that the user space FPU state has been saved whenever the TIF_NEED_FPU_LOAD flag is set. A subsequent patch will verify if the user space FPU state can be retrieved from the saved task FPU state in the NMI context by checking the TIF_NEED_FPU_LOAD flag. Please check the below link to get more background about the suggestion. Suggested-by: Peter Zijlstra Suggested-by: Dave Hansen Link: https://lore.kernel.org/all/20251204154721.GB2619703@noisy.programmin= g.kicks-ass.net/ Signed-off-by: Dapeng Mi --- V7: Add wrapper helper update_fpu_state_and_flag() and corresponding comments. 
arch/x86/include/asm/fpu/sched.h | 5 +++-- arch/x86/kernel/fpu/core.c | 27 ++++++++++++++++++++------- 2 files changed, 23 insertions(+), 9 deletions(-) diff --git a/arch/x86/include/asm/fpu/sched.h b/arch/x86/include/asm/fpu/sc= hed.h index 89004f4ca208..dcb2fa5f06d6 100644 --- a/arch/x86/include/asm/fpu/sched.h +++ b/arch/x86/include/asm/fpu/sched.h @@ -10,6 +10,8 @@ #include =20 extern void save_fpregs_to_fpstate(struct fpu *fpu); +extern void update_fpu_state_and_flag(struct fpu *fpu, + struct task_struct *task); extern void fpu__drop(struct task_struct *tsk); extern int fpu_clone(struct task_struct *dst, u64 clone_flags, bool minim= al, unsigned long shstk_addr); @@ -36,8 +38,7 @@ static inline void switch_fpu(struct task_struct *old, in= t cpu) !(old->flags & (PF_KTHREAD | PF_USER_WORKER))) { struct fpu *old_fpu =3D x86_task_fpu(old); =20 - set_tsk_thread_flag(old, TIF_NEED_FPU_LOAD); - save_fpregs_to_fpstate(old_fpu); + update_fpu_state_and_flag(old_fpu, old); /* * The save operation preserved register state, so the * fpu_fpregs_owner_ctx is still @old_fpu. Store the diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c index 608983806fd7..48d1ab50a961 100644 --- a/arch/x86/kernel/fpu/core.c +++ b/arch/x86/kernel/fpu/core.c @@ -213,6 +213,19 @@ void restore_fpregs_from_fpstate(struct fpstate *fpsta= te, u64 mask) } } =20 +/* + * Save the FPU register state in fpu->fpstate->regs and set + * TIF_NEED_FPU_LOAD subsequently. + * + * Must be called with fpregs_lock() held, ensuring flag + * TIF_NEED_FPU_LOAD is set last. 
+ */ +void update_fpu_state_and_flag(struct fpu *fpu, struct task_struct *task) +{ + save_fpregs_to_fpstate(fpu); + set_tsk_thread_flag(task, TIF_NEED_FPU_LOAD); +} + void fpu_reset_from_exception_fixup(void) { restore_fpregs_from_fpstate(&init_fpstate, XFEATURE_MASK_FPSTATE); @@ -379,17 +392,19 @@ int fpu_swap_kvm_fpstate(struct fpu_guest *guest_fpu,= bool enter_guest) =20 fpregs_lock(); if (!cur_fps->is_confidential && !test_thread_flag(TIF_NEED_FPU_LOAD)) - save_fpregs_to_fpstate(fpu); + update_fpu_state_and_flag(fpu, current); =20 /* Swap fpstate */ if (enter_guest) { - fpu->__task_fpstate =3D cur_fps; + WRITE_ONCE(fpu->__task_fpstate, cur_fps); + barrier(); fpu->fpstate =3D guest_fps; guest_fps->in_use =3D true; } else { guest_fps->in_use =3D false; fpu->fpstate =3D fpu->__task_fpstate; - fpu->__task_fpstate =3D NULL; + barrier(); + WRITE_ONCE(fpu->__task_fpstate, NULL); } =20 cur_fps =3D fpu->fpstate; @@ -481,10 +496,8 @@ void kernel_fpu_begin_mask(unsigned int kfpu_mask) this_cpu_write(kernel_fpu_allowed, false); =20 if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER)) && - !test_thread_flag(TIF_NEED_FPU_LOAD)) { - set_thread_flag(TIF_NEED_FPU_LOAD); - save_fpregs_to_fpstate(x86_task_fpu(current)); - } + !test_thread_flag(TIF_NEED_FPU_LOAD)) + update_fpu_state_and_flag(x86_task_fpu(current), current); __cpu_invalidate_fpregs_state(); =20 /* Put sane initial values into the control registers. 
*/ --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 7D4883659E7; Tue, 24 Mar 2026 00:46:38 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313199; cv=none; b=DdmuqY0KPxHxLRwjaPH9+o0c555AmlsQD4tMRSKfSI2YnjX83XGEStdxCTYO0HnpH9envIqUGXRVi28SneD3Rltzrxip5z55Eiu/WdnhbiXT2AmqdYQtSgjQEq0uWdet/kUDU15ZjoU3kIHT5AuyK8xsjgyDzanIfsXc9znwsJ4= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313199; c=relaxed/simple; bh=NCXNvw0KAcxdnslKQ+0wQ8tLZRJYbbz/pgK2U354PaI=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=kNok+rSoKEkQqRaF5YfUaeutnnL9c9YFtigJuV0X+7yNTHLxxGZvdohXRPaQxPAetlFpJe+Zav3Cj/kQmMgwEYatjcoBrO1MLYZSwTwna+KKMuJ+ix26/kHD9GLnggIQpTZAY/UDj77w042tSTxwknjOYO2tAgRQ8BQKrPVuKC8= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=YT5v1OPL; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="YT5v1OPL" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313198; x=1805849198; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=NCXNvw0KAcxdnslKQ+0wQ8tLZRJYbbz/pgK2U354PaI=; 
b=YT5v1OPLXKzikboOcdrC8T3ZQHAuDOxP2fNTiBDEJaUjKU5Y20mApNx6 fMwh0d74iAMwDKQp0sk92ibng72ZA/JPGtTOrDyVYlvvYFv4p3fpLPOuG ZIQjR9XiIZpoMrXzcUC1QtfwE6ZofbBX6ZbGvnYBi7dNoZe211TUTypM+ Y+lSHermwHy0baua6Ch7m40hFuPeJAWHfb718kEvfxECrrR4HEebRwR99 PfyC4gsIainNQpatVwlSDZZTeXbiDHyod5OpAh8MKFZfDJDkbpAmXUPkZ 8RKH8cZpnHHoMjgFaZqpj0fBGH/RaCxVoH8e+hZgGbQg82rlaT5CZJGIj g==; X-CSE-ConnectionGUID: UnWo30HKSTeeNAxSdto8gQ== X-CSE-MsgGUID: GZGhYqC8Th+CCDKrFqfopw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397107" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397107" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:38 -0700 X-CSE-ConnectionGUID: aztT3aPcRw61myiLucdD2Q== X-CSE-MsgGUID: dBKdnehJR3GjsuBvliBOEg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322773" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:34 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 10/24] perf: Move and rename has_extended_regs() for ARCH-specific use Date: Tue, 24 Mar 2026 08:41:04 +0800 Message-Id: <20260324004118.3772171-11-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; 
charset="utf-8" From: Kan Liang The has_extended_regs() function will be utilized in ARCH-specific code. To facilitate this, move it to header file perf_event.h Additionally, the function is renamed to event_has_extended_regs() which aligns with the existing naming conventions. No functional change intended. Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- include/linux/perf_event.h | 8 ++++++++ kernel/events/core.c | 8 +------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 48d851fbd8ea..e8b0d8e2d2af 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1534,6 +1534,14 @@ perf_event__output_id_sample(struct perf_event *even= t, extern void perf_log_lost_samples(struct perf_event *event, u64 lost); =20 +static inline bool event_has_extended_regs(struct perf_event *event) +{ + struct perf_event_attr *attr =3D &event->attr; + + return (attr->sample_regs_user & PERF_REG_EXTENDED_MASK) || + (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK); +} + static inline bool event_has_any_exclude_flag(struct perf_event *event) { struct perf_event_attr *attr =3D &event->attr; diff --git a/kernel/events/core.c b/kernel/events/core.c index eb1dea2b1b0e..7558bc5b1e73 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -12978,12 +12978,6 @@ int perf_pmu_unregister(struct pmu *pmu) } EXPORT_SYMBOL_GPL(perf_pmu_unregister); =20 -static inline bool has_extended_regs(struct perf_event *event) -{ - return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || - (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); -} - static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) { struct perf_event_context *ctx =3D NULL; @@ -13018,7 +13012,7 @@ static int perf_try_init_event(struct pmu *pmu, str= uct perf_event *event) goto err_pmu; =20 if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && - has_extended_regs(event)) { + event_has_extended_regs(event)) 
{ ret =3D -EOPNOTSUPP; goto err_destroy; } --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 7B7C4347BC9; Tue, 24 Mar 2026 00:46:43 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313205; cv=none; b=psw2nAG4hPW9guSU4Y1UfHDz5+OIzqwWM+jbC7VAR4hDu2NjNPXWfw/nF206le9/la+L3thKMMeAXOKojoQVYfGIWEimqskGG/gTXz9Fh62j5oLMcVXFdcur1cLkyDMo5OTrMiVGP/SwxfrqnWAerXQS0crKX9plW9kKUzhsYog= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313205; c=relaxed/simple; bh=9z2p/meVfGGS6GXkuTEUiRK3VDy1nKu6DCXfAZ/AUlA=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=OnO5VWX8wPjFEA36Gr9O4vqz3gpBa6UVyc8vJqTTflPGzpvW7yzHh5iXUv6R+PnR8HodmRiPNW75YWHQkutWdpAz/6TgkGDJjLyXbgXMuFImq9ZkTJZTb+ol3izBccGeEgkjkXD1dya38HNzULsVb7kXyK3hxNPAJdXont1WH4I= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=ggycG7C7; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="ggycG7C7" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313203; x=1805849203; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; 
bh=9z2p/meVfGGS6GXkuTEUiRK3VDy1nKu6DCXfAZ/AUlA=; b=ggycG7C7E4RzbJOOfkvFbEfpCAoH1PFr+Qh+1UHhpgLgbR4CuUWMpLLF NKaLu9eELDVxwYj5mnQGwsyWN4bbJUdE1mHfSD/vIY+l43thqocaS/NuW Pxk8eJnRMeuZIrml6NIyT5e0sxWTYNHqbt2q2Wah6c75SCJXyMkKTEELW HihaFjzIk32y6TlBA0xCJf/7mxYXwA7fn7VEJocVKKFjPFtldHqGvxxdH Qmg7EdjWcvQr+LqrZ4D4X0mRpybjq+RMx9GS9p5PBrxdFEvvS2KxG55bH efZvMD32veim1i40MOW8NV/Ub8fnsOxEu2JThXBpeK9CqkHi3qCS3qJ+f g==; X-CSE-ConnectionGUID: //mg4bWXS9yNbBkTxMu51A== X-CSE-MsgGUID: d9rJxgLhSGq1k6HkUdS7BA== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397119" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397119" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:43 -0700 X-CSE-ConnectionGUID: 17L2CAGTRoiFK2DbV39q/Q== X-CSE-MsgGUID: zpcKFWxgRq6u251zl2iDyQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322792" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:38 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi , Kan Liang Subject: [Patch v7 11/24] perf/x86: Enable XMM Register Sampling for Non-PEBS Events Date: Tue, 24 Mar 2026 08:41:05 +0800 Message-Id: <20260324004118.3772171-12-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: 
quoted-printable Content-Type: text/plain; charset="utf-8" Previously, XMM register sampling was only available for PEBS events starting from Icelake. Currently the support is now extended to non-PEBS events by utilizing the xsaves instruction, thereby completing the feature set. To implement this, a 64-byte aligned buffer is required. A per-CPU ext_regs_buf is introduced to store SIMD and other registers, with an approximate size of 2K. The buffer is allocated using kzalloc_node(), ensuring natural and 64-byte alignment for all kmalloc() allocations with powers of 2. XMM sampling for non-PEBS events is supported in the REGS_INTR case. Support for REGS_USER will be added in a subsequent patch. For PEBS events, XMM register sampling data is directly retrieved from PEBS records. Future support for additional vector registers (YMM/ZMM/OPMASK) is planned. An `ext_regs_mask` is added to track the supported vector register groups. Co-developed-by: Kan Liang Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- V7: Optimize and simplify x86_pmu_sample_xregs(), etc. No functional change. 
arch/x86/events/core.c | 139 +++++++++++++++++++++++++++--- arch/x86/events/intel/core.c | 31 ++++++- arch/x86/events/intel/ds.c | 20 +++-- arch/x86/events/perf_event.h | 11 ++- arch/x86/include/asm/fpu/xstate.h | 2 + arch/x86/include/asm/perf_event.h | 5 +- arch/x86/kernel/fpu/xstate.c | 2 +- 7 files changed, 185 insertions(+), 25 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 0a6c51e86e9b..22965a8a22b3 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -410,6 +410,45 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf= _event *event) return x86_pmu_extra_regs(val, event); } =20 +static DEFINE_PER_CPU(struct xregs_state *, ext_regs_buf); + +static void release_ext_regs_buffers(void) +{ + int cpu; + + if (!x86_pmu.ext_regs_mask) + return; + + for_each_possible_cpu(cpu) { + kfree(per_cpu(ext_regs_buf, cpu)); + per_cpu(ext_regs_buf, cpu) =3D NULL; + } +} + +static void reserve_ext_regs_buffers(void) +{ + bool compacted =3D cpu_feature_enabled(X86_FEATURE_XCOMPACTED); + unsigned int size; + int cpu; + + if (!x86_pmu.ext_regs_mask) + return; + + size =3D xstate_calculate_size(x86_pmu.ext_regs_mask, compacted); + + for_each_possible_cpu(cpu) { + per_cpu(ext_regs_buf, cpu) =3D kzalloc_node(size, GFP_KERNEL, + cpu_to_node(cpu)); + if (!per_cpu(ext_regs_buf, cpu)) + goto err; + } + + return; + +err: + release_ext_regs_buffers(); +} + int x86_reserve_hardware(void) { int err =3D 0; @@ -422,6 +461,7 @@ int x86_reserve_hardware(void) } else { reserve_ds_buffers(); reserve_lbr_buffers(); + reserve_ext_regs_buffers(); } } if (!err) @@ -438,6 +478,7 @@ void x86_release_hardware(void) release_pmc_hardware(); release_ds_buffers(); release_lbr_buffers(); + release_ext_regs_buffers(); mutex_unlock(&pmc_reserve_mutex); } } @@ -655,18 +696,23 @@ int x86_pmu_hw_config(struct perf_event *event) return -EINVAL; } =20 - /* sample_regs_user never support XMM registers */ - if (unlikely(event->attr.sample_regs_user & 
PERF_REG_EXTENDED_MASK)) - return -EINVAL; - /* - * Besides the general purpose registers, XMM registers may - * be collected in PEBS on some platforms, e.g. Icelake - */ - if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) { - if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) - return -EINVAL; + if (event->attr.sample_type & PERF_SAMPLE_REGS_INTR) { + /* + * Besides the general purpose registers, XMM registers may + * be collected as well. + */ + if (event_has_extended_regs(event)) { + if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) + return -EINVAL; + } + } =20 - if (!event->attr.precise_ip) + if (event->attr.sample_type & PERF_SAMPLE_REGS_USER) { + /* + * Currently XMM registers sampling for REGS_USER is not + * supported yet. + */ + if (event_has_extended_regs(event)) return -EINVAL; } =20 @@ -1699,9 +1745,9 @@ static void x86_pmu_del(struct perf_event *event, int= flags) static_call_cond(x86_pmu_del)(event); } =20 -void x86_pmu_setup_regs_data(struct perf_event *event, - struct perf_sample_data *data, - struct pt_regs *regs) +static void x86_pmu_setup_gpregs_data(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs) { struct perf_event_attr *attr =3D &event->attr; u64 sample_type =3D attr->sample_type; @@ -1732,6 +1778,71 @@ void x86_pmu_setup_regs_data(struct perf_event *even= t, } } =20 +inline void x86_pmu_clear_perf_regs(struct pt_regs *regs) +{ + struct x86_perf_regs *perf_regs =3D container_of(regs, struct x86_perf_re= gs, regs); + + perf_regs->xmm_regs =3D NULL; +} + +static inline void x86_pmu_update_xregs(struct x86_perf_regs *perf_regs, + struct xregs_state *xsave, u64 bitmap) +{ + u64 mask; + + if (!xsave) + return; + + /* Filtered by what XSAVE really gives */ + mask =3D bitmap & xsave->header.xfeatures; + + if (mask & XFEATURE_MASK_SSE) + perf_regs->xmm_space =3D xsave->i387.xmm_space; +} + +static void x86_pmu_sample_xregs(struct perf_event *event, + struct perf_sample_data 
*data, + u64 ignore_mask) +{ + struct xregs_state *xsave =3D per_cpu(ext_regs_buf, smp_processor_id()); + u64 sample_type =3D event->attr.sample_type; + struct x86_perf_regs *perf_regs; + u64 intr_mask =3D 0; + u64 mask =3D 0; + + if (WARN_ON_ONCE(!xsave)) + return; + + if (event_has_extended_regs(event)) + mask |=3D XFEATURE_MASK_SSE; + + mask &=3D x86_pmu.ext_regs_mask; + + if ((sample_type & PERF_SAMPLE_REGS_INTR) && data->regs_intr.abi) + intr_mask =3D mask & ~ignore_mask; + + if (intr_mask) { + perf_regs =3D container_of(data->regs_intr.regs, + struct x86_perf_regs, regs); + xsave->header.xfeatures =3D 0; + xsaves_nmi(xsave, mask); + x86_pmu_update_xregs(perf_regs, xsave, intr_mask); + } +} + +void x86_pmu_setup_regs_data(struct perf_event *event, + struct perf_sample_data *data, + struct pt_regs *regs, + u64 ignore_mask) +{ + x86_pmu_setup_gpregs_data(event, data, regs); + /* + * ignore_mask indicates the PEBS sampled extended regs + * which are unnecessary to sample again. + */ + x86_pmu_sample_xregs(event, data, ignore_mask); +} + int x86_pmu_handle_irq(struct pt_regs *regs) { struct perf_sample_data data; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 5a2b1503b6a5..5772dcc3bcbd 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3649,6 +3649,9 @@ static int handle_pmi_common(struct pt_regs *regs, u6= 4 status) if (has_branch_stack(event)) intel_pmu_lbr_save_brstack(&data, cpuc, event); =20 + x86_pmu_clear_perf_regs(regs); + x86_pmu_setup_regs_data(event, &data, regs, 0); + perf_event_overflow(event, &data, regs); } =20 @@ -5884,8 +5887,32 @@ static inline void __intel_update_large_pebs_flags(s= truct pmu *pmu) } } =20 -#define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX= _FIXED)) +static void intel_extended_regs_init(struct pmu *pmu) +{ + struct pmu *dest_pmu =3D pmu ? pmu : x86_get_pmu(smp_processor_id()); + + /* + * Extend the vector registers support to non-PEBS. 
+ * The feature is limited to newer Intel machines with + * PEBS V4+ or archPerfmonExt (0x23) enabled for now. + * In theory, the vector registers can be retrieved as + * long as the CPU supports. The support for the old + * generations may be added later if there is a + * requirement. + * Only support the extension when XSAVES is available. + */ + if (!boot_cpu_has(X86_FEATURE_XSAVES)) + return; + + if (!boot_cpu_has(X86_FEATURE_XMM) || + !cpu_has_xfeatures(XFEATURE_MASK_SSE, NULL)) + return; =20 + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_SSE; + dest_pmu->capabilities |=3D PERF_PMU_CAP_EXTENDED_REGS; +} + +#define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX= _FIXED)) static void update_pmu_cap(struct pmu *pmu) { unsigned int eax, ebx, ecx, edx; @@ -5949,6 +5976,8 @@ static void update_pmu_cap(struct pmu *pmu) /* Perf Metric (Bit 15) and PEBS via PT (Bit 16) are hybrid enumeration = */ rdmsrq(MSR_IA32_PERF_CAPABILITIES, hybrid(pmu, intel_cap).capabilities); } + + intel_extended_regs_init(pmu); } =20 static void intel_pmu_check_hybrid_pmus(struct x86_hybrid_pmu *pmu) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index b045297c02d0..74a41dae8a62 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1743,8 +1743,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event= *event) if (gprs || (attr->precise_ip < 2) || tsx_weight) pebs_data_cfg |=3D PEBS_DATACFG_GP; =20 - if ((sample_type & PERF_SAMPLE_REGS_INTR) && - (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK)) + if (event_has_extended_regs(event)) pebs_data_cfg |=3D PEBS_DATACFG_XMMS; =20 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { @@ -2460,10 +2459,8 @@ static inline void __setup_pebs_gpr_group(struct per= f_event *event, regs->flags &=3D ~PERF_EFLAGS_EXACT; } =20 - if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) { + if (sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER)) adaptive_pebs_save_regs(regs, gprs); - 
x86_pmu_setup_regs_data(event, data, regs); - } } =20 static inline void __setup_pebs_meminfo_group(struct perf_event *event, @@ -2521,6 +2518,7 @@ static void setup_pebs_adaptive_sample_data(struct pe= rf_event *event, struct pebs_meminfo *meminfo =3D NULL; struct pebs_gprs *gprs =3D NULL; struct x86_perf_regs *perf_regs; + u64 ignore_mask =3D 0; u64 format_group; u16 retire; =20 @@ -2528,7 +2526,7 @@ static void setup_pebs_adaptive_sample_data(struct pe= rf_event *event, return; =20 perf_regs =3D container_of(regs, struct x86_perf_regs, regs); - perf_regs->xmm_regs =3D NULL; + x86_pmu_clear_perf_regs(regs); =20 format_group =3D basic->format_group; =20 @@ -2575,6 +2573,7 @@ static void setup_pebs_adaptive_sample_data(struct pe= rf_event *event, if (format_group & PEBS_DATACFG_XMMS) { struct pebs_xmm *xmm =3D next_record; =20 + ignore_mask |=3D XFEATURE_MASK_SSE; next_record =3D xmm + 1; perf_regs->xmm_regs =3D xmm->xmm; } @@ -2613,6 +2612,8 @@ static void setup_pebs_adaptive_sample_data(struct pe= rf_event *event, next_record +=3D nr * sizeof(u64); } =20 + x86_pmu_setup_regs_data(event, data, regs, ignore_mask); + WARN_ONCE(next_record !=3D __pebs + basic->format_size, "PEBS record size %u, expected %llu, config %llx\n", basic->format_size, @@ -2638,6 +2639,7 @@ static void setup_arch_pebs_sample_data(struct perf_e= vent *event, struct arch_pebs_aux *meminfo =3D NULL; struct arch_pebs_gprs *gprs =3D NULL; struct x86_perf_regs *perf_regs; + u64 ignore_mask =3D 0; void *next_record; void *at =3D __pebs; =20 @@ -2645,7 +2647,7 @@ static void setup_arch_pebs_sample_data(struct perf_e= vent *event, return; =20 perf_regs =3D container_of(regs, struct x86_perf_regs, regs); - perf_regs->xmm_regs =3D NULL; + x86_pmu_clear_perf_regs(regs); =20 __setup_perf_sample_data(event, iregs, data); =20 @@ -2700,6 +2702,7 @@ static void setup_arch_pebs_sample_data(struct perf_e= vent *event, =20 next_record +=3D sizeof(struct arch_pebs_xer_header); =20 + ignore_mask |=3D 
XFEATURE_MASK_SSE; xmm =3D next_record; perf_regs->xmm_regs =3D xmm->xmm; next_record =3D xmm + 1; @@ -2747,6 +2750,8 @@ static void setup_arch_pebs_sample_data(struct perf_e= vent *event, at =3D at + header->size; goto again; } + + x86_pmu_setup_regs_data(event, data, regs, ignore_mask); } =20 static inline void * @@ -3409,6 +3414,7 @@ static void __init intel_ds_pebs_init(void) x86_pmu.flags |=3D PMU_FL_PEBS_ALL; x86_pmu.pebs_capable =3D ~0ULL; pebs_qual =3D "-baseline"; + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_SSE; x86_get_pmu(smp_processor_id())->capabilities |=3D PERF_PMU_CAP_EXTEND= ED_REGS; } else { /* Only basic record supported */ diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 39c41947c70d..a5e5bffb711e 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1020,6 +1020,12 @@ struct x86_pmu { struct extra_reg *extra_regs; unsigned int flags; =20 + /* + * Extended regs, e.g., vector registers + * Utilize the same format as the XFEATURE_MASK_* + */ + u64 ext_regs_mask; + /* * Intel host/guest support (KVM) */ @@ -1306,9 +1312,12 @@ void x86_pmu_enable_event(struct perf_event *event); =20 int x86_pmu_handle_irq(struct pt_regs *regs); =20 +void x86_pmu_clear_perf_regs(struct pt_regs *regs); + void x86_pmu_setup_regs_data(struct perf_event *event, struct perf_sample_data *data, - struct pt_regs *regs); + struct pt_regs *regs, + u64 ignore_mask); =20 void x86_pmu_show_pmu_cap(struct pmu *pmu); =20 diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/x= state.h index 38fa8ff26559..19dec5f0b1c7 100644 --- a/arch/x86/include/asm/fpu/xstate.h +++ b/arch/x86/include/asm/fpu/xstate.h @@ -112,6 +112,8 @@ void xsaves(struct xregs_state *xsave, u64 mask); void xrstors(struct xregs_state *xsave, u64 mask); void xsaves_nmi(struct xregs_state *xsave, u64 mask); =20 +unsigned int xstate_calculate_size(u64 xfeatures, bool compacted); + int xfd_enable_feature(u64 xfd_err); =20 #ifdef CONFIG_X86_64 
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index 752cb319d5ea..e47a963a7cf0 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -726,7 +726,10 @@ extern void perf_events_lapic_init(void); struct pt_regs; struct x86_perf_regs { struct pt_regs regs; - u64 *xmm_regs; + union { + u64 *xmm_regs; + u32 *xmm_space; /* for xsaves */ + }; }; =20 extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c index 39e5f9e79a4c..93631f7a638e 100644 --- a/arch/x86/kernel/fpu/xstate.c +++ b/arch/x86/kernel/fpu/xstate.c @@ -587,7 +587,7 @@ static bool __init check_xstate_against_struct(int nr) return true; } =20 -static unsigned int xstate_calculate_size(u64 xfeatures, bool compacted) +unsigned int xstate_calculate_size(u64 xfeatures, bool compacted) { unsigned int topmost =3D fls64(xfeatures) - 1; unsigned int offset, i; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 3DECD34D90C; Tue, 24 Mar 2026 00:46:48 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313209; cv=none; b=hfP6RBqE91Jvab7svKokFsqRFr0wTeY+vAZRapuzRrg0/Ale26z8xrU+sN08/se3IXlk0s35tnu1GO1VnDPAPrQuhIY2z4C8kivF4gfKzKHZv4NQMo81VpkR4ZDoz+MgOfM2toaXQJogn8a0z4MO2xoGn6yMGFhEu2ru+b1yYoA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313209; c=relaxed/simple; bh=MFZWogFnHqFSZKefxWXjpdRCaY3X2l1UQDQz8jBRbDY=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; 
b=DZDhngf3ajJK7oWeqoyDPnM318NArxwqE/shPmfAlZZ7OTLLqJArElVOvzj0CC/5J8ByU/0lIV//sNJ1ZtC5NnLiTsi2p+XZgNys8jf9+iPsU0knURxhH1iHA5ktalgErNZwuD57y1/OKDzyP5BzXO9ArPdlZ/0mKvXK+D79QzA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=Nh6iCa3I; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="Nh6iCa3I" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313208; x=1805849208; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=MFZWogFnHqFSZKefxWXjpdRCaY3X2l1UQDQz8jBRbDY=; b=Nh6iCa3IrlhIw10rDBiEVu2cNovS7AolNFJoqbdPAHIsVO7dC811mK+I 4nbklwakLk7SFuXRPNc2WJViQt5mi5ExlK6VQejdBE2DESagk44GkZzgQ GAz25ZIw67BseRqjdR0Elr0Iy1DcsiP8sm/oE/IPssB7meTfvUK3FEePd UKWXCWdW76rKMeYbyN21Iovk6byR6jqljNNOS+V603+yl+C0UX0uLW66F 872kLCKb/wecnpHmEiMqXgHhE0BZxUsoTbFJHGg8Q3xgHN13UFeTRQp3M IAexLcDhWBZ5d2SaQC/vyEfQ5QW8uVthzWQzxOxnq7MbALeGvoLQaCpDd A==; X-CSE-ConnectionGUID: ejPgRkj+QIyX+ibhav7O1g== X-CSE-MsgGUID: xcxKbaC4Qj+QW7KRUaPFfw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397137" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397137" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:48 -0700 X-CSE-ConnectionGUID: 2/jXmcqKQeCEi+iOtWETQw== X-CSE-MsgGUID: Q04zkw1ZQlCSI+7X2rajbQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322811" Received: from spr.sh.intel.com 
([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:43 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi , Kan Liang Subject: [Patch v7 12/24] perf/x86: Enable XMM register sampling for REGS_USER case Date: Tue, 24 Mar 2026 08:41:06 +0800 Message-Id: <20260324004118.3772171-13-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" This patch adds support for XMM register sampling in the REGS_USER case. To handle simultaneous sampling of XMM registers for both REGS_INTR and REGS_USER cases, a per-CPU `x86_user_regs` is introduced to store REGS_USER-specific XMM registers. This prevents REGS_USER-specific XMM register data from being overwritten by REGS_INTR-specific data if they share the same `x86_perf_regs` structure. To sample user-space XMM registers, the `x86_pmu_update_user_ext_regs()` helper function is added. It checks if the `TIF_NEED_FPU_LOAD` flag is set. If so, the user-space XMM register data can be directly retrieved from the cached task FPU state, as the corresponding hardware registers have been cleared or switched to kernel-space data. Otherwise, the data must be read from the hardware registers using the `xsaves` instruction. 
For PEBS events, `x86_pmu_update_user_ext_regs()` checks if the PEBS-sampled XMM register data belongs to user-space. If so, no further action is needed. Otherwise, the user-space XMM register data needs to be re-sampled using the same method as for non-PEBS events. Co-developed-by: Kan Liang Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 95 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 82 insertions(+), 13 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 22965a8a22b3..a5643c875190 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -696,7 +696,7 @@ int x86_pmu_hw_config(struct perf_event *event) return -EINVAL; } =20 - if (event->attr.sample_type & PERF_SAMPLE_REGS_INTR) { + if (event->attr.sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_U= SER)) { /* * Besides the general purpose registers, XMM registers may * be collected as well. @@ -707,15 +707,6 @@ int x86_pmu_hw_config(struct perf_event *event) } } =20 - if (event->attr.sample_type & PERF_SAMPLE_REGS_USER) { - /* - * Currently XMM registers sampling for REGS_USER is not - * supported yet. - */ - if (event_has_extended_regs(event)) - return -EINVAL; - } - return x86_setup_perfctr(event); } =20 @@ -1745,6 +1736,28 @@ static void x86_pmu_del(struct perf_event *event, in= t flags) static_call_cond(x86_pmu_del)(event); } =20 +/* + * When both PERF_SAMPLE_REGS_INTR and PERF_SAMPLE_REGS_USER are set, + * an additional x86_perf_regs is required to save user-space registers. + * Without this, user-space register data may be overwritten by kernel-spa= ce + * registers. 
+ */ +static DEFINE_PER_CPU(struct x86_perf_regs, x86_user_regs); +static void x86_pmu_perf_get_regs_user(struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct x86_perf_regs *x86_regs_user =3D this_cpu_ptr(&x86_user_regs); + struct perf_regs regs_user; + + perf_get_regs_user(®s_user, regs); + data->regs_user.abi =3D regs_user.abi; + if (regs_user.regs) { + x86_regs_user->regs =3D *regs_user.regs; + data->regs_user.regs =3D &x86_regs_user->regs; + } else + data->regs_user.regs =3D NULL; +} + static void x86_pmu_setup_gpregs_data(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) @@ -1757,7 +1770,14 @@ static void x86_pmu_setup_gpregs_data(struct perf_ev= ent *event, data->regs_user.abi =3D perf_reg_abi(current); data->regs_user.regs =3D regs; } else if (!(current->flags & PF_KTHREAD)) { - perf_get_regs_user(&data->regs_user, regs); + /* + * It cannot guarantee that the kernel will never + * touch the registers outside of the pt_regs, + * especially when more and more registers + * (e.g., SIMD, eGPR) are added. The live data + * cannot be used. + */ + x86_pmu_perf_get_regs_user(data, regs); } else { data->regs_user.abi =3D PERF_SAMPLE_REGS_ABI_NONE; data->regs_user.regs =3D NULL; @@ -1800,6 +1820,43 @@ static inline void x86_pmu_update_xregs(struct x86_p= erf_regs *perf_regs, perf_regs->xmm_space =3D xsave->i387.xmm_space; } =20 +/* + * This function retrieves cached user-space fpu registers (XMM/YMM/ZMM). + * If TIF_NEED_FPU_LOAD is set, it indicates that the user-space FPU state + * is cached. Otherwise, the data should be read directly from the hardware + * registers. 
+ */ +static inline u64 x86_pmu_update_user_xregs(struct perf_sample_data *data, + u64 mask, u64 ignore_mask) +{ + struct x86_perf_regs *perf_regs; + struct xregs_state *xsave; + struct fpu *fpu; + struct fpstate *fps; + + if (data->regs_user.abi =3D=3D PERF_SAMPLE_REGS_ABI_NONE) + return 0; + + if (test_thread_flag(TIF_NEED_FPU_LOAD)) { + perf_regs =3D container_of(data->regs_user.regs, + struct x86_perf_regs, regs); + fpu =3D x86_task_fpu(current); + /* + * If __task_fpstate is set, it holds the right pointer, + * otherwise fpstate will. + */ + fps =3D READ_ONCE(fpu->__task_fpstate); + if (!fps) + fps =3D fpu->fpstate; + xsave =3D &fps->regs.xsave; + + x86_pmu_update_xregs(perf_regs, xsave, mask); + return 0; + } + + return mask & ~ignore_mask; +} + static void x86_pmu_sample_xregs(struct perf_event *event, struct perf_sample_data *data, u64 ignore_mask) @@ -1807,6 +1864,7 @@ static void x86_pmu_sample_xregs(struct perf_event *e= vent, struct xregs_state *xsave =3D per_cpu(ext_regs_buf, smp_processor_id()); u64 sample_type =3D event->attr.sample_type; struct x86_perf_regs *perf_regs; + u64 user_mask =3D 0; u64 intr_mask =3D 0; u64 mask =3D 0; =20 @@ -1817,15 +1875,26 @@ static void x86_pmu_sample_xregs(struct perf_event = *event, mask |=3D XFEATURE_MASK_SSE; =20 mask &=3D x86_pmu.ext_regs_mask; + if ((sample_type & PERF_SAMPLE_REGS_USER) && data->regs_user.abi) + user_mask =3D x86_pmu_update_user_xregs(data, mask, ignore_mask); =20 if ((sample_type & PERF_SAMPLE_REGS_INTR) && data->regs_intr.abi) intr_mask =3D mask & ~ignore_mask; =20 + if (user_mask | intr_mask) { + xsave->header.xfeatures =3D 0; + xsaves_nmi(xsave, user_mask | intr_mask); + } + + if (user_mask) { + perf_regs =3D container_of(data->regs_user.regs, + struct x86_perf_regs, regs); + x86_pmu_update_xregs(perf_regs, xsave, user_mask); + } + if (intr_mask) { perf_regs =3D container_of(data->regs_intr.regs, struct x86_perf_regs, regs); - xsave->header.xfeatures =3D 0; - xsaves_nmi(xsave, mask); 
x86_pmu_update_xregs(perf_regs, xsave, intr_mask); } } --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 24F721DDA24; Tue, 24 Mar 2026 00:46:53 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313214; cv=none; b=ZwtNLZmNEti6KFIKzJfjquajuelEyaJQl+Ig1j43v9xq/45XQEqOrGH/R0zCH4K9/Z8qoW8VNyvCAJSTKZjaEfV7qFa5anXUff9IG3Si1jnUAMq3CHHZdNdttEaa+/tvRmURA7b+cm1ttWfsAHeEGNss+27JpT5M8f98cdRPhEw= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313214; c=relaxed/simple; bh=2uh5gTN+vxslqRbGjcoYFR9DDWbPcMZAhwhy7N4FqSk=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=anIhgRGUH+ZZTDLj1z/1uLa4QOggfHuVnU5YWGtpTKzgmW5HE/Wcs4fJewmOKwuyuGbSAAidHhMyv3HsLgBqSLOxQ23XIxaf5AIAkLjLqiYFoB3WZ1fAOw/XPlUhTcL1hAaa6EhgZ9MC/PTUs7mweCcYarMXVLpFRog2M83X1qs= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=DRBRBwwl; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="DRBRBwwl" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313213; x=1805849213; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; 
bh=2uh5gTN+vxslqRbGjcoYFR9DDWbPcMZAhwhy7N4FqSk=; b=DRBRBwwlSs/y/YIDW0u79pN6wGjOFARRol0YGaxjYNLX2su4OsNfW+ql bakYvESECY1vCAefWCyoKRoU5HNNfPyhLqzeydzCuEf+Gjd0EWGpai8Sh qRoBRuIhR31laiOkx4AGqK+VDkd1QpJZq/WYaAsqcCmB6J6090xBqZqdM sjfgbpSgUhi/G6EQbdJdIS2s8UtNDsjSV9ryCql1lksEmEKMQkxzxyFV+ m0yKf4vqdoYYsHaAkBAavCeSdoPMG3nSQdjviMzOPvami0M4VmOba4JHx RjorW1K7DL7BFEvR3U8jhm9mEpiaW/bWUUWqycgRsY6G3PhbS8lppB/k4 Q==; X-CSE-ConnectionGUID: F8/+wA3cRsa6zUFQbKVodw== X-CSE-MsgGUID: Z8FiCYRwQm+XRrku0DIDXQ== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397154" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397154" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:52 -0700 X-CSE-ConnectionGUID: OdpZ0cHeRimpClp3AskZlw== X-CSE-MsgGUID: TwsnQDJJTD6uxylUZl/THA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322823" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:48 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 13/24] perf: Add sampling support for SIMD registers Date: Tue, 24 Mar 2026 08:41:07 +0800 Message-Id: <20260324004118.3772171-14-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable 
Content-Type: text/plain; charset="utf-8" From: Kan Liang Users may be interested in sampling SIMD registers during profiling. The current sample_regs_* structure does not have sufficient space for all SIMD registers. To address this, new attribute fields sample_simd_{pred,vec}_reg_* are added to struct perf_event_attr to represent the SIMD registers that are expected to be sampled. Currently, the perf/x86 code supports XMM registers in sample_regs_*. To unify the configuration of SIMD registers and ensure a consistent method for configuring XMM and other SIMD registers, a new event attribute field, sample_simd_regs_enabled, is introduced. When sample_simd_regs_enabled is set, it indicates that all SIMD registers, including XMM, will be represented by the newly introduced sample_simd_{pred|vec}_reg_* fields. The original XMM space in sample_regs_* is reserved for future uses. Since SIMD registers are wider than 64 bits, a new output format is introduced. The number and width of SIMD registers are dumped first, followed by the register values. The number and width are based on the user's configuration. If they differ (e.g., on ARM), an ARCH-specific perf_output_sample_simd_regs function can be implemented separately. A new ABI, PERF_SAMPLE_REGS_ABI_SIMD, is added to indicate the new format. The enum perf_sample_regs_abi is now a bitmap. This change should not impact existing tools, as the version and bitmap remain the same for values 1 and 2. Additionally, two new __weak functions are introduced: - perf_simd_reg_value(): Retrieves the value of the requested SIMD register. - perf_simd_reg_validate(): Validates the configuration of the SIMD registers. A new flag, PERF_PMU_CAP_SIMD_REGS, is added to indicate that the PMU supports SIMD register dumping. An error is generated if sample_simd_{pred|vec}_reg_* is mistakenly set for a PMU that does not support this capability. 
Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- V7: Add macro word_for_each_set_bit() to simplify u64 set-bit iteration. include/linux/perf_event.h | 8 +++ include/linux/perf_regs.h | 4 ++ include/uapi/linux/perf_event.h | 50 ++++++++++++++-- kernel/events/core.c | 102 +++++++++++++++++++++++++++++--- tools/perf/util/header.c | 3 +- 5 files changed, 153 insertions(+), 14 deletions(-) diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index e8b0d8e2d2af..137d6e4a3403 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -306,6 +306,7 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_AUX_PAUSE 0x0200 #define PERF_PMU_CAP_AUX_PREFER_LARGE 0x0400 #define PERF_PMU_CAP_MEDIATED_VPMU 0x0800 +#define PERF_PMU_CAP_SIMD_REGS 0x1000 =20 /** * pmu::scope @@ -1534,6 +1535,13 @@ perf_event__output_id_sample(struct perf_event *even= t, extern void perf_log_lost_samples(struct perf_event *event, u64 lost); =20 +static inline bool event_has_simd_regs(struct perf_event *event) +{ + struct perf_event_attr *attr =3D &event->attr; + + return attr->sample_simd_regs_enabled !=3D 0; +} + static inline bool event_has_extended_regs(struct perf_event *event) { struct perf_event_attr *attr =3D &event->attr; diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index 144bcc3ff19f..518f28c6a7d4 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -14,6 +14,10 @@ int perf_reg_validate(u64 mask); u64 perf_reg_abi(struct task_struct *task); void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs); +int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask, + u16 pred_qwords, u32 pred_mask); +u64 perf_simd_reg_value(struct pt_regs *regs, int idx, + u16 qwords_idx, bool pred); =20 #ifdef CONFIG_HAVE_PERF_REGS #include diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_even= t.h index fd10aa8d697f..b8c8953928f8 100644 
--- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -314,8 +314,9 @@ enum { */ enum perf_sample_regs_abi { PERF_SAMPLE_REGS_ABI_NONE =3D 0, - PERF_SAMPLE_REGS_ABI_32 =3D 1, - PERF_SAMPLE_REGS_ABI_64 =3D 2, + PERF_SAMPLE_REGS_ABI_32 =3D (1 << 0), + PERF_SAMPLE_REGS_ABI_64 =3D (1 << 1), + PERF_SAMPLE_REGS_ABI_SIMD =3D (1 << 2), }; =20 /* @@ -383,6 +384,7 @@ enum perf_event_read_format { #define PERF_ATTR_SIZE_VER7 128 /* Add: sig_data */ #define PERF_ATTR_SIZE_VER8 136 /* Add: config3 */ #define PERF_ATTR_SIZE_VER9 144 /* add: config4 */ +#define PERF_ATTR_SIZE_VER10 176 /* Add: sample_simd_{pred,vec}_reg_* */ =20 /* * 'struct perf_event_attr' contains various attributes that define @@ -547,6 +549,30 @@ struct perf_event_attr { =20 __u64 config3; /* extension of config2 */ __u64 config4; /* extension of config3 */ + + /* + * Defines the sampling SIMD/PRED registers bitmap and qwords + * (8 bytes) length. + * + * sample_simd_regs_enabled !=3D 0 indicates there are SIMD/PRED registers + * to be sampled, the SIMD/PRED registers bitmap and qwords length are + * represented in sample_{simd|pred}_pred_reg_{intr|user} and + * sample_simd_{vec|pred}_reg_qwords fields. + * + * sample_simd_regs_enabled =3D=3D 0 indicates no SIMD/PRED registers are + * sampled. + */ + union { + __u16 sample_simd_regs_enabled; + __u16 sample_simd_pred_reg_qwords; + }; + __u16 sample_simd_vec_reg_qwords; + __u32 __reserved_4; + + __u32 sample_simd_pred_reg_intr; + __u32 sample_simd_pred_reg_user; + __u64 sample_simd_vec_reg_intr; + __u64 sample_simd_vec_reg_user; }; =20 /* @@ -1020,7 +1046,15 @@ enum perf_event_type { * } && PERF_SAMPLE_BRANCH_STACK * * { u64 abi; # enum perf_sample_regs_abi - * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER + * u64 regs[weight(mask)]; + * struct { + * u16 nr_vectors; # 0 ... weight(sample_simd_vec_reg_user) + * u16 vector_qwords; # 0 ... sample_simd_vec_reg_qwords + * u16 nr_pred; # 0 ... 
weight(sample_simd_pred_reg_user) + * u16 pred_qwords; # 0 ... sample_simd_pred_reg_qwords + * u64 data[nr_vectors * vector_qwords + nr_pred * pred_qwords]; + * } && (abi & PERF_SAMPLE_REGS_ABI_SIMD) + * } && PERF_SAMPLE_REGS_USER * * { u64 size; * char data[size]; @@ -1047,7 +1081,15 @@ enum perf_event_type { * { u64 data_src; } && PERF_SAMPLE_DATA_SRC * { u64 transaction; } && PERF_SAMPLE_TRANSACTION * { u64 abi; # enum perf_sample_regs_abi - * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR + * u64 regs[weight(mask)]; + * struct { + * u16 nr_vectors; # 0 ... weight(sample_simd_vec_reg_intr) + * u16 vector_qwords; # 0 ... sample_simd_vec_reg_qwords + * u16 nr_pred; # 0 ... weight(sample_simd_pred_reg_intr) + * u16 pred_qwords; # 0 ... sample_simd_pred_reg_qwords + * u64 data[nr_vectors * vector_qwords + nr_pred * pred_qwords]; + * } && (abi & PERF_SAMPLE_REGS_ABI_SIMD) + * } && PERF_SAMPLE_REGS_INTR * { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR * { u64 cgroup;} && PERF_SAMPLE_CGROUP * { u64 data_page_size;} && PERF_SAMPLE_DATA_PAGE_SIZE diff --git a/kernel/events/core.c b/kernel/events/core.c index 7558bc5b1e73..de42575f517b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7753,22 +7753,60 @@ void __weak perf_get_regs_user(struct perf_regs *re= gs_user, regs_user->abi =3D perf_reg_abi(current); } =20 +#define word_for_each_set_bit(bit, val) \ + for (unsigned long long __v =3D (val); \ + __v && ((bit =3D __builtin_ctzll(__v)), 1); \ + __v &=3D __v - 1) + static void perf_output_sample_regs(struct perf_output_handle *handle, struct pt_regs *regs, u64 mask) { int bit; - DECLARE_BITMAP(_mask, 64); - - bitmap_from_u64(_mask, mask); - for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) { - u64 val; =20 - val =3D perf_reg_value(regs, bit); + word_for_each_set_bit(bit, mask) { + u64 val =3D perf_reg_value(regs, bit); perf_output_put(handle, val); } } =20 +static void +perf_output_sample_simd_regs(struct perf_output_handle *handle, + struct 
perf_event *event, + struct pt_regs *regs, + u64 mask, u32 pred_mask) +{ + u16 pred_qwords =3D event->attr.sample_simd_pred_reg_qwords; + u16 vec_qwords =3D event->attr.sample_simd_vec_reg_qwords; + u16 nr_vectors =3D hweight64(mask); + u16 nr_pred =3D hweight32(pred_mask); + int bit; + + perf_output_put(handle, nr_vectors); + perf_output_put(handle, vec_qwords); + perf_output_put(handle, nr_pred); + perf_output_put(handle, pred_qwords); + + if (nr_vectors) { + word_for_each_set_bit(bit, mask) { + for (int i =3D 0; i < vec_qwords; i++) { + u64 val =3D perf_simd_reg_value(regs, bit, + i, false); + perf_output_put(handle, val); + } + } + } + if (nr_pred) { + word_for_each_set_bit(bit, pred_mask) { + for (int i =3D 0; i < pred_qwords; i++) { + u64 val =3D perf_simd_reg_value(regs, bit, + i, true); + perf_output_put(handle, val); + } + } + } +} + static void perf_sample_regs_user(struct perf_regs *regs_user, struct pt_regs *regs) { @@ -7790,6 +7828,17 @@ static void perf_sample_regs_intr(struct perf_regs *= regs_intr, regs_intr->abi =3D perf_reg_abi(current); } =20 +int __weak perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask, + u16 pred_qwords, u32 pred_mask) +{ + return vec_qwords || vec_mask || pred_qwords || pred_mask ? -ENOSYS : 0; +} + +u64 __weak perf_simd_reg_value(struct pt_regs *regs, int idx, + u16 qwords_idx, bool pred) +{ + return 0; +} =20 /* * Get remaining task size from user stack pointer. 
@@ -8320,10 +8369,17 @@ void perf_output_sample(struct perf_output_handle *= handle, perf_output_put(handle, abi); =20 if (abi) { - u64 mask =3D event->attr.sample_regs_user; + struct perf_event_attr *attr =3D &event->attr; + u64 mask =3D attr->sample_regs_user; perf_output_sample_regs(handle, data->regs_user.regs, mask); + if (abi & PERF_SAMPLE_REGS_ABI_SIMD) { + perf_output_sample_simd_regs(handle, event, + data->regs_user.regs, + attr->sample_simd_vec_reg_user, + attr->sample_simd_pred_reg_user); + } } } =20 @@ -8351,11 +8407,18 @@ void perf_output_sample(struct perf_output_handle *= handle, perf_output_put(handle, abi); =20 if (abi) { - u64 mask =3D event->attr.sample_regs_intr; + struct perf_event_attr *attr =3D &event->attr; + u64 mask =3D attr->sample_regs_intr; =20 perf_output_sample_regs(handle, data->regs_intr.regs, mask); + if (abi & PERF_SAMPLE_REGS_ABI_SIMD) { + perf_output_sample_simd_regs(handle, event, + data->regs_intr.regs, + attr->sample_simd_vec_reg_intr, + attr->sample_simd_pred_reg_intr); + } } } =20 @@ -13011,6 +13074,12 @@ static int perf_try_init_event(struct pmu *pmu, st= ruct perf_event *event) if (ret) goto err_pmu; =20 + if (!(pmu->capabilities & PERF_PMU_CAP_SIMD_REGS) && + event_has_simd_regs(event)) { + ret =3D -EOPNOTSUPP; + goto err_destroy; + } + if (!(pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS) && event_has_extended_regs(event)) { ret =3D -EOPNOTSUPP; @@ -13556,6 +13625,12 @@ static int perf_copy_attr(struct perf_event_attr _= _user *uattr, ret =3D perf_reg_validate(attr->sample_regs_user); if (ret) return ret; + ret =3D perf_simd_reg_validate(attr->sample_simd_vec_reg_qwords, + attr->sample_simd_vec_reg_user, + attr->sample_simd_pred_reg_qwords, + attr->sample_simd_pred_reg_user); + if (ret) + return ret; } =20 if (attr->sample_type & PERF_SAMPLE_STACK_USER) { @@ -13576,8 +13651,17 @@ static int perf_copy_attr(struct perf_event_attr _= _user *uattr, if (!attr->sample_max_stack) attr->sample_max_stack =3D 
sysctl_perf_event_max_stack; =20 - if (attr->sample_type & PERF_SAMPLE_REGS_INTR) + if (attr->sample_type & PERF_SAMPLE_REGS_INTR) { ret =3D perf_reg_validate(attr->sample_regs_intr); + if (ret) + return ret; + ret =3D perf_simd_reg_validate(attr->sample_simd_vec_reg_qwords, + attr->sample_simd_vec_reg_intr, + attr->sample_simd_pred_reg_qwords, + attr->sample_simd_pred_reg_intr); + if (ret) + return ret; + } =20 #ifndef CONFIG_CGROUP_PERF if (attr->sample_type & PERF_SAMPLE_CGROUP) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 9142a8ba4019..f84200b9dd57 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -2051,7 +2051,8 @@ static void free_event_desc(struct evsel *events) =20 static bool perf_attr_check(struct perf_event_attr *attr) { - if (attr->__reserved_1 || attr->__reserved_2 || attr->__reserved_3) { + if (attr->__reserved_1 || attr->__reserved_2 || + attr->__reserved_3 || attr->__reserved_4) { pr_warning("Reserved bits are set unexpectedly. 
" "Please update perf tool.\n"); return false; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C655036C0CB; Tue, 24 Mar 2026 00:46:57 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313219; cv=none; b=LS4A1mxNgdQjThGEBmk7YJ6/bhSf8WZljvKkhRWjrVmiFB1tnMc9qcgpUfpnHTp87561jZaDC19u01Wr02hRuCisQmImRyMDC2d+MYhSyWWy/fsRR6WDAln6R2srSMbuTdtPKnLvmyE0+NP5Doeceff8eSARoSB2gJ9iJ8uJErM= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313219; c=relaxed/simple; bh=Kk+WXC3xpTdQHWn1C/1CBJVVcyZsrWG4vk8UnETlIak=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=TPJR+rPxf9+b5c8QryfCaMBX9Zq9uBO9czwdO9UYxt5KMaG4FYGsF61vl8pOKvZeDotcowgl+LV24zP1rOgOY+/hdJofGX1t+RVYsWRs875BAGHmM0TwGz7Q63oCcTzK2jNrjNP2xNvdBDUoCNzwX0833p5x3+l3PJo/HcnrxuA= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=O329OxDx; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="O329OxDx" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313218; x=1805849218; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; 
bh=Kk+WXC3xpTdQHWn1C/1CBJVVcyZsrWG4vk8UnETlIak=; b=O329OxDxKzt1ixvhNLmruCSioAuOrz/9dFqaQyho32Nkbz+n9jlRe7r/ xjS/BklB52CetvY0mOHvx5Qcv99EhajT2j35wIIzBARnoPDVBrrnjuStE hFDFKHaNX4c6TPdACb/Y/uGScSHyElA8Ew5qr/fxSDs7xPb/CCvTf+6bp 9LbBLhhpjzPTjxFTJfncLzH9jvPDMyRfj5YxxIbuwFHXS5QtZd3S/eUtu KfNfcJl3uogB7ofD2BMCJwv5m//25oN09TEgWqYfAAOGdBTuJZVshfTL8 yxyoZPHj180tDoGDciQkycQpZPMEL4jiXdsMMTBX5lZyXMc9+h8/f5sk2 g==; X-CSE-ConnectionGUID: UrLSnzwUQzGuQN9LKKFivQ== X-CSE-MsgGUID: 9JEi/mEISKGb/DGJqq6Tng== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397172" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397172" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:46:57 -0700 X-CSE-ConnectionGUID: 2CCRRNM1TlO3Be2YvN4ydw== X-CSE-MsgGUID: 4wLST9clRsqDLkzL5VAwhQ== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322843" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:53 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 14/24] perf/x86: Enable XMM sampling using sample_simd_vec_reg_* fields Date: Tue, 24 Mar 2026 08:41:08 +0800 Message-Id: <20260324004118.3772171-15-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: 
quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang This patch adds support for sampling XMM registers using the sample_simd_vec_reg_* fields. When sample_simd_regs_enabled is set, the original XMM space in the sample_regs_* field is treated as reserved. An INVAL error will be reported to user space if any bit is set in the original XMM space while sample_simd_regs_enabled is set. The perf_reg_value function requires ABI information to understand the layout of sample_regs. To accommodate this, a new abi field is introduced in the struct x86_perf_regs to represent ABI information. Additionally, the X86-specific perf_simd_reg_value function is implemented to retrieve the XMM register values. Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 89 +++++++++++++++++++++++++-- arch/x86/events/intel/ds.c | 2 +- arch/x86/events/perf_event.h | 12 ++++ arch/x86/include/asm/perf_event.h | 1 + arch/x86/include/uapi/asm/perf_regs.h | 13 ++++ arch/x86/kernel/perf_regs.c | 51 ++++++++++++++- 6 files changed, 161 insertions(+), 7 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index a5643c875190..3c9b79b46a66 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -704,6 +704,22 @@ int x86_pmu_hw_config(struct perf_event *event) if (event_has_extended_regs(event)) { if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) return -EINVAL; + if (event->attr.sample_simd_regs_enabled) + return -EINVAL; + } + + if (event_has_simd_regs(event)) { + if (!(event->pmu->capabilities & PERF_PMU_CAP_SIMD_REGS)) + return -EINVAL; + /* Not require any vector registers but set width */ + if (event->attr.sample_simd_vec_reg_qwords && + !event->attr.sample_simd_vec_reg_intr && + !event->attr.sample_simd_vec_reg_user) + return -EINVAL; + /* The vector registers set is not supported */ + if (event_needs_xmm(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_SSE)) + return -EINVAL; 
} } =20 @@ -1749,6 +1765,7 @@ static void x86_pmu_perf_get_regs_user(struct perf_sa= mple_data *data, struct x86_perf_regs *x86_regs_user =3D this_cpu_ptr(&x86_user_regs); struct perf_regs regs_user; =20 + x86_regs_user->abi =3D PERF_SAMPLE_REGS_ABI_NONE; perf_get_regs_user(®s_user, regs); data->regs_user.abi =3D regs_user.abi; if (regs_user.regs) { @@ -1758,12 +1775,26 @@ static void x86_pmu_perf_get_regs_user(struct perf_= sample_data *data, data->regs_user.regs =3D NULL; } =20 +static inline void +x86_pmu_update_xregs_size(struct perf_event_attr *attr, + struct perf_sample_data *data, + struct pt_regs *regs, + u64 mask, u64 pred_mask) +{ + u16 pred_qwords =3D attr->sample_simd_pred_reg_qwords; + u16 vec_qwords =3D attr->sample_simd_vec_reg_qwords; + + data->dyn_size +=3D (hweight64(mask) * vec_qwords + + hweight64(pred_mask) * pred_qwords) * sizeof(u64); +} + static void x86_pmu_setup_gpregs_data(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct perf_event_attr *attr =3D &event->attr; u64 sample_type =3D attr->sample_type; + struct x86_perf_regs *perf_regs; =20 if (sample_type & PERF_SAMPLE_REGS_USER) { if (user_mode(regs)) { @@ -1783,8 +1814,13 @@ static void x86_pmu_setup_gpregs_data(struct perf_ev= ent *event, data->regs_user.regs =3D NULL; } data->dyn_size +=3D sizeof(u64); - if (data->regs_user.regs) - data->dyn_size +=3D hweight64(attr->sample_regs_user) * sizeof(u64); + if (data->regs_user.regs) { + data->dyn_size +=3D + hweight64(attr->sample_regs_user) * sizeof(u64); + perf_regs =3D container_of(data->regs_user.regs, + struct x86_perf_regs, regs); + perf_regs->abi =3D data->regs_user.abi; + } data->sample_flags |=3D PERF_SAMPLE_REGS_USER; } =20 @@ -1792,8 +1828,13 @@ static void x86_pmu_setup_gpregs_data(struct perf_ev= ent *event, data->regs_intr.regs =3D regs; data->regs_intr.abi =3D perf_reg_abi(current); data->dyn_size +=3D sizeof(u64); - if (data->regs_intr.regs) - data->dyn_size +=3D 
hweight64(attr->sample_regs_intr) * sizeof(u64); + if (data->regs_intr.regs) { + data->dyn_size +=3D + hweight64(attr->sample_regs_intr) * sizeof(u64); + perf_regs =3D container_of(data->regs_intr.regs, + struct x86_perf_regs, regs); + perf_regs->abi =3D data->regs_intr.abi; + } data->sample_flags |=3D PERF_SAMPLE_REGS_INTR; } } @@ -1871,7 +1912,7 @@ static void x86_pmu_sample_xregs(struct perf_event *e= vent, if (WARN_ON_ONCE(!xsave)) return; =20 - if (event_has_extended_regs(event)) + if (event_needs_xmm(event)) mask |=3D XFEATURE_MASK_SSE; =20 mask &=3D x86_pmu.ext_regs_mask; @@ -1899,6 +1940,43 @@ static void x86_pmu_sample_xregs(struct perf_event *= event, } } =20 +static void x86_pmu_setup_xregs_data(struct perf_event *event, + struct perf_sample_data *data) +{ + struct perf_event_attr *attr =3D &event->attr; + u64 sample_type =3D attr->sample_type; + struct x86_perf_regs *perf_regs; + + if (!attr->sample_simd_regs_enabled) + return; + + if (sample_type & PERF_SAMPLE_REGS_USER && data->regs_user.abi) { + perf_regs =3D container_of(data->regs_user.regs, + struct x86_perf_regs, regs); + perf_regs->abi |=3D PERF_SAMPLE_REGS_ABI_SIMD; + + /* num and qwords of vector and pred registers */ + data->dyn_size +=3D sizeof(u64); + data->regs_user.abi |=3D PERF_SAMPLE_REGS_ABI_SIMD; + x86_pmu_update_xregs_size(attr, data, data->regs_user.regs, + attr->sample_simd_vec_reg_user, + attr->sample_simd_pred_reg_user); + } + + if (sample_type & PERF_SAMPLE_REGS_INTR && data->regs_intr.abi) { + perf_regs =3D container_of(data->regs_intr.regs, + struct x86_perf_regs, regs); + perf_regs->abi |=3D PERF_SAMPLE_REGS_ABI_SIMD; + + /* num and qwords of vector and pred registers */ + data->dyn_size +=3D sizeof(u64); + data->regs_intr.abi |=3D PERF_SAMPLE_REGS_ABI_SIMD; + x86_pmu_update_xregs_size(attr, data, data->regs_intr.regs, + attr->sample_simd_vec_reg_intr, + attr->sample_simd_pred_reg_intr); + } +} + void x86_pmu_setup_regs_data(struct perf_event *event, struct perf_sample_data 
*data, struct pt_regs *regs, @@ -1910,6 +1988,7 @@ void x86_pmu_setup_regs_data(struct perf_event *event, * which are unnecessary to sample again. */ x86_pmu_sample_xregs(event, data, ignore_mask); + x86_pmu_setup_xregs_data(event, data); } =20 int x86_pmu_handle_irq(struct pt_regs *regs) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 74a41dae8a62..ac9a1c2f0177 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1743,7 +1743,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event= *event) if (gprs || (attr->precise_ip < 2) || tsx_weight) pebs_data_cfg |=3D PEBS_DATACFG_GP; =20 - if (event_has_extended_regs(event)) + if (event_needs_xmm(event)) pebs_data_cfg |=3D PEBS_DATACFG_XMMS; =20 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index a5e5bffb711e..26d162794a36 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -137,6 +137,18 @@ static inline bool is_acr_event_group(struct perf_even= t *event) return check_leader_group(event->group_leader, PERF_X86_EVENT_ACR); } =20 +static inline bool event_needs_xmm(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + event->attr.sample_simd_vec_reg_qwords >=3D PERF_X86_XMM_QWORDS) + return true; + + if (!event->attr.sample_simd_regs_enabled && + event_has_extended_regs(event)) + return true; + return false; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index e47a963a7cf0..e54d21c13494 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -726,6 +726,7 @@ extern void perf_events_lapic_init(void); struct pt_regs; struct x86_perf_regs { struct pt_regs regs; + u64 abi; union { u64 *xmm_regs; u32 *xmm_space; /* for xsaves */ diff --git a/arch/x86/include/uapi/asm/perf_regs.h 
b/arch/x86/include/uapi/= asm/perf_regs.h index 7c9d2bb3833b..c5c1b3930df1 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -55,4 +55,17 @@ enum perf_event_x86_regs { =20 #define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1)) =20 +enum { + PERF_X86_SIMD_XMM_REGS =3D 16, + PERF_X86_SIMD_VEC_REGS_MAX =3D PERF_X86_SIMD_XMM_REGS, +}; + +#define PERF_X86_SIMD_VEC_MASK GENMASK_ULL(PERF_X86_SIMD_VEC_REGS_MAX - 1,= 0) + +enum { + /* 1 qword =3D 8 bytes */ + PERF_X86_XMM_QWORDS =3D 2, + PERF_X86_SIMD_QWORDS_MAX =3D PERF_X86_XMM_QWORDS, +}; + #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 81204cb7f723..9947a6b5c260 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -63,6 +63,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 if (idx >=3D PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { perf_regs =3D container_of(regs, struct x86_perf_regs, regs); + /* SIMD registers are moved to dedicated sample_simd_vec_reg */ + if (perf_regs->abi & PERF_SAMPLE_REGS_ABI_SIMD) + return 0; if (!perf_regs->xmm_regs) return 0; return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0]; @@ -74,6 +77,51 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return regs_get_register(regs, pt_regs_offset[idx]); } =20 +u64 perf_simd_reg_value(struct pt_regs *regs, int idx, + u16 qwords_idx, bool pred) +{ + struct x86_perf_regs *perf_regs =3D + container_of(regs, struct x86_perf_regs, regs); + + if (pred) + return 0; + + if (WARN_ON_ONCE(idx >=3D PERF_X86_SIMD_VEC_REGS_MAX || + qwords_idx >=3D PERF_X86_SIMD_QWORDS_MAX)) + return 0; + + if (qwords_idx < PERF_X86_XMM_QWORDS) { + if (!perf_regs->xmm_regs) + return 0; + return perf_regs->xmm_regs[idx * PERF_X86_XMM_QWORDS + + qwords_idx]; + } + + return 0; +} + +int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask, + u16 pred_qwords, u32 pred_mask) +{ + /* pred_qwords implies 
sample_simd_{pred,vec}_reg_* are supported */ + if (!pred_qwords) + return 0; + + if (!vec_qwords) { + if (vec_mask) + return -EINVAL; + } else { + if (vec_qwords !=3D PERF_X86_XMM_QWORDS) + return -EINVAL; + if (vec_mask & ~PERF_X86_SIMD_VEC_MASK) + return -EINVAL; + } + if (pred_mask) + return -EINVAL; + + return 0; +} + #define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \ ~((1ULL << PERF_REG_X86_MAX) - 1)) =20 @@ -108,7 +156,8 @@ u64 perf_reg_abi(struct task_struct *task) =20 int perf_reg_validate(u64 mask) { - if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) + /* The mask could be 0 if only the SIMD registers are interested */ + if (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)) return -EINVAL; =20 return 0; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A29CC350A10; Tue, 24 Mar 2026 00:47:02 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313223; cv=none; b=LBkfyXzba4/QNCSW4ocwLr+zEnI18csDDM3sG6EC8eLAJbhqlOtnuM+bZPcopSSclIVbNhiuDRRTfYq+rWFzG14z9WmhynpVITBoCu6gKx5/ES1nxbq83/otTJFXnDVJsdqPWH66xgKT1cSDGnzZLx7fX5GjoQ3yHOsvO/3jM0A= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313223; c=relaxed/simple; bh=hgvap5aS9xwKqhgpYNVLDdl6QyKRzu0zU+wBaYxYo2Q=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=hGOXZUB14kSJ2r3B4H367FTeaqWDWlR7Ox0mpo3EOUMm6AtcwLtKqFVD6KTQdewZakum4q07p5wMt5mSG6qQgj5x3yUSW/X2FOE1mMQdy11NNZyH1pslNLvaDlvpdDKmihKqBr8dxxJvWOxTdtqk2oBsRzRu4cC7uhgGuj1gkMM= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass 
smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=axqUUCNP; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="axqUUCNP" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313222; x=1805849222; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=hgvap5aS9xwKqhgpYNVLDdl6QyKRzu0zU+wBaYxYo2Q=; b=axqUUCNPMCI2KnSdQkwK5W9aNC9GMurMujJjBKGmKc5vpsTgXKdGXeU8 cBYP6imb5lIGiuM6m42nB+TmgLuyuOrg1JKP8OuuOKEt5klMjMbOD3I9i EWoeeSfNMfmAjRmNpyCt1jj0nSBVkz7niF3FHH0WfrKJR3EPKPJz7ksOK kpYJAY8o0zzJjOMPcw7ypiELT+rEy/8e1eQWneZpebOeVSO4qiZrSJuRF MKUp/FD8AWd+QZclQncRepx8hi5wYNwjrUT5FPar+shGdY5AqsPzvTo0E eeL2PGBDSVqJjilqJ2dIIwNZFs/ih1FpMZGpnWMpL2nIDy1W6xsZDjehh A==; X-CSE-ConnectionGUID: 6uvIrcXHTZOoC8uCcNyKkw== X-CSE-MsgGUID: noc2FcwpQ+2EpK0wN6N1ow== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397184" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397184" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:02 -0700 X-CSE-ConnectionGUID: 3XZWCqwlQCa9rxJcdqSzhA== X-CSE-MsgGUID: 8pTIBboUSTCeNba9UvaHew== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322854" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:46:58 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian 
Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 15/24] perf/x86: Enable YMM sampling using sample_simd_vec_reg_* fields Date: Tue, 24 Mar 2026 08:41:09 +0800 Message-Id: <20260324004118.3772171-16-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang This patch introduces support for sampling YMM registers via the sample_simd_vec_reg_* fields. Each YMM register consists of 4 u64 words, assembled from two halves: XMM (the lower 2 u64 words) and YMMH (the upper 2 u64 words). Although both XMM and YMMH data can be retrieved with a single xsaves instruction, they are stored in separate locations. The perf_simd_reg_value() function is responsible for assembling these halves into a complete YMM register for output to userspace. Additionally, sample_simd_vec_reg_qwords should be set to 4 to indicate YMM sampling. 
Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 8 ++++++++ arch/x86/events/perf_event.h | 9 +++++++++ arch/x86/include/asm/perf_event.h | 4 ++++ arch/x86/include/uapi/asm/perf_regs.h | 6 ++++-- arch/x86/kernel/perf_regs.c | 10 +++++++++- 5 files changed, 34 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 3c9b79b46a66..cdea5a10ec9f 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -720,6 +720,9 @@ int x86_pmu_hw_config(struct perf_event *event) if (event_needs_xmm(event) && !(x86_pmu.ext_regs_mask & XFEATURE_MASK_SSE)) return -EINVAL; + if (event_needs_ymm(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_YMM)) + return -EINVAL; } } =20 @@ -1844,6 +1847,7 @@ inline void x86_pmu_clear_perf_regs(struct pt_regs *r= egs) struct x86_perf_regs *perf_regs =3D container_of(regs, struct x86_perf_re= gs, regs); =20 perf_regs->xmm_regs =3D NULL; + perf_regs->ymmh_regs =3D NULL; } =20 static inline void x86_pmu_update_xregs(struct x86_perf_regs *perf_regs, @@ -1859,6 +1863,8 @@ static inline void x86_pmu_update_xregs(struct x86_pe= rf_regs *perf_regs, =20 if (mask & XFEATURE_MASK_SSE) perf_regs->xmm_space =3D xsave->i387.xmm_space; + if (mask & XFEATURE_MASK_YMM) + perf_regs->ymmh =3D get_xsave_addr(xsave, XFEATURE_YMM); } =20 /* @@ -1914,6 +1920,8 @@ static void x86_pmu_sample_xregs(struct perf_event *e= vent, =20 if (event_needs_xmm(event)) mask |=3D XFEATURE_MASK_SSE; + if (event_needs_ymm(event)) + mask |=3D XFEATURE_MASK_YMM; =20 mask &=3D x86_pmu.ext_regs_mask; if ((sample_type & PERF_SAMPLE_REGS_USER) && data->regs_user.abi) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 26d162794a36..8d5484462f75 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -149,6 +149,15 @@ static inline bool event_needs_xmm(struct perf_event *= event) return false; } =20 +static inline bool 
event_needs_ymm(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + event->attr.sample_simd_vec_reg_qwords >=3D PERF_X86_YMM_QWORDS) + return true; + + return false; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index e54d21c13494..1d03b86be65d 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -731,6 +731,10 @@ struct x86_perf_regs { u64 *xmm_regs; u32 *xmm_space; /* for xsaves */ }; + union { + u64 *ymmh_regs; + struct ymmh_struct *ymmh; + }; }; =20 extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/= asm/perf_regs.h index c5c1b3930df1..42d53978ea72 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -57,7 +57,8 @@ enum perf_event_x86_regs { =20 enum { PERF_X86_SIMD_XMM_REGS =3D 16, - PERF_X86_SIMD_VEC_REGS_MAX =3D PERF_X86_SIMD_XMM_REGS, + PERF_X86_SIMD_YMM_REGS =3D 16, + PERF_X86_SIMD_VEC_REGS_MAX =3D PERF_X86_SIMD_YMM_REGS, }; =20 #define PERF_X86_SIMD_VEC_MASK GENMASK_ULL(PERF_X86_SIMD_VEC_REGS_MAX - 1,= 0) @@ -65,7 +66,8 @@ enum { enum { /* 1 qword =3D 8 bytes */ PERF_X86_XMM_QWORDS =3D 2, - PERF_X86_SIMD_QWORDS_MAX =3D PERF_X86_XMM_QWORDS, + PERF_X86_YMM_QWORDS =3D 4, + PERF_X86_SIMD_QWORDS_MAX =3D PERF_X86_YMM_QWORDS, }; =20 #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 9947a6b5c260..4062a679cc5b 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -77,6 +77,8 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return regs_get_register(regs, pt_regs_offset[idx]); } =20 +#define PERF_X86_YMMH_QWORDS (PERF_X86_YMM_QWORDS / 2) + u64 perf_simd_reg_value(struct pt_regs *regs, int idx, u16 qwords_idx, bool pred) { @@ -95,6 +97,11 @@ u64 
perf_simd_reg_value(struct pt_regs *regs, int idx, return 0; return perf_regs->xmm_regs[idx * PERF_X86_XMM_QWORDS + qwords_idx]; + } else if (qwords_idx < PERF_X86_YMM_QWORDS) { + if (!perf_regs->ymmh_regs) + return 0; + return perf_regs->ymmh_regs[idx * PERF_X86_YMMH_QWORDS + + qwords_idx - PERF_X86_XMM_QWORDS]; } =20 return 0; @@ -111,7 +118,8 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask, if (vec_mask) return -EINVAL; } else { - if (vec_qwords !=3D PERF_X86_XMM_QWORDS) + if (vec_qwords !=3D PERF_X86_XMM_QWORDS && + vec_qwords !=3D PERF_X86_YMM_QWORDS) return -EINVAL; if (vec_mask & ~PERF_X86_SIMD_VEC_MASK) return -EINVAL; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 7CAB0365A1D; Tue, 24 Mar 2026 00:47:07 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313228; cv=none; b=SVGNH/7zn4HCSW6gOY/nzUeQG/HkkHYGWm3EFXOQ7YS9mcB+l4EQA5SYbcEMWsy1AisdzFJtkZnruUonmtaiojY+171E4IorUUUB1JtU2dUOdIOn8NvhHQS8qxVWai4EWO+DdCHZegBmLpmjqo54x/iNkcLOB2KRsz97T8US6Z8= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313228; c=relaxed/simple; bh=B9i1E68K0zS2g9PzXwTSdKlkC7t5KfzU/+lY7ShxGwM=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=T4q6exJpsMFjy9y4HrgbqUwl7rOzdc/hxL7FJuOyvQdFuJvLZN7UrdV9lIzDRg1GMTx7C6TtOMTdefZSWzVAOtEM/bLvVtLDzEqCEqI9reqVd705pz/I+QQbgZg4rZEJ/uNldwWlhGA6u1QPq3S2PqpAlYSfoa8aajjy2c1fYd0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=korElXie; arc=none 
smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="korElXie" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313227; x=1805849227; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=B9i1E68K0zS2g9PzXwTSdKlkC7t5KfzU/+lY7ShxGwM=; b=korElXiesVR7ewpr8lDxcTj/6C5vtW7fd3lAuQ+uzAfzof8hMjd7q3fr FiqEgbEVpIO6wQrzuCsbGl7gwbL0p1LPpiCzXQaM/AcuwV/OsuC5ZeocT A/626PJyzWviu7+kBnwd+Zd+WO7LjH6bhhleVr+XBFbpla3ffWQStu/Rv a7tfjlkEAM3JOLR4NbkBP3mBtcv+PaAOXGnnyiFrTqj8bF4UFA5kGDhQI ZdTbLhwO8OecWhStoB3Y7CXOylNilWWXd5xB+l1Jwu2Ou+pqUjeG6SQeW OXkFE9RpCYbz0ma6jrnXGuDlpSbEfWw2OiV5yK1JtqTRUdA0UbrUtQ7sG w==; X-CSE-ConnectionGUID: 3SakOmt6SUWkRubUkTKZsg== X-CSE-MsgGUID: V493wuPXRLKNPfBs9mHV7w== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397202" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397202" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:07 -0700 X-CSE-ConnectionGUID: /xs6TQ0QR1C6Zl+d/KggUw== X-CSE-MsgGUID: Mzil6j7UQ1KbEAHlb9FDyg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322897" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:03 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, 
Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 16/24] perf/x86: Enable ZMM sampling using sample_simd_vec_reg_* fields Date: Tue, 24 Mar 2026 08:41:10 +0800 Message-Id: <20260324004118.3772171-17-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang This patch adds support for sampling ZMM registers via the sample_simd_vec_reg_* fields. Each ZMM register consists of 8 u64 words. Current x86 hardware supports up to 32 ZMM registers. For ZMM registers from ZMM0 to ZMM15, they are assembled from three parts: XMM (the lower 2 u64 words), YMMH (the middle 2 u64 words), and ZMMH (the upper 4 u64 words). The perf_simd_reg_value() function is responsible for assembling these three parts into a complete ZMM register for output to userspace. For ZMM registers ZMM16 to ZMM31, each register can be read as a whole and directly outputted to userspace. Additionally, sample_simd_vec_reg_qwords should be set to 8 to indicate ZMM sampling. 
Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 16 ++++++++++++++++ arch/x86/events/perf_event.h | 19 +++++++++++++++++++ arch/x86/include/asm/perf_event.h | 8 ++++++++ arch/x86/include/uapi/asm/perf_regs.h | 8 ++++++-- arch/x86/kernel/perf_regs.c | 16 +++++++++++++++- 5 files changed, 64 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index cdea5a10ec9f..e5f5a6971d72 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -723,6 +723,12 @@ int x86_pmu_hw_config(struct perf_event *event) if (event_needs_ymm(event) && !(x86_pmu.ext_regs_mask & XFEATURE_MASK_YMM)) return -EINVAL; + if (event_needs_low16_zmm(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_ZMM_Hi256)) + return -EINVAL; + if (event_needs_high16_zmm(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_Hi16_ZMM)) + return -EINVAL; } } =20 @@ -1848,6 +1854,8 @@ inline void x86_pmu_clear_perf_regs(struct pt_regs *r= egs) =20 perf_regs->xmm_regs =3D NULL; perf_regs->ymmh_regs =3D NULL; + perf_regs->zmmh_regs =3D NULL; + perf_regs->h16zmm_regs =3D NULL; } =20 static inline void x86_pmu_update_xregs(struct x86_perf_regs *perf_regs, @@ -1865,6 +1873,10 @@ static inline void x86_pmu_update_xregs(struct x86_p= erf_regs *perf_regs, perf_regs->xmm_space =3D xsave->i387.xmm_space; if (mask & XFEATURE_MASK_YMM) perf_regs->ymmh =3D get_xsave_addr(xsave, XFEATURE_YMM); + if (mask & XFEATURE_MASK_ZMM_Hi256) + perf_regs->zmmh =3D get_xsave_addr(xsave, XFEATURE_ZMM_Hi256); + if (mask & XFEATURE_MASK_Hi16_ZMM) + perf_regs->h16zmm =3D get_xsave_addr(xsave, XFEATURE_Hi16_ZMM); } =20 /* @@ -1922,6 +1934,10 @@ static void x86_pmu_sample_xregs(struct perf_event *= event, mask |=3D XFEATURE_MASK_SSE; if (event_needs_ymm(event)) mask |=3D XFEATURE_MASK_YMM; + if (event_needs_low16_zmm(event)) + mask |=3D XFEATURE_MASK_ZMM_Hi256; + if (event_needs_high16_zmm(event)) + mask |=3D XFEATURE_MASK_Hi16_ZMM; =20 mask &=3D 
x86_pmu.ext_regs_mask; if ((sample_type & PERF_SAMPLE_REGS_USER) && data->regs_user.abi) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 8d5484462f75..841c8880e6fd 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -158,6 +158,25 @@ static inline bool event_needs_ymm(struct perf_event *= event) return false; } =20 +static inline bool event_needs_low16_zmm(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + event->attr.sample_simd_vec_reg_qwords >=3D PERF_X86_ZMM_QWORDS) + return true; + + return false; +} + +static inline bool event_needs_high16_zmm(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + (fls64(event->attr.sample_simd_vec_reg_intr) > PERF_X86_H16ZMM_BASE || + fls64(event->attr.sample_simd_vec_reg_user) > PERF_X86_H16ZMM_BASE)) + return true; + + return false; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index 1d03b86be65d..273840bd7b33 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -735,6 +735,14 @@ struct x86_perf_regs { u64 *ymmh_regs; struct ymmh_struct *ymmh; }; + union { + u64 *zmmh_regs; + struct avx_512_zmm_uppers_state *zmmh; + }; + union { + u64 *h16zmm_regs; + struct avx_512_hi16_state *h16zmm; + }; }; =20 extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/= asm/perf_regs.h index 42d53978ea72..a889fd92f2f0 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -58,16 +58,20 @@ enum perf_event_x86_regs { enum { PERF_X86_SIMD_XMM_REGS =3D 16, PERF_X86_SIMD_YMM_REGS =3D 16, - PERF_X86_SIMD_VEC_REGS_MAX =3D PERF_X86_SIMD_YMM_REGS, + PERF_X86_SIMD_ZMM_REGS =3D 32, + PERF_X86_SIMD_VEC_REGS_MAX =3D PERF_X86_SIMD_ZMM_REGS, }; =20 #define 
PERF_X86_SIMD_VEC_MASK GENMASK_ULL(PERF_X86_SIMD_VEC_REGS_MAX - 1,= 0) =20 +#define PERF_X86_H16ZMM_BASE 16 + enum { /* 1 qword =3D 8 bytes */ PERF_X86_XMM_QWORDS =3D 2, PERF_X86_YMM_QWORDS =3D 4, - PERF_X86_SIMD_QWORDS_MAX =3D PERF_X86_YMM_QWORDS, + PERF_X86_ZMM_QWORDS =3D 8, + PERF_X86_SIMD_QWORDS_MAX =3D PERF_X86_ZMM_QWORDS, }; =20 #endif /* _ASM_X86_PERF_REGS_H */ diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 4062a679cc5b..fe4ff4d2de88 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -78,6 +78,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) } =20 #define PERF_X86_YMMH_QWORDS (PERF_X86_YMM_QWORDS / 2) +#define PERF_X86_ZMMH_QWORDS (PERF_X86_ZMM_QWORDS / 2) =20 u64 perf_simd_reg_value(struct pt_regs *regs, int idx, u16 qwords_idx, bool pred) @@ -92,6 +93,13 @@ u64 perf_simd_reg_value(struct pt_regs *regs, int idx, qwords_idx >=3D PERF_X86_SIMD_QWORDS_MAX)) return 0; =20 + if (idx >=3D PERF_X86_H16ZMM_BASE) { + if (!perf_regs->h16zmm_regs) + return 0; + return perf_regs->h16zmm_regs[(idx - PERF_X86_H16ZMM_BASE) * + PERF_X86_ZMM_QWORDS + qwords_idx]; + } + if (qwords_idx < PERF_X86_XMM_QWORDS) { if (!perf_regs->xmm_regs) return 0; @@ -102,6 +110,11 @@ u64 perf_simd_reg_value(struct pt_regs *regs, int idx, return 0; return perf_regs->ymmh_regs[idx * PERF_X86_YMMH_QWORDS + qwords_idx - PERF_X86_XMM_QWORDS]; + } else if (qwords_idx < PERF_X86_ZMM_QWORDS) { + if (!perf_regs->zmmh_regs) + return 0; + return perf_regs->zmmh_regs[idx * PERF_X86_ZMMH_QWORDS + + qwords_idx - PERF_X86_YMM_QWORDS]; } =20 return 0; @@ -119,7 +132,8 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask, return -EINVAL; } else { if (vec_qwords !=3D PERF_X86_XMM_QWORDS && - vec_qwords !=3D PERF_X86_YMM_QWORDS) + vec_qwords !=3D PERF_X86_YMM_QWORDS && + vec_qwords !=3D PERF_X86_ZMM_QWORDS) return -EINVAL; if (vec_mask & ~PERF_X86_SIMD_VEC_MASK) return -EINVAL; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from 
mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 49DB3370D7D; Tue, 24 Mar 2026 00:47:12 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313233; cv=none; b=rXMUYAq5DTpttlEPXqiFW7Lq0WgqRTgQqrCWP1kPgsZjjyitWnEwwxIDQ16ctIwsM7yRT0GTow1dzSe2ZCe7c6po/4a4y2Ij/6klq4Vi6Gv5BMiQxK4XEK5m/5LtJV7pBpTXYF+fUwzkNHmYOYncBNBUFyA06YO4G6xparXLwtE= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313233; c=relaxed/simple; bh=t7ntPNrYl+47KNOOHyFIWOVc+HUTcEEeepncneeWUGY=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=IcgNFqPby/lqPMJ09N5D+GAo5Y5VF8qoYdzMKKzRfnuPVSPhlufumqbxtlnh9XlGtkA/Di2si+hWBltZ9+iynTpbaKaNhFO8gD1PLOzkN79CNoWRGYP+s8ESQKWYPwGNdStwfuKxGzAUDfHuuVdeHL+MrGRH67JE0f64amx681M= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=SaHD4Ksv; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="SaHD4Ksv" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313232; x=1805849232; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=t7ntPNrYl+47KNOOHyFIWOVc+HUTcEEeepncneeWUGY=; b=SaHD4KsvIsIm/S88oX46eYaKgLHJMtefjvX4Kfpfz0ghGd2Nh4d1tRdJ 
Rt9/a3WmVRundYHT/y1QO3BL8lD/uDM5stHCLNH85nQUPO6xEdZfZvYD/ m6mYnJsrB8tK3lQ/EsejMMRlNNt4181GgkpEdPRMGuI0biSSeHZDJHjri 2/vKuiWvx+vF9ktuCws/ENIuKqvPDXcdpn0r2zmjcpm3c/oD3b95zXVXs D8Rnh2B19RIRa+nAAkvU63BGSj8iCYQ2WrBaRBr8zZEOGuqdUlhYOAA3P JJBNjs9gNpMfAhNKMr3p9fwSWeF0iKGEd6Arr/qjLJX/p89OrO7T5ASTN A==; X-CSE-ConnectionGUID: XGrzsR6HSaiCxcgo7I2q/g== X-CSE-MsgGUID: rnh3YUsyTDSCjX6tf0O0VA== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397222" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397222" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:12 -0700 X-CSE-ConnectionGUID: jMTAVDrdRJW4B3QpCLMKWw== X-CSE-MsgGUID: h8d07cRZTVKdZ39udYAXJw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322925" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:07 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 17/24] perf/x86: Enable OPMASK sampling using sample_simd_pred_reg_* fields Date: Tue, 24 Mar 2026 08:41:11 +0800 Message-Id: <20260324004118.3772171-18-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang This patch adds support for 
sampling OPMASK registers via the sample_simd_pred_reg_* fields. Each OPMASK register consists of 1 u64 word. Current x86 hardware supports 8 OPMASK registers. The perf_simd_reg_value() function is responsible for outputting OPMASK value to userspace. Additionally, sample_simd_pred_reg_qwords should be set to 1 to indicate OPMASK sampling. Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 8 ++++++++ arch/x86/events/perf_event.h | 10 ++++++++++ arch/x86/include/asm/perf_event.h | 4 ++++ arch/x86/include/uapi/asm/perf_regs.h | 5 +++++ arch/x86/kernel/perf_regs.c | 15 ++++++++++++--- 5 files changed, 39 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index e5f5a6971d72..d86a4fbea1ed 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -729,6 +729,9 @@ int x86_pmu_hw_config(struct perf_event *event) if (event_needs_high16_zmm(event) && !(x86_pmu.ext_regs_mask & XFEATURE_MASK_Hi16_ZMM)) return -EINVAL; + if (event_needs_opmask(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_OPMASK)) + return -EINVAL; } } =20 @@ -1856,6 +1859,7 @@ inline void x86_pmu_clear_perf_regs(struct pt_regs *r= egs) perf_regs->ymmh_regs =3D NULL; perf_regs->zmmh_regs =3D NULL; perf_regs->h16zmm_regs =3D NULL; + perf_regs->opmask_regs =3D NULL; } =20 static inline void x86_pmu_update_xregs(struct x86_perf_regs *perf_regs, @@ -1877,6 +1881,8 @@ static inline void x86_pmu_update_xregs(struct x86_pe= rf_regs *perf_regs, perf_regs->zmmh =3D get_xsave_addr(xsave, XFEATURE_ZMM_Hi256); if (mask & XFEATURE_MASK_Hi16_ZMM) perf_regs->h16zmm =3D get_xsave_addr(xsave, XFEATURE_Hi16_ZMM); + if (mask & XFEATURE_MASK_OPMASK) + perf_regs->opmask =3D get_xsave_addr(xsave, XFEATURE_OPMASK); } =20 /* @@ -1938,6 +1944,8 @@ static void x86_pmu_sample_xregs(struct perf_event *e= vent, mask |=3D XFEATURE_MASK_ZMM_Hi256; if (event_needs_high16_zmm(event)) mask |=3D XFEATURE_MASK_Hi16_ZMM; + if 
(event_needs_opmask(event)) + mask |=3D XFEATURE_MASK_OPMASK; =20 mask &=3D x86_pmu.ext_regs_mask; if ((sample_type & PERF_SAMPLE_REGS_USER) && data->regs_user.abi) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 841c8880e6fd..00f436f5840b 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -177,6 +177,16 @@ static inline bool event_needs_high16_zmm(struct perf_= event *event) return false; } =20 +static inline bool event_needs_opmask(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + (event->attr.sample_simd_pred_reg_intr || + event->attr.sample_simd_pred_reg_user)) + return true; + + return false; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index 273840bd7b33..7e8b60bddd5a 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -743,6 +743,10 @@ struct x86_perf_regs { u64 *h16zmm_regs; struct avx_512_hi16_state *h16zmm; }; + union { + u64 *opmask_regs; + struct avx_512_opmask_state *opmask; + }; }; =20 extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/= asm/perf_regs.h index a889fd92f2f0..f4a1630c1928 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -60,14 +60,19 @@ enum { PERF_X86_SIMD_YMM_REGS =3D 16, PERF_X86_SIMD_ZMM_REGS =3D 32, PERF_X86_SIMD_VEC_REGS_MAX =3D PERF_X86_SIMD_ZMM_REGS, + + PERF_X86_SIMD_OPMASK_REGS =3D 8, + PERF_X86_SIMD_PRED_REGS_MAX =3D PERF_X86_SIMD_OPMASK_REGS, }; =20 +#define PERF_X86_SIMD_PRED_MASK GENMASK(PERF_X86_SIMD_PRED_REGS_MAX - 1, 0) #define PERF_X86_SIMD_VEC_MASK GENMASK_ULL(PERF_X86_SIMD_VEC_REGS_MAX - 1,= 0) =20 #define PERF_X86_H16ZMM_BASE 16 =20 enum { /* 1 qword =3D 8 bytes */ + PERF_X86_OPMASK_QWORDS =3D 1, PERF_X86_XMM_QWORDS =3D 2, 
PERF_X86_YMM_QWORDS =3D 4, PERF_X86_ZMM_QWORDS =3D 8, diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index fe4ff4d2de88..2e3c10dffb35 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -86,8 +86,14 @@ u64 perf_simd_reg_value(struct pt_regs *regs, int idx, struct x86_perf_regs *perf_regs =3D container_of(regs, struct x86_perf_regs, regs); =20 - if (pred) - return 0; + if (pred) { + if (WARN_ON_ONCE(idx >=3D PERF_X86_SIMD_PRED_REGS_MAX || + qwords_idx >=3D PERF_X86_OPMASK_QWORDS)) + return 0; + if (!perf_regs->opmask_regs) + return 0; + return perf_regs->opmask_regs[idx]; + } =20 if (WARN_ON_ONCE(idx >=3D PERF_X86_SIMD_VEC_REGS_MAX || qwords_idx >=3D PERF_X86_SIMD_QWORDS_MAX)) @@ -138,7 +144,10 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mas= k, if (vec_mask & ~PERF_X86_SIMD_VEC_MASK) return -EINVAL; } - if (pred_mask) + + if (pred_qwords !=3D PERF_X86_OPMASK_QWORDS) + return -EINVAL; + if (pred_mask & ~PERF_X86_SIMD_PRED_MASK) return -EINVAL; =20 return 0; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id C25CD371885; Tue, 24 Mar 2026 00:47:16 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313238; cv=none; b=bOmEKX0DQgm0vfX/7iwdM+LGN7inL6VvJj6djsq4s0t6F8KwvcMyJOPlgNdw9l+m4Zf3CMZ3OtXAv+aIeIlUgKVmt8kKLj6lFRnPPHi30sW3tpnBEPLo2Kt/I357y8h8h3AUaYsSekegpHkZIYNeX/5fmpI7RgbxtLyOzA626gg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313238; c=relaxed/simple; bh=XGDicDKSEV/egANlz1A7pxkMMvNbDJg8pvP0vQly/go=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; 
b=ki2RXYQVy0GJdk2NMYIRs2vYI2BezQliAcSzHW0WdWMArOXsFU2Wuo0y67RUkrhNsV2QaA3sxOk9iE+8TPWVngDA/ioClZimQzKZxVJiRA2Ba5yK6tQUFKWUjuB2YBLKTCkChf0KHL5Jybe4JAOeFZNGwxJ3a+FyJZwNR8reK/o= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=hM6iG3R7; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="hM6iG3R7" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313237; x=1805849237; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=XGDicDKSEV/egANlz1A7pxkMMvNbDJg8pvP0vQly/go=; b=hM6iG3R7o3Ov2KlS0QbYwOs4CnFc4rGwEvtzC4XF72Gg2bEyOQx/RrA4 4ybyz96klbJociZjRzVgXPJySRuxtC0bGqScRJtStvAa24HN8IFasKcc4 LAaLbFCcOgpDw2aVwplhF2g1xbSnkQaTjUlUMtHl4DYsrXbRgu6j45Y6V qwMUYxAztMJsSE/j97aqhe3zm5EfFEq/4pFVw0qc3T7IDmkK1k9qaJtfy 4hJ3z4cA7fFtTgqpnvff92++eukKKC2FePfhuCb9WpqMiay2cgMWwOtcR sbcooi2fdd5YYvephNT5YEy2tNO3XQ9yNIuQPDHNVeO8gjaOQAn/sBG51 A==; X-CSE-ConnectionGUID: oEg+1d1jRQWz7NO9uEgChw== X-CSE-MsgGUID: p2Powu3lRpylfSEQqBk0hg== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397236" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397236" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:16 -0700 X-CSE-ConnectionGUID: zdgaP2oORteaZrjtrxkoIQ== X-CSE-MsgGUID: ZV4zA6gHS8y8DHNZLO6GfA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322942" Received: from spr.sh.intel.com 
([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:12 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 18/24] perf: Enhance perf_reg_validate() with simd_enabled argument Date: Tue, 24 Mar 2026 08:41:12 +0800 Message-Id: <20260324004118.3772171-19-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" The upcoming patch will support x86 APX eGPRs sampling by using the reclaimed XMM register space to represent eGPRs in sample_regs_* fields. To differentiate between XMM and eGPRs in sample_regs_* fields, an additional argument, simd_enabled, is introduced to the perf_reg_validate() helper. If simd_enabled is set to 1, it indicates that eGPRs are represented in sample_regs_* fields for the x86 platform; otherwise, XMM registers are represented. 
Signed-off-by: Dapeng Mi --- arch/arm/kernel/perf_regs.c | 2 +- arch/arm64/kernel/perf_regs.c | 2 +- arch/csky/kernel/perf_regs.c | 2 +- arch/loongarch/kernel/perf_regs.c | 2 +- arch/mips/kernel/perf_regs.c | 2 +- arch/parisc/kernel/perf_regs.c | 2 +- arch/powerpc/perf/perf_regs.c | 2 +- arch/riscv/kernel/perf_regs.c | 2 +- arch/s390/kernel/perf_regs.c | 2 +- arch/x86/kernel/perf_regs.c | 4 ++-- include/linux/perf_regs.h | 2 +- kernel/events/core.c | 8 +++++--- 12 files changed, 17 insertions(+), 15 deletions(-) diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c index d575a4c3ca56..838d701adf4d 100644 --- a/arch/arm/kernel/perf_regs.c +++ b/arch/arm/kernel/perf_regs.c @@ -18,7 +18,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 #define REG_RESERVED (~((1ULL << PERF_REG_ARM_MAX) - 1)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask || mask & REG_RESERVED) return -EINVAL; diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 70e2f13f587f..71a3e0238de4 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -77,7 +77,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 #define REG_RESERVED (~((1ULL << PERF_REG_ARM64_MAX) - 1)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { u64 reserved_mask =3D REG_RESERVED; =20 diff --git a/arch/csky/kernel/perf_regs.c b/arch/csky/kernel/perf_regs.c index 94601f37b596..c932a96afc56 100644 --- a/arch/csky/kernel/perf_regs.c +++ b/arch/csky/kernel/perf_regs.c @@ -18,7 +18,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 #define REG_RESERVED (~((1ULL << PERF_REG_CSKY_MAX) - 1)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask || mask & REG_RESERVED) return -EINVAL; diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf= _regs.c index 8dd604f01745..164514f40ae0 100644 --- 
a/arch/loongarch/kernel/perf_regs.c +++ b/arch/loongarch/kernel/perf_regs.c @@ -25,7 +25,7 @@ u64 perf_reg_abi(struct task_struct *tsk) } #endif /* CONFIG_32BIT */ =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask) return -EINVAL; diff --git a/arch/mips/kernel/perf_regs.c b/arch/mips/kernel/perf_regs.c index 7736d3c5ebd2..00a5201dbd5d 100644 --- a/arch/mips/kernel/perf_regs.c +++ b/arch/mips/kernel/perf_regs.c @@ -28,7 +28,7 @@ u64 perf_reg_abi(struct task_struct *tsk) } #endif /* CONFIG_32BIT */ =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask) return -EINVAL; diff --git a/arch/parisc/kernel/perf_regs.c b/arch/parisc/kernel/perf_regs.c index b9fe1f2fcb9b..4f21aab5405c 100644 --- a/arch/parisc/kernel/perf_regs.c +++ b/arch/parisc/kernel/perf_regs.c @@ -34,7 +34,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 #define REG_RESERVED (~((1ULL << PERF_REG_PARISC_MAX) - 1)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask || mask & REG_RESERVED) return -EINVAL; diff --git a/arch/powerpc/perf/perf_regs.c b/arch/powerpc/perf/perf_regs.c index 350dccb0143c..a01d8a903640 100644 --- a/arch/powerpc/perf/perf_regs.c +++ b/arch/powerpc/perf/perf_regs.c @@ -125,7 +125,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return regs_get_register(regs, pt_regs_offset[idx]); } =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask || mask & REG_RESERVED) return -EINVAL; diff --git a/arch/riscv/kernel/perf_regs.c b/arch/riscv/kernel/perf_regs.c index 3bba8deababb..1ecc8760b88b 100644 --- a/arch/riscv/kernel/perf_regs.c +++ b/arch/riscv/kernel/perf_regs.c @@ -18,7 +18,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 #define REG_RESERVED (~((1ULL << PERF_REG_RISCV_MAX) - 1)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if 
(!mask || mask & REG_RESERVED) return -EINVAL; diff --git a/arch/s390/kernel/perf_regs.c b/arch/s390/kernel/perf_regs.c index 7b305f1456f8..6496fd23c540 100644 --- a/arch/s390/kernel/perf_regs.c +++ b/arch/s390/kernel/perf_regs.c @@ -34,7 +34,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) =20 #define REG_RESERVED (~((1UL << PERF_REG_S390_MAX) - 1)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask || mask & REG_RESERVED) return -EINVAL; diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 2e3c10dffb35..9b3134220b3e 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -166,7 +166,7 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_mask, (1ULL << PERF_REG_X86_R14) | \ (1ULL << PERF_REG_X86_R15)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED))) return -EINVAL; @@ -185,7 +185,7 @@ u64 perf_reg_abi(struct task_struct *task) (1ULL << PERF_REG_X86_FS) | \ (1ULL << PERF_REG_X86_GS)) =20 -int perf_reg_validate(u64 mask) +int perf_reg_validate(u64 mask, bool simd_enabled) { /* The mask could be 0 if only the SIMD registers are interested */ if (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)) diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index 518f28c6a7d4..09dbc2fc3859 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -10,7 +10,7 @@ struct perf_regs { }; =20 u64 perf_reg_value(struct pt_regs *regs, int idx); -int perf_reg_validate(u64 mask); +int perf_reg_validate(u64 mask, bool simd_enabled); u64 perf_reg_abi(struct task_struct *task); void perf_get_regs_user(struct perf_regs *regs_user, struct pt_regs *regs); diff --git a/kernel/events/core.c b/kernel/events/core.c index de42575f517b..797bddeca46a 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7736,7 +7736,7 @@ u64 __weak perf_reg_value(struct 
pt_regs *regs, int i= dx) return 0; } =20 -int __weak perf_reg_validate(u64 mask) +int __weak perf_reg_validate(u64 mask, bool simd_enabled) { return mask ? -ENOSYS : 0; } @@ -13622,7 +13622,8 @@ static int perf_copy_attr(struct perf_event_attr __= user *uattr, } =20 if (attr->sample_type & PERF_SAMPLE_REGS_USER) { - ret =3D perf_reg_validate(attr->sample_regs_user); + ret =3D perf_reg_validate(attr->sample_regs_user, + attr->sample_simd_regs_enabled); if (ret) return ret; ret =3D perf_simd_reg_validate(attr->sample_simd_vec_reg_qwords, @@ -13652,7 +13653,8 @@ static int perf_copy_attr(struct perf_event_attr __= user *uattr, attr->sample_max_stack =3D sysctl_perf_event_max_stack; =20 if (attr->sample_type & PERF_SAMPLE_REGS_INTR) { - ret =3D perf_reg_validate(attr->sample_regs_intr); + ret =3D perf_reg_validate(attr->sample_regs_intr, + attr->sample_simd_regs_enabled); if (ret) return ret; ret =3D perf_simd_reg_validate(attr->sample_simd_vec_reg_qwords, --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A5FAD372EE3; Tue, 24 Mar 2026 00:47:21 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313243; cv=none; b=NKWUkUz9EiabpcdZu+GBuUPlEQYYOf2FSK46SMwfIBQum6awXuUWjQGg8osDZfgQdXpeiNubGORS/O58dwDKgBawJVHIU+c1Ux+Lp+pmKQRiQwh+oGJGIRZXYmpNyODrg1zhdQkixgxs4YCG7VIh1zWnurTzy7fNsq0/WvmD63w= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313243; c=relaxed/simple; bh=lM9ou+xaSTN5Lj+3m2apmkVvZzG2xUKXKiyEiqsvYqU=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; 
b=TIfVIlQ10ajof1Al7WK8uxE2dqLb8iPGx4pUGJlGckWN+Ne1QnqLwdHI/2YEQvCUUTZ5Vnfcos9H+BMqOdlTXQaohJuR8onf5ffqb7+uYqhYet2BI0NhGeZw+L2IpboT/cy9TsTl1KgKA+k5LQ6A7pt91JPEaFQFUr3V2vEj0BU= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=nU1PY9eP; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="nU1PY9eP" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313241; x=1805849241; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=lM9ou+xaSTN5Lj+3m2apmkVvZzG2xUKXKiyEiqsvYqU=; b=nU1PY9eP3Gr/Vz7SRQ8EnSIdPnj2l7wRiyukTDNTT7O0g/nSbYQxqKCb GYWPND1NTEmA7DyoAN69NmfGystJ6lpt1F21ic9jz4MY2mm/MweVoMKGX aareggb1khN1An8okPtQQvyGMlgEiH2nVmo8T98v8RPgfJmmNBHMd+340 Hy4Nm2AVRdhrz0D76k3txmRsCv6Jt6KSiyzhSl+PJ21H5MOU9LvJ9F/I0 o4POdnO9ZDvZBOHtGtIk81tCRx1JrvwlkCqXiJhV13x2mvmEgnNPoNNWe l5VOiZGAgs63PvcwkSyFgk+asEYX2U6+DVwZcjnK7VhwU9ohtSyYQifPU Q==; X-CSE-ConnectionGUID: uVhz3wLOQASS/CTC7Efwhg== X-CSE-MsgGUID: lHN+wjxlQrS2cbg3Gxkk+w== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397251" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397251" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:21 -0700 X-CSE-ConnectionGUID: 4cUAkmsXQ/yulQUuTG2LWA== X-CSE-MsgGUID: UZIhFURyQQe5Bnr7L67JaA== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322959" Received: from spr.sh.intel.com 
([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:17 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 19/24] perf/x86: Enable eGPRs sampling using sample_regs_* fields Date: Tue, 24 Mar 2026 08:41:13 +0800 Message-Id: <20260324004118.3772171-20-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang This patch enables sampling of APX eGPRs (R16 ~ R31) via the sample_regs_* fields. To sample eGPRs, the sample_simd_regs_enabled field must be set. This allows the spare space (reclaimed from the original XMM space) in the sample_regs_* fields to be used for representing eGPRs. The perf_reg_value() function needs to check if the PERF_SAMPLE_REGS_ABI_SIMD flag is set first, and then determine whether to output eGPRs or legacy XMM registers to userspace. The perf_reg_validate() function first checks the simd_enabled argument to determine if the eGPRs bitmap is represented in sample_regs_* fields. It then validates the eGPRs bitmap accordingly. Currently, eGPRs sampling is only supported on the x86_64 architecture, as APX is only available on x86_64 platforms. 
Suggested-by: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 37 ++++++++++++++++------- arch/x86/events/perf_event.h | 10 +++++++ arch/x86/include/asm/perf_event.h | 4 +++ arch/x86/include/uapi/asm/perf_regs.h | 26 ++++++++++++++++ arch/x86/kernel/perf_regs.c | 43 ++++++++++++++++----------- 5 files changed, 91 insertions(+), 29 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d86a4fbea1ed..d33cfbe38573 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -697,20 +697,21 @@ int x86_pmu_hw_config(struct perf_event *event) } =20 if (event->attr.sample_type & (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_U= SER)) { - /* - * Besides the general purpose registers, XMM registers may - * be collected as well. - */ - if (event_has_extended_regs(event)) { - if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) - return -EINVAL; - if (event->attr.sample_simd_regs_enabled) - return -EINVAL; - } - if (event_has_simd_regs(event)) { + u64 reserved =3D ~GENMASK_ULL(PERF_REG_MISC_MAX - 1, 0); + if (!(event->pmu->capabilities & PERF_PMU_CAP_SIMD_REGS)) return -EINVAL; + /* + * The XMM space in the perf_event_x86_regs is reclaimed + * for eGPRs and other general registers. + */ + if (event->attr.sample_regs_user & reserved || + event->attr.sample_regs_intr & reserved) + return -EINVAL; + if (event_needs_egprs(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_APX)) + return -EINVAL; /* Not require any vector registers but set width */ if (event->attr.sample_simd_vec_reg_qwords && !event->attr.sample_simd_vec_reg_intr && @@ -732,6 +733,15 @@ int x86_pmu_hw_config(struct perf_event *event) if (event_needs_opmask(event) && !(x86_pmu.ext_regs_mask & XFEATURE_MASK_OPMASK)) return -EINVAL; + } else { + /* + * Besides the general purpose registers, XMM registers may + * be collected as well. 
+ */ + if (event_has_extended_regs(event)) { + if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS)) + return -EINVAL; + } } } =20 @@ -1860,6 +1870,7 @@ inline void x86_pmu_clear_perf_regs(struct pt_regs *r= egs) perf_regs->zmmh_regs =3D NULL; perf_regs->h16zmm_regs =3D NULL; perf_regs->opmask_regs =3D NULL; + perf_regs->egpr_regs =3D NULL; } =20 static inline void x86_pmu_update_xregs(struct x86_perf_regs *perf_regs, @@ -1883,6 +1894,8 @@ static inline void x86_pmu_update_xregs(struct x86_pe= rf_regs *perf_regs, perf_regs->h16zmm =3D get_xsave_addr(xsave, XFEATURE_Hi16_ZMM); if (mask & XFEATURE_MASK_OPMASK) perf_regs->opmask =3D get_xsave_addr(xsave, XFEATURE_OPMASK); + if (mask & XFEATURE_MASK_APX) + perf_regs->egpr =3D get_xsave_addr(xsave, XFEATURE_APX); } =20 /* @@ -1946,6 +1959,8 @@ static void x86_pmu_sample_xregs(struct perf_event *e= vent, mask |=3D XFEATURE_MASK_Hi16_ZMM; if (event_needs_opmask(event)) mask |=3D XFEATURE_MASK_OPMASK; + if (event_needs_egprs(event)) + mask |=3D XFEATURE_MASK_APX; =20 mask &=3D x86_pmu.ext_regs_mask; if ((sample_type & PERF_SAMPLE_REGS_USER) && data->regs_user.abi) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 00f436f5840b..0974fd8b0e20 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -187,6 +187,16 @@ static inline bool event_needs_opmask(struct perf_even= t *event) return false; } =20 +static inline bool event_needs_egprs(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + (event->attr.sample_regs_user & PERF_X86_EGPRS_MASK || + event->attr.sample_regs_intr & PERF_X86_EGPRS_MASK)) + return true; + + return false; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index 7e8b60bddd5a..a54ea8fa6a04 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -747,6 +747,10 @@ struct 
x86_perf_regs { u64 *opmask_regs; struct avx_512_opmask_state *opmask; }; + union { + u64 *egpr_regs; + struct apx_state *egpr; + }; }; =20 extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/= asm/perf_regs.h index f4a1630c1928..e721a47556d4 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -27,9 +27,34 @@ enum perf_event_x86_regs { PERF_REG_X86_R13, PERF_REG_X86_R14, PERF_REG_X86_R15, + /* + * The eGPRs and XMM have overlaps. Only one can be used + * at a time. The ABI PERF_SAMPLE_REGS_ABI_SIMD is used to + * distinguish which one is used. If PERF_SAMPLE_REGS_ABI_SIMD + * is set, then eGPRs is used, otherwise, XMM is used. + * + * Extended GPRs (eGPRs) + */ + PERF_REG_X86_R16, + PERF_REG_X86_R17, + PERF_REG_X86_R18, + PERF_REG_X86_R19, + PERF_REG_X86_R20, + PERF_REG_X86_R21, + PERF_REG_X86_R22, + PERF_REG_X86_R23, + PERF_REG_X86_R24, + PERF_REG_X86_R25, + PERF_REG_X86_R26, + PERF_REG_X86_R27, + PERF_REG_X86_R28, + PERF_REG_X86_R29, + PERF_REG_X86_R30, + PERF_REG_X86_R31, /* These are the limits for the GPRs. 
*/ PERF_REG_X86_32_MAX =3D PERF_REG_X86_GS + 1, PERF_REG_X86_64_MAX =3D PERF_REG_X86_R15 + 1, + PERF_REG_MISC_MAX =3D PERF_REG_X86_R31 + 1, =20 /* These all need two bits set because they are 128bit */ PERF_REG_X86_XMM0 =3D 32, @@ -54,6 +79,7 @@ enum perf_event_x86_regs { }; =20 #define PERF_REG_EXTENDED_MASK (~((1ULL << PERF_REG_X86_XMM0) - 1)) +#define PERF_X86_EGPRS_MASK GENMASK_ULL(PERF_REG_X86_R31, PERF_REG_X86_R16) =20 enum { PERF_X86_SIMD_XMM_REGS =3D 16, diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index 9b3134220b3e..a34cc52dbbeb 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -61,14 +61,22 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) { struct x86_perf_regs *perf_regs; =20 - if (idx >=3D PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { + if (idx > PERF_REG_X86_R15) { perf_regs =3D container_of(regs, struct x86_perf_regs, regs); - /* SIMD registers are moved to dedicated sample_simd_vec_reg */ - if (perf_regs->abi & PERF_SAMPLE_REGS_ABI_SIMD) - return 0; - if (!perf_regs->xmm_regs) - return 0; - return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0]; + + if (perf_regs->abi & PERF_SAMPLE_REGS_ABI_SIMD) { + if (idx <=3D PERF_REG_X86_R31) { + if (!perf_regs->egpr_regs) + return 0; + return perf_regs->egpr_regs[idx - PERF_REG_X86_R16]; + } + } else { + if (idx >=3D PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { + if (!perf_regs->xmm_regs) + return 0; + return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0]; + } + } } =20 if (WARN_ON_ONCE(idx >=3D ARRAY_SIZE(pt_regs_offset))) @@ -153,18 +161,12 @@ int perf_simd_reg_validate(u16 vec_qwords, u64 vec_ma= sk, return 0; } =20 -#define PERF_REG_X86_RESERVED (((1ULL << PERF_REG_X86_XMM0) - 1) & \ - ~((1ULL << PERF_REG_X86_MAX) - 1)) +#define PERF_REG_X86_RESERVED (GENMASK_ULL(PERF_REG_X86_XMM0 - 1, PERF_REG= _X86_AX) & \ + ~GENMASK_ULL(PERF_REG_X86_R15, PERF_REG_X86_AX)) +#define PERF_REG_X86_EXT_RESERVED (~GENMASK_ULL(PERF_REG_MISC_MAX - 1, PER= 
F_REG_X86_AX)) =20 #ifdef CONFIG_X86_32 -#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \ - (1ULL << PERF_REG_X86_R9) | \ - (1ULL << PERF_REG_X86_R10) | \ - (1ULL << PERF_REG_X86_R11) | \ - (1ULL << PERF_REG_X86_R12) | \ - (1ULL << PERF_REG_X86_R13) | \ - (1ULL << PERF_REG_X86_R14) | \ - (1ULL << PERF_REG_X86_R15)) +#define REG_NOSUPPORT GENMASK_ULL(PERF_REG_X86_R15, PERF_REG_X86_R8) =20 int perf_reg_validate(u64 mask, bool simd_enabled) { @@ -187,8 +189,13 @@ u64 perf_reg_abi(struct task_struct *task) =20 int perf_reg_validate(u64 mask, bool simd_enabled) { + if (!simd_enabled && + (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))) + return -EINVAL; + /* The mask could be 0 if only the SIMD registers are interested */ - if (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)) + if (simd_enabled && + (mask & (REG_NOSUPPORT | PERF_REG_X86_EXT_RESERVED))) return -EINVAL; =20 return 0; --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 6748F369219; Tue, 24 Mar 2026 00:47:26 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313247; cv=none; b=NRN1Zi/2CNHhan1sYxvaqjcdjXa4EEJSXh2MAWs6BkGA64asDoE6IBGo87CVZNncOnW5S/ER30HWYGU6fEdv5AYS6E2Ggum3BIfgkFC24PDRPkt4vBZ9A1FtVwGs74ERYCIF5DiThZuXcBtPlyXoiXtJAesDdigHzmpIxPrlalo= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313247; c=relaxed/simple; bh=QtLR/fz9qPFZ4UPHdPr2QzXqegSCHOhG8wWpER1udAw=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=Hi1NVon+Df7AOCqt/q3zjMCf8sz8h/f1ju7yO4dxS2/VxLd40QxiSh8iPxbKzUlDXN31+XbLw72duv1TNghSuDyVd4/x0SPK1hLjYexIWmYh08utwkdt/lzLYfwTZEn3RH0JNkcNFdVcbIIph0oeFWaqjnUjDALGDTCgzD2a/ak= 
ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=X6FlARsT; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="X6FlARsT" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313246; x=1805849246; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=QtLR/fz9qPFZ4UPHdPr2QzXqegSCHOhG8wWpER1udAw=; b=X6FlARsTim5wLDz7Ri8RvWJeMpA7sFoJ1OareIBC2JljRNWUIMNlDzYP Qby4frEk5A0Uh0dTCOEjA3Rdch0C9Gwtpsfwaci2fd+2lyskPVP3deTUm XOIcT+Yck5a1gQ6ecWMkUF+25aMIlwKnDhxD4/C989Vjo6H/5N89KpEWo vPx6MREiYUsy3+z+ezbAhZE2JEEp8wWWlFJXVdoRsiWh6kQKNYPdlmBqG gwutax1riW9tyE3mxDr2J9rPEB3VnjxweqB7rHt9vfsrEYBFntcWJgmew Q8cLc+LWMnriszgJOlJL1dkX5HNQUHw+9Bs4zsGa7ORiuXNoETENA/G4r Q==; X-CSE-ConnectionGUID: 5AwNhy5NRZa3NYGrsUAKxg== X-CSE-MsgGUID: jFZN4He4TLGlYRarHsAgqw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397268" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397268" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:26 -0700 X-CSE-ConnectionGUID: R4P5CQXcQTa9qGbHWDAvjg== X-CSE-MsgGUID: IQvaBVMUQ92SWI2JsnHBXg== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322970" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:21 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung 
Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 20/24] perf/x86: Enable SSP sampling using sample_regs_* fields Date: Tue, 24 Mar 2026 08:41:14 +0800 Message-Id: <20260324004118.3772171-21-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang This patch enables sampling of CET SSP register via the sample_regs_* fields. To sample SSP, the sample_simd_regs_enabled field must be set. This allows the spare space (reclaimed from the original XMM space) in the sample_regs_* fields to be used for representing SSP. Similar with eGPRs sampling, the perf_reg_value() function needs to check if the PERF_SAMPLE_REGS_ABI_SIMD flag is set first, and then determine whether to output SSP or legacy XMM registers to userspace. Additionally, arch-PEBS supports sampling SSP, which is placed into the GPRs group. This patch also enables arch-PEBS-based SSP sampling. Currently, SSP sampling is only supported on the x86_64 architecture, as CET is only available on x86_64 platforms. 
Signed-off-by: Kan Liang Co-developed-by: Dapeng Mi Signed-off-by: Dapeng Mi --- arch/x86/events/core.c | 9 +++++++++ arch/x86/events/intel/ds.c | 8 ++++++++ arch/x86/events/perf_event.h | 10 ++++++++++ arch/x86/include/asm/perf_event.h | 4 ++++ arch/x86/include/uapi/asm/perf_regs.h | 7 ++++--- arch/x86/kernel/perf_regs.c | 5 +++++ 6 files changed, 40 insertions(+), 3 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d33cfbe38573..ea451b48b9d6 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -712,6 +712,10 @@ int x86_pmu_hw_config(struct perf_event *event) if (event_needs_egprs(event) && !(x86_pmu.ext_regs_mask & XFEATURE_MASK_APX)) return -EINVAL; + if (event_needs_ssp(event) && + !(x86_pmu.ext_regs_mask & XFEATURE_MASK_CET_USER)) + return -EINVAL; + /* Not require any vector registers but set width */ if (event->attr.sample_simd_vec_reg_qwords && !event->attr.sample_simd_vec_reg_intr && @@ -1871,6 +1875,7 @@ inline void x86_pmu_clear_perf_regs(struct pt_regs *r= egs) perf_regs->h16zmm_regs =3D NULL; perf_regs->opmask_regs =3D NULL; perf_regs->egpr_regs =3D NULL; + perf_regs->cet_regs =3D NULL; } =20 static inline void x86_pmu_update_xregs(struct x86_perf_regs *perf_regs, @@ -1896,6 +1901,8 @@ static inline void x86_pmu_update_xregs(struct x86_pe= rf_regs *perf_regs, perf_regs->opmask =3D get_xsave_addr(xsave, XFEATURE_OPMASK); if (mask & XFEATURE_MASK_APX) perf_regs->egpr =3D get_xsave_addr(xsave, XFEATURE_APX); + if (mask & XFEATURE_MASK_CET_USER) + perf_regs->cet =3D get_xsave_addr(xsave, XFEATURE_CET_USER); } =20 /* @@ -1961,6 +1968,8 @@ static void x86_pmu_sample_xregs(struct perf_event *e= vent, mask |=3D XFEATURE_MASK_OPMASK; if (event_needs_egprs(event)) mask |=3D XFEATURE_MASK_APX; + if (event_needs_ssp(event)) + mask |=3D XFEATURE_MASK_CET_USER; =20 mask &=3D x86_pmu.ext_regs_mask; if ((sample_type & PERF_SAMPLE_REGS_USER) && data->regs_user.abi) diff --git a/arch/x86/events/intel/ds.c 
b/arch/x86/events/intel/ds.c index ac9a1c2f0177..3a2fb623e0ab 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2685,6 +2685,14 @@ static void setup_arch_pebs_sample_data(struct perf_= event *event, __setup_pebs_gpr_group(event, data, regs, (struct pebs_gprs *)gprs, sample_type); + + /* Currently only user space mode enables SSP. */ + if (user_mode(regs) && (sample_type & + (PERF_SAMPLE_REGS_INTR | PERF_SAMPLE_REGS_USER))) { + /* Point to r15 so that cet_regs[1] =3D ssp. */ + perf_regs->cet_regs =3D &gprs->r15; + ignore_mask =3D XFEATURE_MASK_CET_USER; + } } =20 if (header->aux) { diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 0974fd8b0e20..36688d28407f 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -197,6 +197,16 @@ static inline bool event_needs_egprs(struct perf_event= *event) return false; } =20 +static inline bool event_needs_ssp(struct perf_event *event) +{ + if (event->attr.sample_simd_regs_enabled && + (event->attr.sample_regs_user & BIT_ULL(PERF_REG_X86_SSP) || + event->attr.sample_regs_intr & BIT_ULL(PERF_REG_X86_SSP))) + return true; + + return false; +} + struct amd_nb { int nb_id; /* NorthBridge id */ int refcnt; /* reference count */ diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h index a54ea8fa6a04..0c6d58e6c98f 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -751,6 +751,10 @@ struct x86_perf_regs { u64 *egpr_regs; struct apx_state *egpr; }; + union { + u64 *cet_regs; + struct cet_user_state *cet; + }; }; =20 extern unsigned long perf_arch_instruction_pointer(struct pt_regs *regs); diff --git a/arch/x86/include/uapi/asm/perf_regs.h b/arch/x86/include/uapi/= asm/perf_regs.h index e721a47556d4..98a5b6c8e24c 100644 --- a/arch/x86/include/uapi/asm/perf_regs.h +++ b/arch/x86/include/uapi/asm/perf_regs.h @@ -28,10 +28,10 @@ enum perf_event_x86_regs { PERF_REG_X86_R14, PERF_REG_X86_R15, /* 
- * The eGPRs and XMM have overlaps. Only one can be used + * The eGPRs/SSP and XMM have overlaps. Only one can be used * at a time. The ABI PERF_SAMPLE_REGS_ABI_SIMD is used to * distinguish which one is used. If PERF_SAMPLE_REGS_ABI_SIMD - * is set, then eGPRs is used, otherwise, XMM is used. + * is set, then eGPRs/SSP is used, otherwise, XMM is used. * * Extended GPRs (eGPRs) */ @@ -51,10 +51,11 @@ enum perf_event_x86_regs { PERF_REG_X86_R29, PERF_REG_X86_R30, PERF_REG_X86_R31, + PERF_REG_X86_SSP, /* These are the limits for the GPRs. */ PERF_REG_X86_32_MAX =3D PERF_REG_X86_GS + 1, PERF_REG_X86_64_MAX =3D PERF_REG_X86_R15 + 1, - PERF_REG_MISC_MAX =3D PERF_REG_X86_R31 + 1, + PERF_REG_MISC_MAX =3D PERF_REG_X86_SSP + 1, =20 /* These all need two bits set because they are 128bit */ PERF_REG_X86_XMM0 =3D 32, diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index a34cc52dbbeb..9715d1f90313 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -70,6 +70,11 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return 0; return perf_regs->egpr_regs[idx - PERF_REG_X86_R16]; } + if (idx =3D=3D PERF_REG_X86_SSP) { + if (!perf_regs->cet_regs) + return 0; + return perf_regs->cet_regs[1]; + } } else { if (idx >=3D PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) { if (!perf_regs->xmm_regs) --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 1B16436997A; Tue, 24 Mar 2026 00:47:31 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313252; cv=none; 
b=OqZOf8RSSGOgEFNCckoHSpDmpt9tM34nHzax5myBos4HGg+DqM60EK7Doh1hPrXVRhKgHAOL9lT4F8jz8rivFOZeqdY3Mu68aaTkT+CQRFyAlr+7j6cc+5AWqh5+9YojSI6rfhdQIasxmgyMBe9kb0buB7HEy80LntcQmOGX3GA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313252; c=relaxed/simple; bh=hP+ozp4ouWavIle7cZwBH6bgjvITk/iTGJu6EIZP7nc=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=RCna+Ns4Jqy3V9EfQQlLF+WHHBStlPim1Zshke3y0MmQRYCaLKQfqhRvtXze6CkJ3Ec+r+1As/LkKYXNqHAgN2H0nbVuZ8cn7I5eLPWNsTyE86RvSp9oupz+ttv4xdUvC0U/tV242j+m8v5BWtsjpaxUMnLBQR/LdKwJ6pO+mnE= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=Xtojd2Ri; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="Xtojd2Ri" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313251; x=1805849251; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=hP+ozp4ouWavIle7cZwBH6bgjvITk/iTGJu6EIZP7nc=; b=Xtojd2Ricf1iiZDg37yEPA2UoQu3I7I44mYa/nfj7Ry6Ibc8FkVAGkJr Jr/zsgHKeeaSAiZktdO3lyM1Muvqj+rlKIQmp/TjSY1hQEacyvs8wTp0f 5TJLdlIIxQ4i/7bYrqGZOXfp3Y6HmkvXdMvcez6+MVh9G0ZX600Jz7yAJ otXiohe/wXvEE3zXbRrGHO7vsH2Zi2G4ei9+UtlRtBLJ3qim53qtd+CTd DlyGxa6yzpyhXrGnEDJKIro3hAT3S3RLRVQe5ymGy7d5MWv7kKJh71leV FwaOS12MaeKdTJeq5e87OmGsE4Q6SUc79JpNfRwTWeMZQIYPEe4T4/p8j w==; X-CSE-ConnectionGUID: tsGEwdzgR1ao0c0UMTD25w== X-CSE-MsgGUID: q0ZFaNGyQGOCwBQdZOsLQQ== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397278" X-IronPort-AV: 
E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397278" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:31 -0700 X-CSE-ConnectionGUID: uyucCZd0TNmoFuNZHiVRZA== X-CSE-MsgGUID: dkRZGSouRp65HupN2Tja3A== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322977" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:26 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Kan Liang , Dapeng Mi Subject: [Patch v7 21/24] perf/x86/intel: Enable PERF_PMU_CAP_SIMD_REGS capability Date: Tue, 24 Mar 2026 08:41:15 +0800 Message-Id: <20260324004118.3772171-22-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" From: Kan Liang Enable the PERF_PMU_CAP_SIMD_REGS capability if XSAVES support is available for YMM, ZMM, OPMASK, eGPRs, or SSP. Temporarily disable large PEBS sampling for these registers, as the current arch-PEBS sampling code does not support them yet. Large PEBS sampling for these registers will be enabled in subsequent patches. 
Signed-off-by: Kan Liang Signed-off-by: Dapeng Mi --- arch/x86/events/intel/core.c | 52 ++++++++++++++++++++++++++++++++---- 1 file changed, 47 insertions(+), 5 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 5772dcc3bcbd..0a32a0367647 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4424,11 +4424,33 @@ static unsigned long intel_pmu_large_pebs_flags(str= uct perf_event *event) flags &=3D ~PERF_SAMPLE_TIME; if (!event->attr.exclude_kernel) flags &=3D ~PERF_SAMPLE_REGS_USER; - if (event->attr.sample_regs_user & ~PEBS_GP_REGS) - flags &=3D ~PERF_SAMPLE_REGS_USER; - if (event->attr.sample_regs_intr & - ~(PEBS_GP_REGS | PERF_REG_EXTENDED_MASK)) - flags &=3D ~PERF_SAMPLE_REGS_INTR; + if (event->attr.sample_simd_regs_enabled) { + u64 nolarge =3D PERF_X86_EGPRS_MASK | BIT_ULL(PERF_REG_X86_SSP); + + /* + * PEBS HW can only collect the XMM0-XMM15 for now. + * Disable large PEBS for other vector registers, predicate + * registers, eGPRs, and SSP. 
+ */ + if (event->attr.sample_regs_user & nolarge || + fls64(event->attr.sample_simd_vec_reg_user) > PERF_X86_H16ZMM_BASE || + event->attr.sample_simd_pred_reg_user) + flags &=3D ~PERF_SAMPLE_REGS_USER; + + if (event->attr.sample_regs_intr & nolarge || + fls64(event->attr.sample_simd_vec_reg_intr) > PERF_X86_H16ZMM_BASE || + event->attr.sample_simd_pred_reg_intr) + flags &=3D ~PERF_SAMPLE_REGS_INTR; + + if (event->attr.sample_simd_vec_reg_qwords > PERF_X86_XMM_QWORDS) + flags &=3D ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); + } else { + if (event->attr.sample_regs_user & ~PEBS_GP_REGS) + flags &=3D ~PERF_SAMPLE_REGS_USER; + if (event->attr.sample_regs_intr & + ~(PEBS_GP_REGS | PERF_REG_EXTENDED_MASK)) + flags &=3D ~PERF_SAMPLE_REGS_INTR; + } return flags; } =20 @@ -5910,6 +5932,26 @@ static void intel_extended_regs_init(struct pmu *pmu) =20 x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_SSE; dest_pmu->capabilities |=3D PERF_PMU_CAP_EXTENDED_REGS; + + if (boot_cpu_has(X86_FEATURE_AVX) && + cpu_has_xfeatures(XFEATURE_MASK_YMM, NULL)) + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_YMM; + if (boot_cpu_has(X86_FEATURE_APX) && + cpu_has_xfeatures(XFEATURE_MASK_APX, NULL)) + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_APX; + if (boot_cpu_has(X86_FEATURE_AVX512F)) { + if (cpu_has_xfeatures(XFEATURE_MASK_OPMASK, NULL)) + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_OPMASK; + if (cpu_has_xfeatures(XFEATURE_MASK_ZMM_Hi256, NULL)) + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_ZMM_Hi256; + if (cpu_has_xfeatures(XFEATURE_MASK_Hi16_ZMM, NULL)) + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_Hi16_ZMM; + } + if (cpu_feature_enabled(X86_FEATURE_USER_SHSTK)) + x86_pmu.ext_regs_mask |=3D XFEATURE_MASK_CET_USER; + + if (x86_pmu.ext_regs_mask !=3D XFEATURE_MASK_SSE) + dest_pmu->capabilities |=3D PERF_PMU_CAP_SIMD_REGS; } =20 #define counter_mask(_gp, _fixed) ((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX= _FIXED)) --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com 
(mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id A31F91A681A; Tue, 24 Mar 2026 00:47:35 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313258; cv=none; b=babchKJRPXigyg4x63Elfb18JuP6LJyE6cM6BXPGSiUFBOZlGgtl8BxFbGHLMB6NG8NCVmsZ/6HoncjiufZihhu6SXdwvSFFddOTLHMFPWBJe723OhoXEwGA+/R0GeIECqjFQGvaiRqwRD2srFHlZBhqtS8RgntnPLyWXylEFLg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313258; c=relaxed/simple; bh=+TVpe+C1qqKgkjupJ92EWRpzHCu3FZPt8miWHwqHMYE=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=PsQ1L2pDkTCw3LWs61bL2zZNEzi3tfVfMdmct92xmt8i6w101NOFtoHR41/YZSpzcJfHwG0+s7uT6AxnafnoUVDrwEvM0nI7rpvZw7oJLMDh0LuJ3Dl2/YMAIM3AIZvedmfjG+9VjItRddVTUG9bDGROmNl9qBilSmtQyhW0aC0= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=kEsVI5iD; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="kEsVI5iD" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313255; x=1805849255; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=+TVpe+C1qqKgkjupJ92EWRpzHCu3FZPt8miWHwqHMYE=; b=kEsVI5iD2Wqb64QVI7qeOXvgZwQyIP+Ct8GOrLXu6AFg1LkoKftXIS4w 
qlaE6JCgrCQw7dC8Oun8N02aoxbJ8FhOSKUM04HXdEFOjdh4YIL1vIl1T Ew2CBHsgVrwBD4MvOdcBKa5sPdG8YIIs+3GOduk92XFGuKviAh2m4cg09 zSxVYuH6TgXjUfwM8tWLJkKcGVu3SQ7Jol4hS0rvydb7wiUK7XJEKhHKq iXnZMzobMb848Lr2cL3ZAMCVY/0+jkqIMuuLR4/JTaJKBDxCj7W+b1Gs0 ek2wOsTlYX67hVTZe5U536bgQ64ImROL3Xfx5KwQRc1G5s/g+7s4e+p5U g==; X-CSE-ConnectionGUID: og2fgQGlSjOhn6jRIRdFHA== X-CSE-MsgGUID: QR7PAEU6TUqUeFUpLAgG3w== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397292" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397292" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:35 -0700 X-CSE-ConnectionGUID: tuT1evskTP2vzN7aMCIOqQ== X-CSE-MsgGUID: gW6HBIXhSSS57PYxWf2Bxw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221322990" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:31 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 22/24] perf/x86/intel: Enable arch-PEBS based SIMD/eGPRs/SSP sampling Date: Tue, 24 Mar 2026 08:41:16 +0800 Message-Id: <20260324004118.3772171-23-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" This patch enables arch-PEBS based SIMD/eGPRs/SSP registers 
sampling. Arch-PEBS supports sampling of these registers, with all except SSP placed into the XSAVE-Enabled Registers (XER) group with the layout described below. Field Name Registers Used Size XSTATE_BV XINUSE for groups 8 B Reserved Reserved 8 B SSER XMM0-XMM15 16 regs * 16 B =3D 256 B YMMHIR Upper 128 bits of YMM0-YMM15 16 regs * 16 B =3D 256 B EGPR R16-R31 16 regs * 8 B =3D 128 B OPMASKR K0-K7 8 regs * 8 B =3D 64 B ZMMHIR Upper 256 bits of ZMM0-ZMM15 16 regs * 32 B =3D 512 B Hi16ZMMR ZMM16-ZMM31 16 regs * 64 B =3D 1024 B Memory space in the output buffer is allocated for these sub-groups as long as the corresponding Format.XER[55:49] bits in the PEBS record header are set. However, the arch-PEBS hardware engine does not write the sub-group if it is not used (in INIT state). In such cases, the corresponding bit in the XSTATE_BV bitmap is set to 0. Therefore, the XSTATE_BV field is checked to determine if the register data is actually written for each PEBS record. If not, the register data is not outputted to userspace. The SSP register is sampled and placed into the GPRs group by arch-PEBS. Additionally, the MSRs IA32_PMC_{GPn|FXm}_CFG_C.[55:49] bits are used to manage which types of these registers need to be sampled. 
Signed-off-by: Dapeng Mi --- arch/x86/events/intel/core.c | 75 ++++++++++++++++++++++-------- arch/x86/events/intel/ds.c | 77 ++++++++++++++++++++++++++++--- arch/x86/include/asm/msr-index.h | 7 +++ arch/x86/include/asm/perf_event.h | 8 +++- 4 files changed, 142 insertions(+), 25 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 0a32a0367647..e0dd57906bca 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3221,6 +3221,21 @@ static void intel_pmu_enable_event_ext(struct perf_e= vent *event) if (pebs_data_cfg & PEBS_DATACFG_XMMS) ext |=3D ARCH_PEBS_VECR_XMM & cap.caps; =20 + if (pebs_data_cfg & PEBS_DATACFG_YMMHS) + ext |=3D ARCH_PEBS_VECR_YMMH & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_EGPRS) + ext |=3D ARCH_PEBS_VECR_EGPRS & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_OPMASKS) + ext |=3D ARCH_PEBS_VECR_OPMASK & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_ZMMHS) + ext |=3D ARCH_PEBS_VECR_ZMMH & cap.caps; + + if (pebs_data_cfg & PEBS_DATACFG_H16ZMMS) + ext |=3D ARCH_PEBS_VECR_H16ZMM & cap.caps; + if (pebs_data_cfg & PEBS_DATACFG_LBRS) ext |=3D ARCH_PEBS_LBR & cap.caps; =20 @@ -4416,6 +4431,34 @@ static void intel_pebs_aliases_skl(struct perf_event= *event) return intel_pebs_aliases_precdist(event); } =20 +static inline bool intel_pebs_support_regs(struct perf_event *event, u64 r= egs) +{ + struct arch_pebs_cap cap =3D hybrid(event->pmu, arch_pebs_cap); + int pebs_format =3D x86_pmu.intel_cap.pebs_format; + bool supported =3D true; + + /* SSP */ + if (regs & PEBS_DATACFG_GP) + supported &=3D x86_pmu.arch_pebs && (ARCH_PEBS_GPR & cap.caps); + if (regs & PEBS_DATACFG_XMMS) { + supported &=3D x86_pmu.arch_pebs ? 
+ ARCH_PEBS_VECR_XMM & cap.caps : + pebs_format > 3 && x86_pmu.intel_cap.pebs_baseline; + } + if (regs & PEBS_DATACFG_YMMHS) + supported &=3D x86_pmu.arch_pebs && (ARCH_PEBS_VECR_YMMH & cap.caps); + if (regs & PEBS_DATACFG_EGPRS) + supported &=3D x86_pmu.arch_pebs && (ARCH_PEBS_VECR_EGPRS & cap.caps); + if (regs & PEBS_DATACFG_OPMASKS) + supported &=3D x86_pmu.arch_pebs && (ARCH_PEBS_VECR_OPMASK & cap.caps); + if (regs & PEBS_DATACFG_ZMMHS) + supported &=3D x86_pmu.arch_pebs && (ARCH_PEBS_VECR_ZMMH & cap.caps); + if (regs & PEBS_DATACFG_H16ZMMS) + supported &=3D x86_pmu.arch_pebs && (ARCH_PEBS_VECR_H16ZMM & cap.caps); + + return supported; +} + static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) { unsigned long flags =3D x86_pmu.large_pebs_flags; @@ -4425,24 +4468,20 @@ static unsigned long intel_pmu_large_pebs_flags(str= uct perf_event *event) if (!event->attr.exclude_kernel) flags &=3D ~PERF_SAMPLE_REGS_USER; if (event->attr.sample_simd_regs_enabled) { - u64 nolarge =3D PERF_X86_EGPRS_MASK | BIT_ULL(PERF_REG_X86_SSP); - - /* - * PEBS HW can only collect the XMM0-XMM15 for now. - * Disable large PEBS for other vector registers, predicate - * registers, eGPRs, and SSP. 
- */ - if (event->attr.sample_regs_user & nolarge || - fls64(event->attr.sample_simd_vec_reg_user) > PERF_X86_H16ZMM_BASE || - event->attr.sample_simd_pred_reg_user) - flags &=3D ~PERF_SAMPLE_REGS_USER; - - if (event->attr.sample_regs_intr & nolarge || - fls64(event->attr.sample_simd_vec_reg_intr) > PERF_X86_H16ZMM_BASE || - event->attr.sample_simd_pred_reg_intr) - flags &=3D ~PERF_SAMPLE_REGS_INTR; - - if (event->attr.sample_simd_vec_reg_qwords > PERF_X86_XMM_QWORDS) + if ((event_needs_ssp(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_GP)) || + (event_needs_xmm(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_XMMS)) || + (event_needs_ymm(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_YMMHS)) || + (event_needs_egprs(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_EGPRS)) || + (event_needs_opmask(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_OPMASKS)) || + (event_needs_low16_zmm(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_ZMMHS)) || + (event_needs_high16_zmm(event) && + !intel_pebs_support_regs(event, PEBS_DATACFG_H16ZMMS))) flags &=3D ~(PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR); } else { if (event->attr.sample_regs_user & ~PEBS_GP_REGS) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 3a2fb623e0ab..4743bdfb4ed4 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1740,11 +1740,22 @@ static u64 pebs_update_adaptive_cfg(struct perf_eve= nt *event) ((attr->config & INTEL_ARCH_EVENT_MASK) =3D=3D x86_pmu.rtm_abort_event); =20 - if (gprs || (attr->precise_ip < 2) || tsx_weight) + if (gprs || (attr->precise_ip < 2) || + tsx_weight || event_needs_ssp(event)) pebs_data_cfg |=3D PEBS_DATACFG_GP; =20 if (event_needs_xmm(event)) pebs_data_cfg |=3D PEBS_DATACFG_XMMS; + if (event_needs_ymm(event)) + pebs_data_cfg |=3D PEBS_DATACFG_YMMHS; + if (event_needs_low16_zmm(event)) + pebs_data_cfg |=3D PEBS_DATACFG_ZMMHS; + if (event_needs_high16_zmm(event)) + 
pebs_data_cfg |=3D PEBS_DATACFG_H16ZMMS; + if (event_needs_opmask(event)) + pebs_data_cfg |=3D PEBS_DATACFG_OPMASKS; + if (event_needs_egprs(event)) + pebs_data_cfg |=3D PEBS_DATACFG_EGPRS; =20 if (sample_type & PERF_SAMPLE_BRANCH_STACK) { /* @@ -2705,15 +2716,69 @@ static void setup_arch_pebs_sample_data(struct perf= _event *event, meminfo->tsx_tuning, ax); } =20 - if (header->xmm) { + if (header->xmm || header->ymmh || header->egpr || + header->opmask || header->zmmh || header->h16zmm) { + struct arch_pebs_xer_header *xer_header =3D next_record; struct pebs_xmm *xmm; + struct ymmh_struct *ymmh; + struct avx_512_zmm_uppers_state *zmmh; + struct avx_512_hi16_state *h16zmm; + struct avx_512_opmask_state *opmask; + struct apx_state *egpr; =20 next_record +=3D sizeof(struct arch_pebs_xer_header); =20 - ignore_mask |=3D XFEATURE_MASK_SSE; - xmm =3D next_record; - perf_regs->xmm_regs =3D xmm->xmm; - next_record =3D xmm + 1; + if (header->xmm) { + ignore_mask |=3D XFEATURE_MASK_SSE; + xmm =3D next_record; + /* + * Only output XMM regs to user space when arch-PEBS + * really writes data into xstate area. 
+ */ + if (xer_header->xstate & XFEATURE_MASK_SSE) + perf_regs->xmm_regs =3D xmm->xmm; + next_record =3D xmm + 1; + } + + if (header->ymmh) { + ignore_mask |=3D XFEATURE_MASK_YMM; + ymmh =3D next_record; + if (xer_header->xstate & XFEATURE_MASK_YMM) + perf_regs->ymmh =3D ymmh; + next_record =3D ymmh + 1; + } + + if (header->egpr) { + ignore_mask |=3D XFEATURE_MASK_APX; + egpr =3D next_record; + if (xer_header->xstate & XFEATURE_MASK_APX) + perf_regs->egpr =3D egpr; + next_record =3D egpr + 1; + } + + if (header->opmask) { + ignore_mask |=3D XFEATURE_MASK_OPMASK; + opmask =3D next_record; + if (xer_header->xstate & XFEATURE_MASK_OPMASK) + perf_regs->opmask =3D opmask; + next_record =3D opmask + 1; + } + + if (header->zmmh) { + ignore_mask |=3D XFEATURE_MASK_ZMM_Hi256; + zmmh =3D next_record; + if (xer_header->xstate & XFEATURE_MASK_ZMM_Hi256) + perf_regs->zmmh =3D zmmh; + next_record =3D zmmh + 1; + } + + if (header->h16zmm) { + ignore_mask |=3D XFEATURE_MASK_Hi16_ZMM; + h16zmm =3D next_record; + if (xer_header->xstate & XFEATURE_MASK_Hi16_ZMM) + perf_regs->h16zmm =3D h16zmm; + next_record =3D h16zmm + 1; + } } =20 if (header->lbr) { diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-in= dex.h index e25434d21159..4fe796993c97 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -350,6 +350,13 @@ #define ARCH_PEBS_LBR_SHIFT 40 #define ARCH_PEBS_LBR (0x3ull << ARCH_PEBS_LBR_SHIFT) #define ARCH_PEBS_VECR_XMM BIT_ULL(49) +#define ARCH_PEBS_VECR_YMMH BIT_ULL(50) +#define ARCH_PEBS_VECR_EGPRS BIT_ULL(51) +#define ARCH_PEBS_VECR_OPMASK BIT_ULL(53) +#define ARCH_PEBS_VECR_ZMMH BIT_ULL(54) +#define ARCH_PEBS_VECR_H16ZMM BIT_ULL(55) +#define ARCH_PEBS_VECR_EXT_SHIFT 50 +#define ARCH_PEBS_VECR_EXT (0x3full << ARCH_PEBS_VECR_EXT_SHIFT) #define ARCH_PEBS_GPR BIT_ULL(61) #define ARCH_PEBS_AUX BIT_ULL(62) #define ARCH_PEBS_EN BIT_ULL(63) diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_= event.h 
index 0c6d58e6c98f..db8bba43401c 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h @@ -148,6 +148,11 @@ #define PEBS_DATACFG_LBRS BIT_ULL(3) #define PEBS_DATACFG_CNTR BIT_ULL(4) #define PEBS_DATACFG_METRICS BIT_ULL(5) +#define PEBS_DATACFG_YMMHS BIT_ULL(6) +#define PEBS_DATACFG_OPMASKS BIT_ULL(7) +#define PEBS_DATACFG_ZMMHS BIT_ULL(8) +#define PEBS_DATACFG_H16ZMMS BIT_ULL(9) +#define PEBS_DATACFG_EGPRS BIT_ULL(10) #define PEBS_DATACFG_LBR_SHIFT 24 #define PEBS_DATACFG_CNTR_SHIFT 32 #define PEBS_DATACFG_CNTR_MASK GENMASK_ULL(15, 0) @@ -545,7 +550,8 @@ struct arch_pebs_header { rsvd3:7, xmm:1, ymmh:1, - rsvd4:2, + egpr:1, + rsvd4:1, opmask:1, zmmh:1, h16zmm:1, --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id 6034F37475D; Tue, 24 Mar 2026 00:47:40 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313261; cv=none; b=dP2J36rJVLt5pUiQO6SYXUYwR7RsnraRTqPQN+X00DZ3zL0JquKpTlfELr5BsDYHjfTBFbcArsbEYgQZHXjHCFcjK5ooSFDKPUsGW0out2boUwmRDMKcUcVlxa0URpWNUl8XTgIS0GuhMv0lA6vrQHYXEYGEY+ew4vubn6fWCZg= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313261; c=relaxed/simple; bh=Bk+t7ou2YxiDymnxt3TXKek3Ry7JWKhHuCyo00BxO5Y=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=VqNk/IIim/pvSre0ugFCxZLF0MtRDyjd6SJ3qUwAFt3RxbqOOjNVWGkEgXuJ8SGs97WyR/XHJ2lYy8vOMleDrJFbPIXyaRBVUfcPTR0z+/x6MVQSFdTgG2xTIHX6HQrzfnheoljwyUTnKXQTT6iWsGP33q+HuRg6xAqP18JVRVg= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com 
header.i=@intel.com header.b=iwNZ/2iY; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="iwNZ/2iY" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; t=1774313260; x=1805849260; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=Bk+t7ou2YxiDymnxt3TXKek3Ry7JWKhHuCyo00BxO5Y=; b=iwNZ/2iYp91VFW1LxJowA/vRROwNxotUg1/l+h/mh8hFgGHJELVm0avr 8+j5BuGgUUtu5k6dETgr4nKnIOh6P7Xq9Uu6+L3TnNHHphHmKpu6ozsGc khA5kbvMlPxMDf90YwxvJWmc6/yfdcXUgZEus9eg77dlhgVMD5KWzKQ7R RP2KigPSPem2tElXjR8P1Djml4njDUPigMCfPBgXamDKq68NLBTo6MObz u487C7ivCuPYk8CGHERRiJzQZDA3aFQHMCEoscEsTpJuXE9TjQrDb1PRJ NOS6zU2fz1b5RilwLgrnbrTms7W5ZvTy8XgS5xEuDOE07aGvjULOd0gtv w==; X-CSE-ConnectionGUID: GEWldV6WRkiwEyfRKAjlgQ== X-CSE-MsgGUID: 5KkMgPTGQYKN7BTHK6kzVw== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397302" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397302" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:40 -0700 X-CSE-ConnectionGUID: +j4k8gH/TWu+TbIERBrKSg== X-CSE-MsgGUID: h7wwNzvgQ6ai9OOq7i6Atw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221323008" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:36 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , 
linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 23/24] perf/x86: Activate back-to-back NMI detection for arch-PEBS induced NMIs Date: Tue, 24 Mar 2026 08:41:17 +0800 Message-Id: <20260324004118.3772171-24-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" When two or more identical PEBS events with the same sampling period are programmed on a mix of PDIST and non-PDIST counters, multiple back-to-back NMIs can be triggered. The Linux PMI handler processes the first NMI and clears the GLOBAL_STATUS MSR. If a second NMI is triggered immediately after the first, it is recognized as a "suspicious NMI" because no bits are set in the GLOBAL_STATUS MSR (cleared by the first NMI). This issue does not lead to PEBS data corruption or data loss, but it does result in an annoying warning message. The current NMI handler supports back-to-back NMI detection, but it requires the PMI handler to return the count of actually processed events, which the PEBS handler does not currently do. This patch modifies the PEBS handlers to return the count of actually processed events, thereby activating back-to-back NMI detection and avoiding the "suspicious NMI" warning. 
Suggested-by: Andi Kleen Signed-off-by: Dapeng Mi --- arch/x86/events/intel/core.c | 6 ++---- arch/x86/events/intel/ds.c | 40 ++++++++++++++++++++++++------------ arch/x86/events/perf_event.h | 2 +- 3 files changed, 30 insertions(+), 18 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index e0dd57906bca..9da0a1354045 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3558,9 +3558,8 @@ static int handle_pmi_common(struct pt_regs *regs, u6= 4 status) if (__test_and_clear_bit(GLOBAL_STATUS_BUFFER_OVF_BIT, (unsigned long *)&= status)) { u64 pebs_enabled =3D cpuc->pebs_enabled; =20 - handled++; x86_pmu_handle_guest_pebs(regs, &data); - static_call(x86_pmu_drain_pebs)(regs, &data); + handled +=3D static_call(x86_pmu_drain_pebs)(regs, &data); =20 /* * PMI throttle may be triggered, which stops the PEBS event. @@ -3587,8 +3586,7 @@ static int handle_pmi_common(struct pt_regs *regs, u6= 4 status) */ if (__test_and_clear_bit(GLOBAL_STATUS_ARCH_PEBS_THRESHOLD_BIT, (unsigned long *)&status)) { - handled++; - static_call(x86_pmu_drain_pebs)(regs, &data); + handled +=3D static_call(x86_pmu_drain_pebs)(regs, &data); =20 if (cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS] && is_pebs_counter_event_group(cpuc->events[INTEL_PMC_IDX_FIXED_SLOTS])) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 4743bdfb4ed4..6e1c516122c0 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -3035,7 +3035,7 @@ __intel_pmu_pebs_events(struct perf_event *event, __intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sa= mple); } =20 -static void intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_s= ample_data *data) +static int intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sa= mple_data *data) { struct cpu_hw_events *cpuc =3D this_cpu_ptr(&cpu_hw_events); struct debug_store *ds =3D cpuc->ds; @@ -3044,7 +3044,7 @@ static void intel_pmu_drain_pebs_core(struct 
pt_regs = *iregs, struct perf_sample_ int n; =20 if (!x86_pmu.pebs_active) - return; + return 0; =20 at =3D (struct pebs_record_core *)(unsigned long)ds->pebs_buffer_base; top =3D (struct pebs_record_core *)(unsigned long)ds->pebs_index; @@ -3055,22 +3055,24 @@ static void intel_pmu_drain_pebs_core(struct pt_reg= s *iregs, struct perf_sample_ ds->pebs_index =3D ds->pebs_buffer_base; =20 if (!test_bit(0, cpuc->active_mask)) - return; + return 0; =20 WARN_ON_ONCE(!event); =20 if (!event->attr.precise_ip) - return; + return 0; =20 n =3D top - at; if (n <=3D 0) { if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) intel_pmu_save_and_restart_reload(event, 0); - return; + return 0; } =20 __intel_pmu_pebs_events(event, iregs, data, at, top, 0, n, setup_pebs_fixed_sample_data); + + return 1; /* PMC0 only*/ } =20 static void intel_pmu_pebs_event_update_no_drain(struct cpu_hw_events *cpu= c, u64 mask) @@ -3093,7 +3095,7 @@ static void intel_pmu_pebs_event_update_no_drain(stru= ct cpu_hw_events *cpuc, u64 } } =20 -static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sa= mple_data *data) +static int intel_pmu_drain_pebs_nhm(struct pt_regs *iregs, struct perf_sam= ple_data *data) { struct cpu_hw_events *cpuc =3D this_cpu_ptr(&cpu_hw_events); struct debug_store *ds =3D cpuc->ds; @@ -3102,11 +3104,12 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs= *iregs, struct perf_sample_d short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] =3D {}; short error[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] =3D {}; int max_pebs_events =3D intel_pmu_max_num_pebs(NULL); + u64 events_bitmap =3D 0; int bit, i, size; u64 mask; =20 if (!x86_pmu.pebs_active) - return; + return 0; =20 base =3D (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base; top =3D (struct pebs_record_nhm *)(unsigned long)ds->pebs_index; @@ -3122,7 +3125,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *= iregs, struct perf_sample_d =20 if (unlikely(base >=3D top)) { 
intel_pmu_pebs_event_update_no_drain(cpuc, mask); - return; + return 0; } =20 for (at =3D base; at < top; at +=3D x86_pmu.pebs_record_size) { @@ -3186,6 +3189,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *= iregs, struct perf_sample_d if ((counts[bit] =3D=3D 0) && (error[bit] =3D=3D 0)) continue; =20 + events_bitmap |=3D bit; event =3D cpuc->events[bit]; if (WARN_ON_ONCE(!event)) continue; @@ -3207,6 +3211,8 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *= iregs, struct perf_sample_d setup_pebs_fixed_sample_data); } } + + return hweight64(events_bitmap); } =20 static __always_inline void @@ -3262,7 +3268,7 @@ __intel_pmu_handle_last_pebs_record(struct pt_regs *i= regs, =20 static DEFINE_PER_CPU(struct x86_perf_regs, x86_pebs_regs); =20 -static void intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sa= mple_data *data) +static int intel_pmu_drain_pebs_icl(struct pt_regs *iregs, struct perf_sam= ple_data *data) { short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] =3D {}; void *last[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS]; @@ -3272,10 +3278,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs= *iregs, struct perf_sample_d struct pt_regs *regs =3D &perf_regs->regs; struct pebs_basic *basic; void *base, *at, *top; + u64 events_bitmap =3D 0; u64 mask; =20 if (!x86_pmu.pebs_active) - return; + return 0; =20 base =3D (struct pebs_basic *)(unsigned long)ds->pebs_buffer_base; top =3D (struct pebs_basic *)(unsigned long)ds->pebs_index; @@ -3288,7 +3295,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *= iregs, struct perf_sample_d =20 if (unlikely(base >=3D top)) { intel_pmu_pebs_event_update_no_drain(cpuc, mask); - return; + return 0; } =20 if (!iregs) @@ -3303,6 +3310,7 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs *= iregs, struct perf_sample_d continue; =20 pebs_status =3D mask & basic->applicable_counters; + events_bitmap |=3D pebs_status; __intel_pmu_handle_pebs_record(iregs, regs, data, at, pebs_status, 
counts, last, setup_pebs_adaptive_sample_data); @@ -3310,9 +3318,11 @@ static void intel_pmu_drain_pebs_icl(struct pt_regs = *iregs, struct perf_sample_d =20 __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last, setup_pebs_adaptive_sample_data); + + return hweight64(events_bitmap); } =20 -static void intel_pmu_drain_arch_pebs(struct pt_regs *iregs, +static int intel_pmu_drain_arch_pebs(struct pt_regs *iregs, struct perf_sample_data *data) { short counts[INTEL_PMC_IDX_FIXED + MAX_FIXED_PEBS_EVENTS] =3D {}; @@ -3322,13 +3332,14 @@ static void intel_pmu_drain_arch_pebs(struct pt_reg= s *iregs, struct x86_perf_regs *perf_regs =3D this_cpu_ptr(&x86_pebs_regs); struct pt_regs *regs =3D &perf_regs->regs; void *base, *at, *top; + u64 events_bitmap =3D 0; u64 mask; =20 rdmsrq(MSR_IA32_PEBS_INDEX, index.whole); =20 if (unlikely(!index.wr)) { intel_pmu_pebs_event_update_no_drain(cpuc, X86_PMC_IDX_MAX); - return; + return 0; } =20 base =3D cpuc->pebs_vaddr; @@ -3367,6 +3378,7 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs = *iregs, =20 basic =3D at + sizeof(struct arch_pebs_header); pebs_status =3D mask & basic->applicable_counters; + events_bitmap |=3D pebs_status; __intel_pmu_handle_pebs_record(iregs, regs, data, at, pebs_status, counts, last, setup_arch_pebs_sample_data); @@ -3386,6 +3398,8 @@ static void intel_pmu_drain_arch_pebs(struct pt_regs = *iregs, __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last, setup_arch_pebs_sample_data); + + return hweight64(events_bitmap); } =20 static void __init intel_arch_pebs_init(void) diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 36688d28407f..e6bf786728eb 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -1014,7 +1014,7 @@ struct x86_pmu { int pebs_record_size; int pebs_buffer_size; u64 pebs_events_mask; - void (*drain_pebs)(struct pt_regs *regs, struct perf_sample_data *data); + int (*drain_pebs)(struct pt_regs *regs, 
struct perf_sample_data *data); struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); u64 (*pebs_latency_data)(struct perf_event *event, u64 status); --=20 2.34.1 From nobody Thu Apr 2 10:56:41 2026 Received: from mgamail.intel.com (mgamail.intel.com [198.175.65.13]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by smtp.subspace.kernel.org (Postfix) with ESMTPS id E36E53783A0; Tue, 24 Mar 2026 00:47:44 +0000 (UTC) Authentication-Results: smtp.subspace.kernel.org; arc=none smtp.client-ip=198.175.65.13 ARC-Seal: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313266; cv=none; b=EW7x9H3rb5FUpqfNLEZGOTXAcVO0Wa99DlJaI9OpBlZK9BoyLWEwfOjWQD/5hFGRO46BgOoalhXaLkHBolTX+5QcEtxzNnfid8TCNnaxZeVOPrtmLmKemuamjnveMUzu42reBxsDwW4IxUEy0kINVwySp/Gm6/AWCdmSAskNGVA= ARC-Message-Signature: i=1; a=rsa-sha256; d=subspace.kernel.org; s=arc-20240116; t=1774313266; c=relaxed/simple; bh=SVdbexuW8xfUfof3mVmaw/4W1T03yLxP8hI+atdxO6U=; h=From:To:Cc:Subject:Date:Message-Id:In-Reply-To:References: MIME-Version; b=C2KKd6gz1GAVjt6Umn5+IkeN/U/ZG70M+6mcDcrRHwZ5wSk3LDK8O1vAEfnXdLIZHA0APg5Bt3tb68Y4ZKJ0ftKS9vwBC6extWxsuHId2slgPXwdtOHOVxFTw1/ie4ptDbkSO+gCy4CGYy7zOV5SgW1FaCqICd2HSJUkxchlsGg= ARC-Authentication-Results: i=1; smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com; spf=pass smtp.mailfrom=linux.intel.com; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b=DnGkCjgX; arc=none smtp.client-ip=198.175.65.13 Authentication-Results: smtp.subspace.kernel.org; dmarc=pass (p=none dis=none) header.from=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; spf=pass smtp.mailfrom=linux.intel.com Authentication-Results: smtp.subspace.kernel.org; dkim=pass (2048-bit key) header.d=intel.com header.i=@intel.com header.b="DnGkCjgX" DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/simple; d=intel.com; i=@intel.com; q=dns/txt; s=Intel; 
t=1774313265; x=1805849265; h=from:to:cc:subject:date:message-id:in-reply-to: references:mime-version:content-transfer-encoding; bh=SVdbexuW8xfUfof3mVmaw/4W1T03yLxP8hI+atdxO6U=; b=DnGkCjgXN3OV6L4Juk950q8zPXNWyaq8kobq54lhYNZZhtWD7FIjftYQ Tue0JRcGsyoU8guxGaiVLw8ontEKumNIdWdoTV2cQ1pPeABN4JQIHO3Hf ujodaFS+kM4XrdheTnae8rOhJb6m5X4XSwiTCPpN21ABBxZDlPlBlls8G 6sttZy+PS+8a3nY0x85v7oWUX6pMY9Y6rnYuDV/6/aLp45WJfhq4Avaqz rGgLGn57f817s/4+nPx79dX5ITngqDyng81KeUBChyQJjlzklh5Z6dCzH mi7US1yP2pvAS5D9Wl5h9hmVJU72NAnW50lNEz73wKWy+1Dbq805WXmZp w==; X-CSE-ConnectionGUID: uNVzS98uRc6hmbEVIzVPzQ== X-CSE-MsgGUID: 1wcfnyEPT4+zj6LMBO2uNg== X-IronPort-AV: E=McAfee;i="6800,10657,11738"; a="86397312" X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="86397312" Received: from fmviesa008.fm.intel.com ([10.60.135.148]) by orvoesa105.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 23 Mar 2026 17:47:44 -0700 X-CSE-ConnectionGUID: hKvA7PhdTxqxBfypwO5Yfg== X-CSE-MsgGUID: dg+u2ZPKTkOyqZIuiPkITw== X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="6.23,138,1770624000"; d="scan'208";a="221323027" Received: from spr.sh.intel.com ([10.112.229.196]) by fmviesa008.fm.intel.com with ESMTP; 23 Mar 2026 17:47:40 -0700 From: Dapeng Mi To: Peter Zijlstra , Ingo Molnar , Arnaldo Carvalho de Melo , Namhyung Kim , Thomas Gleixner , Dave Hansen , Ian Rogers , Adrian Hunter , Jiri Olsa , Alexander Shishkin , Andi Kleen , Eranian Stephane Cc: Mark Rutland , broonie@kernel.org, Ravi Bangoria , linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org, Zide Chen , Falcon Thomas , Dapeng Mi , Xudong Hao , Dapeng Mi Subject: [Patch v7 24/24] perf/x86/intel: Add sanity check for PEBS fragment size Date: Tue, 24 Mar 2026 08:41:18 +0800 Message-Id: <20260324004118.3772171-25-dapeng1.mi@linux.intel.com> X-Mailer: git-send-email 2.34.1 In-Reply-To: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> References: <20260324004118.3772171-1-dapeng1.mi@linux.intel.com> Precedence: bulk X-Mailing-List: 
linux-kernel@vger.kernel.org List-Id: List-Subscribe: List-Unsubscribe: MIME-Version: 1.0 Content-Transfer-Encoding: quoted-printable Content-Type: text/plain; charset="utf-8" Prevent potential infinite loops by adding a sanity check for the corrupted PEBS fragment sizes which could happen in theory. If a corrupted PEBS fragment is detected, the entire PEBS record including the fragment and all subsequent records will be discarded. This ensures the integrity of PEBS data and prevents infinite loops in setup_arch_pebs_sample_data() again. Signed-off-by: Dapeng Mi --- V7: new patch. arch/x86/events/intel/ds.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 6e1c516122c0..4b0dd8379737 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -2819,7 +2819,7 @@ static void setup_arch_pebs_sample_data(struct perf_e= vent *event, } =20 /* Parse followed fragments if there are. */ - if (arch_pebs_record_continued(header)) { + if (arch_pebs_record_continued(header) && header->size) { at =3D at + header->size; goto again; } @@ -2948,13 +2948,17 @@ __intel_pmu_pebs_last_event(struct perf_event *even= t, struct pt_regs *iregs, struct pt_regs *regs, struct perf_sample_data *data, - void *at, - int count, + void *at, int count, bool corrupted, setup_fn setup_sample) { struct hw_perf_event *hwc =3D &event->hw; =20 - setup_sample(event, iregs, at, data, regs); + /* Skip parsing corrupted PEBS record. 
*/ + if (corrupted) + perf_sample_data_init(data, 0, event->hw.last_period); + else + setup_sample(event, iregs, at, data, regs); + if (iregs =3D=3D &dummy_iregs) { /* * The PEBS records may be drained in the non-overflow context, @@ -3026,13 +3030,15 @@ __intel_pmu_pebs_events(struct perf_event *event, iregs =3D &dummy_iregs; =20 while (cnt > 1) { - __intel_pmu_pebs_event(event, iregs, regs, data, at, setup_sample); + __intel_pmu_pebs_event(event, iregs, regs, data, + at, setup_sample); at +=3D cpuc->pebs_record_size; at =3D get_next_pebs_record_by_bit(at, top, bit); cnt--; } =20 - __intel_pmu_pebs_last_event(event, iregs, regs, data, at, count, setup_sa= mple); + __intel_pmu_pebs_last_event(event, iregs, regs, data, at, + count, false, setup_sample); } =20 static int intel_pmu_drain_pebs_core(struct pt_regs *iregs, struct perf_sa= mple_data *data) @@ -3247,7 +3253,8 @@ static __always_inline void __intel_pmu_handle_last_pebs_record(struct pt_regs *iregs, struct pt_regs *regs, struct perf_sample_data *data, - u64 mask, short *counts, void **last, + u64 mask, short *counts, + void **last, bool corrupted, setup_fn setup_sample) { struct cpu_hw_events *cpuc =3D this_cpu_ptr(&cpu_hw_events); @@ -3261,7 +3268,7 @@ __intel_pmu_handle_last_pebs_record(struct pt_regs *i= regs, event =3D cpuc->events[bit]; =20 __intel_pmu_pebs_last_event(event, iregs, regs, data, last[bit], - counts[bit], setup_sample); + counts[bit], corrupted, setup_sample); } =20 } @@ -3317,7 +3324,7 @@ static int intel_pmu_drain_pebs_icl(struct pt_regs *i= regs, struct perf_sample_da } =20 __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, counts, last, - setup_pebs_adaptive_sample_data); + false, setup_pebs_adaptive_sample_data); =20 return hweight64(events_bitmap); } @@ -3333,6 +3340,7 @@ static int intel_pmu_drain_arch_pebs(struct pt_regs *= iregs, struct pt_regs *regs =3D &perf_regs->regs; void *base, *at, *top; u64 events_bitmap =3D 0; + bool corrupted =3D false; u64 mask; =20 
rdmsrq(MSR_IA32_PEBS_INDEX, index.whole); @@ -3388,6 +3396,10 @@ static int intel_pmu_drain_arch_pebs(struct pt_regs = *iregs, if (!header->size) break; at +=3D header->size; + if (WARN_ON_ONCE(at >=3D top)) { + corrupted =3D true; + goto done; + } header =3D at; } =20 @@ -3395,8 +3407,9 @@ static int intel_pmu_drain_arch_pebs(struct pt_regs *= iregs, at +=3D header->size; } =20 +done: __intel_pmu_handle_last_pebs_record(iregs, regs, data, mask, - counts, last, + counts, last, corrupted, setup_arch_pebs_sample_data); =20 return hweight64(events_bitmap); --=20 2.34.1