sgx_reclaim_direct() was introduced to preemptively reclaim some pages
as a best effort to avoid on-demand reclamation that can stall forward
progress in some situations, e.g., allocating pages to load a previously
reclaimed page in order to perform EDMM operations on it [1].
Currently, when the global usage is close to capacity,
sgx_reclaim_direct() makes one invocation of sgx_reclaim_pages_global(),
but it does not guarantee that free pages are available for later
allocations to succeed. In other words, the only goal here is to reduce
the chance of on-demand reclamation at allocation time. On allocation
failure, the callers, the EDMM ioctl()s, return -EAGAIN to user space
and let user space decide whether to retry.
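
For illustration only (not part of this patch), the sketch below shows
how a user-space runtime might drive one of the EDMM ioctl()s, here
SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS from the upstream uAPI
(arch/x86/include/uapi/asm/sgx.h), retrying on -EAGAIN. The helper name
and retry policy are hypothetical; @count is the ioctl's documented
output of bytes processed so far:

  #include <errno.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <asm/sgx.h>	/* SGX uAPI types and ioctl numbers */

  /* Hypothetical helper: retry the EDMM ioctl() while it reports -EAGAIN. */
  static int restrict_perms(int enclave_fd, __u64 offset, __u64 length,
			    __u64 permissions)
  {
	struct sgx_enclave_restrict_permissions params;

	while (length) {
		memset(&params, 0, sizeof(params));
		params.offset = offset;
		params.length = length;
		params.permissions = permissions;

		if (!ioctl(enclave_fd, SGX_IOC_ENCLAVE_RESTRICT_PERMISSIONS,
			   &params))
			return 0;

		if (errno != EAGAIN)
			return -errno;

		/*
		 * @count holds the bytes already processed; retry with the
		 * remainder. A real runtime would bound the retries.
		 */
		offset += params.count;
		length -= params.count;
	}

	return 0;
  }
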
With EPC cgroups enabled, a cgroup's EPC usage can also reach its limit
(usually much lower than the capacity) and trigger per-cgroup
reclamation. Implement a similar strategy to reduce the chance of
on-demand per-cgroup reclamation for this use case.
Create a wrapper, sgx_cgroup_reclaim_direct(), to perform preemptive
reclamation at the cgroup level, and have sgx_reclaim_direct() call it
when EPC cgroups are enabled.
[1] https://lore.kernel.org/all/a0d8f037c4a075d56bf79f432438412985f7ff7a.1652137848.git.reinette.chatre@intel.com/T/#u
Signed-off-by: Haitao Huang <haitao.huang@linux.intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Jarkko Sakkinen <jarkko@kernel.org>
---
V17:
- Improve comments and capitalization. (Kai)
---
arch/x86/kernel/cpu/sgx/epc_cgroup.c | 15 +++++++++++++++
arch/x86/kernel/cpu/sgx/epc_cgroup.h | 3 +++
arch/x86/kernel/cpu/sgx/main.c | 4 ++++
3 files changed, 22 insertions(+)
diff --git a/arch/x86/kernel/cpu/sgx/epc_cgroup.c b/arch/x86/kernel/cpu/sgx/epc_cgroup.c
index 4faff943ce15..7394f78dec49 100644
--- a/arch/x86/kernel/cpu/sgx/epc_cgroup.c
+++ b/arch/x86/kernel/cpu/sgx/epc_cgroup.c
@@ -240,6 +240,21 @@ static bool sgx_cgroup_should_reclaim(struct sgx_cgroup *sgx_cg)
return (cur >= max);
}
+/**
+ * sgx_cgroup_reclaim_direct() - Preemptive reclamation.
+ *
+ * Scan and attempt to reclaim %SGX_NR_TO_SCAN pages as a best effort to
+ * make later EPC allocations quicker.
+ */
+void sgx_cgroup_reclaim_direct(void)
+{
+ struct sgx_cgroup *sgx_cg = sgx_get_current_cg();
+
+ if (sgx_cgroup_should_reclaim(sgx_cg))
+ sgx_cgroup_reclaim_pages(sgx_cg, current->mm, SGX_NR_TO_SCAN);
+ sgx_put_cg(sgx_cg);
+}
+
/*
* Asynchronous work flow to reclaim pages from the cgroup when the cgroup is
* at/near its maximum capacity.
diff --git a/arch/x86/kernel/cpu/sgx/epc_cgroup.h b/arch/x86/kernel/cpu/sgx/epc_cgroup.h
index 2285dbfc9462..a530c9611332 100644
--- a/arch/x86/kernel/cpu/sgx/epc_cgroup.h
+++ b/arch/x86/kernel/cpu/sgx/epc_cgroup.h
@@ -35,6 +35,8 @@ static inline int __init sgx_cgroup_wq_init(void)
static inline void __init sgx_cgroup_wq_deinit(void) { }
+static inline void sgx_cgroup_reclaim_direct(void) { }
+
#else /* CONFIG_CGROUP_MISC */
struct sgx_cgroup {
@@ -86,6 +88,7 @@ static inline void sgx_put_cg(struct sgx_cgroup *sgx_cg)
int sgx_cgroup_try_charge(struct sgx_cgroup *sgx_cg, enum sgx_reclaim reclaim);
void sgx_cgroup_uncharge(struct sgx_cgroup *sgx_cg);
+void sgx_cgroup_reclaim_direct(void);
void __init sgx_cgroup_init(void);
int __init sgx_cgroup_wq_init(void);
void __init sgx_cgroup_wq_deinit(void);
diff --git a/arch/x86/kernel/cpu/sgx/main.c b/arch/x86/kernel/cpu/sgx/main.c
index b47d627bd370..6f293115b75e 100644
--- a/arch/x86/kernel/cpu/sgx/main.c
+++ b/arch/x86/kernel/cpu/sgx/main.c
@@ -414,6 +414,10 @@ static void sgx_reclaim_pages_global(struct mm_struct *charge_mm)
*/
void sgx_reclaim_direct(void)
{
+ /* Reduce the chance of per-cgroup reclamation for later allocations */
+ sgx_cgroup_reclaim_direct();
+
+ /* Reduce the chance of global reclamation for later allocations */
if (sgx_should_reclaim_global(SGX_NR_LOW_PAGES))
sgx_reclaim_pages_global(current->mm);
}
--
2.43.0