Add a new attribute "replaceable" to allow the coexistence of atomic
replace livepatches and non atomic replace livepatches. If replaceable is
set to 0, the livepatch won't be replaced by an atomic replace livepatch.

This is a preparation for the follow-up patch.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
---
include/linux/livepatch.h | 2 ++
kernel/livepatch/core.c | 44 +++++++++++++++++++++++++++++++++++++++
2 files changed, 46 insertions(+)
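
Note (not part of the patch): a minimal user-space sketch of how the new
sysfs knob could be exercised once this change is applied. The patch name
"livepatch_sample" is only an assumed example; a plain
echo 0 > /sys/kernel/livepatch/<patch>/replaceable from a shell would do
the same.

/*
 * Sketch only: clear "replaceable" on an already-loaded patch so it is
 * not removed when an atomic replace patch is applied later.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/livepatch/livepatch_sample/replaceable",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* "0" keeps this patch stacked when a replace patch is loaded. */
	if (write(fd, "0", 1) != 1)
		perror("write");

	close(fd);
	return 0;
}
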
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..f2e962aab5b0 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -147,6 +147,7 @@ struct klp_state {
* @objs: object entries for kernel objects to be patched
* @states: system states that can get modified
* @replace: replace all actively used patches
+ * @replaceable: whether this patch can be replaced by an atomic replace patch
* @list: list node for global list of actively used patches
* @kobj: kobject for sysfs resources
* @obj_list: dynamic list of the object entries
@@ -161,6 +162,7 @@ struct klp_patch {
	struct klp_object *objs;
	struct klp_state *states;
	bool replace;
+	bool replaceable;

	/* internal */
	struct list_head list;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 0cd39954d5a1..5e0c2caa0af8 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -347,6 +347,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
* /sys/kernel/livepatch/<patch>/transition
* /sys/kernel/livepatch/<patch>/force
* /sys/kernel/livepatch/<patch>/replace
+ * /sys/kernel/livepatch/<patch>/replaceable
* /sys/kernel/livepatch/<patch>/stack_order
* /sys/kernel/livepatch/<patch>/<object>
* /sys/kernel/livepatch/<patch>/<object>/patched
@@ -474,17 +475,60 @@ static ssize_t stack_order_show(struct kobject *kobj,
	return sysfs_emit(buf, "%d\n", stack_order);
}

+static ssize_t replaceable_store(struct kobject *kobj, struct kobj_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct klp_patch *patch;
+	bool replaceable;
+	int ret;
+
+	ret = kstrtobool(buf, &replaceable);
+	if (ret)
+		return ret;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+
+	mutex_lock(&klp_mutex);
+
+	if (patch->replaceable == replaceable)
+		goto out;
+
+	/* Don't allow changes while the patch is in transition. */
+	if (patch == klp_transition_patch) {
+		ret = -EAGAIN;
+		goto out;
+	}
+
+	patch->replaceable = replaceable;
+
+out:
+	mutex_unlock(&klp_mutex);
+
+	if (ret)
+		return ret;
+	return count;
+}
+
+static ssize_t replaceable_show(struct kobject *kobj,
+				struct kobj_attribute *attr, char *buf)
+{
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	return sysfs_emit(buf, "%d\n", patch->replaceable);
+}
+
static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
static struct kobj_attribute stack_order_kobj_attr = __ATTR_RO(stack_order);
+static struct kobj_attribute replaceable_kobj_attr = __ATTR_RW(replaceable);
static struct attribute *klp_patch_attrs[] = {
	&enabled_kobj_attr.attr,
	&transition_kobj_attr.attr,
	&force_kobj_attr.attr,
	&replace_kobj_attr.attr,
	&stack_order_kobj_attr.attr,
+	&replaceable_kobj_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(klp_patch);
--
2.43.5