This feature provides livepatch patch order information.
With the order sysfs interface of a klp_patch, we can
use the patch order to find out which patch's implementation
of a function is currently active.

After the discussion, we decided that a patch-level sysfs
interface is the only acceptable way to introduce this
information.

This feature works like this:
cat /sys/kernel/livepatch/livepatch_1/order -> 1
means the livepatch_1 module is the 1st klp patch applied.

cat /sys/kernel/livepatch/livepatch_module/order -> N
means the livepatch_module is the Nth klp patch applied
to the system.
Suggested-by: Petr Mladek <pmladek@suse.com>
Suggested-by: Miroslav Benes <mbenes@suse.cz>
Suggested-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Wardenjohn <zhangwarden@gmail.com>
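
Note (illustration only, not part of the patch): a minimal userspace sketch
that walks /sys/kernel/livepatch and prints the proposed "order" attribute
for every loaded livepatch. The directory layout is the existing livepatch
sysfs layout; the "order" attribute and its semantics are the ones proposed
in this patch and may still change during review.

#include <dirent.h>
#include <stdio.h>

int main(void)
{
        DIR *dir = opendir("/sys/kernel/livepatch");
        struct dirent *ent;
        char path[512];
        FILE *f;
        int order;

        if (!dir)
                return 1;

        while ((ent = readdir(dir)) != NULL) {
                if (ent->d_name[0] == '.')
                        continue;
                snprintf(path, sizeof(path),
                         "/sys/kernel/livepatch/%s/order", ent->d_name);
                f = fopen(path, "r");
                if (!f)
                        continue;       /* attribute not present on this kernel */
                if (fscanf(f, "%d", &order) == 1)
                        printf("%s: applied as patch #%d\n", ent->d_name, order);
                fclose(f);
        }
        closedir(dir);
        return 0;
}

With two livepatches loaded, this would print something like
"livepatch_sample: applied as patch #1" and
"livepatch_shadow_fix1: applied as patch #2".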
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..0fbbc1636ebe 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -154,6 +154,7 @@ struct klp_state {
  * @forced:	was involved in a forced transition
  * @free_work:	patch cleanup from workqueue-context
  * @finish:	for waiting till it is safe to remove the patch module
+ * @order:	the order of this patch applied to the system
  */
 struct klp_patch {
 	/* external */
@@ -170,6 +171,7 @@ struct klp_patch {
 	bool forced;
 	struct work_struct free_work;
 	struct completion finish;
+	int order;
 };
 
 #define klp_for_each_object_static(patch, obj) \
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 3c21c31796db..024853aa43a8 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -347,6 +347,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
  * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/replace
+ * /sys/kernel/livepatch/<patch>/order
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/patched
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -452,15 +453,26 @@ static ssize_t replace_show(struct kobject *kobj,
 	return sysfs_emit(buf, "%d\n", patch->replace);
 }
 
+static ssize_t order_show(struct kobject *kobj,
+			  struct kobj_attribute *attr, char *buf)
+{
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	return sysfs_emit(buf, "%d\n", patch->order);
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
+static struct kobj_attribute order_kobj_attr = __ATTR_RO(order);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
 	&force_kobj_attr.attr,
 	&replace_kobj_attr.attr,
+	&order_kobj_attr.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(klp_patch);
diff --git a/kernel/livepatch/transition.c b/kernel/livepatch/transition.c
index ba069459c101..73bce68d22f8 100644
--- a/kernel/livepatch/transition.c
+++ b/kernel/livepatch/transition.c
@@ -46,6 +46,15 @@ EXPORT_SYMBOL(klp_sched_try_switch_key);
 
 #endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
 
+static inline int klp_get_patch_order(struct klp_patch *patch)
+{
+	int order = 0;
+
+	klp_for_each_patch(patch)
+		order = order + 1;
+	return order;
+}
+
 /*
  * This work can be performed periodically to finish patching or unpatching any
  * "straggler" tasks which failed to transition in the first attempt.
@@ -591,6 +600,8 @@ void klp_init_transition(struct klp_patch *patch, int state)
 	pr_debug("'%s': initializing %s transition\n", patch->mod->name,
 		 klp_target_state == KLP_TRANSITION_PATCHED ? "patching" : "unpatching");
 
+	patch->order = klp_get_patch_order(patch);
+
 	/*
 	 * Initialize all tasks to the initial patch state to prepare them for
 	 * switching to the target state.
--
2.18.2

On Fri 2024-09-20 17:04:03, Wardenjohn wrote:
> This feature provides livepatch patch order information.
> With the order sysfs interface of a klp_patch, we can
> use the patch order to find out which patch's implementation
> of a function is currently active.
> 
> After the discussion, we decided that a patch-level sysfs
> interface is the only acceptable way to introduce this
> information.
> 
> This feature works like this:
> cat /sys/kernel/livepatch/livepatch_1/order -> 1
> means the livepatch_1 module is the 1st klp patch applied.
> 
> cat /sys/kernel/livepatch/livepatch_module/order -> N
> means the livepatch_module is the Nth klp patch applied
> to the system.
> 
> --- a/kernel/livepatch/transition.c
> +++ b/kernel/livepatch/transition.c
> @@ -46,6 +46,15 @@ EXPORT_SYMBOL(klp_sched_try_switch_key);
> 
>  #endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
> 
> +static inline int klp_get_patch_order(struct klp_patch *patch)
> +{
> +	int order = 0;
> +
> +	klp_for_each_patch(patch)
> +		order = order + 1;
> +	return order;
> +}

This does not work well. It uses the order on the stack when
the livepatch is being loaded. It is not updated when any livepatch gets
removed. It might create wrong values.

I have even tried to reproduce this:

# modprobe livepatch-sample
# modprobe livepatch-shadow-fix1
# cat /sys/kernel/livepatch/livepatch_sample/order
1
# cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
2

# echo 0 >/sys/kernel/livepatch/livepatch_sample/enabled
# rmmod livepatch_sample
# cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
2

# modprobe livepatch-sample
# cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
2
# cat /sys/kernel/livepatch/livepatch_sample/order
2

BANG: The livepatches have the same order.

I suggest to replace this with a global load counter. Something like:

diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 51a258c24ff5..44a8887573bb 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -150,10 +150,12 @@ struct klp_state {
  * @list:	list node for global list of actively used patches
  * @kobj:	kobject for sysfs resources
  * @obj_list:	dynamic list of the object entries
+ * @load_counter: sequence counter in which the patch is loaded
  * @enabled:	the patch is enabled (but operation may be incomplete)
  * @forced:	was involved in a forced transition
  * @free_work:	patch cleanup from workqueue-context
  * @finish:	for waiting till it is safe to remove the patch module
+ * @order:	the order of this patch applied to the system
  */
 struct klp_patch {
 	/* external */
@@ -166,6 +168,7 @@ struct klp_patch {
 	struct list_head list;
 	struct kobject kobj;
 	struct list_head obj_list;
+	int load_counter;
 	bool enabled;
 	bool forced;
 	struct work_struct free_work;
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 3c21c31796db..3a858477ae02 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -44,6 +44,9 @@ DEFINE_MUTEX(klp_mutex);
  */
 LIST_HEAD(klp_patches);
 
+/* The counter is incremented everytime a new livepatch is being loaded. */
+static int klp_load_counter;
+
 static struct kobject *klp_root_kobj;
 
 static bool klp_is_module(struct klp_object *obj)
@@ -347,6 +350,7 @@ int klp_apply_section_relocs(struct module *pmod, Elf_Shdr *sechdrs,
  * /sys/kernel/livepatch/<patch>/transition
  * /sys/kernel/livepatch/<patch>/force
  * /sys/kernel/livepatch/<patch>/replace
+ * /sys/kernel/livepatch/<patch>/load_counter
  * /sys/kernel/livepatch/<patch>/<object>
  * /sys/kernel/livepatch/<patch>/<object>/patched
  * /sys/kernel/livepatch/<patch>/<object>/<function,sympos>
@@ -452,15 +456,26 @@ static ssize_t replace_show(struct kobject *kobj,
 	return sysfs_emit(buf, "%d\n", patch->replace);
 }
 
+static ssize_t load_counter_show(struct kobject *kobj,
+				 struct kobj_attribute *attr, char *buf)
+{
+	struct klp_patch *patch;
+
+	patch = container_of(kobj, struct klp_patch, kobj);
+	return sysfs_emit(buf, "%d\n", patch->load_counter);
+}
+
 static struct kobj_attribute enabled_kobj_attr = __ATTR_RW(enabled);
 static struct kobj_attribute transition_kobj_attr = __ATTR_RO(transition);
 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
 static struct kobj_attribute replace_kobj_attr = __ATTR_RO(replace);
+static struct kobj_attribute load_counter_kobj_attr = __ATTR_RO(load_counter);
 static struct attribute *klp_patch_attrs[] = {
 	&enabled_kobj_attr.attr,
 	&transition_kobj_attr.attr,
 	&force_kobj_attr.attr,
 	&replace_kobj_attr.attr,
+	&load_counter_kobj_attr.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(klp_patch);
@@ -934,6 +949,7 @@ static void klp_init_patch_early(struct klp_patch *patch)
 	INIT_LIST_HEAD(&patch->list);
 	INIT_LIST_HEAD(&patch->obj_list);
 	kobject_init(&patch->kobj, &klp_ktype_patch);
+	patch->load_counter = klp_load_counter + 1;
 	patch->enabled = false;
 	patch->forced = false;
 	INIT_WORK(&patch->free_work, klp_free_patch_work_fn);
@@ -1050,6 +1066,7 @@ static int __klp_enable_patch(struct klp_patch *patch)
 	}
 
 	klp_start_transition();
+	klp_load_counter++;
 	patch->enabled = true;
 
 	klp_try_complete_transition();
-- 
2.46.1

Any better (shorter) name would be appreciated ;-)

Best Regards,
Petr

Hi Petr!

> On Sep 24, 2024, at 19:27, Petr Mladek <pmladek@suse.com> wrote:
> 
> This does not work well. It uses the order on the stack when
> the livepatch is being loaded. It is not updated when any livepatch gets
> removed. It might create wrong values.
> 
> I have even tried to reproduce this:
> 
> # modprobe livepatch-sample
> # modprobe livepatch-shadow-fix1
> # cat /sys/kernel/livepatch/livepatch_sample/order
> 1
> # cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
> 2
> 
> # echo 0 >/sys/kernel/livepatch/livepatch_sample/enabled
> # rmmod livepatch_sample
> # cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
> 2
> 
> # modprobe livepatch-sample
> # cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
> 2
> # cat /sys/kernel/livepatch/livepatch_sample/order
> 2
> 
> BANG: The livepatches have the same order.

My bad... I tested this patch in my environment but missed this
important scenario. Thank you very much, Petr!

This patch does have a problem. I need to rewrite it.

I am sorry, again.

Sincerely,
Wardenjohn

On Tue 2024-09-24 13:27:58, Petr Mladek wrote:
> On Fri 2024-09-20 17:04:03, Wardenjohn wrote:
> > This feature provides livepatch patch order information.
> > With the order sysfs interface of a klp_patch, we can
> > use the patch order to find out which patch's implementation
> > of a function is currently active.
> > 
> > After the discussion, we decided that a patch-level sysfs
> > interface is the only acceptable way to introduce this
> > information.
> > 
> > This feature works like this:
> > cat /sys/kernel/livepatch/livepatch_1/order -> 1
> > means the livepatch_1 module is the 1st klp patch applied.
> > 
> > cat /sys/kernel/livepatch/livepatch_module/order -> N
> > means the livepatch_module is the Nth klp patch applied
> > to the system.
> > 
> > --- a/kernel/livepatch/transition.c
> > +++ b/kernel/livepatch/transition.c
> > @@ -46,6 +46,15 @@ EXPORT_SYMBOL(klp_sched_try_switch_key);
> > 
> >  #endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_CALL */
> > 
> > +static inline int klp_get_patch_order(struct klp_patch *patch)
> > +{
> > +	int order = 0;
> > +
> > +	klp_for_each_patch(patch)
> > +		order = order + 1;
> > +	return order;
> > +}
> 
> This does not work well. It uses the order on the stack when
> the livepatch is being loaded. It is not updated when any livepatch gets
> removed. It might create wrong values.
> 
> I have even tried to reproduce this:
> 
> # modprobe livepatch-sample
> # modprobe livepatch-shadow-fix1
> # cat /sys/kernel/livepatch/livepatch_sample/order
> 1
> # cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
> 2
> 
> # echo 0 >/sys/kernel/livepatch/livepatch_sample/enabled
> # rmmod livepatch_sample
> # cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
> 2
> 
> # modprobe livepatch-sample
> # cat /sys/kernel/livepatch/livepatch_shadow_fix1/order
> 2
> # cat /sys/kernel/livepatch/livepatch_sample/order
> 2
> 
> BANG: The livepatches have the same order.
> 
> I suggest to replace this with a global load counter. Something like:

Wait! Miroslav asked me on chat about a possibility to use klp_mutex
in the sysfs order_show() callback. It is a good point.

If I get it correctly then we actually could take klp_mutex in the sysfs
callbacks associated with struct klp_patch, similar to enabled_store().
It should be safe because the "final" kobject_put(&patch->kobj) is called
without klp_mutex, see klp_free_patch_finish(). It was explicitly done
this way to allow taking klp_mutex in enabled_store().

Note that it was not safe to take klp_mutex in the sysfs callback
associated with struct klp_func. The "final" kobject_put(&func->kobj)
is called under klp_mutex, see __klp_free_funcs().

Back to the order_show(). We could take klp_mutex there => we do not
need to store the order in struct klp_patch. Instead, we could do
something like:

static ssize_t order_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct klp_patch *this_patch, *patch;
	int order;

	this_patch = container_of(kobj, struct klp_patch, kobj);

	mutex_lock(&klp_mutex);

	order = 0;
	klp_for_each_patch(patch) {
		order++;
		if (patch == this_patch)
			break;
	}

	mutex_unlock(&klp_mutex);

	return sysfs_emit(buf, "%d\n", order);
}

BTW: I would prefer to rename the attribute from "order" to "stack_order".
IMHO, it would make the meaning more clear.

Best Regards,
Petr
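
Assuming the attribute ends up being exposed as "stack_order" with the
semantics sketched above (the position of the patch on the stack,
recomputed under klp_mutex on every read), a userspace consumer could find
the patch currently on top of the stack with a sketch like the one below.
This is only an illustration against the proposed interface; the attribute
name is the suggested rename and does not exist in released kernels.

#include <dirent.h>
#include <stdio.h>

int main(void)
{
        DIR *dir = opendir("/sys/kernel/livepatch");
        struct dirent *ent;
        char path[512], top[256] = "";
        int order, top_order = 0;
        FILE *f;

        if (!dir)
                return 1;

        while ((ent = readdir(dir)) != NULL) {
                if (ent->d_name[0] == '.')
                        continue;
                snprintf(path, sizeof(path),
                         "/sys/kernel/livepatch/%s/stack_order", ent->d_name);
                f = fopen(path, "r");
                if (!f)
                        continue;       /* attribute not present on this kernel */
                /* remember the patch with the highest stack position */
                if (fscanf(f, "%d", &order) == 1 && order > top_order) {
                        top_order = order;
                        snprintf(top, sizeof(top), "%s", ent->d_name);
                }
                fclose(f);
        }
        closedir(dir);

        if (top_order)
                printf("top of stack: %s (stack_order %d)\n", top, top_order);
        return 0;
}

The patch on top of the stack is the most recently applied one, so its
implementation of any commonly patched function is the active one, which
is the information the original submission set out to expose.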