From: Donglin Peng <pengdonglin@xiaomi.com>
Introduce btf__permute() API to allow in-place rearrangement of BTF types.
This function reorganizes BTF type order according to a provided array of
type IDs, updating all type references to maintain consistency.
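A minimal usage sketch (the BTF object and type IDs below are purely
illustrative, and error handling is omitted): swap the IDs of the first two
types of a base BTF object while keeping every other type in place:

  __u32 cnt = btf__type_cnt(btf) - 1;   /* number of non-VOID types */
  __u32 *id_map = calloc(cnt, sizeof(*id_map));
  __u32 i;
  int err;

  for (i = 0; i < cnt; i++)
          id_map[i] = i + 1;            /* identity: id_map[orig_id - 1] = new_id */
  id_map[0] = 2;                        /* type with ID 1 moves to ID 2 */
  id_map[1] = 1;                        /* type with ID 2 moves to ID 1 */

  err = btf__permute(btf, id_map, cnt, NULL);  /* NULL opts: no .BTF.ext to update */
  free(id_map);

For split BTF the index would instead be
original_id - btf__type_cnt(btf__base_btf(btf)), as described in the
btf__permute() doc comment.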
Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Song Liu <song@kernel.org>
Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
---
tools/lib/bpf/btf.c | 166 +++++++++++++++++++++++++++++++++++++++
tools/lib/bpf/btf.h | 43 ++++++++++
tools/lib/bpf/libbpf.map | 1 +
3 files changed, 210 insertions(+)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 18907f0fcf9f..ab95ff19fde3 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -5829,3 +5829,169 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
btf->owns_base = false;
return libbpf_err(err);
}
+
+struct btf_permute {
+ struct btf *btf;
+ __u32 *id_map;
+};
+
+/* Callback function to remap individual type ID references */
+static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
+{
+ struct btf_permute *p = ctx;
+ __u32 new_type_id = *type_id;
+
+ /* skip references that point into the base BTF or VOID */
+ if (new_type_id < p->btf->start_id)
+ return 0;
+
+ /* invalid reference id */
+ if (new_type_id >= btf__type_cnt(p->btf))
+ return -EINVAL;
+
+ new_type_id = p->id_map[new_type_id - p->btf->start_id];
+ /* referencing a dropped type is not allowed */
+ if (new_type_id == 0)
+ return -EINVAL;
+
+ *type_id = new_type_id;
+ return 0;
+}
+
+int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
+ const struct btf_permute_opts *opts)
+{
+ struct btf_permute p;
+ struct btf_ext *btf_ext;
+ void *next_type, *end_type;
+ void *nt, *new_types = NULL;
+ int err = 0, i, new_type_len;
+ __u32 *order_map = NULL;
+ __u32 id, new_nr_types = 0;
+
+ if (!OPTS_VALID(opts, btf_permute_opts) || id_map_cnt != btf->nr_types)
+ return libbpf_err(-EINVAL);
+
+ /* order_map maps each new type position to the original type ID stored there */
+ order_map = calloc(btf->nr_types, sizeof(*id_map));
+ if (!order_map) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ new_types = calloc(btf->hdr->type_len, 1);
+ if (!new_types) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (btf_ensure_modifiable(btf)) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ for (i = 0; i < id_map_cnt; i++) {
+ id = id_map[i];
+ /* Drop the specified type */
+ if (id == 0)
+ continue;
+ /* Invalid id */
+ if (id < btf->start_id || id >= btf__type_cnt(btf)) {
+ err = -EINVAL;
+ goto done;
+ }
+ id -= btf->start_id;
+ /* Multiple types cannot be mapped to the same ID */
+ if (order_map[id]) {
+ err = -EINVAL;
+ goto done;
+ }
+ order_map[id] = i + btf->start_id;
+ new_nr_types = max(id + 1, new_nr_types);
+ }
+
+ /* Check for missing IDs */
+ for (i = 0; i < new_nr_types; i++) {
+ if (order_map[i] == 0) {
+ err = -EINVAL;
+ goto done;
+ }
+ }
+
+ p.btf = btf;
+ p.id_map = id_map;
+ nt = new_types;
+ for (i = 0; i < new_nr_types; i++) {
+ struct btf_field_iter it;
+ const struct btf_type *t;
+ __u32 *type_id;
+ int type_size;
+
+ id = order_map[i];
+ /* must be a valid type ID */
+ t = btf__type_by_id(btf, id);
+ if (!t) {
+ err = -EINVAL;
+ goto done;
+ }
+ type_size = btf_type_size(t);
+ memcpy(nt, t, type_size);
+
+ /* Fix up referenced IDs for BTF */
+ err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
+ if (err)
+ goto done;
+ while ((type_id = btf_field_iter_next(&it))) {
+ err = btf_permute_remap_type_id(type_id, &p);
+ if (err)
+ goto done;
+ }
+
+ nt += type_size;
+ }
+
+ /* Fix up referenced IDs for btf_ext */
+ btf_ext = OPTS_GET(opts, btf_ext, NULL);
+ if (btf_ext) {
+ err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
+ if (err)
+ goto done;
+ }
+
+ new_type_len = nt - new_types;
+ next_type = new_types;
+ end_type = next_type + new_type_len;
+ i = 0;
+ while (next_type + sizeof(struct btf_type) <= end_type) {
+ btf->type_offs[i++] = next_type - new_types;
+ next_type += btf_type_size(next_type);
+ }
+
+ /* Resize */
+ if (new_type_len < btf->hdr->type_len) {
+ void *tmp_types;
+
+ tmp_types = realloc(new_types, new_type_len);
+ if (new_type_len && !tmp_types) {
+ err = -ENOMEM;
+ goto done;
+ }
+ new_types = tmp_types;
+ btf->nr_types = new_nr_types;
+ btf->type_offs_cap = btf->nr_types;
+ btf->types_data_cap = new_type_len;
+ btf->hdr->type_len = new_type_len;
+ btf->hdr->str_off = new_type_len;
+ btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len;
+ }
+
+ free(order_map);
+ free(btf->types_data);
+ btf->types_data = new_types;
+ return 0;
+
+done:
+ free(order_map);
+ free(new_types);
+ return libbpf_err(err);
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index ccfd905f03df..e63dcce531b3 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -273,6 +273,49 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
*/
LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
+struct btf_permute_opts {
+ size_t sz;
+ /* optional .BTF.ext info along the main BTF info */
+ struct btf_ext *btf_ext;
+ size_t :0;
+};
+#define btf_permute_opts__last_field btf_ext
+
+/**
+ * @brief **btf__permute()** performs in-place BTF type rearrangement
+ * @param btf BTF object to permute
+ * @param id_map Array mapping original type IDs to new IDs
+ * @param id_map_cnt Number of elements in @id_map
+ * @param opts Optional parameters for BTF extension updates
+ * @return 0 on success, negative error code on failure
+ *
+ * **btf__permute()** rearranges BTF types according to the specified ID mapping.
+ * The @id_map array defines the new type ID for each original type ID.
+ *
+ * For **base BTF**:
+ * - @id_map must include all types from ID 1 to `btf__type_cnt(btf)-1`
+ * - @id_map_cnt should be `btf__type_cnt(btf) - 1`
+ * - Mapping uses `id_map[original_id - 1] = new_id`
+ *
+ * For **split BTF**:
+ * - @id_map should cover only split types
+ * - @id_map_cnt should be `btf__type_cnt(btf) - btf__type_cnt(btf__base_btf(btf))`
+ * - Mapping uses `id_map[original_id - btf__type_cnt(btf__base_btf(btf))] = new_id`
+ *
+ * Setting @id_map element to 0 drops the corresponding type. Dropped types must not
+ * be referenced by any retained types. After permutation, type references in BTF
+ * data and optional extension are updated automatically.
+ *
+ * Note: Dropping types may orphan some strings, requiring subsequent **btf__dedup()**
+ * to clean up unreferenced strings.
+ *
+ * On error, returns negative error code and sets errno:
+ * - `-EINVAL`: Invalid parameters or ID mapping (duplicates, out-of-range)
+ * - `-ENOMEM`: Memory allocation failure
+ */
+LIBBPF_API int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
+ const struct btf_permute_opts *opts);
+
struct btf_dump;
struct btf_dump_opts {
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 8ed8749907d4..b778e5a5d0a8 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -451,4 +451,5 @@ LIBBPF_1.7.0 {
global:
bpf_map__set_exclusive_program;
bpf_map__exclusive_program;
+ btf__permute;
} LIBBPF_1.6.0;
--
2.34.1
On Tue, Nov 18, 2025 at 7:21 PM Donglin Peng <dolinux.peng@gmail.com> wrote:
>
> From: Donglin Peng <pengdonglin@xiaomi.com>
>
> Introduce btf__permute() API to allow in-place rearrangement of BTF types.
> This function reorganizes BTF type order according to a provided array of
> type IDs, updating all type references to maintain consistency.
>
> Cc: Eduard Zingerman <eddyz87@gmail.com>
> Cc: Alexei Starovoitov <ast@kernel.org>
> Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
> Cc: Alan Maguire <alan.maguire@oracle.com>
> Cc: Song Liu <song@kernel.org>
> Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
> Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
> ---
> tools/lib/bpf/btf.c | 166 +++++++++++++++++++++++++++++++++++++++
> tools/lib/bpf/btf.h | 43 ++++++++++
> tools/lib/bpf/libbpf.map | 1 +
> 3 files changed, 210 insertions(+)
>
> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> index 18907f0fcf9f..ab95ff19fde3 100644
> --- a/tools/lib/bpf/btf.c
> +++ b/tools/lib/bpf/btf.c
> @@ -5829,3 +5829,169 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
> btf->owns_base = false;
> return libbpf_err(err);
> }
> +
> +struct btf_permute {
> + struct btf *btf;
> + __u32 *id_map;
> +};
> +
> +/* Callback function to remap individual type ID references */
> +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
> +{
> + struct btf_permute *p = ctx;
> + __u32 new_type_id = *type_id;
> +
> + /* skip references that point into the base BTF or VOID */
> + if (new_type_id < p->btf->start_id)
> + return 0;
> +
> + /* invalid reference id */
> + if (new_type_id >= btf__type_cnt(p->btf))
> + return -EINVAL;
> +
> + new_type_id = p->id_map[new_type_id - p->btf->start_id];
> + /* reference a dropped type is not allowed */
> + if (new_type_id == 0)
> + return -EINVAL;
see below, this shouldn't happen, let's drop redundant check
> +
> + *type_id = new_type_id;
> + return 0;
> +}
> +
> +int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
> + const struct btf_permute_opts *opts)
> +{
> + struct btf_permute p;
> + struct btf_ext *btf_ext;
> + void *next_type, *end_type;
> + void *nt, *new_types = NULL;
> + int err = 0, i, new_type_len;
> + __u32 *order_map = NULL;
> + __u32 id, new_nr_types = 0;
> +
> + if (!OPTS_VALID(opts, btf_permute_opts) || id_map_cnt != btf->nr_types)
> + return libbpf_err(-EINVAL);
> +
> + /* used to record the storage sequence of types */
> + order_map = calloc(btf->nr_types, sizeof(*id_map));
> + if (!order_map) {
> + err = -ENOMEM;
> + goto done;
> + }
> +
> + new_types = calloc(btf->hdr->type_len, 1);
> + if (!new_types) {
> + err = -ENOMEM;
> + goto done;
> + }
> +
> + if (btf_ensure_modifiable(btf)) {
> + err = -ENOMEM;
> + goto done;
> + }
> +
> + for (i = 0; i < id_map_cnt; i++) {
> + id = id_map[i];
> + /* Drop the specified type */
> + if (id == 0)
> + continue;
if we don't allow this (no support for deletion, I wouldn't rush to
add that right now)...
pw-bot: cr
> + /* Invalid id */
obvious statement, IMO, please drop the comment
> + if (id < btf->start_id || id >= btf__type_cnt(btf)) {
> + err = -EINVAL;
> + goto done;
> + }
> + id -= btf->start_id;
> + /* Multiple types cannot be mapped to the same ID */
> + if (order_map[id]) {
> + err = -EINVAL;
> + goto done;
> + }
> + order_map[id] = i + btf->start_id;
> + new_nr_types = max(id + 1, new_nr_types);
> + }
> +
> + /* Check for missing IDs */
> + for (i = 0; i < new_nr_types; i++) {
> + if (order_map[i] == 0) {
> + err = -EINVAL;
> + goto done;
> + }
> + }
... then you won't need this check at all, because we enforced that
each remapped ID is different and we have exactly nr_types of them.
Same for new_nr_types calculation above, seems redundant
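I.e., with the id == 0 case gone, the validation loop can be as simple as
(untested sketch):

for (i = 0; i < id_map_cnt; i++) {
    id = id_map[i];
    if (id < btf->start_id || id >= btf__type_cnt(btf)) {
        err = -EINVAL;
        goto done;
    }
    id -= btf->start_id;
    if (order_map[id]) {
        err = -EINVAL;
        goto done;
    }
    order_map[id] = i + btf->start_id;
}

and both new_nr_types and the follow-up "missing IDs" scan over order_map
can go away.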
> +
> + p.btf = btf;
> + p.id_map = id_map;
> + nt = new_types;
> + for (i = 0; i < new_nr_types; i++) {
> + struct btf_field_iter it;
> + const struct btf_type *t;
> + __u32 *type_id;
> + int type_size;
> +
> + id = order_map[i];
> + /* must be a valid type ID */
redundant comment, please drop
> + t = btf__type_by_id(btf, id);
> + if (!t) {
no need to check this, we already validated that all types are valid earlier
> + err = -EINVAL;
> + goto done;
> + }
> + type_size = btf_type_size(t);
> + memcpy(nt, t, type_size);
> +
> + /* Fix up referenced IDs for BTF */
> + err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
> + if (err)
> + goto done;
> + while ((type_id = btf_field_iter_next(&it))) {
> + err = btf_permute_remap_type_id(type_id, &p);
> + if (err)
> + goto done;
> + }
> +
> + nt += type_size;
> + }
> +
> + /* Fix up referenced IDs for btf_ext */
> + btf_ext = OPTS_GET(opts, btf_ext, NULL);
> + if (btf_ext) {
> + err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
> + if (err)
> + goto done;
> + }
> +
> + new_type_len = nt - new_types;
new_type_len has to be exactly the same as the old size, this is redundant
> + next_type = new_types;
> + end_type = next_type + new_type_len;
> + i = 0;
> + while (next_type + sizeof(struct btf_type) <= end_type) {
while (next_type < end_type)?
Reference to struct btf_type is confusing, as generally type is bigger
than just sizeof(struct btf_type). But there is no need for this, with
correct code next_type < end_type is sufficient check
But really, this can also be written cleanly as a simple for loop
for (i = 0; i < nr_types; i++) {
btf->type_offs[i] = next_type - new_types;
next_type += btf_type_size(next_type);
}
> + btf->type_offs[i++] = next_type - new_types;
> + next_type += btf_type_size(next_type);
> + }
> +
> + /* Resize */
there cannot be any resizing, drop this, you only need to reassign
btf->types_data, that's all
> + if (new_type_len < btf->hdr->type_len) {
> + void *tmp_types;
> +
> + tmp_types = realloc(new_types, new_type_len);
> + if (new_type_len && !tmp_types) {
> + err = -ENOMEM;
> + goto done;
> + }
> + new_types = tmp_types;
> + btf->nr_types = new_nr_types;
> + btf->type_offs_cap = btf->nr_types;
> + btf->types_data_cap = new_type_len;
> + btf->hdr->type_len = new_type_len;
> + btf->hdr->str_off = new_type_len;
> + btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len;
> + }
> +
> + free(order_map);
> + free(btf->types_data);
> + btf->types_data = new_types;
> + return 0;
> +
> +done:
> + free(order_map);
> + free(new_types);
> + return libbpf_err(err);
> +}
> diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
> index ccfd905f03df..e63dcce531b3 100644
> --- a/tools/lib/bpf/btf.h
> +++ b/tools/lib/bpf/btf.h
> @@ -273,6 +273,49 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
> */
> LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
>
> +struct btf_permute_opts {
> + size_t sz;
> + /* optional .BTF.ext info along the main BTF info */
> + struct btf_ext *btf_ext;
> + size_t :0;
> +};
> +#define btf_permute_opts__last_field btf_ext
> +
> +/**
> + * @brief **btf__permute()** performs in-place BTF type rearrangement
> + * @param btf BTF object to permute
> + * @param id_map Array mapping original type IDs to new IDs
> + * @param id_map_cnt Number of elements in @id_map
> + * @param opts Optional parameters for BTF extension updates
> + * @return 0 on success, negative error code on failure
> + *
> + * **btf__permute()** rearranges BTF types according to the specified ID mapping.
> + * The @id_map array defines the new type ID for each original type ID.
> + *
> + * For **base BTF**:
> + * - @id_map must include all types from ID 1 to `btf__type_cnt(btf)-1`
> + * - @id_map_cnt should be `btf__type_cnt(btf) - 1`
> + * - Mapping uses `id_map[original_id - 1] = new_id`
> + *
> + * For **split BTF**:
> + * - @id_map should cover only split types
> + * - @id_map_cnt should be `btf__type_cnt(btf) - btf__type_cnt(btf__base_btf(btf))`
> + * - Mapping uses `id_map[original_id - btf__type_cnt(btf__base_btf(btf))] = new_id`
> + *
> + * Setting @id_map element to 0 drops the corresponding type. Dropped types must not
> + * be referenced by any retained types. After permutation, type references in BTF
> + * data and optional extension are updated automatically.
let's not add deletion support just yet
> + *
> + * Note: Dropping types may orphan some strings, requiring subsequent **btf__dedup()**
> + * to clean up unreferenced strings.
one more reason to not add deletion just yet. It's good to have an API
that can express this, but we don't have to support deletion just yet.
> + *
> + * On error, returns negative error code and sets errno:
> + * - `-EINVAL`: Invalid parameters or ID mapping (duplicates, out-of-range)
> + * - `-ENOMEM`: Memory allocation failure
> + */
> +LIBBPF_API int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
> + const struct btf_permute_opts *opts);
> +
> struct btf_dump;
>
> struct btf_dump_opts {
> diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> index 8ed8749907d4..b778e5a5d0a8 100644
> --- a/tools/lib/bpf/libbpf.map
> +++ b/tools/lib/bpf/libbpf.map
> @@ -451,4 +451,5 @@ LIBBPF_1.7.0 {
> global:
> bpf_map__set_exclusive_program;
> bpf_map__exclusive_program;
> + btf__permute;
> } LIBBPF_1.6.0;
> --
> 2.34.1
>
On Wed, 2025-11-19 at 10:21 -0800, Andrii Nakryiko wrote:
> On Tue, Nov 18, 2025 at 7:21 PM Donglin Peng <dolinux.peng@gmail.com> wrote:
[...]
> > + nt = new_types;
> > + for (i = 0; i < new_nr_types; i++) {
> > + struct btf_field_iter it;
> > + const struct btf_type *t;
> > + __u32 *type_id;
> > + int type_size;
> > +
> > + id = order_map[i];
> > + /* must be a valid type ID */
>
> redundant comment, please drop
>
> > + t = btf__type_by_id(btf, id);
> > + if (!t) {
>
> no need to check this, we already validated that all types are valid earlier
>
> > + err = -EINVAL;
> > + goto done;
> > + }
> > + type_size = btf_type_size(t);
> > + memcpy(nt, t, type_size);
> > +
> > + /* Fix up referenced IDs for BTF */
> > + err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
> > + if (err)
> > + goto done;
> > + while ((type_id = btf_field_iter_next(&it))) {
> > + err = btf_permute_remap_type_id(type_id, &p);
> > + if (err)
> > + goto done;
> > + }
> > +
> > + nt += type_size;
> > + }
> > +
> > + /* Fix up referenced IDs for btf_ext */
> > + btf_ext = OPTS_GET(opts, btf_ext, NULL);
> > + if (btf_ext) {
> > + err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
> > + if (err)
> > + goto done;
> > + }
> > +
> > + new_type_len = nt - new_types;
>
>
> new_type_len has to be exactly the same as the old size, this is redundant
>
> > + next_type = new_types;
> > + end_type = next_type + new_type_len;
> > + i = 0;
> > + while (next_type + sizeof(struct btf_type) <= end_type) {
>
> while (next_type < end_type)?
>
> Reference to struct btf_type is confusing, as generally type is bigger
> than just sizeof(struct btf_type). But there is no need for this, with
> correct code next_type < end_type is sufficient check
>
> But really, this can also be written cleanly as a simple for loop
>
> for (i = 0; i < nr_types; i++) {
> btf->type_offs[i] = next_type - new_types;
> next_type += btf_type_size(next_type);
> }
>
Adding to what Andrii says, the whole group of assignments is
reducible:
+ new_type_len = nt - new_types;
+ next_type = new_types;
+ end_type = next_type + new_type_len;
=> end_type = new_types + new_type_len; // subst next_type -> new_types
=> end_type = new_types + nt - new_types; // subst new_types -> nt - new_types
=> end_type = nt
Why recomputing it in such a convoluted way?
> > + btf->type_offs[i++] = next_type - new_types;
> > + next_type += btf_type_size(next_type);
> > + }
> > +
[...]
On Fri, Nov 21, 2025 at 7:21 AM Eduard Zingerman <eddyz87@gmail.com> wrote:
>
> On Wed, 2025-11-19 at 10:21 -0800, Andrii Nakryiko wrote:
> > On Tue, Nov 18, 2025 at 7:21 PM Donglin Peng <dolinux.peng@gmail.com> wrote:
>
> [...]
>
> > > + nt = new_types;
> > > + for (i = 0; i < new_nr_types; i++) {
> > > + struct btf_field_iter it;
> > > + const struct btf_type *t;
> > > + __u32 *type_id;
> > > + int type_size;
> > > +
> > > + id = order_map[i];
> > > + /* must be a valid type ID */
> >
> > redundant comment, please drop
> >
> > > + t = btf__type_by_id(btf, id);
> > > + if (!t) {
> >
> > no need to check this, we already validated that all types are valid earlier
> >
> > > + err = -EINVAL;
> > > + goto done;
> > > + }
> > > + type_size = btf_type_size(t);
> > > + memcpy(nt, t, type_size);
> > > +
> > > + /* Fix up referenced IDs for BTF */
> > > + err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
> > > + if (err)
> > > + goto done;
> > > + while ((type_id = btf_field_iter_next(&it))) {
> > > + err = btf_permute_remap_type_id(type_id, &p);
> > > + if (err)
> > > + goto done;
> > > + }
> > > +
> > > + nt += type_size;
> > > + }
> > > +
> > > + /* Fix up referenced IDs for btf_ext */
> > > + btf_ext = OPTS_GET(opts, btf_ext, NULL);
> > > + if (btf_ext) {
> > > + err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
> > > + if (err)
> > > + goto done;
> > > + }
> > > +
> > > + new_type_len = nt - new_types;
> >
> >
> > new_type_len has to be exactly the same as the old size, this is redundant
> >
> > > + next_type = new_types;
> > > + end_type = next_type + new_type_len;
> > > + i = 0;
> > > + while (next_type + sizeof(struct btf_type) <= end_type) {
> >
> > while (next_type < end_type)?
> >
> > Reference to struct btf_type is confusing, as generally type is bigger
> > than just sizeof(struct btf_type). But there is no need for this, with
> > correct code next_type < end_type is sufficient check
> >
> > But really, this can also be written cleanly as a simple for loop
> >
> > for (i = 0; i < nr_types; i++) {
> > btf->type_offs[i] = next_type - new_types;
> > next_type += btf_type_size(next_type);
> > }
> >
>
> Adding to what Andrii says, the whole group of assignments is
> reducible:
>
> + new_type_len = nt - new_types;
> + next_type = new_types;
> + end_type = next_type + new_type_len;
>
> => end_type = new_types + new_type_len; // subst next_type -> new_types
> => end_type = new_types + nt - new_types; // subst new_types -> nt - new_types
> => end_type = nt
>
> Why recomputing it in such a convoluted way?
Sorry, I referred to the implementation of btf_parse_type_sec and could have
thought more about how to refactor it.
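Combining this with Andrii's for-loop suggestion, the end_type/new_type_len
computation isn't needed at all; I plan to rebuild the offsets with something
like (untested):

    next_type = new_types;
    for (i = 0; i < btf->nr_types; i++) {
        btf->type_offs[i] = next_type - new_types;
        next_type += btf_type_size(next_type);
    }

since the permutation changes neither the number of types nor the total size
of the type section.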
>
> > > + btf->type_offs[i++] = next_type - new_types;
> > > + next_type += btf_type_size(next_type);
> > > + }
> > > +
>
> [...]
On Thu, Nov 20, 2025 at 2:22 AM Andrii Nakryiko
<andrii.nakryiko@gmail.com> wrote:
>
> On Tue, Nov 18, 2025 at 7:21 PM Donglin Peng <dolinux.peng@gmail.com> wrote:
> >
> > From: Donglin Peng <pengdonglin@xiaomi.com>
> >
> > Introduce btf__permute() API to allow in-place rearrangement of BTF types.
> > This function reorganizes BTF type order according to a provided array of
> > type IDs, updating all type references to maintain consistency.
> >
> > Cc: Eduard Zingerman <eddyz87@gmail.com>
> > Cc: Alexei Starovoitov <ast@kernel.org>
> > Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
> > Cc: Alan Maguire <alan.maguire@oracle.com>
> > Cc: Song Liu <song@kernel.org>
> > Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
> > Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
> > ---
> > tools/lib/bpf/btf.c | 166 +++++++++++++++++++++++++++++++++++++++
> > tools/lib/bpf/btf.h | 43 ++++++++++
> > tools/lib/bpf/libbpf.map | 1 +
> > 3 files changed, 210 insertions(+)
> >
> > diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> > index 18907f0fcf9f..ab95ff19fde3 100644
> > --- a/tools/lib/bpf/btf.c
> > +++ b/tools/lib/bpf/btf.c
> > @@ -5829,3 +5829,169 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
> > btf->owns_base = false;
> > return libbpf_err(err);
> > }
> > +
> > +struct btf_permute {
> > + struct btf *btf;
> > + __u32 *id_map;
> > +};
> > +
> > +/* Callback function to remap individual type ID references */
> > +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
> > +{
> > + struct btf_permute *p = ctx;
> > + __u32 new_type_id = *type_id;
> > +
> > + /* skip references that point into the base BTF or VOID */
> > + if (new_type_id < p->btf->start_id)
> > + return 0;
> > +
> > + /* invalid reference id */
> > + if (new_type_id >= btf__type_cnt(p->btf))
> > + return -EINVAL;
> > +
> > + new_type_id = p->id_map[new_type_id - p->btf->start_id];
> > + /* reference a dropped type is not allowed */
> > + if (new_type_id == 0)
> > + return -EINVAL;
>
> see below, this shouldn't happen, let's drop redundant check
Thanks, I will remove it.
>
> > +
> > + *type_id = new_type_id;
> > + return 0;
> > +}
> > +
> > +int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
> > + const struct btf_permute_opts *opts)
> > +{
> > + struct btf_permute p;
> > + struct btf_ext *btf_ext;
> > + void *next_type, *end_type;
> > + void *nt, *new_types = NULL;
> > + int err = 0, i, new_type_len;
> > + __u32 *order_map = NULL;
> > + __u32 id, new_nr_types = 0;
> > +
> > + if (!OPTS_VALID(opts, btf_permute_opts) || id_map_cnt != btf->nr_types)
> > + return libbpf_err(-EINVAL);
> > +
> > + /* used to record the storage sequence of types */
> > + order_map = calloc(btf->nr_types, sizeof(*id_map));
> > + if (!order_map) {
> > + err = -ENOMEM;
> > + goto done;
> > + }
> > +
> > + new_types = calloc(btf->hdr->type_len, 1);
> > + if (!new_types) {
> > + err = -ENOMEM;
> > + goto done;
> > + }
> > +
> > + if (btf_ensure_modifiable(btf)) {
> > + err = -ENOMEM;
> > + goto done;
> > + }
> > +
> > + for (i = 0; i < id_map_cnt; i++) {
> > + id = id_map[i];
> > + /* Drop the specified type */
> > + if (id == 0)
> > + continue;
>
> if we don't allow this (no support for deletion, I wouldn't rush to
> add that right now)...
Thanks, I will remove it.
>
> pw-bot: cr
>
>
> > + /* Invalid id */
>
> obvious statement, IMO, please drop the comment
Thanks, I will remove it.
>
> > + if (id < btf->start_id || id >= btf__type_cnt(btf)) {
> > + err = -EINVAL;
> > + goto done;
> > + }
> > + id -= btf->start_id;
> > + /* Multiple types cannot be mapped to the same ID */
> > + if (order_map[id]) {
> > + err = -EINVAL;
> > + goto done;
> > + }
> > + order_map[id] = i + btf->start_id;
> > + new_nr_types = max(id + 1, new_nr_types);
> > + }
> > +
> > + /* Check for missing IDs */
> > + for (i = 0; i < new_nr_types; i++) {
> > + if (order_map[i] == 0) {
> > + err = -EINVAL;
> > + goto done;
> > + }
> > + }
>
> ... then you won't need this check at all, because we enforced that
> each remapped ID is different and we have exactly nr_types of them.
> Same for new_nr_types calculation above, seems redundant
Yes, if we don't support the dropping feature, there's no need to do
this. I'll remove it.
>
> > +
> > + p.btf = btf;
> > + p.id_map = id_map;
> > + nt = new_types;
> > + for (i = 0; i < new_nr_types; i++) {
> > + struct btf_field_iter it;
> > + const struct btf_type *t;
> > + __u32 *type_id;
> > + int type_size;
> > +
> > + id = order_map[i];
> > + /* must be a valid type ID */
>
> redundant comment, please drop
Thanks, I will remove it.
>
> > + t = btf__type_by_id(btf, id);
> > + if (!t) {
>
> no need to check this, we already validated that all types are valid earlier
Thanks, I will remove it.
>
> > + err = -EINVAL;
> > + goto done;
> > + }
> > + type_size = btf_type_size(t);
> > + memcpy(nt, t, type_size);
> > +
> > + /* Fix up referenced IDs for BTF */
> > + err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
> > + if (err)
> > + goto done;
> > + while ((type_id = btf_field_iter_next(&it))) {
> > + err = btf_permute_remap_type_id(type_id, &p);
> > + if (err)
> > + goto done;
> > + }
> > +
> > + nt += type_size;
> > + }
> > +
> > + /* Fix up referenced IDs for btf_ext */
> > + btf_ext = OPTS_GET(opts, btf_ext, NULL);
> > + if (btf_ext) {
> > + err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
> > + if (err)
> > + goto done;
> > + }
> > +
> > + new_type_len = nt - new_types;
>
>
> new_type_len has to be exactly the same as the old size, this is redundant
Yes, if we don't support the dropping feature, there's no need to do
this. I'll remove it.
>
> > + next_type = new_types;
> > + end_type = next_type + new_type_len;
> > + i = 0;
> > + while (next_type + sizeof(struct btf_type) <= end_type) {
>
> while (next_type < end_type)?
>
> Reference to struct btf_type is confusing, as generally type is bigger
> than just sizeof(struct btf_type). But there is no need for this, with
> correct code next_type < end_type is sufficient check
>
> But really, this can also be written cleanly as a simple for loop
>
> for (i = 0; i < nr_types; i++) {
> btf->type_offs[i] = next_type - new_types;
> next_type += btf_type_size(next_type);
> }
Great, thanks.
>
> > + btf->type_offs[i++] = next_type - new_types;
> > + next_type += btf_type_size(next_type);
> > + }
> > +
> > + /* Resize */
>
> there cannot be any resizing, drop this, you only need to reassign
> btf->types_data, that's all
Okay, I will do it.
>
> > + if (new_type_len < btf->hdr->type_len) {
> > + void *tmp_types;
> > +
> > + tmp_types = realloc(new_types, new_type_len);
> > + if (new_type_len && !tmp_types) {
> > + err = -ENOMEM;
> > + goto done;
> > + }
> > + new_types = tmp_types;
> > + btf->nr_types = new_nr_types;
> > + btf->type_offs_cap = btf->nr_types;
> > + btf->types_data_cap = new_type_len;
> > + btf->hdr->type_len = new_type_len;
> > + btf->hdr->str_off = new_type_len;
> > + btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len;
> > + }
> > +
> > + free(order_map);
> > + free(btf->types_data);
> > + btf->types_data = new_types;
> > + return 0;
> > +
> > +done:
> > + free(order_map);
> > + free(new_types);
> > + return libbpf_err(err);
> > +}
> > diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
> > index ccfd905f03df..e63dcce531b3 100644
> > --- a/tools/lib/bpf/btf.h
> > +++ b/tools/lib/bpf/btf.h
> > @@ -273,6 +273,49 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
> > */
> > LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
> >
> > +struct btf_permute_opts {
> > + size_t sz;
> > + /* optional .BTF.ext info along the main BTF info */
> > + struct btf_ext *btf_ext;
> > + size_t :0;
> > +};
> > +#define btf_permute_opts__last_field btf_ext
> > +
> > +/**
> > + * @brief **btf__permute()** performs in-place BTF type rearrangement
> > + * @param btf BTF object to permute
> > + * @param id_map Array mapping original type IDs to new IDs
> > + * @param id_map_cnt Number of elements in @id_map
> > + * @param opts Optional parameters for BTF extension updates
> > + * @return 0 on success, negative error code on failure
> > + *
> > + * **btf__permute()** rearranges BTF types according to the specified ID mapping.
> > + * The @id_map array defines the new type ID for each original type ID.
> > + *
> > + * For **base BTF**:
> > + * - @id_map must include all types from ID 1 to `btf__type_cnt(btf)-1`
> > + * - @id_map_cnt should be `btf__type_cnt(btf) - 1`
> > + * - Mapping uses `id_map[original_id - 1] = new_id`
> > + *
> > + * For **split BTF**:
> > + * - @id_map should cover only split types
> > + * - @id_map_cnt should be `btf__type_cnt(btf) - btf__type_cnt(btf__base_btf(btf))`
> > + * - Mapping uses `id_map[original_id - btf__type_cnt(btf__base_btf(btf))] = new_id`
> > + *
> > + * Setting @id_map element to 0 drops the corresponding type. Dropped types must not
> > + * be referenced by any retained types. After permutation, type references in BTF
> > + * data and optional extension are updated automatically.
>
> let's not add deletion support just yet
Thanks, I will modify the annotations.
>
> > + *
> > + * Note: Dropping types may orphan some strings, requiring subsequent **btf__dedup()**
> > + * to clean up unreferenced strings.
>
> one more reason to not add deletion just yet. It's good to have an API
> that can express this, but we don't have to support deletion just yet.
Thanks, I will remove it.
>
> > + *
> > + * On error, returns negative error code and sets errno:
> > + * - `-EINVAL`: Invalid parameters or ID mapping (duplicates, out-of-range)
> > + * - `-ENOMEM`: Memory allocation failure
> > + */
> > +LIBBPF_API int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
> > + const struct btf_permute_opts *opts);
> > +
> > struct btf_dump;
> >
> > struct btf_dump_opts {
> > diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
> > index 8ed8749907d4..b778e5a5d0a8 100644
> > --- a/tools/lib/bpf/libbpf.map
> > +++ b/tools/lib/bpf/libbpf.map
> > @@ -451,4 +451,5 @@ LIBBPF_1.7.0 {
> > global:
> > bpf_map__set_exclusive_program;
> > bpf_map__exclusive_program;
> > + btf__permute;
> > } LIBBPF_1.6.0;
> > --
> > 2.34.1
> >