[RFC PATCH 1/2] libbpf: Add BTF permutation support for type reordering

Posted by Donglin Peng 2 months, 3 weeks ago
From: Donglin Peng <pengdonglin@xiaomi.com>

Introduce the btf__permute() API to allow in-place rearrangement of BTF
types. The function reorders types according to a caller-provided array
that maps each original type ID to a new ID, and updates all type
references so the BTF stays consistent.
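
As a hypothetical illustration (the object and sizes are made up), a
caller reordering a small base BTF with four types (IDs 0..3, where 0 is
VOID) could do something along these lines:

	/* id_map[orig_id] = new_id; id_map[0] must stay 0 for base BTF */
	__u32 id_map[] = { 0, 2, 3, 1 };	/* old type 3 lands right after VOID */
	int err;

	err = btf__permute(btf, id_map, 4 /* == btf__type_cnt(btf) */, NULL);
	if (err)
		return err;	/* errno is also set via libbpf_err() */

For split BTF, id_map covers only the split types and is indexed by the
original ID minus the split section's start ID. Setting an entry to 0
drops that type, provided no retained type still references it.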

Cc: Eduard Zingerman <eddyz87@gmail.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii.nakryiko@gmail.com>
Cc: Alan Maguire <alan.maguire@oracle.com>
Cc: Song Liu <song@kernel.org>
Cc: Xiaoqin Zhang <zhangxiaoqin@xiaomi.com>
Signed-off-by: Donglin Peng <pengdonglin@xiaomi.com>
---
 tools/lib/bpf/btf.c      | 186 +++++++++++++++++++++++++++++++++++++++
 tools/lib/bpf/btf.h      |  43 +++++++++
 tools/lib/bpf/libbpf.map |   1 +
 3 files changed, 230 insertions(+)

diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 18907f0fcf9f..d22a2b0581b3 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -5829,3 +5829,189 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
 		btf->owns_base = false;
 	return libbpf_err(err);
 }
+
+struct btf_permute {
+	struct btf *btf;
+	__u32 *id_map;
+	__u32 offs;
+};
+
+/* Callback function to remap individual type ID references */
+static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
+{
+	struct btf_permute *p = ctx;
+	__u32 new_type_id = *type_id;
+
+	/* skip references that point into the base BTF */
+	if (new_type_id < p->btf->start_id)
+		return 0;
+
+	/* invalid reference id */
+	if (new_type_id >= btf__type_cnt(p->btf))
+		return -EINVAL;
+
+	new_type_id = p->id_map[new_type_id - p->offs];
+	/* referencing a dropped type is not allowed */
+	if (new_type_id == 0)
+		return -EINVAL;
+
+	*type_id = new_type_id;
+	return 0;
+}
+
+int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
+		 const struct btf_permute_opts *opts)
+{
+	struct btf_permute p;
+	struct btf_ext *btf_ext;
+	void *next_type, *end_type;
+	void *nt, *new_types = NULL;
+	int err = 0, n, i, new_type_len;
+	__u32 *order_map = NULL;
+	__u32 offs, id, new_nr_types = 0;
+
+	if (btf__base_btf(btf)) {
+		/*
+		 * For split BTF, the number of types added on the
+		 * top of base BTF
+		 */
+		n = btf->nr_types;
+		offs = btf->start_id;
+	} else if (id_map[0] != 0) {
+		/* id_map[0] must be 0 for base BTF */
+		err = -EINVAL;
+		goto done;
+	} else {
+		/* include VOID type 0 for base BTF */
+		n = btf__type_cnt(btf);
+		offs = 0;
+	}
+
+	if (!OPTS_VALID(opts, btf_permute_opts) || (id_map_cnt != n))
+		return libbpf_err(-EINVAL);
+
+	/* used to record the storage sequence of types */
+	order_map = calloc(n, sizeof(*id_map));
+	if (!order_map) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	new_types = calloc(btf->hdr->type_len, 1);
+	if (!new_types) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	if (btf_ensure_modifiable(btf)) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < id_map_cnt; i++) {
+		id = id_map[i];
+		/*
+		 * 0: Drop the specified type (exclude base BTF type 0).
+		 * For base BTF, type 0 is always preserved.
+		 */
+		if (id == 0)
+			continue;
+		/* Invalid id  */
+		if (id < btf->start_id || id >= btf__type_cnt(btf)) {
+			err = -EINVAL;
+			goto done;
+		}
+		id -= offs;
+		/* Multiple types cannot be mapped to the same ID */
+		if (order_map[id]) {
+			err = -EINVAL;
+			goto done;
+		}
+		order_map[id] = i + offs;
+		new_nr_types = max(id + 1, new_nr_types);
+	}
+
+	/* Check for missing IDs */
+	for (i = offs ? 0 : 1; i < new_nr_types; i++) {
+		if (order_map[i] == 0) {
+			err = -EINVAL;
+			goto done;
+		}
+	}
+
+	p.btf = btf;
+	p.id_map = id_map;
+	p.offs = offs;
+	nt = new_types;
+	for (i = offs ? 0 : 1; i < new_nr_types; i++) {
+		struct btf_field_iter it;
+		const struct btf_type *t;
+		__u32 *type_id;
+		int type_size;
+
+		id = order_map[i];
+		/* must be a valid type ID */
+		t = btf__type_by_id(btf, id);
+		if (!t) {
+			err = -EINVAL;
+			goto done;
+		}
+		type_size = btf_type_size(t);
+		memcpy(nt, t, type_size);
+
+		/* Fix up referenced IDs for BTF */
+		err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
+		if (err)
+			goto done;
+		while ((type_id = btf_field_iter_next(&it))) {
+			err = btf_permute_remap_type_id(type_id, &p);
+			if (err)
+				goto done;
+		}
+
+		nt += type_size;
+	}
+
+	/* Fix up referenced IDs for btf_ext */
+	btf_ext = OPTS_GET(opts, btf_ext, NULL);
+	if (btf_ext) {
+		err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
+		if (err)
+			goto done;
+	}
+
+	new_type_len = nt - new_types;
+	next_type = new_types;
+	end_type = next_type + new_type_len;
+	i = 0;
+	while (next_type + sizeof(struct btf_type) <= end_type) {
+		btf->type_offs[i++] = next_type - new_types;
+		next_type += btf_type_size(next_type);
+	}
+
+	/* Resize */
+	if (new_type_len < btf->hdr->type_len) {
+		void *tmp_types;
+
+		tmp_types = realloc(new_types, new_type_len);
+		if (new_type_len && !tmp_types) {
+			err = -ENOMEM;
+			goto done;
+		}
+		new_types = tmp_types;
+		btf->nr_types = new_nr_types - (offs ? 0 : 1);
+		btf->type_offs_cap = btf->nr_types;
+		btf->types_data_cap = new_type_len;
+		btf->hdr->type_len = new_type_len;
+		btf->hdr->str_off = new_type_len;
+		btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len;
+	}
+	free(btf->types_data);
+	btf->types_data = new_types;
+	return 0;
+
+done:
+	free(order_map);
+	free(new_types);
+	return libbpf_err(err);
+}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index ccfd905f03df..ec4e31e918c3 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -273,6 +273,49 @@ LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts);
  */
 LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf);
 
+struct btf_permute_opts {
+	size_t sz;
+	/* optional .BTF.ext info along the main BTF info */
+	struct btf_ext *btf_ext;
+	size_t :0;
+};
+#define btf_permute_opts__last_field btf_ext
+
+/**
+ * @brief **btf__permute()** performs in-place BTF type rearrangement
+ * @param btf BTF object to permute
+ * @param id_map Array mapping original type IDs to new IDs
+ * @param id_map_cnt Number of elements in @id_map
+ * @param opts Optional parameters for BTF extension updates
+ * @return 0 on success, negative error code on failure
+ *
+ * **btf__permute()** rearranges BTF types according to the specified ID mapping.
+ * The @id_map array defines the new type ID for each original type ID.
+ *
+ * For **base BTF**:
+ * - @id_map must include all types from ID 0 to `btf__type_cnt(btf)-1`
+ * - @id_map_cnt should be `btf__type_cnt(btf)`
+ * - Mapping uses `id_map[original_id] = new_id`
+ *
+ * For **split BTF**:
+ * - @id_map should cover only split types
+ * - @id_map_cnt should be `btf__type_cnt(btf) - btf__type_cnt(btf__base_btf(btf))`
+ * - Mapping uses `id_map[original_id - btf__type_cnt(btf__base_btf(btf))] = new_id`
+ *
+ * Setting an @id_map element to 0 drops the corresponding type (`id_map[0]` must
+ * remain 0 for base BTF; type 0 is never dropped).
+ * Dropped types must not be referenced by any retained types. After permutation,
+ * type references in BTF data and optional extension are updated automatically.
+ *
+ * Note: Dropping types may orphan some strings, requiring subsequent **btf__dedup()**
+ * to clean up unreferenced strings.
+ *
+ * On error, returns negative error code and sets errno:
+ *   - `-EINVAL`: Invalid parameters or ID mapping (duplicates, out-of-range)
+ *   - `-ENOMEM`: Memory allocation failure
+ */
+LIBBPF_API int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
+			    const struct btf_permute_opts *opts);
+
 struct btf_dump;
 
 struct btf_dump_opts {
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 8ed8749907d4..b778e5a5d0a8 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -451,4 +451,5 @@ LIBBPF_1.7.0 {
 	global:
 		bpf_map__set_exclusive_program;
 		bpf_map__exclusive_program;
+		btf__permute;
 } LIBBPF_1.6.0;
-- 
2.34.1
Re: [RFC PATCH 1/2] libbpf: Add BTF permutation support for type reordering
Posted by bot+bpf-ci@kernel.org 2 months, 3 weeks ago
> diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> index 9f141395c..de3aa62fd 100644
> --- a/tools/lib/bpf/btf.c
> +++ b/tools/lib/bpf/btf.c
> @@ -5829,3 +5829,189 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
>  		btf->owns_base = false;
>  	return libbpf_err(err);
>  }
> +
> +struct btf_permute {
> +	struct btf *btf;
> +	__u32 *id_map;
> +	__u32 offs;
> +};
> +
> +/* Callback function to remap individual type ID references */
> +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
> +{

[ ... ]

> +int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
> +		 const struct btf_permute_opts *opts)
> +{
> +	struct btf_permute p;
> +	struct btf_ext *btf_ext;
> +	void *next_type, *end_type;
> +	void *nt, *new_types = NULL;
> +	int err = 0, n, i, new_type_len;
> +	__u32 *order_map = NULL;
> +	__u32 offs, id, new_nr_types = 0;
> +
> +	if (btf__base_btf(btf)) {
> +		/*
> +		 * For split BTF, the number of types added on the
> +		 * top of base BTF
> +		 */
> +		n = btf->nr_types;
> +		offs = btf->start_id;
> +	} else if (id_map[0] != 0) {
> +		/* id_map[0] must be 0 for base BTF */
> +		err = -EINVAL;
> +		goto done;
> +	} else {
> +		/* include VOID type 0 for base BTF */
> +		n = btf__type_cnt(btf);
> +		offs = 0;
> +	}
> +
> +	if (!OPTS_VALID(opts, btf_permute_opts) || (id_map_cnt != n))
> +		return libbpf_err(-EINVAL);
> +
> +	/* used to record the storage sequence of types */
> +	order_map = calloc(n, sizeof(*id_map));
> +	if (!order_map) {
> +		err = -ENOMEM;
> +		goto done;
> +	}
> +
> +	new_types = calloc(btf->hdr->type_len, 1);
> +	if (!new_types) {
> +		err = -ENOMEM;
> +		goto done;
> +	}
> +
> +	if (btf_ensure_modifiable(btf)) {
> +		err = -ENOMEM;
> +		goto done;
> +	}
> +
> +	for (i = 0; i < id_map_cnt; i++) {
> +		id = id_map[i];
> +		/*
> +		 * 0: Drop the specified type (exclude base BTF type 0).
> +		 * For base BTF, type 0 is always preserved.
> +		 */
> +		if (id == 0)
> +			continue;
> +		/* Invalid id  */
> +		if (id < btf->start_id || id >= btf__type_cnt(btf)) {
> +			err = -EINVAL;
> +			goto done;
> +		}
> +		id -= offs;
> +		/* Multiple types cannot be mapped to the same ID */
> +		if (order_map[id]) {
> +			err = -EINVAL;
> +			goto done;
> +		}
> +		order_map[id] = i + offs;
> +		new_nr_types = max(id + 1, new_nr_types);
> +	}
> +
> +	/* Check for missing IDs */
> +	for (i = offs ? 0 : 1; i < new_nr_types; i++) {
> +		if (order_map[i] == 0) {
> +			err = -EINVAL;
> +			goto done;
> +		}
> +	}
> +
> +	p.btf = btf;
> +	p.id_map = id_map;
> +	p.offs = offs;
> +	nt = new_types;
> +	for (i = offs ? 0 : 1; i < new_nr_types; i++) {
> +		struct btf_field_iter it;
> +		const struct btf_type *t;
> +		__u32 *type_id;
> +		int type_size;
> +
> +		id = order_map[i];
> +		/* must be a valid type ID */
> +		t = btf__type_by_id(btf, id);
> +		if (!t) {
> +			err = -EINVAL;
> +			goto done;
> +		}
> +		type_size = btf_type_size(t);
> +		memcpy(nt, t, type_size);
> +
> +		/* Fix up referenced IDs for BTF */
> +		err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
> +		if (err)
> +			goto done;
> +		while ((type_id = btf_field_iter_next(&it))) {
> +			err = btf_permute_remap_type_id(type_id, &p);
> +			if (err)
> +				goto done;
> +		}
> +
> +		nt += type_size;
> +	}
> +
> +	/* Fix up referenced IDs for btf_ext */
> +	btf_ext = OPTS_GET(opts, btf_ext, NULL);
> +	if (btf_ext) {
> +		err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
> +		if (err)
> +			goto done;
> +	}
> +
> +	new_type_len = nt - new_types;
> +	next_type = new_types;
> +	end_type = next_type + new_type_len;
> +	i = 0;
> +	while (next_type + sizeof(struct btf_type) <= end_type) {
> +		btf->type_offs[i++] = next_type - new_types;
> +		next_type += btf_type_size(next_type);
> +	}
> +
> +	/* Resize */
> +	if (new_type_len < btf->hdr->type_len) {
> +		void *tmp_types;
> +
> +		tmp_types = realloc(new_types, new_type_len);
> +		if (new_type_len && !tmp_types) {
> +			err = -ENOMEM;
> +			goto done;
> +		}
> +		new_types = tmp_types;
> +		btf->nr_types = new_nr_types - (offs ? 0 : 1);
> +		btf->type_offs_cap = btf->nr_types;
> +		btf->types_data_cap = new_type_len;
> +		btf->hdr->type_len = new_type_len;
> +		btf->hdr->str_off = new_type_len;
> +		btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len;
> +	}
> +	free(btf->types_data);
> +	btf->types_data = new_types;
> +	return 0;

Does this leak order_map on the success path? It looks like order_map is
allocated above and freed only in the error path at the done label, but
the success path returns here without freeing it.

> +
> +done:
> +	free(order_map);
> +	free(new_types);
> +	return libbpf_err(err);
> +}

[ ... ]


---
AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md

CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19359945665
Re: [RFC PATCH 1/2] libbpf: Add BTF permutation support for type reordering
Posted by Donglin Peng 2 months, 3 weeks ago
On Fri, Nov 14, 2025 at 5:47 PM <bot+bpf-ci@kernel.org> wrote:
>
> > diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
> > index 9f141395c..de3aa62fd 100644
> > --- a/tools/lib/bpf/btf.c
> > +++ b/tools/lib/bpf/btf.c
> > @@ -5829,3 +5829,189 @@ int btf__relocate(struct btf *btf, const struct btf *base_btf)
> >               btf->owns_base = false;
> >       return libbpf_err(err);
> >  }
> > +
> > +struct btf_permute {
> > +     struct btf *btf;
> > +     __u32 *id_map;
> > +     __u32 offs;
> > +};
> > +
> > +/* Callback function to remap individual type ID references */
> > +static int btf_permute_remap_type_id(__u32 *type_id, void *ctx)
> > +{
>
> [ ... ]
>
> > +int btf__permute(struct btf *btf, __u32 *id_map, __u32 id_map_cnt,
> > +              const struct btf_permute_opts *opts)
> > +{
> > +     struct btf_permute p;
> > +     struct btf_ext *btf_ext;
> > +     void *next_type, *end_type;
> > +     void *nt, *new_types = NULL;
> > +     int err = 0, n, i, new_type_len;
> > +     __u32 *order_map = NULL;
> > +     __u32 offs, id, new_nr_types = 0;
> > +
> > +     if (btf__base_btf(btf)) {
> > +             /*
> > +              * For split BTF, the number of types added on the
> > +              * top of base BTF
> > +              */
> > +             n = btf->nr_types;
> > +             offs = btf->start_id;
> > +     } else if (id_map[0] != 0) {
> > +             /* id_map[0] must be 0 for base BTF */
> > +             err = -EINVAL;
> > +             goto done;
> > +     } else {
> > +             /* include VOID type 0 for base BTF */
> > +             n = btf__type_cnt(btf);
> > +             offs = 0;
> > +     }
> > +
> > +     if (!OPTS_VALID(opts, btf_permute_opts) || (id_map_cnt != n))
> > +             return libbpf_err(-EINVAL);
> > +
> > +     /* used to record the storage sequence of types */
> > +     order_map = calloc(n, sizeof(*id_map));
> > +     if (!order_map) {
> > +             err = -ENOMEM;
> > +             goto done;
> > +     }
> > +
> > +     new_types = calloc(btf->hdr->type_len, 1);
> > +     if (!new_types) {
> > +             err = -ENOMEM;
> > +             goto done;
> > +     }
> > +
> > +     if (btf_ensure_modifiable(btf)) {
> > +             err = -ENOMEM;
> > +             goto done;
> > +     }
> > +
> > +     for (i = 0; i < id_map_cnt; i++) {
> > +             id = id_map[i];
> > +             /*
> > +              * 0: Drop the specified type (exclude base BTF type 0).
> > +              * For base BTF, type 0 is always preserved.
> > +              */
> > +             if (id == 0)
> > +                     continue;
> > +             /* Invalid id  */
> > +             if (id < btf->start_id || id >= btf__type_cnt(btf)) {
> > +                     err = -EINVAL;
> > +                     goto done;
> > +             }
> > +             id -= offs;
> > +             /* Multiple types cannot be mapped to the same ID */
> > +             if (order_map[id]) {
> > +                     err = -EINVAL;
> > +                     goto done;
> > +             }
> > +             order_map[id] = i + offs;
> > +             new_nr_types = max(id + 1, new_nr_types);
> > +     }
> > +
> > +     /* Check for missing IDs */
> > +     for (i = offs ? 0 : 1; i < new_nr_types; i++) {
> > +             if (order_map[i] == 0) {
> > +                     err = -EINVAL;
> > +                     goto done;
> > +             }
> > +     }
> > +
> > +     p.btf = btf;
> > +     p.id_map = id_map;
> > +     p.offs = offs;
> > +     nt = new_types;
> > +     for (i = offs ? 0 : 1; i < new_nr_types; i++) {
> > +             struct btf_field_iter it;
> > +             const struct btf_type *t;
> > +             __u32 *type_id;
> > +             int type_size;
> > +
> > +             id = order_map[i];
> > +             /* must be a valid type ID */
> > +             t = btf__type_by_id(btf, id);
> > +             if (!t) {
> > +                     err = -EINVAL;
> > +                     goto done;
> > +             }
> > +             type_size = btf_type_size(t);
> > +             memcpy(nt, t, type_size);
> > +
> > +             /* Fix up referenced IDs for BTF */
> > +             err = btf_field_iter_init(&it, nt, BTF_FIELD_ITER_IDS);
> > +             if (err)
> > +                     goto done;
> > +             while ((type_id = btf_field_iter_next(&it))) {
> > +                     err = btf_permute_remap_type_id(type_id, &p);
> > +                     if (err)
> > +                             goto done;
> > +             }
> > +
> > +             nt += type_size;
> > +     }
> > +
> > +     /* Fix up referenced IDs for btf_ext */
> > +     btf_ext = OPTS_GET(opts, btf_ext, NULL);
> > +     if (btf_ext) {
> > +             err = btf_ext_visit_type_ids(btf_ext, btf_permute_remap_type_id, &p);
> > +             if (err)
> > +                     goto done;
> > +     }
> > +
> > +     new_type_len = nt - new_types;
> > +     next_type = new_types;
> > +     end_type = next_type + new_type_len;
> > +     i = 0;
> > +     while (next_type + sizeof(struct btf_type) <= end_type) {
> > +             btf->type_offs[i++] = next_type - new_types;
> > +             next_type += btf_type_size(next_type);
> > +     }
> > +
> > +     /* Resize */
> > +     if (new_type_len < btf->hdr->type_len) {
> > +             void *tmp_types;
> > +
> > +             tmp_types = realloc(new_types, new_type_len);
> > +             if (new_type_len && !tmp_types) {
> > +                     err = -ENOMEM;
> > +                     goto done;
> > +             }
> > +             new_types = tmp_types;
> > +             btf->nr_types = new_nr_types - (offs ? 0 : 1);
> > +             btf->type_offs_cap = btf->nr_types;
> > +             btf->types_data_cap = new_type_len;
> > +             btf->hdr->type_len = new_type_len;
> > +             btf->hdr->str_off = new_type_len;
> > +             btf->raw_size = btf->hdr->hdr_len + btf->hdr->type_len + btf->hdr->str_len;
> > +     }
> > +     free(btf->types_data);
> > +     btf->types_data = new_types;
> > +     return 0;
>
> Does this leak order_map on the success path? It looks like order_map is
> allocated above and freed only in the error path at the done label, but
> the success path returns here without freeing it.

Thanks, I will fix it.
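
Probably the simplest way is to transfer ownership of new_types and let
the success path fall through to the cleanup label, something like this
(untested sketch):

	free(btf->types_data);
	btf->types_data = new_types;
	/* new_types is now owned by btf, don't free it below */
	new_types = NULL;
done:
	free(order_map);
	free(new_types);
	return libbpf_err(err);

libbpf_err(0) just returns 0 without touching errno, so the success
return value stays the same.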

>
> > +
> > +done:
> > +     free(order_map);
> > +     free(new_types);
> > +     return libbpf_err(err);
> > +}
>
> [ ... ]
>
>
> ---
> AI reviewed your patch. Please fix the bug or email reply why it's not a bug.
> See: https://github.com/kernel-patches/vmtest/blob/master/ci/claude/README.md
>
> CI run summary: https://github.com/kernel-patches/bpf/actions/runs/19359945665