The "mremap() shrinking" scenario no longer applies, so let's remove
that now-unnecessary handling.
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Acked-by: Ingo Molnar <mingo@kernel.org> # x86 bits
Signed-off-by: David Hildenbrand <david@redhat.com>
---
arch/x86/mm/pat/memtype_interval.c | 44 ++++--------------------------
1 file changed, 6 insertions(+), 38 deletions(-)
diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
index 645613d59942a..9d03f0dbc4715 100644
--- a/arch/x86/mm/pat/memtype_interval.c
+++ b/arch/x86/mm/pat/memtype_interval.c
@@ -49,26 +49,15 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
-enum {
- MEMTYPE_EXACT_MATCH = 0,
- MEMTYPE_END_MATCH = 1
-};
-
-static struct memtype *memtype_match(u64 start, u64 end, int match_type)
+static struct memtype *memtype_match(u64 start, u64 end)
{
struct memtype *entry_match;
entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
while (entry_match != NULL && entry_match->start < end) {
- if ((match_type == MEMTYPE_EXACT_MATCH) &&
- (entry_match->start == start) && (entry_match->end == end))
- return entry_match;
-
- if ((match_type == MEMTYPE_END_MATCH) &&
- (entry_match->start < start) && (entry_match->end == end))
+ if (entry_match->start == start && entry_match->end == end)
return entry_match;
-
entry_match = interval_iter_next(entry_match, start, end-1);
}
@@ -132,32 +121,11 @@ struct memtype *memtype_erase(u64 start, u64 end)
{
struct memtype *entry_old;
- /*
- * Since the memtype_rbroot tree allows overlapping ranges,
- * memtype_erase() checks with EXACT_MATCH first, i.e. free
- * a whole node for the munmap case. If no such entry is found,
- * it then checks with END_MATCH, i.e. shrink the size of a node
- * from the end for the mremap case.
- */
- entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
- if (!entry_old) {
- entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
- if (!entry_old)
- return ERR_PTR(-EINVAL);
- }
-
- if (entry_old->start == start) {
- /* munmap: erase this node */
- interval_remove(entry_old, &memtype_rbroot);
- } else {
- /* mremap: update the end value of this node */
- interval_remove(entry_old, &memtype_rbroot);
- entry_old->end = start;
- interval_insert(entry_old, &memtype_rbroot);
-
- return NULL;
- }
+ entry_old = memtype_match(start, end);
+ if (!entry_old)
+ return ERR_PTR(-EINVAL);
+ interval_remove(entry_old, &memtype_rbroot);
return entry_old;
}
--
2.49.0
* David Hildenbrand <david@redhat.com> [250512 08:34]:
> The "mremap() shrinking" scenario no longer applies, so let's remove
> that now-unnecessary handling.
>
> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
> Acked-by: Ingo Molnar <mingo@kernel.org> # x86 bits
> Signed-off-by: David Hildenbrand <david@redhat.com>
small comment, but this looks good.
Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> ---
> arch/x86/mm/pat/memtype_interval.c | 44 ++++--------------------------
> 1 file changed, 6 insertions(+), 38 deletions(-)
>
> diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
> index 645613d59942a..9d03f0dbc4715 100644
> --- a/arch/x86/mm/pat/memtype_interval.c
> +++ b/arch/x86/mm/pat/memtype_interval.c
> @@ -49,26 +49,15 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
>
> static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
>
> -enum {
> - MEMTYPE_EXACT_MATCH = 0,
> - MEMTYPE_END_MATCH = 1
> -};
> -
> -static struct memtype *memtype_match(u64 start, u64 end, int match_type)
> +static struct memtype *memtype_match(u64 start, u64 end)
> {
> struct memtype *entry_match;
>
> entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
>
> while (entry_match != NULL && entry_match->start < end) {
I think this could use interval_tree_for_each_span() instead.
> - if ((match_type == MEMTYPE_EXACT_MATCH) &&
> - (entry_match->start == start) && (entry_match->end == end))
> - return entry_match;
> -
> - if ((match_type == MEMTYPE_END_MATCH) &&
> - (entry_match->start < start) && (entry_match->end == end))
> + if (entry_match->start == start && entry_match->end == end)
> return entry_match;
> -
> entry_match = interval_iter_next(entry_match, start, end-1);
> }
>
> @@ -132,32 +121,11 @@ struct memtype *memtype_erase(u64 start, u64 end)
> {
> struct memtype *entry_old;
>
> - /*
> - * Since the memtype_rbroot tree allows overlapping ranges,
> - * memtype_erase() checks with EXACT_MATCH first, i.e. free
> - * a whole node for the munmap case. If no such entry is found,
> - * it then checks with END_MATCH, i.e. shrink the size of a node
> - * from the end for the mremap case.
> - */
> - entry_old = memtype_match(start, end, MEMTYPE_EXACT_MATCH);
> - if (!entry_old) {
> - entry_old = memtype_match(start, end, MEMTYPE_END_MATCH);
> - if (!entry_old)
> - return ERR_PTR(-EINVAL);
> - }
> -
> - if (entry_old->start == start) {
> - /* munmap: erase this node */
> - interval_remove(entry_old, &memtype_rbroot);
> - } else {
> - /* mremap: update the end value of this node */
> - interval_remove(entry_old, &memtype_rbroot);
> - entry_old->end = start;
> - interval_insert(entry_old, &memtype_rbroot);
> -
> - return NULL;
> - }
> + entry_old = memtype_match(start, end);
> + if (!entry_old)
> + return ERR_PTR(-EINVAL);
>
> + interval_remove(entry_old, &memtype_rbroot);
> return entry_old;
> }
>
> --
> 2.49.0
>
On 13.05.25 19:48, Liam R. Howlett wrote:
> * David Hildenbrand <david@redhat.com> [250512 08:34]:
>> The "mremap() shrinking" scenario no longer applies, so let's remove
>> that now-unnecessary handling.
>>
>> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>> Acked-by: Ingo Molnar <mingo@kernel.org> # x86 bits
>> Signed-off-by: David Hildenbrand <david@redhat.com>
>
> small comment, but this looks good.
>
> Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Thanks!
>
>> ---
>> arch/x86/mm/pat/memtype_interval.c | 44 ++++--------------------------
>> 1 file changed, 6 insertions(+), 38 deletions(-)
>>
>> diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
>> index 645613d59942a..9d03f0dbc4715 100644
>> --- a/arch/x86/mm/pat/memtype_interval.c
>> +++ b/arch/x86/mm/pat/memtype_interval.c
>> @@ -49,26 +49,15 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
>>
>> static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
>>
>> -enum {
>> - MEMTYPE_EXACT_MATCH = 0,
>> - MEMTYPE_END_MATCH = 1
>> -};
>> -
>> -static struct memtype *memtype_match(u64 start, u64 end, int match_type)
>> +static struct memtype *memtype_match(u64 start, u64 end)
>> {
>> struct memtype *entry_match;
>>
>> entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
>>
>> while (entry_match != NULL && entry_match->start < end) {
>
> I think this could use interval_tree_for_each_span() instead.
Fancy, let me look at this. Probably I'll send another patch on top of
this series to do that conversion. (as you found, patch #9 moves that code)
--
Cheers,
David / dhildenb
On 14.05.25 19:53, David Hildenbrand wrote:
> On 13.05.25 19:48, Liam R. Howlett wrote:
>> * David Hildenbrand <david@redhat.com> [250512 08:34]:
>>> The "mremap() shrinking" scenario no longer applies, so let's remove
>>> that now-unnecessary handling.
>>>
>>> Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
>>> Acked-by: Ingo Molnar <mingo@kernel.org> # x86 bits
>>> Signed-off-by: David Hildenbrand <david@redhat.com>
>>
>> small comment, but this looks good.
>>
>> Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
>
> Thanks!
>
>>
>>> ---
>>> arch/x86/mm/pat/memtype_interval.c | 44 ++++--------------------------
>>> 1 file changed, 6 insertions(+), 38 deletions(-)
>>>
>>> diff --git a/arch/x86/mm/pat/memtype_interval.c b/arch/x86/mm/pat/memtype_interval.c
>>> index 645613d59942a..9d03f0dbc4715 100644
>>> --- a/arch/x86/mm/pat/memtype_interval.c
>>> +++ b/arch/x86/mm/pat/memtype_interval.c
>>> @@ -49,26 +49,15 @@ INTERVAL_TREE_DEFINE(struct memtype, rb, u64, subtree_max_end,
>>>
>>> static struct rb_root_cached memtype_rbroot = RB_ROOT_CACHED;
>>>
>>> -enum {
>>> - MEMTYPE_EXACT_MATCH = 0,
>>> - MEMTYPE_END_MATCH = 1
>>> -};
>>> -
>>> -static struct memtype *memtype_match(u64 start, u64 end, int match_type)
>>> +static struct memtype *memtype_match(u64 start, u64 end)
>>> {
>>> struct memtype *entry_match;
>>>
>>> entry_match = interval_iter_first(&memtype_rbroot, start, end-1);
>>>
>>> while (entry_match != NULL && entry_match->start < end) {
>>
>> I think this could use interval_tree_for_each_span() instead.
>
> Fancy, let me look at this. Probably I'll send another patch on top of
> this series to do that conversion. (as you found, patch #9 moves that code)
Hmmm, I think interval_tree_for_each_span() does not apply here.
Unless I am missing something important, interval_tree_for_each_span()
does not work in combination with INTERVAL_TREE_DEFINE where we want to
use a custom type as tree nodes (-> struct memtype).
interval_tree_for_each_span() only works with the basic "struct
interval_tree_node" implementation ... which is probably also why there
are only a handful (3) of interval_tree_for_each_span() users, all in
iommufd context?
But staring at interval_tree.h vs. interval_tree_generic.h, I am a bit
confused ...
--
Cheers,
David / dhildenb
© 2016 - 2026 Red Hat, Inc.