Hi,
While checking check_ram_in_range_map() I am confused by the way bdr is set and checked.
If I am reading the code correctly, once bdr is set it never gets cleared, hence
the function will always return 0.
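
For reference, here is the current loop as I read it (reassembled here so the problem
is easier to see; please double-check against your tree), with the part that confuses
me commented:

static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	const struct bus_dma_region *bdr = NULL;	/* initialized only once */
	const struct bus_dma_region *m;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);

			if (start_pfn >= cpu_start_pfn &&
			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
				bdr = m;	/* kept across iterations */
				break;
			}
		}
		if (!bdr)	/* can only trigger before the first match */
			return 1;

		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}
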
Should we clear bdr before each new iteration?
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 5b4e6d3bf7bc..1605b956b25e 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -611,6 +611,7 @@ static int check_ram_in_range_map(unsigned long start_pfn,
 			return 1;
 
 		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
+		bdr = NULL;
 	}
 
 	return 0;
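
For context on why this matters: as far as I can tell check_ram_in_range_map() is only
used via walk_system_ram_range() from dma_direct_all_ram_mapped(), roughly:

bool dma_direct_all_ram_mapped(struct device *dev)
{
	if (!dev->dma_range_map)
		return true;
	/*
	 * walk_system_ram_range() stops and returns non-zero as soon as the
	 * callback returns non-zero, i.e. when some RAM chunk is not covered
	 * by the range map.
	 */
	return !walk_system_ram_range(0, PFN_DOWN(ULONG_MAX) + 1, dev,
				      check_ram_in_range_map);
}

so if check_ram_in_range_map() can no longer return 1 after the first match,
dma_direct_all_ram_mapped() ends up reporting that the range map covers all of
system RAM even when it does not.
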
On Mon, Dec 09, 2024 at 06:50:06PM +0800, Baochen Qiang wrote:
> Hi,
>
> While checking check_ram_in_range_map() I am confused by the way bdr is set and checked.
> If I am reading the code correctly, once bdr is set it never gets cleared, hence
> the function will always return 0.
>
> Should we clear bdr before each new iteration?
I think so. Even better, refactor the code so that the non-NULL bdr
doesn't leak out:
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 5b4e6d3bf7bc..181e244f410a 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -584,6 +584,22 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma_unencrypted(dev, min_mask);
 }
 
+static const struct bus_dma_region *dma_find_range(struct device *dev,
+		unsigned long start_pfn)
+{
+	const struct bus_dma_region *m;
+
+	for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
+		unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+
+		if (start_pfn >= cpu_start_pfn &&
+		    start_pfn - cpu_start_pfn < PFN_DOWN(m->size))
+			return m;
+	}
+
+	return NULL;
+}
+
 /*
  * To check whether all ram resource ranges are covered by dma range map
  * Returns 0 when further check is needed
@@ -593,23 +609,14 @@ static int check_ram_in_range_map(unsigned long start_pfn,
 				  unsigned long nr_pages, void *data)
 {
 	unsigned long end_pfn = start_pfn + nr_pages;
-	const struct bus_dma_region *bdr = NULL;
-	const struct bus_dma_region *m;
 	struct device *dev = data;
 
 	while (start_pfn < end_pfn) {
-		for (m = dev->dma_range_map; PFN_DOWN(m->size); m++) {
-			unsigned long cpu_start_pfn = PFN_DOWN(m->cpu_start);
+		const struct bus_dma_region *bdr;
 
-			if (start_pfn >= cpu_start_pfn &&
-			    start_pfn - cpu_start_pfn < PFN_DOWN(m->size)) {
-				bdr = m;
-				break;
-			}
-		}
+		bdr = dma_find_range(dev, start_pfn);
 		if (!bdr)
 			return 1;
-
 		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
 	}
 
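
For clarity, with the two hunks above applied check_ram_in_range_map() ends up as
follows (just the diff reassembled; bdr is now scoped to a single loop iteration):

static int check_ram_in_range_map(unsigned long start_pfn,
				  unsigned long nr_pages, void *data)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	struct device *dev = data;

	while (start_pfn < end_pfn) {
		const struct bus_dma_region *bdr;

		/* a miss in any iteration is now reported immediately */
		bdr = dma_find_range(dev, start_pfn);
		if (!bdr)
			return 1;
		start_pfn = PFN_DOWN(bdr->cpu_start) + PFN_DOWN(bdr->size);
	}

	return 0;
}
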
On 12/12/2024 3:14 PM, Christoph Hellwig wrote:
> On Mon, Dec 09, 2024 at 06:50:06PM +0800, Baochen Qiang wrote:
>> Hi,
>>
>> While checking check_ram_in_range_map() I am confused by the way bdr is set and checked.
>> If I am reading the code correctly, once bdr is set it never gets cleared, hence
>> the function will always return 0.
>>
>> Should we clear bdr before each new iteration?
>
> I think so. Even better, refactor the code so that the non-NULL bdr
> doesn't leak out:
>
> [refactor diff snipped]
Looks better, thanks for the reply.
Hi,
On 20.12.2024 04:25, Baochen Qiang wrote:
> On 12/12/2024 3:14 PM, Christoph Hellwig wrote:
>> On Mon, Dec 09, 2024 at 06:50:06PM +0800, Baochen Qiang wrote:
>>> Hi,
>>>
>>> While checking check_ram_in_range_map() I am confused by the way bdr is set and checked.
>>> If I am reading the code correctly, once bdr is set it never gets cleared, hence
>>> the function will always return 0.
>>>
>>> Should we clear bdr before each new iteration?
>> I think so. Even better, refactor the code so that the non-NULL bdr
>> doesn't leak out:
>>
>> [refactor diff snipped]
> Looks better, thanks for the reply.
Could you send a formal patch with this fix, a description, and a
'Suggested-by: Christoph Hellwig' tag?
Best regards
--
Marek Szyprowski, PhD
Samsung R&D Institute Poland
On 2/28/2025 9:22 PM, Marek Szyprowski wrote:
> Hi,
>
> On 20.12.2024 04:25, Baochen Qiang wrote:
>> On 12/12/2024 3:14 PM, Christoph Hellwig wrote:
>>> On Mon, Dec 09, 2024 at 06:50:06PM +0800, Baochen Qiang wrote:
>>>> Hi,
>>>>
>>>> While checking check_ram_in_range_map() I am confused by the way bdr is set and checked.
>>>> If I am reading the code correctly, once bdr is set it never gets cleared, hence
>>>> the function will always return 0.
>>>>
>>>> Should we clear bdr before each new iteration?
>>> I think so. Even better, refactor the code so that the non-NULL bdr
>>> doesn't leak out:
>>>
>>> [refactor diff snipped]
>> Looks better, thanks for the reply.
>
> Could you send a formal patch with this fix, a description, and a
> 'Suggested-by: Christoph Hellwig' tag?
Sure, will submit one soon.
>
> Best regards