If the ttm bo is backed by pages, then it's possible to safely kmap
one page at a time, using kmap_local_page_try_from_panic().
Unfortunately there is no way to do the same with ioremap, so it
only supports the kmap case.
This is needed for proper drm_panic support with the xe driver.
Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
---
v8:
* Added in v8
drivers/gpu/drm/ttm/ttm_bo_util.c | 27 +++++++++++++++++++++++++++
include/drm/ttm/ttm_bo.h | 1 +
2 files changed, 28 insertions(+)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15cab9bda17f..9c3f3b379c2a 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -377,6 +377,33 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
return (!map->virtual) ? -ENOMEM : 0;
}
+/**
+ * ttm_bo_kmap_try_from_panic - map one page of a buffer object from the panic handler
+ *
+ *
+ * @bo: The buffer object
+ * @page: The page to map
+ *
+ * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
+ * This can only be called from the panic handler, if you make sure the bo
+ * is the one being displayed, so is properly allocated, and won't be modified.
+ *
+ * Returns the vaddr, that you can use to write to the bo, and that you should
+ * pass to kunmap_local() when you're done with this page, or NULL if the bo
+ * is in iomem.
+ */
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
+{
+ if (page + 1 > PFN_UP(bo->resource->size))
+ return NULL;
+
+ if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
+ return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
+
+ return NULL;
+}
+EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
+
/**
* ttm_bo_kmap
*
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index cf027558b6db..8c0ce3fa077f 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -429,6 +429,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
--
2.49.0
Hi Jocelyn,
kernel test robot noticed the following build warnings:
[auto build test WARNING on 7247efca0dcbc8ac6147db9200ed1549c0662465]
url: https://github.com/intel-lab-lkp/linux/commits/Jocelyn-Falempe/drm-i915-fbdev-Add-intel_fbdev_get_map/20250606-200804
base: 7247efca0dcbc8ac6147db9200ed1549c0662465
patch link: https://lore.kernel.org/r/20250606120519.753928-5-jfalempe%40redhat.com
patch subject: [PATCH v8 4/9] drm/ttm: Add ttm_bo_kmap_try_from_panic()
config: x86_64-buildonly-randconfig-001-20250607 (https://download.01.org/0day-ci/archive/20250607/202506070340.P5oZoRwh-lkp@intel.com/config)
compiler: clang version 20.1.2 (https://github.com/llvm/llvm-project 58df0ef89dd64126512e4ee27b4ac3fd8ddf6247)
rustc: rustc 1.78.0 (9b00956e5 2024-04-29)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20250607/202506070340.P5oZoRwh-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202506070340.P5oZoRwh-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> drivers/gpu/drm/ttm/ttm_bo_util.c:381: warning: Cannot understand *
on line 381 - I thought it was a doc line
vim +381 drivers/gpu/drm/ttm/ttm_bo_util.c
379
380 /**
> 381 *
382 * ttm_bo_kmap_try_from_panic
383 *
384 * @bo: The buffer object
385 * @page: The page to map
386 *
387 * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
388 * This can safely be called from the panic handler, if you make sure the bo
389 * is the one being displayed, so is properly allocated, and won't be modified.
390 *
391 * Returns the vaddr, that you can use to write to the bo, and that you should
392 * pass to kunmap_local() when you're done with this page, or NULL if the bo
393 * is in iomem.
394 */
395 void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
396 {
397 if (page + 1 > PFN_UP(bo->resource->size))
398 return NULL;
399
400 if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
401 return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
402
403 return NULL;
404 }
405 EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
406
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
On 6/6/25 13:48, Jocelyn Falempe wrote:
> If the ttm bo is backed by pages, then it's possible to safely kmap
> one page at a time, using kmap_try_from_panic().
I strongly assume that we don't care about locking anything in this case, don't we?
> Unfortunately there is no way to do the same with ioremap, so it
> only supports the kmap case.
Oh, there actually is on most modern systems.
At least on 64bit systems amdgpu maps the whole VRAM BAR into kernel address space on driver load.
So as long as you have a large BAR system you can trivially have access to the MMIO memory.
> This is needed for proper drm_panic support with xe driver.
>
> Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
> ---
>
> v8:
> * Added in v8
>
> drivers/gpu/drm/ttm/ttm_bo_util.c | 27 +++++++++++++++++++++++++++
> include/drm/ttm/ttm_bo.h | 1 +
> 2 files changed, 28 insertions(+)
>
> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
> index 15cab9bda17f..9c3f3b379c2a 100644
> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
> @@ -377,6 +377,33 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
> return (!map->virtual) ? -ENOMEM : 0;
> }
>
> +/**
> + *
> + * ttm_bo_kmap_try_from_panic
> + *
> + * @bo: The buffer object
> + * @page: The page to map
> + *
> + * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
> + * This can safely be called from the panic handler, if you make sure the bo
"This can *only* be called from the panic handler..."
Apart from those open questions, looks sane to me.
Regards,
Christian.
> + * is the one being displayed, so is properly allocated, and won't be modified.
> + *
> + * Returns the vaddr, that you can use to write to the bo, and that you should
> + * pass to kunmap_local() when you're done with this page, or NULL if the bo
> + * is in iomem.
> + */
> +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
> +{
> + if (page + 1 > PFN_UP(bo->resource->size))
> + return NULL;
> +
> + if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
> + return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
> +
> + return NULL;
> +}
> +EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
> +
> /**
> * ttm_bo_kmap
> *
> diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
> index cf027558b6db..8c0ce3fa077f 100644
> --- a/include/drm/ttm/ttm_bo.h
> +++ b/include/drm/ttm/ttm_bo.h
> @@ -429,6 +429,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
> int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
> unsigned long num_pages, struct ttm_bo_kmap_obj *map);
> void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
> +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
> int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
> void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
> int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
On 06/06/2025 14:28, Christian König wrote:
> On 6/6/25 13:48, Jocelyn Falempe wrote:
>> If the ttm bo is backed by pages, then it's possible to safely kmap
>> one page at a time, using kmap_try_from_panic().
>
> I strongly assume that we don't care about locking anything in this case, don't we?
Yes, normally it's called for the current framebuffer, so I assume it's
properly allocated, and isn't growing/shrinking while being displayed.
>
>> Unfortunately there is no way to do the same with ioremap, so it
>> only supports the kmap case.
>
> Oh, there actually is on most modern systems.
>
> At least on 64bit systems amdgpu maps the whole VRAM BAR into kernel address space on driver load.
>
> So as long as you have a large BAR system you can trivially have access to the MMIO memory.
For amdgpu, I used the indirect MMIO access, so I didn't need to ioremap
https://elixir.bootlin.com/linux/v6.15/source/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c#L1800
For the xe driver, I only tested on integrated GPU, using system RAM, so
this first approach is good enough.
But I'm still interested to find a solution, is there a way to get the
current io-mapping if it exists?
>
>> This is needed for proper drm_panic support with xe driver.
>>
>> Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
>> ---
>>
>> v8:
>> * Added in v8
>>
>> drivers/gpu/drm/ttm/ttm_bo_util.c | 27 +++++++++++++++++++++++++++
>> include/drm/ttm/ttm_bo.h | 1 +
>> 2 files changed, 28 insertions(+)
>>
>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
>> index 15cab9bda17f..9c3f3b379c2a 100644
>> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
>> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
>> @@ -377,6 +377,33 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
>> return (!map->virtual) ? -ENOMEM : 0;
>> }
>>
>> +/**
>> + *
>> + * ttm_bo_kmap_try_from_panic
>> + *
>> + * @bo: The buffer object
>> + * @page: The page to map
>> + *
>> + * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
>> + * This can safely be called from the panic handler, if you make sure the bo
>
> "This can *only* be called from the panic handler..."
Yes, I will fix that, it shouldn't be called for normal operations.
>
> Apart from those open questions, looks sane to me.
>
> Regards,
> Christian.
>
>> + * is the one being displayed, so is properly allocated, and won't be modified.
>> + *
>> + * Returns the vaddr, that you can use to write to the bo, and that you should
>> + * pass to kunmap_local() when you're done with this page, or NULL if the bo
>> + * is in iomem.
>> + */
>> +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
>> +{
>> + if (page + 1 > PFN_UP(bo->resource->size))
>> + return NULL;
>> +
>> + if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
>> + return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
>> +
>> + return NULL;
>> +}
>> +EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
>> +
>> /**
>> * ttm_bo_kmap
>> *
>> diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
>> index cf027558b6db..8c0ce3fa077f 100644
>> --- a/include/drm/ttm/ttm_bo.h
>> +++ b/include/drm/ttm/ttm_bo.h
>> @@ -429,6 +429,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
>> int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
>> unsigned long num_pages, struct ttm_bo_kmap_obj *map);
>> void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
>> +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
>> int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
>> void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
>> int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
>
On 6/6/25 15:14, Jocelyn Falempe wrote:
> On 06/06/2025 14:28, Christian König wrote:
>> On 6/6/25 13:48, Jocelyn Falempe wrote:
>>> If the ttm bo is backed by pages, then it's possible to safely kmap
>>> one page at a time, using kmap_try_from_panic().
>>
>> I strongly assume that we don't care about locking anything in this case, don't we?
>
> Yes, normally it's called for the current framebuffer, so I assume it's properly allocated, and isn't growing/shrinking while being displayed.
>
>>
>>> Unfortunately there is no way to do the same with ioremap, so it
>>> only supports the kmap case.
>>
>> Oh, there actually is on most modern systems.
>>
>> At least on 64bit systems amdgpu maps the whole VRAM BAR into kernel address space on driver load.
>>
>> So as long as you have a large BAR system you can trivially have access to the MMIO memory.
>
> For amdgpu, I used the indirect MMIO access, so I didn't need to ioremap
> https://elixir.bootlin.com/linux/v6.15/source/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c#L1800
Good point. That is probably quite slow, but works under really all circumstances as long as the device hasn't fallen of the bus.
> For the xe driver, I only tested on integrated GPU, using system RAM, so this first approach is good enough.
> But I'm still interested to find a solution, is there a way to get the current io-mapping if it exists?
You need to ask that the XE guys. There is TTMs bdev->funcs->access_memory() callback which should allow doing that, but I have no idea how that is implemented for XE.
Regards,
Christian.
>
>
>>
>>> This is needed for proper drm_panic support with xe driver.
>>>
>>> Signed-off-by: Jocelyn Falempe <jfalempe@redhat.com>
>>> ---
>>>
>>> v8:
>>> * Added in v8
>>>
>>> drivers/gpu/drm/ttm/ttm_bo_util.c | 27 +++++++++++++++++++++++++++
>>> include/drm/ttm/ttm_bo.h | 1 +
>>> 2 files changed, 28 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
>>> index 15cab9bda17f..9c3f3b379c2a 100644
>>> --- a/drivers/gpu/drm/ttm/ttm_bo_util.c
>>> +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
>>> @@ -377,6 +377,33 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
>>> return (!map->virtual) ? -ENOMEM : 0;
>>> }
>>> +/**
>>> + *
>>> + * ttm_bo_kmap_try_from_panic
>>> + *
>>> + * @bo: The buffer object
>>> + * @page: The page to map
>>> + *
>>> + * Sets up a kernel virtual mapping using kmap_local_page_try_from_panic().
>>> + * This can safely be called from the panic handler, if you make sure the bo
>>
>> "This can *only* be called from the panic handler..."
>
> Yes, I will fix that, it shouldn't be called for normal operations.
>
>>
>> Apart from those open questions, looks sane to me.
>>
>> Regards,
>> Christian.
>>
>>> + * is the one being displayed, so is properly allocated, and won't be modified.
>>> + *
>>> + * Returns the vaddr, that you can use to write to the bo, and that you should
>>> + * pass to kunmap_local() when you're done with this page, or NULL if the bo
>>> + * is in iomem.
>>> + */
>>> +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page)
>>> +{
>>> + if (page + 1 > PFN_UP(bo->resource->size))
>>> + return NULL;
>>> +
>>> + if (!bo->resource->bus.is_iomem && bo->ttm->pages && bo->ttm->pages[page])
>>> + return kmap_local_page_try_from_panic(bo->ttm->pages[page]);
>>> +
>>> + return NULL;
>>> +}
>>> +EXPORT_SYMBOL(ttm_bo_kmap_try_from_panic);
>>> +
>>> /**
>>> * ttm_bo_kmap
>>> *
>>> diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
>>> index cf027558b6db..8c0ce3fa077f 100644
>>> --- a/include/drm/ttm/ttm_bo.h
>>> +++ b/include/drm/ttm/ttm_bo.h
>>> @@ -429,6 +429,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
>>> int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
>>> unsigned long num_pages, struct ttm_bo_kmap_obj *map);
>>> void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
>>> +void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
>>> int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
>>> void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
>>> int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
>>
>
© 2016 - 2025 Red Hat, Inc.