Add a generic iomem abstraction to safely read and write ioremapped
regions.

The reads and writes are done through IoRaw, and are thus checked either
at compile time, if the size of the region is known at that point, or at
runtime otherwise.

Non-exclusive access to the underlying memory region is made possible to
cater to cases where overlapping regions are unavoidable.
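
As an illustration, a crate-internal user could look roughly like the
sketch below (the constructor is pub(crate), so it is meant to be called
from bus abstractions rather than drivers directly). This is only a sketch:
map_example is a hypothetical helper, how `dev` and `res` are obtained is
bus-specific and not part of this patch, and the resource is assumed to be
at least 0x100 bytes for the compile-time-checked example.

use crate::device::Device;
use crate::devres::Devres;
use crate::io::mem::IoMem;
use crate::io::resource::Resource;
use crate::prelude::*;

fn map_example(dev: &Device, res: &Resource) -> Result {
    // Runtime-checked mapping (SIZE = 0) with exclusive access (the
    // defaults): request_mem_region() + ioremap() under the hood.
    let iomem: Devres<IoMem> = IoMem::new(res, dev)?;

    // Accesses go through `Io` via `Deref`; offsets are bounds-checked
    // against the mapped size at runtime.
    let guard = iomem.try_access().ok_or(ENXIO)?;
    let _id = guard.try_read32(0x0)?;

    // A compile-time-checked, non-exclusive mapping of a region known to
    // be at least 0x100 bytes; no request_mem_region() is done here.
    let _shared: Devres<IoMem<0x100, false>> = IoMem::new(res, dev)?;

    Ok(())
}
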
Signed-off-by: Daniel Almeida <daniel.almeida@collabora.com>
---
rust/helpers/io.c | 10 +++++
rust/kernel/io.rs | 1 +
rust/kernel/io/mem.rs | 98 +++++++++++++++++++++++++++++++++++++++++++
3 files changed, 109 insertions(+)
create mode 100644 rust/kernel/io/mem.rs
diff --git a/rust/helpers/io.c b/rust/helpers/io.c
index 3cb47bd01942..cb10060c08ae 100644
--- a/rust/helpers/io.c
+++ b/rust/helpers/io.c
@@ -106,3 +106,13 @@ resource_size_t rust_helper_resource_size(struct resource *res)
return resource_size(res);
}

+struct resource *rust_helper_request_mem_region(resource_size_t start, resource_size_t n,
+ const char *name)
+{
+ return request_mem_region(start, n, name);
+}
+
+void rust_helper_release_mem_region(resource_size_t start, resource_size_t n)
+{
+ release_mem_region(start, n);
+}
diff --git a/rust/kernel/io.rs b/rust/kernel/io.rs
index 566d8b177e01..9ce3482b5ecd 100644
--- a/rust/kernel/io.rs
+++ b/rust/kernel/io.rs
@@ -7,6 +7,7 @@
use crate::error::{code::EINVAL, Result};
use crate::{bindings, build_assert};

+pub mod mem;
pub mod resource;

/// Raw representation of an MMIO region.
diff --git a/rust/kernel/io/mem.rs b/rust/kernel/io/mem.rs
new file mode 100644
index 000000000000..a287dc0898e0
--- /dev/null
+++ b/rust/kernel/io/mem.rs
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Generic memory-mapped IO.
+
+use core::ops::Deref;
+
+use crate::device::Device;
+use crate::devres::Devres;
+use crate::io::resource::Resource;
+use crate::io::Io;
+use crate::io::IoRaw;
+use crate::prelude::*;
+
+/// A generic memory-mapped IO region.
+///
+/// Accesses to the underlying region are checked either at compile time, if the
+/// region's size is known at that point, or at runtime otherwise.
+///
+/// Whether `IoMem` represents an exclusive access to the underlying memory
+/// region is determined by the caller at creation time, as overlapping access
+/// may be needed in some cases.
+///
+/// # Invariants
+///
+/// `IoMem` always holds an `IoRaw` instance that holds a valid pointer to the
+/// start of the I/O memory-mapped region.
+pub struct IoMem<const SIZE: usize = 0, const EXCLUSIVE: bool = true> {
+ io: IoRaw<SIZE>,
+ res_start: u64,
+}
+
+impl<const SIZE: usize, const EXCLUSIVE: bool> IoMem<SIZE, EXCLUSIVE> {
+ /// Creates a new `IoMem` instance.
+ pub(crate) fn new(resource: &Resource, device: &Device) -> Result<Devres<Self>> {
+ let size = resource.size();
+ if size == 0 {
+ return Err(EINVAL);
+ }
+
+ let res_start = resource.start();
+
+ if EXCLUSIVE {
+ // SAFETY:
+ // - `res_start` and `size` are read from a presumably valid `struct resource`.
+ // - `size` is known not to be zero at this point.
+ // - `resource.name()` returns a valid C string.
+ let mem_region = unsafe {
+ bindings::request_mem_region(res_start, size, resource.name().as_char_ptr())
+ };
+
+ if mem_region.is_null() {
+ return Err(EBUSY);
+ }
+ }
+
+ // SAFETY:
+ // - `res_start` and `size` are read from a presumably valid `struct resource`.
+ // - `size` is known not to be zero at this point.
+ let addr = unsafe { bindings::ioremap(res_start, size as kernel::ffi::c_ulong) };
+ if addr.is_null() {
+ if EXCLUSIVE {
+ // SAFETY:
+ // - `res_start` and `size` are read from a presumably valid `struct resource`.
+ // - `size` is the same as the one passed to `request_mem_region`.
+ unsafe { bindings::release_mem_region(res_start, size) };
+ }
+ return Err(ENOMEM);
+ }
+
+ let io = IoRaw::new(addr as usize, size as usize)?;
+ let io = IoMem { io, res_start };
+ let devres = Devres::new(device, io, GFP_KERNEL)?;
+
+ Ok(devres)
+ }
+}
+
+impl<const SIZE: usize, const EXCLUSIVE: bool> Drop for IoMem<SIZE, EXCLUSIVE> {
+ fn drop(&mut self) {
+ if EXCLUSIVE {
+ // SAFETY: `res_start` and `io.maxsize()` were the values passed to
+ // `request_mem_region`.
+ unsafe { bindings::release_mem_region(self.res_start, self.io.maxsize() as u64) }
+ }
+
+ // SAFETY: Safe as per the invariant of `IoMem`.
+ unsafe { bindings::iounmap(self.io.addr() as *mut core::ffi::c_void) }
+ }
+}
+
+impl<const SIZE: usize, const EXCLUSIVE: bool> Deref for IoMem<SIZE, EXCLUSIVE> {
+ type Target = Io<SIZE>;
+
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: Safe as per the invariant of `IoMem`.
+ unsafe { Io::from_raw(&self.io) }
+ }
+}
--
2.48.0
Hi Daniel,

On 16.01.25 1:56 PM, Daniel Almeida wrote:

[...]

> +pub struct IoMem<const SIZE: usize = 0, const EXCLUSIVE: bool = true> {
> +    io: IoRaw<SIZE>,
> +    res_start: u64,

Please use bindings::resource_size_t here to be compatible with 32-bit
architectures.

[...]

> +        if EXCLUSIVE {
> +            // SAFETY: `res_start` and `io.maxsize()` were the values passed to
> +            // `request_mem_region`.
> +            unsafe { bindings::release_mem_region(self.res_start, self.io.maxsize() as u64) }

Please use bindings::resource_size_t here as well.

[...]

Cheers,
Christian
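
For reference, applying the suggestion would look roughly like the sketch
below. It only covers the two spots pointed out above and assumes the same
imports as mem.rs; the constructor would likely need a matching cast as
well, depending on the type returned by Resource::start().

// Store the region start as resource_size_t so the field and the size cast
// in drop() match the C prototypes of request_mem_region() and
// release_mem_region() on both 32-bit and 64-bit architectures.
pub struct IoMem<const SIZE: usize = 0, const EXCLUSIVE: bool = true> {
    io: IoRaw<SIZE>,
    res_start: bindings::resource_size_t,
}

impl<const SIZE: usize, const EXCLUSIVE: bool> Drop for IoMem<SIZE, EXCLUSIVE> {
    fn drop(&mut self) {
        if EXCLUSIVE {
            // SAFETY: `res_start` and `io.maxsize()` were the values passed to
            // `request_mem_region`.
            unsafe {
                bindings::release_mem_region(
                    self.res_start,
                    self.io.maxsize() as bindings::resource_size_t,
                )
            };
        }

        // SAFETY: Safe as per the invariant of `IoMem`: `io.addr()` comes from
        // a successful `ioremap()` call.
        unsafe { bindings::iounmap(self.io.addr() as *mut core::ffi::c_void) };
    }
}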