Most of this patch series has already been pushed upstream, this is just the second half of the patch series that has not been pushed yet + some additional changes which were required to implement changes requested by the mailing list. This patch series is originally from Asahi, previously posted by Daniel Almeida.
The previous version of the patch series can be found here:
https://patchwork.freedesktop.org/series/164580/
A branch with the patches applied is available here, to make sure this builds:
https://gitlab.freedesktop.org/lyudess/linux/-/commits/rust/gem-shmem
This patch series applies on top of drm-rust-next
Lyude Paul (5): rust: drm: gem: s/device::Device/Device/ for shmem.rs drm/gem/shmem: Introduce __drm_gem_shmem_free_sgt_locked() rust: drm: gem/shmem: Add DmaResvGuard helper rust: drm: gem: Introduce shmem::SGTable rust: drm: gem: Add vmap functions to shmem bindings
drivers/gpu/drm/drm_gem_shmem_helper.c | 32 +- include/drm/drm_gem_shmem_helper.h | 1 + rust/kernel/drm/gem/shmem.rs | 602 ++++++++++++++++++++++++- 3 files changed, 614 insertions(+), 21 deletions(-)
base-commit: d9a6809478f9815b6455a327aa001737ac7b2c09
We're about to start explicitly mentioning kernel devices as well in this file, so this makes it easier to differentiate the two by allowing us to import `device` as `kernel::device`.
Signed-off-by: Lyude Paul lyude@redhat.com Reviewed-by: Alexandre Courbot acourbot@nvidia.com
--- V11: * Fix location of //
rust/kernel/drm/gem/shmem.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs index e1b648920d2f6..35d7523e164ff 100644 --- a/rust/kernel/drm/gem/shmem.rs +++ b/rust/kernel/drm/gem/shmem.rs @@ -12,10 +12,10 @@ use crate::{ container_of, drm::{ - device, driver, gem, - private::Sealed, // + private::Sealed, + Device, // }, error::to_result, prelude::*, @@ -106,7 +106,7 @@ fn as_raw_shmem(&self) -> *mut bindings::drm_gem_shmem_object { /// /// Additional config options can be specified using `config`. pub fn new( - dev: &device::Device<T::Driver>, + dev: &Device<T::Driver>, size: usize, config: ObjectConfig<'_, T>, args: T::Args, @@ -148,9 +148,9 @@ pub fn new( }
/// Returns the `Device` that owns this GEM object. - pub fn dev(&self) -> &device::Device<T::Driver> { + pub fn dev(&self) -> &Device<T::Driver> { // SAFETY: `dev` will have been initialized in `Self::new()` by `drm_gem_shmem_init()`. - unsafe { device::Device::from_raw((*self.as_raw()).dev) } + unsafe { Device::from_raw((*self.as_raw()).dev) } }
extern "C" fn free_callback(obj: *mut bindings::drm_gem_object) {
On Tue, Apr 28, 2026 at 03:03:41PM -0400, Lyude Paul wrote:
We're about to start explicitly mentioning kernel devices as well in this file, so this makes it easier to differentiate the two by allowing us to import `device` as `kernel::device`.
Signed-off-by: Lyude Paul lyude@redhat.com Reviewed-by: Alexandre Courbot acourbot@nvidia.com
Reviewed-by: Alice Ryhl aliceryhl@google.com
One of the complications of trying to use the shmem helpers to create a scatterlist for shmem objects is that we need to be able to provide a guarantee that the driver cannot be unbound for the lifetime of the scatterlist.
The easiest way of handling this seems to be just hooking up an unmap operation to devres the first time we create a scatterlist, which allows us to still take advantage of gem shmem facilities without breaking that guarantee. To allow for this, we extract __drm_gem_shmem_free_sgt_locked() - which allows a caller (e.g. the rust bindings) to manually unmap the sgt for a gem object as needed.
Signed-off-by: Lyude Paul lyude@redhat.com Reviewed-by: Alexandre Courbot acourbot@nvidia.com
--- V10: * Fix incorrect function name in documentation for __drm_gem_shmem_release_sgt_locked()
drivers/gpu/drm/drm_gem_shmem_helper.c | 32 +++++++++++++++++++++----- include/drm/drm_gem_shmem_helper.h | 1 + 2 files changed, 27 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 545933c7f7121..c989459eb2159 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -158,6 +158,30 @@ struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t } EXPORT_SYMBOL_GPL(drm_gem_shmem_create);
+/** + * __drm_gem_shmem_free_sgt_locked - Unpin and DMA unmap pages, and release the + * cached scatter/gather table for an shmem GEM object. + * @shmem: shmem GEM object + * + * If the passed shmem object has an active scatter/gather table for driver + * usage, this function will unmap it and release the memory associated with it. + * It is the responsibility of the caller to ensure it holds the dma_resv_lock + * for this object. + * + * Drivers should not need to call this function themselves, it is mainly + * intended for usage in the Rust shmem bindings. + */ +void __drm_gem_shmem_free_sgt_locked(struct drm_gem_shmem_object *shmem) +{ + dma_resv_assert_held(shmem->base.resv); + + dma_unmap_sgtable(shmem->base.dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0); + sg_free_table(shmem->sgt); + kfree(shmem->sgt); + shmem->sgt = NULL; +} +EXPORT_SYMBOL_GPL(__drm_gem_shmem_free_sgt_locked); + /** * drm_gem_shmem_release - Release resources associated with a shmem GEM object. * @shmem: shmem GEM object @@ -176,12 +200,8 @@ void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem)
drm_WARN_ON(obj->dev, refcount_read(&shmem->vmap_use_count));
- if (shmem->sgt) { - dma_unmap_sgtable(obj->dev->dev, shmem->sgt, - DMA_BIDIRECTIONAL, 0); - sg_free_table(shmem->sgt); - kfree(shmem->sgt); - } + if (shmem->sgt) + __drm_gem_shmem_free_sgt_locked(shmem); if (shmem->pages) drm_gem_shmem_put_pages_locked(shmem);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 5ccdae21b94a9..b2c23af628e1a 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -111,6 +111,7 @@ int drm_gem_shmem_init(struct drm_device *dev, struct drm_gem_shmem_object *shme struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size); void drm_gem_shmem_release(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); +void __drm_gem_shmem_free_sgt_locked(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
Just a temporary holdover to make locking/unlocking the dma_resv lock much easier.
Signed-off-by: Lyude Paul lyude@redhat.com Co-authored-by: Alexandre Courbot acourbot@nvidia.com --- rust/kernel/drm/gem/shmem.rs | 30 +++++++++++++++++++++++++++++- 1 file changed, 29 insertions(+), 1 deletion(-)
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs index 35d7523e164ff..92ec2b67ed023 100644 --- a/rust/kernel/drm/gem/shmem.rs +++ b/rust/kernel/drm/gem/shmem.rs @@ -27,7 +27,10 @@ Deref, DerefMut, // }, - ptr::NonNull, + ptr::{ + self, + NonNull, // + }, }; use gem::{ BaseObjectPrivate, @@ -224,3 +227,28 @@ impl<T: DriverObject> driver::AllocImpl for Object<T> { dumb_map_offset: None, }; } + +/// Private helper-type for holding the `dma_resv` object for a GEM shmem object. +/// +/// When this is dropped, the `dma_resv` lock is dropped as well. +/// +// TODO: This should be replaced with a WwMutex equivalent once we have such bindings in the kernel. +struct DmaResvGuard<'a, T: DriverObject>(&'a Object<T>);
 +
 +impl<'a, T: DriverObject> DmaResvGuard<'a, T> { + #[inline(always)] + fn new(obj: &'a Object<T>) -> Self { + // SAFETY: This lock is initialized throughout the lifetime of `object`. + unsafe { bindings::dma_resv_lock(obj.raw_dma_resv(), ptr::null_mut()) }; + + Self(obj) + } +} + +impl<'a, T: DriverObject> Drop for DmaResvGuard<'a, T> { + #[inline(always)] + fn drop(&mut self) { + // SAFETY: We are releasing the lock grabbed during the creation of this object. + unsafe { bindings::dma_resv_unlock(self.0.raw_dma_resv()) }; + } +}
On Tue, Apr 28, 2026 at 03:03:43PM -0400, Lyude Paul wrote:
Just a temporary holdover to make locking/unlocking the dma_resv lock much easier.
Signed-off-by: Lyude Paul lyude@redhat.com Co-authored-by: Alexandre Courbot acourbot@nvidia.com
Needs Alexandre's SoB.
+/// Private helper-type for holding the `dma_resv` object for a GEM shmem object. +/// +/// When this is dropped, the `dma_resv` lock is dropped as well. +/// +// TODO: This should be replace with a WwMutex equivalent once we have such bindings in the kernel. +struct DmaResvGuard<'a, T: DriverObject>(&'a Object<T>);
We will also need this in GPUVM, so I don't think it needs to be private to shmem.
Alice
On Wed Apr 29, 2026 at 5:11 PM JST, Alice Ryhl wrote:
On Tue, Apr 28, 2026 at 03:03:43PM -0400, Lyude Paul wrote:
Just a temporary holdover to make locking/unlocking the dma_resv lock much easier.
Signed-off-by: Lyude Paul lyude@redhat.com Co-authored-by: Alexandre Courbot acourbot@nvidia.com
Needs Alexandre's SoB.
Signed-off-by: Alexandre Courbot acourbot@nvidia.com
In order to do this, we need to be careful to ensure that any interface we expose for scatterlists ensures that any mappings created from one are destroyed on driver-unbind. To do this, we introduce a Devres resource into shmem::Object that we use in order to ensure that we release any SGTable mappings on driver-unbind. We store this in a SetOnce protected by a Mutex, rather than reusing the dma_resv lock from the shmem gem object (the lock that currently protects drm_gem_shmem_object->sgt).
We also provide two different methods for acquiring an sg table: self.sg_table(), and self.owned_sg_table(). The first function is for short-term uses of mapped SGTables, the second is for callers that need to hold onto the mapped SGTable for an extended period of time. The second variant uses Devres of course, whereas the first simply relies on rust's borrow checker to prevent driver-unbind when using the mapped SGTable.
Signed-off-by: Lyude Paul lyude@redhat.com
--- V3: * Rename OwnedSGTable to shmem::SGTable. Since the current version of the SGTable abstractions now has an `Owned` and `Borrowed` variant, I think renaming this to shmem::SGTable makes things less confusing. We do, however, keep the name of owned_sg_table() as-is. V4: * Clarify safety comments for SGTable to explain why the object is thread-safe. * Rename from SGTableRef to SGTable V10: * Use Devres in order to ensure that SGTables are revocable, and are unmapped on driver-unbind. V11: * s/create_sg_table()/get_sg_table() * Get rid of extraneous `ret = ` in shmem::Object::get_sg_table() V12: * Actually move sgt_res in this patch and not the next one V13: * Use DmaResvGuard suggestion from Alexandre * Use Alexandre's (much better) solution for get_sg_table() * Use SetOnce instead of UnsafeCell * s/SGTableRef/SGTableMap * Fix typo in SGTableMap documentation * Create fallible constructor for SGTableMap * Don't reuse dma_resv lock for protecting Object contents, just use Mutex + SetOnce * Drop use of drm_gem_shmem_get_pages_sgt_locked(), since we don't need to hold the dma_resv lock ourselves for anything but this function. * Check in the bounds for sg_table() and owned_sg_table() that the Device we receive is, in fact, the correct device. * Remove redundant docs in owned_sg_table(), just point it back to sg_table(). * Implement Deborah's suggestion to fix double-free in free_callback() * Restore original order of Object<T> * Fix doc typo for SGTableMap
rust/kernel/drm/gem/shmem.rs | 212 +++++++++++++++++++++++++++++++++-- 1 file changed, 203 insertions(+), 9 deletions(-)
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs index 92ec2b67ed023..c0187ff22e526 100644 --- a/rust/kernel/drm/gem/shmem.rs +++ b/rust/kernel/drm/gem/shmem.rs @@ -11,18 +11,33 @@
use crate::{ container_of, + device::{ + self, + Bound, // + }, + devres::*, drm::{ driver, gem, private::Sealed, Device, // }, - error::to_result, + error::{ + from_err_ptr, + to_result, // + }, prelude::*, - sync::aref::ARef, + scatterlist, + sync::{ + aref::ARef, + new_mutex, + Mutex, + SetOnce, // + }, types::Opaque, // }; use core::{ + mem, ops::{ Deref, DerefMut, // @@ -66,6 +81,10 @@ pub struct Object<T: DriverObject> { obj: Opaquebindings::drm_gem_shmem_object, /// Parent object that owns this object's DMA reservation object. parent_resv_obj: Option<ARef<Object<T>>>, + /// Devres object for unmapping any SGTable on driver-unbind. + /// TODO: Drop the mutex once we can use Init with SetOnce. + #[pin] + sgt_res: Mutex<SetOnce<Devres<SGTableMap<T>>>>, #[pin] inner: T, } @@ -118,6 +137,7 @@ pub fn new( try_pin_init!(Self { obj <- Opaque::init_zeroed(), parent_resv_obj: config.parent_resv_obj.map(|p| p.into()), + sgt_res <- new_mutex!(SetOnce::new()), inner <- T::new(dev, size, args), }), GFP_KERNEL, @@ -161,22 +181,99 @@ extern "C" fn free_callback(obj: *mut bindings::drm_gem_object) { // - DRM always passes a valid gem object here // - We used drm_gem_shmem_create() in our create_gem_object callback, so we know that // `obj` is contained within a drm_gem_shmem_object - let this = unsafe { container_of!(obj, bindings::drm_gem_shmem_object, base) }; - - // SAFETY: - // - We're in free_callback - so this function is safe to call. - // - We won't be using the gem resources on `this` after this call. - unsafe { bindings::drm_gem_shmem_release(this) }; + let base = unsafe { container_of!(obj, bindings::drm_gem_shmem_object, base) };
// SAFETY: // - We verified above that `obj` is valid, which makes `this` valid // - This function is set in AllocOps, so we know that `this` is contained within a // `Object<T>` - let this = unsafe { container_of!(Opaque::cast_from(this), Self, obj) }.cast_mut(); + let this = unsafe { container_of!(Opaque::cast_from(base), Self, obj) }.cast_mut(); + + // SAFETY: We are in free_callback(), which means that we have exclusive access to `this`. + let mut sgt_guard = unsafe { (*this).sgt_res.lock() }; + + // drm_gem_shmem_release() will clear any existing sgt, so we need to clear sgt_res before + // calling it to prevent a double-free. + drop(mem::take(sgt_guard.deref_mut())); + + // Drop the lock we acquired, we don't need it anymore and it will be acquired again when we + // perform the final drop in this function. + drop(sgt_guard); + + // SAFETY: + // - We're in free_callback - so this function is safe to call. + // - We won't be using the gem resources on `this` after this call. + unsafe { bindings::drm_gem_shmem_release(base) };
// SAFETY: We're recovering the Kbox<> we created in gem_create_object() let _ = unsafe { KBox::from_raw(this) }; } + + // If necessary, create an SGTable for the gem object and register a Devres for it to ensure + // that it is unmapped on driver unbind. + fn get_sg_table<'a>( + &'a self, + dev: &'a device::Device<Bound>, + ) -> Result<&'a Devres<SGTableMap<T>>> { + if dev.as_raw() != self.dev().as_ref().as_raw() { + return Err(EINVAL); + } + + let sgt_res = self.sgt_res.lock(); + let sgt_map = if let Some(devres) = sgt_res.as_ref() { + devres + } else { + // INVARIANT: We store this Devres in the object itself and don't move it, ensuring that + // the object it points to remains valid for the lifetime of the SGTableMap. + sgt_res.populate(Devres::new(dev, SGTableMap::new(self))?); + + // SAFETY: We just populated `sgt_res` above, making this safe to unwrap. + unsafe { sgt_res.as_ref().unwrap_unchecked() } + }; + + // SAFETY: + // We only write to `sgt_res` in two places: + // - The above code. + // - `free_callback()`, which can't be called as long as `self` is alive. + // Therefore, it's safe to hold a reference to the contents of `sgt_res` without holding + // the lock for the lifetime of 'a, making this lifetime extension safe. + Ok(unsafe { mem::transmute::<&_, &'a _>(sgt_map) }) + } + + /// Creates (if necessary) and returns an immutable reference to a scatter-gather table of DMA + /// pages for this object. + /// + /// This will pin the object in memory. It is expected that `dev` should be a pointer to the + /// same [`device::Device`] which `self` belongs to, otherwise this function will return + /// `Err(EINVAL)`. + #[inline] + pub fn sg_table<'a>( + &'a self, + dev: &'a device::Device<Bound>, + ) -> Result<&'a scatterlist::SGTable> { + let sgt = self.get_sg_table(dev)?; + + Ok(sgt.access(dev)?.deref()) + } + + /// Creates (if necessary) and returns an owned reference to a scatter-gather table of DMA pages + /// for this object. 
+ /// + /// This is the same as [`sg_table`], except that it instead returns an + /// [`shmem::SGTable`] which holds a reference to the associated gem object, instead of a + /// reference to an [`scatterlist::SGTable`]. + /// + /// See [`sg_table`] for more info. + /// + /// [`shmem::SGTable`]: SGTable + /// [`sg_table`]: Self::sg_table + pub fn owned_sg_table(&self, dev: &device::Device<Bound>) -> Result<SGTable<T>> { + self.get_sg_table(dev)?; + + // INVARIANT: We just ensured above that `self.sgt_res` is initialized with + // `Devres<SGTableMap<T>>`. + Ok(SGTable(self.into())) + } }
impl<T: DriverObject> Deref for Object<T> { @@ -252,3 +349,100 @@ fn drop(&mut self) { unsafe { bindings::dma_resv_unlock(self.0.raw_dma_resv()) }; } } + +/// A reference to a GEM object that is known to have a mapped [`SGTable`]. +/// +/// This is used by the Rust bindings with [`Devres`] in order to ensure that mappings for SGTables +/// on GEM shmem objects are revoked on driver-unbind. +/// +/// # Invariants +/// +/// - `self.obj` always points to a valid GEM object. +/// - This object is proof that `self.obj.owner.sgt` has an initialized and valid +/// [`scatterlist::SGTable`]. +pub struct SGTableMap<T: DriverObject> { + obj: NonNull<Object<T>>, +} + +impl<T: DriverObject> Deref for SGTableMap<T> { + type Target = scatterlist::SGTable; + + fn deref(&self) -> &Self::Target { + // SAFETY: + // - The NonNull is guaranteed to be valid via our type invariants. + // - The sgt field is guaranteed to be initialized and valid via our type invariants. + unsafe { scatterlist::SGTable::from_raw((*self.obj.as_ref().as_raw_shmem()).sgt) } + } +} + +impl<T: DriverObject> Drop for SGTableMap<T> { + fn drop(&mut self) { + // SAFETY: `obj` is always valid via our type invariants + let obj = unsafe { self.obj.as_ref() }; + let _lock = DmaResvGuard::new(obj); + + // SAFETY: We acquired the lock needed for calling this function above + unsafe { bindings::__drm_gem_shmem_free_sgt_locked(obj.as_raw_shmem()) }; + } +} + +impl<T: DriverObject> SGTableMap<T> { + fn new(obj: &Object<T>) -> impl Init<Self, Error> { + // INVARIANT: + // - We call drm_gem_shmem_get_pages_sgt_locked below and check whether or not it + // succeeds, fulfilling the invariant of SGTableMap that the object's `sgt` field is + // initialized. + // SAFETY: + // - `obj` is fully initialized, making this function safe to call. 
+ from_err_ptr(unsafe { bindings::drm_gem_shmem_get_pages_sgt(obj.as_raw_shmem()) })?; + + Ok(Self { obj: obj.into() }) + } +} + +// SAFETY: The NonNull in SGTableMap is guaranteed valid by our type invariants, and the GEM object +// it points to is guaranteed to be thread-safe. +unsafe impl<T: DriverObject> Send for SGTableMap<T> {} +// SAFETY: The NonNull in SGTableMap is guaranteed valid by our type invariants, and the GEM object +// it points to is guaranteed to be thread-safe. +unsafe impl<T: DriverObject> Sync for SGTableMap<T> {} + +/// An owned reference to a scatter-gather table of DMA address spans for a GEM shmem object. +/// +/// This object holds an owned reference to the underlying GEM shmem object, ensuring that the +/// [`scatterlist::SGTable`] referenced by this type remains valid for the lifetime of this object. +/// +/// # Invariants +/// +/// - This type is proof that `self.0.sgt_res` is initialized with a `Devres<SGTableMap<T>>`. +/// - This object is only exposed in situations where we know the underlying `SGTable` will not be +/// modified for the lifetime of this object. Thus, it is safe to send/access this type across +/// threads. +pub struct SGTable<T: DriverObject>(ARef<Object<T>>); + +// SAFETY: This object is thread-safe via our type invariants. +unsafe impl<T: DriverObject> Send for SGTable<T> {} +// SAFETY: This object is thread-safe via our type invariants. +unsafe impl<T: DriverObject> Sync for SGTable<T> {} + +impl<T: DriverObject> Deref for SGTable<T> { + type Target = Devres<SGTableMap<T>>; + + #[inline(always)] + fn deref(&self) -> &Self::Target { + // TODO: This is Bad, but we'll replace this and do better once we can pass an Init to + // SetOnce. + // SAFETY: + // - We only write to this location in two places: + // - Object::<T>::get_sg_table() + // - Object::<T>::free_callback() (which gets called by gem shmem helpers after the last + // gem object ref drop). 
+ // Our type is proof that get_sg_table() has been called previously, and also proof that + // `free_callback()` has not been called - meaning that we can assume no races will occur + // for the lifetime of this object and we can safely access the Mutex contents without + // actually locking it. + // - Since we proved get_sg_table() has been called, we know that sgt_res is already + // populated and thus unwrap_unchecked() is safe to call. + unsafe { (*self.0.sgt_res.data.get()).as_ref().unwrap_unchecked() } + } +}
On Tue, Apr 28, 2026 at 03:03:44PM -0400, Lyude Paul wrote:
In order to do this, we need to be careful to ensure that any interface we expose for scatterlists ensures that any mappings created from one are destroyed on driver-unbind. To do this, we introduce a Devres resource into shmem::Object that we use in order to ensure that we release any SGTable mappings on driver-unbind. We store this in an UnsafeCell and protect
Outdated? No longer UnsafeCell.
access to it using the dma_resv lock that we already have from the shmem gem object, which is the same lock that currently protects drm_gem_object_shmem->sgt.
We also provide two different methods for acquiring an sg table: self.sg_table(), and self.owned_sg_table(). The first function is for short-term uses of mapped SGTables, the second is for callers that need to hold onto the mapped SGTable for an extended period of time. The second variant uses Devres of course, whereas the first simply relies on rust's borrow checker to prevent driver-unbind when using the mapped SGTable.
Signed-off-by: Lyude Paul lyude@redhat.com
obj: Opaque<bindings::drm_gem_shmem_object>, /// Parent object that owns this object's DMA reservation object. parent_resv_obj: Option<ARef<Object<T>>>,
- /// Devres object for unmapping any SGTable on driver-unbind.
- /// TODO: Drop the mutex once we can use Init with SetOnce.
- #[pin]
- sgt_res: Mutex<SetOnce<Devres<SGTableMap<T>>>>,
As far as I can tell, you don't need this Mutex. Also, it causes problems like requiring the reference transmute below.
Alice
One of the more obvious use cases for gem shmem objects is the ability to create mappings into their contents. So, let's hook this up in our rust bindings.
Similar to how we handle SGTables, we make sure there are two different types of mappings: owned mappings (kernel::drm::gem::shmem::VMapOwned) and borrowed mappings (kernel::drm::gem::shmem::VMapRef).
Signed-off-by: Lyude Paul lyude@redhat.com
--- V7: * Switch over to the new iosys map bindings that use the Io trait V8: * Get rid of iosys_map bindings for now, only support non-iomem types * s/as_shmem()/as_raw_shmem() V9: * Get rid of some outdated comments I missed * Add missing SIZE check to raw_vmap() * Add a proper unit test that ensures that we actually validate SIZE at compile-time. Turns out it takes only 34 lines to make a boilerplate DRM driver for a kunit test :) * Add unit tests * Add some missing #[inline]s V10: * Correct issue with iomem error path We previously called raw_vunmap() if we got an iomem allocation, but raw_vunmap() was written such that it assumed all allocations were sysmem allocations. Fix this by just making raw_vunmap() accept a iosys_map. V11: * Use Alexandre's clever solution to remove the macros we were using for maintaining two different VMap types. * Change the order of items in Object<T> to ensure that sgt_res is always dropped before obj. * Fix typo in Object.raw_vmap() * s/raw_vmap()/make_vmap()/ Deduplicate code a bit more as well by using more generics here
rust/kernel/drm/gem/shmem.rs | 352 ++++++++++++++++++++++++++++++++++- 1 file changed, 351 insertions(+), 1 deletion(-)
diff --git a/rust/kernel/drm/gem/shmem.rs b/rust/kernel/drm/gem/shmem.rs index c0187ff22e526..d68b6e3896915 100644 --- a/rust/kernel/drm/gem/shmem.rs +++ b/rust/kernel/drm/gem/shmem.rs @@ -26,6 +26,11 @@ from_err_ptr, to_result, // }, + io::{ + Io, + IoCapable, + IoKnownSize, // + }, prelude::*, scatterlist, sync::{ @@ -37,7 +42,11 @@ types::Opaque, // }; use core::{ - mem, + ffi::c_void, + mem::{ + self, + MaybeUninit, // + }, ops::{ Deref, DerefMut, // @@ -48,6 +57,7 @@ }, }; use gem::{ + BaseObject, BaseObjectPrivate, DriverObject, IntoGEMObject, // @@ -274,6 +284,80 @@ pub fn owned_sg_table(&self, dev: &device::Device<Bound>) -> Result<SGTable<T>> // `Devres<SGTableMap<T>>`. Ok(SGTable(self.into())) } + + /// Attempt to create a vmap from the gem object, and confirm the size of said vmap. + fn make_vmap<'a, R, const SIZE: usize>(&'a self) -> Result<VMap<T, R, SIZE>> + where + R: Deref<Target = Self> + From<&'a Self>, + { + // INVARIANT: We check here that the gem object is at least as large as `SIZE`. + if self.size() < SIZE { + return Err(ENOSPC); + } + + let mut map: MaybeUninitbindings::iosys_map = MaybeUninit::uninit(); + let guard = DmaResvGuard::new(self); + + // SAFETY: drm_gem_shmem_vmap can be called with the DMA reservation lock held + to_result(unsafe { + bindings::drm_gem_shmem_vmap_locked(self.as_raw_shmem(), map.as_mut_ptr()) + })?; + + // Drop the guard explicitly here, since we may need to call raw_vunmap() (which re-acquires + // the lock). + drop(guard); + + // SAFETY: The call to drm_gem_shmem_vmap_locked succeeded above, so we are guaranteed that + // map is properly initialized. + let map = unsafe { map.assume_init() }; + + // XXX: We don't currently support iomem allocations + if map.is_iomem { + // SAFETY: + // - The vmap operation above succeeded, guaranteeing that `map` points to a valid + // memory mapping. 
+ // - We checked that this is an iomem allocation, making it safe to read vaddr_iomem + unsafe { self.raw_vunmap(map) }; + + Err(ENOTSUPP) + } else { + Ok(VMap { + // SAFETY: We checked that this is not an iomem allocation, making it safe to read + // vaddr + addr: unsafe { map.__bindgen_anon_1.vaddr }, + owner: self.into(), + }) + } + } + + /// Unmap a vmap from the gem object. + /// + /// # Safety + /// + /// - The caller promises that `map` is a valid vmap on this gem object. + /// - The caller promises that the memory pointed to by map will no longer be accesed through + /// this instance. + unsafe fn raw_vunmap(&self, mut map: bindings::iosys_map) { + let _guard = DmaResvGuard::new(self); + + // SAFETY: + // - This function is safe to call with the DMA reservation lock held. + // - Our `ARef` is proof that the underlying gem object here is initialized and thus safe to + // dereference. + unsafe { bindings::drm_gem_shmem_vunmap_locked(self.as_raw_shmem(), &mut map) }; + } + + /// Creates and returns a virtual kernel memory mapping for this object. + #[inline] + pub fn vmap<const SIZE: usize>(&self) -> Result<VMapRef<'_, T, SIZE>> { + self.make_vmap() + } + + /// Creates and returns an owned reference to a virtual kernel memory mapping for this object. + #[inline] + pub fn owned_vmap<const SIZE: usize>(&self) -> Result<VMapOwned<T, SIZE>> { + self.make_vmap() + } }
impl<T: DriverObject> Deref for Object<T> { @@ -407,6 +491,155 @@ unsafe impl<T: DriverObject> Send for SGTableMap<T> {} // it points to is guaranteed to be thread-safe. unsafe impl<T: DriverObject> Sync for SGTableMap<T> {}
+macro_rules! impl_vmap_io_capable { + ($impl:ident, $ty:ty) => { + impl<D, R, const SIZE: usize> IoCapable<$ty> for $impl<D, R, SIZE> + where + D: DriverObject, + R: Deref<Target = Object<D>>, + { + #[inline(always)] + unsafe fn io_read(&self, address: usize) -> $ty { + let ptr = address as *mut $ty; + + // SAFETY: The safety contract of `io_read` guarantees that address is a valid + // address within the bounds of `Self` of at least the size of $ty, and is properly + // aligned. + unsafe { ptr::read(ptr) } + } + + #[inline(always)] + unsafe fn io_write(&self, value: $ty, address: usize) { + let ptr = address as *mut $ty; + + // SAFETY: The safety contract of `io_write` guarantees that address is a valid + // address within the bounds of `Self` of at least the size of $ty, and is properly + // aligned. + unsafe { ptr::write(ptr, value) } + } + } + }; +} + +/// A reference to a virtual mapping for an shmem-based GEM object in kernel address space. +/// +/// # Invariants +/// +/// - The size of `owner` is >= SIZE. +/// - The memory pointed to by addr remains valid at least until this object is dropped. +pub struct VMap<D, R, const SIZE: usize = 0> +where + D: DriverObject, + R: Deref<Target = Object<D>>, +{ + addr: *mut c_void, + owner: R, +} + +/// An alias type for a reference to a shmem-based GEM object's VMap. +pub type VMapRef<'a, D, const SIZE: usize = 0> = VMap<D, &'a Object<D>, SIZE>; + +/// An alias type for an owned reference to a shmem-based GEM object's VMap. +pub type VMapOwned<D, const SIZE: usize = 0> = VMap<D, ARef<Object<D>>, SIZE>; + +impl<D, R, const SIZE: usize> VMap<D, R, SIZE> +where + D: DriverObject, + R: Deref<Target = Object<D>>, +{ + /// Borrows a reference to the object that owns this virtual mapping. 
+ #[inline(always)] + pub fn owner(&self) -> &Object<D> { + &self.owner + } +} + +impl<D, R, const SIZE: usize> Drop for VMap<D, R, SIZE> +where + D: DriverObject, + R: Deref<Target = Object<D>>, +{ + #[inline(always)] + fn drop(&mut self) { + // SAFETY: + // - Our existence is proof that this map was previously created using self.owner. + // - Since we are in Drop, we are guaranteed that no one will access the memory + // through this mapping after calling this. + unsafe { + self.owner.raw_vunmap(bindings::iosys_map { + is_iomem: false, + __bindgen_anon_1: bindings::iosys_map__bindgen_ty_1 { vaddr: self.addr }, + }) + }; + } +} + +impl<D, R, const SIZE: usize> Io for VMap<D, R, SIZE> +where + D: DriverObject, + R: Deref<Target = Object<D>>, +{ + #[inline(always)] + fn addr(&self) -> usize { + self.addr as usize + } + + #[inline(always)] + fn maxsize(&self) -> usize { + self.owner.size() + } +} + +impl<D, R, const SIZE: usize> IoKnownSize for VMap<D, R, SIZE> +where + D: DriverObject, + R: Deref<Target = Object<D>>, +{ + const MIN_SIZE: usize = SIZE; +} + +impl_vmap_io_capable!(VMap, u8); +impl_vmap_io_capable!(VMap, u16); +impl_vmap_io_capable!(VMap, u32); +#[cfg(CONFIG_64BIT)] +impl_vmap_io_capable!(VMap, u64); + +impl<D: DriverObject, const SIZE: usize> Clone for VMapOwned<D, SIZE> { + #[inline] + fn clone(&self) -> Self { + // SAFETY: We have a successful vmap already, so this can't fail. + unsafe { self.owner.owned_vmap().unwrap_unchecked() } + } +} + +impl<'a, D: DriverObject, const SIZE: usize> Clone for VMapRef<'a, D, SIZE> { + #[inline] + fn clone(&self) -> Self { + // SAFETY: We have a successful vmap already, so this can't fail. 
+ unsafe { self.owner.vmap().unwrap_unchecked() } + } +} + +impl<'a, D: DriverObject, const SIZE: usize> From<VMapRef<'a, D, SIZE>> for VMapOwned<D, SIZE> { + #[inline] + fn from(value: VMapRef<'a, D, SIZE>) -> Self { + let this = Self { + addr: value.addr, + owner: value.owner.into(), + }; + + mem::forget(value); + this + } +} + +// SAFETY: VMap is thread-safe, and the fact that this VMap has an owned reference to the object +// means this object will remain valid until dropped. +unsafe impl<D: DriverObject, const SIZE: usize> Send for VMapOwned<D, SIZE> {} +// SAFETY: VMap is thread-safe, and the fact that this VMap has an owned reference to the object +// means this object will remain valid until dropped. +unsafe impl<D: DriverObject, const SIZE: usize> Sync for VMapOwned<D, SIZE> {} + /// An owned reference to a scatter-gather table of DMA address spans for a GEM shmem object. /// /// This object holds an owned reference to the underlying GEM shmem object, ensuring that the @@ -446,3 +679,120 @@ fn deref(&self) -> &Self::Target { unsafe { (*self.0.sgt_res.data.get()).as_ref().unwrap_unchecked() } } } + +#[kunit_tests(rust_drm_gem_shmem)] +mod tests { + use super::*; + use crate::{ + drm, + faux, + page::PAGE_SIZE, // + }; + + // The bare minimum needed to create a fake drm driver for kunit + + #[pin_data] + struct KunitData {} + struct KunitDriver; + struct KunitFile; + #[pin_data] + struct KunitObject {} + + const INFO: drm::DriverInfo = drm::DriverInfo { + major: 0, + minor: 0, + patchlevel: 0, + name: c"kunit", + desc: c"Kunit", + }; + + impl drm::file::DriverFile for KunitFile { + type Driver = KunitDriver; + + fn open(_dev: &drm::Device<KunitDriver>) -> Result<Pin<KBox<Self>>> { + Ok(KBox::new(Self, GFP_KERNEL)?.into()) + } + } + + impl gem::DriverObject for KunitObject { + type Driver = KunitDriver; + type Args = (); + + fn new( + _dev: &drm::Device<KunitDriver>, + _size: usize, + _args: Self::Args, + ) -> impl PinInit<Self, Error> { + 
try_pin_init!(KunitObject {}) + } + } + + #[vtable] + impl drm::Driver for KunitDriver { + type Data = KunitData; + type File = KunitFile; + type Object = Object<KunitObject>; + + const INFO: drm::DriverInfo = INFO; + const IOCTLS: &'static [drm::ioctl::DrmIoctlDescriptor] = &[]; + } + + fn create_drm_dev() -> Result<(faux::Registration, ARef<drm::Device<KunitDriver>>)> { + // Create a faux DRM device so we can test gem object creation. + let data = try_pin_init!(KunitData {}); + let dev = faux::Registration::new(c"Kunit", None)?; + let drm = drm::Device::<KunitDriver>::new(dev.as_ref(), data)?; + + Ok((dev, drm)) + } + + #[test] + fn compile_time_vmap_sizes() -> Result { + let (_dev, drm) = create_drm_dev()?; + + // Create a gem object to test with + let cfg_ = ObjectConfig::<KunitObject> { + map_wc: false, + parent_resv_obj: None, + }; + let obj = Object::<KunitObject>::new(&drm, PAGE_SIZE, cfg_, ())?; + + // Try creating a normal vmap + obj.vmap::<PAGE_SIZE>()?; + + // Try creating a vmap that's smaller then the size we specified + obj.vmap::<{ PAGE_SIZE - 100 }>()?; + + // Make sure creating a vmap that's too large fails + assert!(obj.vmap::<{ PAGE_SIZE + 200 }>().is_err()); + + Ok(()) + } + + #[test] + fn vmap_io() -> Result { + let (_dev, drm) = create_drm_dev()?; + + // Create a gem object to test with + let cfg_ = ObjectConfig::<KunitObject> { + map_wc: false, + parent_resv_obj: None, + }; + let obj = Object::<KunitObject>::new(&drm, PAGE_SIZE, cfg_, ())?; + + let vmap = obj.vmap::<PAGE_SIZE>()?; + + vmap.write8(0xDE, 0x0); + assert_eq!(vmap.read8(0x0), 0xDE); + vmap.write32(0xFFFFFFFF, 0x20); + + assert_eq!(vmap.read32(0x20), 0xFFFFFFFF); + + assert_eq!(vmap.read8(0x20), 0xFF); + assert_eq!(vmap.read8(0x21), 0xFF); + assert_eq!(vmap.read8(0x22), 0xFF); + assert_eq!(vmap.read8(0x23), 0xFF); + + Ok(()) + } +}
linaro-mm-sig@lists.linaro.org