diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 748d4538c60a1eb170830b854657a70481a19ebf..ff832a47a395532bd18ad88fa0f3c47b27b3f140 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -8,8 +8,10 @@
 
 #include <drm/drm_device.h>
 #include <drm/drm_drv.h>
+#include <drm/drm_exec.h>
 #include <drm/drm_file.h>
 #include <drm/drm_gem.h>
+#include <drm/drm_gpuvm.h>
 #include <drm/drm_ioctl.h>
 #include <kunit/test.h>
 #include <linux/blk-mq.h>
@@ -53,3 +55,6 @@ const gfp_t RUST_CONST_HELPER___GFP_HIGHMEM = ___GFP_HIGHMEM;
 const gfp_t RUST_CONST_HELPER___GFP_NOWARN = ___GFP_NOWARN;
 const blk_features_t RUST_CONST_HELPER_BLK_FEAT_ROTATIONAL = BLK_FEAT_ROTATIONAL;
 const fop_flags_t RUST_CONST_HELPER_FOP_UNSIGNED_OFFSET = FOP_UNSIGNED_OFFSET;
+
+const uint32_t BINDINGS_DRM_EXEC_INTERRUPTIBLE_WAIT =
+	DRM_EXEC_INTERRUPTIBLE_WAIT;
diff --git a/rust/helpers/drm_gpuvm.c b/rust/helpers/drm_gpuvm.c
new file mode 100644
index 0000000000000000000000000000000000000000..f4f4ea2c4ec897e2735f167b5a66af590b10cf55
--- /dev/null
+++ b/rust/helpers/drm_gpuvm.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <drm/drm_gpuvm.h>
+
+#ifdef CONFIG_DRM
+#ifdef CONFIG_DRM_GPUVM
+
+struct drm_gpuvm *rust_helper_drm_gpuvm_get(struct drm_gpuvm *obj)
+{
+	return drm_gpuvm_get(obj);
+}
+
+void rust_helper_drm_gpuvm_exec_unlock(struct drm_gpuvm_exec *vm_exec)
+{
+	drm_gpuvm_exec_unlock(vm_exec);
+}
+
+void rust_helper_drm_gpuva_init_from_op(struct drm_gpuva *va, struct drm_gpuva_op_map *op)
+{
+	drm_gpuva_init_from_op(va, op);
+}
+
+struct drm_gpuvm_bo *rust_helper_drm_gpuvm_bo_get(struct drm_gpuvm_bo *vm_bo)
+{
+	return drm_gpuvm_bo_get(vm_bo);
+}
+
+bool rust_helper_drm_gpuvm_is_extobj(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj)
+{
+	return drm_gpuvm_is_extobj(gpuvm, obj);
+}
+
+#endif
+#endif
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index be3e6c2ffbe6de02d6830014226fdce9630607ab..435ca180f77b3e653bcc0ea0e41d25356f920830 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -15,6 +15,7 @@
 #include "device.c"
 #include "dma-resv.c"
 #include "drm.c"
+#include "drm_gpuvm.c"
 #include "err.c"
 #include "fs.c"
 #include "io.c"
diff --git a/rust/kernel/drm/gpuvm.rs b/rust/kernel/drm/gpuvm.rs
new file mode 100644
index 0000000000000000000000000000000000000000..f98b049b644ad93d038ba2655023f234469591cf
--- /dev/null
+++ b/rust/kernel/drm/gpuvm.rs
@@ -0,0 +1,762 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+//! The DRM GPUVM abstraction.
+//!
+//! C header: [`include/drm/drm_gpuvm.h`](../../../../include/drm/drm_gpuvm.h)
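+//!
+//! # Overview
+//!
+//! A driver embeds its per-VM state in a [`GpuVm`], takes a [`LockedGpuVm`]
+//! guard via [`GpuVm::exec_lock`], and then issues split/merge requests with
+//! [`LockedGpuVm::sm_map`] and [`LockedGpuVm::sm_unmap`], which call back
+//! into the driver through the [`DriverGpuVm`] trait.
+//!
+//! The sketch below shows the general flow; `MyGpuVm`, `MyStepContext`, `bo`
+//! and the address values are hypothetical driver-side names, not part of
+//! this abstraction:
+//!
+//! ```ignore
+//! // `vm: ARef<GpuVm<MyGpuVm>>` was created earlier with `GpuVm::new`.
+//! let mut guard = vm.exec_lock(Some(&bo))?;
+//! let mut ctx = MyStepContext::default();
+//! // Map `range` bytes of `bo` at GPU address `addr`, starting at `offset`
+//! // within the object.
+//! guard.sm_map(&mut ctx, addr, range, offset)?;
+//! // Dropping the guard unlocks all reservations again.
+//! drop(guard);
+//! ```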
+
+use core::cell::UnsafeCell;
+use core::marker::{PhantomData, PhantomPinned};
+use core::mem::ManuallyDrop;
+use core::ops::{Deref, DerefMut, Range};
+use core::ptr::NonNull;
+
+use crate::bindings;
+use crate::bindings::drm_gpuvm_exec;
+use crate::drm::device;
+use crate::drm::drv;
+use crate::drm::gem::IntoGEMObject;
+use crate::error::code::EINVAL;
+use crate::error::code::ENOMEM;
+use crate::error::from_result;
+use crate::error::to_result;
+use crate::error::Result;
+use crate::init;
+use crate::init::pin_init_from_closure;
+use crate::prelude::*;
+use crate::types::ARef;
+use crate::types::AlwaysRefCounted;
+use crate::types::Opaque;
+
+// SAFETY: This type was manually checked and is safe to zero-initialize.
+unsafe impl init::Zeroable for bindings::drm_gpuvm_bo {}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVm (a GPU address space).
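+///
+/// A minimal sketch of an implementation; `MyDriver`, `MyVmBo` and
+/// `MyContext` are driver-specific placeholders, and the step bodies are
+/// elided:
+///
+/// ```ignore
+/// struct MyGpuVm; // Hypothetical per-VM driver state.
+///
+/// impl DriverGpuVm for MyGpuVm {
+///     type Driver = MyDriver; // Hypothetical `drv::Driver` implementation.
+///     type GpuVa = ();
+///     type GpuVmBo = MyVmBo; // Hypothetical `DriverGpuVmBo` implementation.
+///     type StepContext = MyContext;
+///
+///     fn step_map(
+///         self: &mut UpdatingGpuVm<'_, Self>,
+///         op: &mut OpMap<Self>,
+///         ctx: &mut Self::StepContext,
+///     ) -> Result {
+///         // Program the page tables for the new range, then insert the
+///         // GpuVa with `op.map_and_link_va(...)`.
+///         Ok(())
+///     }
+///
+///     // `step_unmap` and `step_remap` follow the same pattern.
+/// }
+/// ```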
+pub trait DriverGpuVm: Sized {
+    /// The parent `Driver` implementation for this `DriverGpuVm`.
+    type Driver: drv::Driver;
+    /// The type representing a VA range.
+    type GpuVa: DriverGpuVa;
+    /// The type representing the connection between a driver's GEM object and
+    /// the GPUVM instance.
+    type GpuVmBo: DriverGpuVmBo;
+    /// The context being held by the driver during a `map`, `unmap` or `remap`
+    /// operation.
+    type StepContext;
+
+    /// Called when the GPUVM framework expects the driver to map a VA range
+    /// according to `op`.
+    fn step_map(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+
+    /// Called when the GPUVM framework expects the driver to unmap a VA range
+    /// according to `op`.
+    fn step_unmap(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpUnMap<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+
+    /// Called when the GPUVM framework expects the driver to remap a VA range
+    /// according to `op`.
+    ///
+    /// A remap operation keeps either the start or the end of the range fixed,
+    /// modifying the other end to make the range smaller or larger as
+    /// applicable.
+    fn step_remap(
+        self: &mut UpdatingGpuVm<'_, Self>,
+        op: &mut OpReMap<Self>,
+        vm_bo: &GpuVmBo<Self>,
+        ctx: &mut Self::StepContext,
+    ) -> Result;
+}
+
+struct StepContext<'a, T: DriverGpuVm> {
+    gpuvm: &'a GpuVm<T>,
+    ctx: &'a mut T::StepContext,
+}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVa (a mapping in GPU address space).
+///
+// XXX: why this trait?
+pub trait DriverGpuVa: Sized {}
+
+impl DriverGpuVa for () {}
+
+/// Trait that must be implemented by DRM drivers to represent a DRM GpuVmBo (a
+/// connection between a BO and a VM).
+pub trait DriverGpuVmBo: Sized {
+    /// Allows the driver to create a new instance of `DriverGpuVmBo`.
+    ///
+    /// This is automatically implemented if the driver's `DriverGpuVmBo` type
+    /// implements `Default`.
+    fn new() -> impl PinInit<Self>;
+}
+
+/// Provides a default implementation for trivial types.
+impl<T: Default> DriverGpuVmBo for T {
+    fn new() -> impl PinInit<Self> {
+        // SAFETY: `T::default()` always succeeds, so `Ok(())` is always
+        // returned. Also, we never move out of `slot`.
+        unsafe {
+            pin_init_from_closure(|slot| {
+                *slot = Self::default();
+                Ok(())
+            })
+        }
+    }
+}
+
+/// Represents a mapping operation.
+#[repr(transparent)]
+pub struct OpMap<T: DriverGpuVm>(bindings::drm_gpuva_op_map, PhantomData<T>);
+
+/// Represents an unmapping operation.
+#[repr(transparent)]
+pub struct OpUnMap<T: DriverGpuVm>(bindings::drm_gpuva_op_unmap, PhantomData<T>);
+
+/// Represents a remap operation.
+///
+/// A remap operation keeps either the start or the end of the range fixed,
+/// modifying the other end to make the range smaller or larger as applicable.
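+///
+/// For example, unmapping `[0x2000, 0x3000)` out of an existing mapping that
+/// spans `[0x1000, 0x4000)` produces a remap with a `prev` part covering
+/// `[0x1000, 0x2000)`, a `next` part covering `[0x3000, 0x4000)`, and an
+/// unmap of the original mapping.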
+#[repr(transparent)]
+pub struct OpReMap<T: DriverGpuVm>(bindings::drm_gpuva_op_remap, PhantomData<T>);
+
+impl<T: DriverGpuVm> OpMap<T> {
+    /// The base address of the new mapping.
+    pub fn addr(&self) -> u64 {
+        self.0.va.addr
+    }
+    /// The range of the new mapping.
+    pub fn range(&self) -> u64 {
+        self.0.va.range
+    }
+
+    /// The offset in the underlying GEM object.
+    pub fn offset(&self) -> u64 {
+        self.0.gem.offset
+    }
+
+    /// The underlying GEM object that backs this mapping.
+    pub fn object(&self) -> &<T::Driver as drv::Driver>::Object {
+        let p = <<T::Driver as drv::Driver>::Object as IntoGEMObject>::from_gem_obj(self.0.gem.obj);
+        // SAFETY: The GEM object has an active reference for the lifetime of this op
+        unsafe { &*p }
+    }
+
+    /// Maps and links a VA range to the given `GpuVmBo`, increasing the
+    /// refcount of the `GpuVmBo`.
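+    ///
+    /// A sketch of a typical `step_map` body; it assumes the driver stashed
+    /// the relevant `GpuVmBo` in its step context (e.g. one returned by
+    /// [`LockedGpuVm::obtain_bo`]) under a hypothetical `vm_bo` field:
+    ///
+    /// ```ignore
+    /// fn step_map(
+    ///     self: &mut UpdatingGpuVm<'_, Self>,
+    ///     op: &mut OpMap<Self>,
+    ///     ctx: &mut Self::StepContext,
+    /// ) -> Result {
+    ///     // ... program the hardware mapping here ...
+    ///     let va = GpuVa::<Self>::new(())?;
+    ///     // On collision, the GpuVa is handed back as the error value.
+    ///     op.map_and_link_va(self, va, &ctx.vm_bo)
+    ///         .map_err(|_| EEXIST)?;
+    ///     Ok(())
+    /// }
+    /// ```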
+    pub fn map_and_link_va(
+        &mut self,
+        gpuvm: &mut UpdatingGpuVm<'_, T>,
+        gpuva: Pin<KBox<GpuVa<T>>>,
+        gpu_vmbo: &GpuVmBo<T>,
+    ) -> Result<(), Pin<KBox<GpuVa<T>>>> {
+        // SAFETY: We are handing off the GpuVa ownership and it will not be moved.
+        let p = KBox::leak(unsafe { Pin::into_inner_unchecked(gpuva) });
+        // SAFETY: These C functions are called with the correct invariants
+        unsafe {
+            bindings::drm_gpuva_init_from_op(&mut p.gpuva, &mut self.0);
+            if bindings::drm_gpuva_insert(gpuvm.0.gpuvm() as *mut _, &mut p.gpuva) != 0 {
+                // EEXIST, return the GpuVa to the caller as an error
+                return Err(Pin::new_unchecked(KBox::from_raw(p)));
+            };
+            // SAFETY: This takes a new reference to the gpuvmbo.
+            bindings::drm_gpuva_link(&mut p.gpuva, &gpu_vmbo.bo as *const _ as *mut _);
+        }
+        Ok(())
+    }
+}
+
+impl<T: DriverGpuVm> OpUnMap<T> {
+    /// Returns the `GpuVa` that is being unmapped.
+    pub fn va(&self) -> Option<&GpuVa<T>> {
+        if self.0.va.is_null() {
+            return None;
+        }
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        Some(unsafe { &*p })
+    }
+
+    /// Unmaps and unlinks a given VA range.
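+    ///
+    /// Returns ownership of the boxed [`GpuVa`] on success. A minimal
+    /// `step_unmap` sketch built on this method (hardware teardown elided):
+    ///
+    /// ```ignore
+    /// fn step_unmap(
+    ///     self: &mut UpdatingGpuVm<'_, Self>,
+    ///     op: &mut OpUnMap<Self>,
+    ///     ctx: &mut Self::StepContext,
+    /// ) -> Result {
+    ///     // ... tear down the hardware mapping here ...
+    ///     let _va = op.unmap_and_unlink_va(); // Reclaim and drop the GpuVa.
+    ///     Ok(())
+    /// }
+    /// ```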
+    pub fn unmap_and_unlink_va(&mut self) -> Option<Pin<KBox<GpuVa<T>>>> {
+        if self.0.va.is_null() {
+            return None;
+        }
+        // SAFETY: Container invariant is guaranteed for ops structs created for our types.
+        let p = unsafe { crate::container_of!(self.0.va, GpuVa<T>, gpuva) as *mut GpuVa<T> };
+
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        unsafe {
+            bindings::drm_gpuva_unmap(&mut self.0);
+            bindings::drm_gpuva_unlink(self.0.va);
+        }
+
+        // Unlinking/unmapping relinquishes ownership of the GpuVa object,
+        // so clear the pointer
+        self.0.va = core::ptr::null_mut();
+        // SAFETY: The GpuVa object reference is valid per the op_unmap contract
+        Some(unsafe { Pin::new_unchecked(KBox::from_raw(p)) })
+    }
+}
+
+impl<T: DriverGpuVm> OpReMap<T> {
+    /// Obtains the preceding part of a split mapping.
+    pub fn prev_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The prev pointer must be valid if non-NULL per the op_remap contract
+        unsafe { (self.0.prev as *mut OpMap<T>).as_mut() }
+    }
+
+    /// Obtains the subsequent part of a split mapping.
+    pub fn next_map(&mut self) -> Option<&mut OpMap<T>> {
+        // SAFETY: The next pointer must be valid if non-NULL per the op_remap contract
+        unsafe { (self.0.next as *mut OpMap<T>).as_mut() }
+    }
+
+    /// Obtains the unmap operation for the original existing mapping.
+    pub fn unmap(&mut self) -> &mut OpUnMap<T> {
+        // SAFETY: The unmap pointer is always valid per the op_remap contract
+        unsafe { (self.0.unmap as *mut OpUnMap<T>).as_mut().unwrap() }
+    }
+}
+
+/// A base GPU VA.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVa<T: DriverGpuVm> {
+    #[pin]
+    gpuva: bindings::drm_gpuva,
+    #[pin]
+    inner: T::GpuVa,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+// SAFETY: This type is safe to zero-init (as far as C is concerned).
+unsafe impl init::Zeroable for bindings::drm_gpuva {}
+
+impl<T: DriverGpuVm> GpuVa<T> {
+    /// Initializes a new VA range.
+    pub fn new<E>(inner: impl PinInit<T::GpuVa, E>) -> Result<Pin<KBox<GpuVa<T>>>>
+    where
+        Error: From<E>,
+    {
+        KBox::try_pin_init(
+            try_pin_init!(Self {
+                gpuva <- init::zeroed(),
+                inner <- inner,
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )
+    }
+
+    /// The base address of the VA range.
+    pub fn addr(&self) -> u64 {
+        self.gpuva.va.addr
+    }
+
+    /// The size of the VA range.
+    pub fn range(&self) -> u64 {
+        self.gpuva.va.range
+    }
+
+    /// The offset within the GEM object backing this VA range.
+    pub fn offset(&self) -> u64 {
+        self.gpuva.gem.offset
+    }
+}
+
+/// A base GpuVm BO.
+///
+/// This object is internally refcounted by the GPUVM core.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVmBo<T: DriverGpuVm> {
+    #[pin]
+    bo: bindings::drm_gpuvm_bo,
+    #[pin]
+    inner: T::GpuVmBo,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+impl<T: DriverGpuVm> GpuVmBo<T> {
+    /// Returns a reference to the inner driver data for this `GpuVmBo`.
+    pub fn inner(&self) -> &T::GpuVmBo {
+        &self.inner
+    }
+}
+
+// SAFETY: DRM GpuVmBo objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVmBo<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_bo_get function satisfies the requirements for inc_ref().
+        unsafe { bindings::drm_gpuvm_bo_get(&self.bo as *const _ as *mut _) };
+    }
+
+    unsafe fn dec_ref(mut obj: NonNull<Self>) {
+        // SAFETY: drm_gpuvm_bo_put() requires holding the gpuva lock, which is the dma_resv lock by default.
+        // The drm_gpuvm_bo_put function satisfies the requirements for dec_ref().
+        // (We do not support custom locks yet.)
+        unsafe {
+            let resv = (*obj.as_mut().bo.obj).resv;
+            bindings::dma_resv_lock(resv, core::ptr::null_mut());
+            bindings::drm_gpuvm_bo_put(&mut obj.as_mut().bo);
+            bindings::dma_resv_unlock(resv);
+        }
+    }
+}
+
+/// A base GPU VM.
+#[repr(C)]
+#[pin_data]
+pub struct GpuVm<T: DriverGpuVm> {
+    #[pin]
+    gpuvm: Opaque<bindings::drm_gpuvm>,
+    #[pin]
+    inner: UnsafeCell<T>,
+    #[pin]
+    _p: PhantomPinned,
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn vm_free_callback<T: DriverGpuVm>(raw_gpuvm: *mut bindings::drm_gpuvm) {
+    // SAFETY: Container invariant is guaranteed for objects using our callback.
+    let p = unsafe {
+        crate::container_of!(
+            raw_gpuvm as *mut Opaque<bindings::drm_gpuvm>,
+            GpuVm<T>,
+            gpuvm
+        ) as *mut GpuVm<T>
+    };
+
+    // SAFETY: p is guaranteed to be valid for drm_gpuvm objects using this callback.
+    unsafe { drop(KBox::from_raw(p)) };
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn vm_bo_alloc_callback<T: DriverGpuVm>() -> *mut bindings::drm_gpuvm_bo {
+    let obj: Result<Pin<KBox<GpuVmBo<T>>>> = KBox::try_pin_init(
+        try_pin_init!(GpuVmBo::<T> {
+            bo <- init::zeroed(),
+            inner <- T::GpuVmBo::new(),
+            _p: PhantomPinned
+        }),
+        GFP_KERNEL,
+    );
+
+    match obj {
+        Ok(obj) =>
+        // SAFETY: The DRM core will keep this object pinned
+        unsafe {
+            let p = KBox::leak(Pin::into_inner_unchecked(obj));
+            &mut p.bo
+        },
+        Err(_) => core::ptr::null_mut(),
+    }
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn vm_bo_free_callback<T: DriverGpuVm>(raw_vm_bo: *mut bindings::drm_gpuvm_bo) {
+    // SAFETY: Container invariant is guaranteed for objects using this callback.
+    let p = unsafe { crate::container_of!(raw_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+
+    // SAFETY: p is guaranteed to be valid for drm_gpuvm_bo objects using this callback.
+    unsafe { drop(KBox::from_raw(p)) };
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn step_map_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a map op, and OpMap is a transparent wrapper.
+    let map = unsafe { &mut *((&mut (*op).__bindgen_anon_1.map) as *mut _ as *mut OpMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map(), which is
+    // guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_map(map, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn step_remap_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is a remap op, and OpReMap is a transparent wrapper.
+    let remap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.remap) as *mut _ as *mut OpReMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map() or
+    // sm_unmap(), which is guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    let p_vm_bo = remap.unmap().va().unwrap().gpuva.vm_bo;
+
+    let res = {
+        // SAFETY: vm_bo pointer must be valid and non-null by the step_remap invariants.
+        // Since we grab a ref, this reference's lifetime is until the decref.
+        let vm_bo_ref = unsafe {
+            bindings::drm_gpuvm_bo_get(p_vm_bo);
+            &*(crate::container_of!(p_vm_bo, GpuVmBo<T>, bo) as *mut GpuVmBo<T>)
+        };
+
+        from_result(|| {
+            UpdatingGpuVm(ctx.gpuvm).step_remap(remap, vm_bo_ref, ctx.ctx)?;
+            Ok(0)
+        })
+    };
+
+    // SAFETY: We incremented the refcount above, and the Rust reference we took is
+    // no longer in scope.
+    unsafe { bindings::drm_gpuvm_bo_put(p_vm_bo) };
+
+    res
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn step_unmap_callback<T: DriverGpuVm>(
+    op: *mut bindings::drm_gpuva_op,
+    _priv: *mut core::ffi::c_void,
+) -> core::ffi::c_int {
+    // SAFETY: We know this is an unmap op, and OpUnMap is a transparent wrapper.
+    let unmap = unsafe { &mut *((&mut (*op).__bindgen_anon_1.unmap) as *mut _ as *mut OpUnMap<T>) };
+    // SAFETY: This is a pointer to a StepContext created inline in sm_map() or
+    // sm_unmap(), which is guaranteed to outlive this function.
+    let ctx = unsafe { &mut *(_priv as *mut StepContext<'_, T>) };
+
+    from_result(|| {
+        UpdatingGpuVm(ctx.gpuvm).step_unmap(unmap, ctx.ctx)?;
+        Ok(0)
+    })
+}
+
+/// # Safety
+///
+/// This function should be called only by the GPUVM core.
+unsafe extern "C" fn exec_lock_gem_object(
+    vm_exec: *mut bindings::drm_gpuvm_exec,
+) -> core::ffi::c_int {
+    // SAFETY: The gpuvm_exec object is valid and priv_ is a GEM object pointer
+    // when this callback is used
+    unsafe { bindings::drm_exec_lock_obj(&mut (*vm_exec).exec, (*vm_exec).extra.priv_ as *mut _) }
+}
+
+impl<T: DriverGpuVm> GpuVm<T> {
+    const OPS: bindings::drm_gpuvm_ops = bindings::drm_gpuvm_ops {
+        vm_free: Some(vm_free_callback::<T>),
+        op_alloc: None,
+        op_free: None,
+        vm_bo_alloc: Some(vm_bo_alloc_callback::<T>),
+        vm_bo_free: Some(vm_bo_free_callback::<T>),
+        vm_bo_validate: None,
+        sm_step_map: Some(step_map_callback::<T>),
+        sm_step_remap: Some(step_remap_callback::<T>),
+        sm_step_unmap: Some(step_unmap_callback::<T>),
+    };
+
+    fn gpuvm(&self) -> *const bindings::drm_gpuvm {
+        self.gpuvm.get()
+    }
+
+    /// Creates a new GPUVM instance.
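+    ///
+    /// A sketch of a call; the driver names, the VA range and the reserved
+    /// range are placeholders, and `resv_obj` is the GEM object whose DMA
+    /// reservation the VM shares:
+    ///
+    /// ```ignore
+    /// let vm: ARef<GpuVm<MyGpuVm>> = GpuVm::new(
+    ///     c_str!("my-gpuvm"),
+    ///     &dev,
+    ///     &resv_obj,
+    ///     0..(1 << 40),    // Manageable VA range.
+    ///     0..0x1000,       // Range reserved for the kernel.
+    ///     MyGpuVm::init(), // Hypothetical `PinInit` for the driver state.
+    /// )?;
+    /// ```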
+    pub fn new<E>(
+        name: &'static CStr,
+        dev: &device::Device<T::Driver>,
+        r_obj: &<T::Driver as drv::Driver>::Object,
+        range: Range<u64>,
+        reserve_range: Range<u64>,
+        inner: impl PinInit<T, E>,
+    ) -> Result<ARef<GpuVm<T>>>
+    where
+        Error: From<E>,
+    {
+        let obj: Pin<KBox<Self>> = KBox::try_pin_init(
+            try_pin_init!(Self {
+                // SAFETY: drm_gpuvm_init cannot fail and always initializes the member
+                gpuvm <- unsafe {
+                    init::pin_init_from_closure(move |slot: *mut Opaque<bindings::drm_gpuvm> | {
+                        // Zero-init required by drm_gpuvm_init
+                        *slot = core::mem::zeroed();
+                        bindings::drm_gpuvm_init(
+                            Opaque::raw_get(slot),
+                            name.as_char_ptr(),
+                            0,
+                            dev.as_raw(),
+                            r_obj.gem_obj() as *const _ as *mut _,
+                            range.start,
+                            range.end - range.start,
+                            reserve_range.start,
+                            reserve_range.end - reserve_range.start,
+                            &Self::OPS
+                        );
+                        Ok(())
+                    })
+                },
+                // SAFETY: Just passing through to the initializer argument
+                inner <- unsafe {
+                    init::pin_init_from_closure(move |slot: *mut UnsafeCell<T> | {
+                        inner.__pinned_init(slot as *mut _)
+                    })
+                },
+                _p: PhantomPinned
+            }),
+            GFP_KERNEL,
+        )?;
+
+        // SAFETY: We never move out of the object
+        let vm_ref = unsafe {
+            ARef::from_raw(NonNull::new_unchecked(KBox::leak(
+                Pin::into_inner_unchecked(obj),
+            )))
+        };
+
+        Ok(vm_ref)
+    }
+
+    /// Locks the reservations for all BOs associated with this GPUVM instance.
+    ///
+    /// Optionally locks `extra_obj` as well.
+    ///
+    // XXX: Maybe this `extra_obj` signature can be improved.
+    pub fn exec_lock<'a, 'b>(
+        &'a self,
+        extra_obj: Option<&'b <T::Driver as drv::Driver>::Object>,
+    ) -> Result<LockedGpuVm<'a, 'b, T>> {
+        // Do not try to lock the object if it is internal (since it is already locked).
+        let is_ext = extra_obj.map(|a| self.is_extobj(a)).unwrap_or(false);
+
+        let mut guard = ManuallyDrop::new(LockedGpuVm {
+            gpuvm: self,
+            // vm_exec needs to be pinned, so stick it in a Box.
+            vm_exec: KBox::init(
+                init!(drm_gpuvm_exec {
+                    vm: self.gpuvm() as *mut _,
+                    flags: bindings::BINDINGS_DRM_EXEC_INTERRUPTIBLE_WAIT,
+                    exec: Default::default(),
+                    extra: match (is_ext, extra_obj) {
+                        (true, Some(obj)) => bindings::drm_gpuvm_exec__bindgen_ty_1 {
+                            fn_: Some(exec_lock_gem_object),
+                            priv_: obj.gem_obj() as *const _ as *mut _,
+                        },
+                        _ => Default::default(),
+                    },
+                    num_fences: 0,
+                }),
+                GFP_KERNEL,
+            )?,
+            obj: extra_obj,
+        });
+
+        // SAFETY: The object is valid and was initialized above
+        to_result(unsafe { bindings::drm_gpuvm_exec_lock(&mut *guard.vm_exec) })?;
+
+        Ok(ManuallyDrop::into_inner(guard))
+    }
+
+    /// Returns true if the given object is external to the GPUVM
+    /// (that is, if it does not share the DMA reservation object of the GPUVM).
+    pub fn is_extobj(&self, obj: &impl IntoGEMObject) -> bool {
+        let gem = obj.gem_obj() as *const _ as *mut _;
+        // SAFETY: This is safe to call as long as the arguments are valid pointers.
+        unsafe { bindings::drm_gpuvm_is_extobj(self.gpuvm() as *mut _, gem) }
+    }
+}
+
+// SAFETY: DRM GpuVm objects are always reference counted and the get/put functions
+// satisfy the requirements.
+unsafe impl<T: DriverGpuVm> AlwaysRefCounted for GpuVm<T> {
+    fn inc_ref(&self) {
+        // SAFETY: The drm_gpuvm_get function satisfies the requirements for inc_ref().
+        unsafe { bindings::drm_gpuvm_get(&self.gpuvm as *const _ as *mut _) };
+    }
+
+    unsafe fn dec_ref(obj: NonNull<Self>) {
+        // SAFETY: The drm_gpuvm_put function satisfies the requirements for dec_ref().
+        unsafe { bindings::drm_gpuvm_put(Opaque::raw_get(&(*obj.as_ptr()).gpuvm)) };
+    }
+}
+
+/// A guard type indicating that all of the BOs associated with a GPUVM instance
+/// are locked.
+pub struct LockedGpuVm<'a, 'b, T: DriverGpuVm> {
+    gpuvm: &'a GpuVm<T>,
+    vm_exec: KBox<bindings::drm_gpuvm_exec>,
+    // XXX: if `extra_obj` is none, then what happens at find_bo?
+    obj: Option<&'b <T::Driver as drv::Driver>::Object>,
+}
+
+impl<T: DriverGpuVm> LockedGpuVm<'_, '_, T> {
+    /// Finds the `GpuVmBo` for the extra object passed to
+    /// [`GpuVm::exec_lock`], if any.
+    pub fn find_bo(&mut self) -> Option<ARef<GpuVmBo<T>>> {
+        let obj = self.obj?;
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        let p = unsafe {
+            bindings::drm_gpuvm_bo_find(
+                self.gpuvm.gpuvm() as *mut _,
+                obj.gem_obj() as *const _ as *mut _,
+            )
+        };
+        if p.is_null() {
+            None
+        } else {
+            // SAFETY: All the drm_gpuvm_bo objects in this GpuVm are always allocated by us as GpuVmBo<T>.
+            let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+            // SAFETY: We checked for NULL above, and the types ensure that
+            // this object was created by vm_bo_alloc_callback<T>.
+            Some(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+        }
+    }
+
+    /// Same as [`Self::find_bo`], but allocates a new `GpuVmBo` if one is not
+    /// found.
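+    ///
+    /// A sketch of the usual pattern when preparing a map (names are
+    /// hypothetical):
+    ///
+    /// ```ignore
+    /// let mut guard = vm.exec_lock(Some(&bo))?;
+    /// // Creates and links a new GpuVmBo on first use, otherwise takes a
+    /// // reference to the existing one.
+    /// ctx.vm_bo = guard.obtain_bo()?;
+    /// ```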
+    pub fn obtain_bo(&mut self) -> Result<ARef<GpuVmBo<T>>> {
+        let obj = self.obj.ok_or(EINVAL)?;
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        let p = unsafe {
+            bindings::drm_gpuvm_bo_obtain(
+                self.gpuvm.gpuvm() as *mut _,
+                obj.gem_obj() as *const _ as *mut _,
+            )
+        };
+        if p.is_null() {
+            Err(ENOMEM)
+        } else {
+            // SAFETY: Container invariant is guaranteed for GpuVmBo objects for this GpuVm.
+            let p = unsafe { crate::container_of!(p, GpuVmBo<T>, bo) as *mut GpuVmBo<T> };
+            // SAFETY: We checked for NULL above, and the types ensure that
+            // this object was created by vm_bo_alloc_callback<T>.
+            Ok(unsafe { ARef::from_raw(NonNull::new_unchecked(p)) })
+        }
+    }
+
+    /// Maps `req_range` bytes of the extra GEM object, starting at offset
+    /// `req_offset` within it, at address `req_addr` in the GPU's VA range.
+    /// This builds the split and merge steps, which call into the driver via
+    /// the [`DriverGpuVm`] trait.
+    pub fn sm_map(
+        &mut self,
+        ctx: &mut T::StepContext,
+        req_addr: u64,
+        req_range: u64,
+        req_offset: u64,
+    ) -> Result {
+        let obj = self.obj.ok_or(EINVAL)?;
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_sm_map(
+                self.gpuvm.gpuvm() as *mut _,
+                &mut ctx as *mut _ as *mut _,
+                req_addr,
+                req_range,
+                obj.gem_obj() as *const _ as *mut _,
+                req_offset,
+            )
+        })
+    }
+
+    /// Unmaps `[req_addr, req_addr + req_range)` in this GPUVM instance.
+    ///
+    /// This will call into the driver to unmap, and, if required, split
+    /// existing mappings via the `DriverGpuVm` trait.
+    ///
+    // XXX: Maybe we can use Range<u64> here?
+    pub fn sm_unmap(&mut self, ctx: &mut T::StepContext, req_addr: u64, req_range: u64) -> Result {
+        let mut ctx = StepContext {
+            ctx,
+            gpuvm: self.gpuvm,
+        };
+        // SAFETY: LockedGpuVm implies the right locks are held.
+        to_result(unsafe {
+            bindings::drm_gpuvm_sm_unmap(
+                self.gpuvm.gpuvm() as *mut _,
+                &mut ctx as *mut _ as *mut _,
+                req_addr,
+                req_range,
+            )
+        })
+    }
+}
+
+impl<T: DriverGpuVm> Deref for LockedGpuVm<'_, '_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &*self.gpuvm.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> DerefMut for LockedGpuVm<'_, '_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this LockedGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &mut *self.gpuvm.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> Drop for LockedGpuVm<'_, '_, T> {
+    fn drop(&mut self) {
+        // SAFETY: We hold the lock, so it's safe to unlock
+        unsafe {
+            bindings::drm_gpuvm_exec_unlock(&mut *self.vm_exec);
+        }
+    }
+}
+
+/// Represents a GPUVM instance that is being updated through a series of `map`,
+/// `unmap` and `remap` steps.
+pub struct UpdatingGpuVm<'a, T: DriverGpuVm>(&'a GpuVm<T>);
+
+impl<T: DriverGpuVm> UpdatingGpuVm<'_, T> {}
+
+impl<T: DriverGpuVm> Deref for UpdatingGpuVm<'_, T> {
+    type Target = T;
+
+    fn deref(&self) -> &T {
+        // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &*self.0.inner.get() }
+    }
+}
+
+impl<T: DriverGpuVm> DerefMut for UpdatingGpuVm<'_, T> {
+    fn deref_mut(&mut self) -> &mut T {
+        // SAFETY: The existence of this UpdatingGpuVm implies the lock is held,
+        // so this is the only reference
+        unsafe { &mut *self.0.inner.get() }
+    }
+}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVm<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVm<T> {}
+
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Sync for GpuVmBo<T> {}
+// SAFETY: All our trait methods take locks
+unsafe impl<T: DriverGpuVm> Send for GpuVmBo<T> {}
diff --git a/rust/kernel/drm/mod.rs b/rust/kernel/drm/mod.rs
index c44760a1332fa1ef875939b48e7af450f7372020..849dc1e577f15bfada11d6739dff48ac33813326 100644
--- a/rust/kernel/drm/mod.rs
+++ b/rust/kernel/drm/mod.rs
@@ -6,4 +6,6 @@ pub mod device;
 pub mod drv;
 pub mod file;
 pub mod gem;
+#[cfg(CONFIG_DRM_GPUVM = "y")]
+pub mod gpuvm;
 pub mod ioctl;