Unverified Commit 333b8906 authored by Thomas Hellström, committed by Rodrigo Vivi

drm/xe/userptr: Unmap userptrs in the mmu notifier


If userptr pages are freed after a call to the xe mmu notifier,
the device is not blocked from theoretically accessing these
pages unless they are also unmapped from the iommu, which
violates some aspects of the iommu-imposed security.

Ensure that userptrs are unmapped in the mmu notifier to
mitigate this. A naive attempt would try to free the sg table, but
the sg table itself may be accessed by a concurrent bind
operation, so settle for only unmapping.
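
As a rough illustration of the locking scheme (not the driver code
itself), the sketch below is a minimal, self-contained userspace
analogue of the pattern this patch introduces. It uses pthreads in
place of kernel mutexes, and every name in it is hypothetical rather
than taken from xe: a "mapped" flag guarded by a dedicated mutex lets
either the notifier path or the final teardown path perform the DMA
unmap exactly once, while the table itself stays allocated for any
concurrent bind.

/*
 * Illustrative userspace sketch (not xe driver code): a mutex-protected
 * "mapped" flag ensures the unmap runs at most once, from whichever
 * path gets there first, while the table itself remains allocated.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_userptr {                   /* hypothetical stand-in for struct xe_userptr */
        pthread_mutex_t unmap_mutex;    /* protects @mapped */
        bool mapped;                    /* true while DMA mappings exist */
};

static void demo_dma_unmap(const char *who)
{
        /* stand-in for dma_unmap_sgtable() */
        printf("%s: DMA mappings torn down\n", who);
}

/* Called from both the (simulated) mmu notifier and the final free path. */
static void demo_userptr_unmap(struct demo_userptr *u, const char *who)
{
        pthread_mutex_lock(&u->unmap_mutex);
        if (u->mapped)
                demo_dma_unmap(who);
        u->mapped = false;
        pthread_mutex_unlock(&u->unmap_mutex);
}

int main(void)
{
        struct demo_userptr u = { .mapped = true };

        pthread_mutex_init(&u.unmap_mutex, NULL);  /* mirrors mutex_init() in xe_vma_create() */
        demo_userptr_unmap(&u, "notifier");        /* performs the unmap */
        demo_userptr_unmap(&u, "free_sg");         /* no-op: already unmapped */
        pthread_mutex_destroy(&u.unmap_mutex);     /* mirrors mutex_destroy() in xe_vma_destroy_late() */
        return 0;
}

In the actual driver the same role is played by the unmap_mutex and
mapped fields added to struct xe_userptr in the diff below.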

v3:
- Update lockdep asserts.
- Fix a typo (Matthew Auld)

Fixes: 81e058a3 ("drm/xe: Introduce helper to populate userptr")
Cc: Oak Zeng <oak.zeng@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: <stable@vger.kernel.org> # v6.10+
Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Acked-by: Matthew Brost <matthew.brost@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250304173342.22009-4-thomas.hellstrom@linux.intel.com


(cherry picked from commit ba767b9d)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 0a98219b
@@ -150,6 +150,45 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
 			       DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_KERNEL_MAPPING);
 }
 
+static void xe_hmm_userptr_set_mapped(struct xe_userptr_vma *uvma)
+{
+	struct xe_userptr *userptr = &uvma->userptr;
+	struct xe_vm *vm = xe_vma_vm(&uvma->vma);
+
+	lockdep_assert_held_write(&vm->lock);
+	lockdep_assert_held(&vm->userptr.notifier_lock);
+
+	mutex_lock(&userptr->unmap_mutex);
+	xe_assert(vm->xe, !userptr->mapped);
+	userptr->mapped = true;
+	mutex_unlock(&userptr->unmap_mutex);
+}
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma)
+{
+	struct xe_userptr *userptr = &uvma->userptr;
+	struct xe_vma *vma = &uvma->vma;
+	bool write = !xe_vma_read_only(vma);
+	struct xe_vm *vm = xe_vma_vm(vma);
+	struct xe_device *xe = vm->xe;
+
+	if (!lockdep_is_held_type(&vm->userptr.notifier_lock, 0) &&
+	    !lockdep_is_held_type(&vm->lock, 0) &&
+	    !(vma->gpuva.flags & XE_VMA_DESTROYED)) {
+		/* Don't unmap in exec critical section. */
+		xe_vm_assert_held(vm);
+		/* Don't unmap while mapping the sg. */
+		lockdep_assert_held(&vm->lock);
+	}
+
+	mutex_lock(&userptr->unmap_mutex);
+	if (userptr->sg && userptr->mapped)
+		dma_unmap_sgtable(xe->drm.dev, userptr->sg,
+				  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
+	userptr->mapped = false;
+	mutex_unlock(&userptr->unmap_mutex);
+}
+
 /**
  * xe_hmm_userptr_free_sg() - Free the scatter gather table of userptr
  * @uvma: the userptr vma which hold the scatter gather table
@@ -161,16 +200,9 @@ static int xe_build_sg(struct xe_device *xe, struct hmm_range *range,
 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma)
 {
 	struct xe_userptr *userptr = &uvma->userptr;
-	struct xe_vma *vma = &uvma->vma;
-	bool write = !xe_vma_read_only(vma);
-	struct xe_vm *vm = xe_vma_vm(vma);
-	struct xe_device *xe = vm->xe;
-	struct device *dev = xe->drm.dev;
-
-	xe_assert(xe, userptr->sg);
-	dma_unmap_sgtable(dev, userptr->sg,
-			  write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE, 0);
-
+
+	xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);
+	xe_hmm_userptr_unmap(uvma);
 	sg_free_table(userptr->sg);
 	userptr->sg = NULL;
 }
@@ -297,6 +329,7 @@ int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma,
 
 	xe_mark_range_accessed(&hmm_range, write);
 	userptr->sg = &userptr->sgt;
+	xe_hmm_userptr_set_mapped(uvma);
 	userptr->notifier_seq = hmm_range.notifier_seq;
 	up_read(&vm->userptr.notifier_lock);
 	kvfree(pfns);
@@ -13,4 +13,6 @@ struct xe_userptr_vma;
 int xe_hmm_userptr_populate_range(struct xe_userptr_vma *uvma, bool is_mm_mmap_locked);
 
 void xe_hmm_userptr_free_sg(struct xe_userptr_vma *uvma);
+
+void xe_hmm_userptr_unmap(struct xe_userptr_vma *uvma);
 #endif
@@ -620,6 +620,8 @@ static void __vma_userptr_invalidate(struct xe_vm *vm, struct xe_userptr_vma *uvma)
 		err = xe_vm_invalidate_vma(vma);
 		XE_WARN_ON(err);
 	}
+
+	xe_hmm_userptr_unmap(uvma);
 }
 
 static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
@@ -1039,6 +1041,7 @@ static struct xe_vma *xe_vma_create(struct xe_vm *vm,
 		INIT_LIST_HEAD(&userptr->invalidate_link);
 		INIT_LIST_HEAD(&userptr->repin_link);
 		vma->gpuva.gem.offset = bo_offset_or_userptr;
+		mutex_init(&userptr->unmap_mutex);
 
 		err = mmu_interval_notifier_insert(&userptr->notifier,
 						   current->mm,
@@ -1080,6 +1083,7 @@ static void xe_vma_destroy_late(struct xe_vma *vma)
 		 * them anymore
 		 */
 		mmu_interval_notifier_remove(&userptr->notifier);
+		mutex_destroy(&userptr->unmap_mutex);
 		xe_vm_put(vm);
 	} else if (xe_vma_is_null(vma)) {
 		xe_vm_put(vm);
@@ -59,12 +59,16 @@ struct xe_userptr {
 	struct sg_table *sg;
 	/** @notifier_seq: notifier sequence number */
 	unsigned long notifier_seq;
+	/** @unmap_mutex: Mutex protecting dma-unmapping */
+	struct mutex unmap_mutex;
 	/**
 	 * @initial_bind: user pointer has been bound at least once.
 	 * write: vm->userptr.notifier_lock in read mode and vm->resv held.
 	 * read: vm->userptr.notifier_lock in write mode or vm->resv held.
 	 */
 	bool initial_bind;
+	/** @mapped: Whether the @sgt sg-table is dma-mapped. Protected by @unmap_mutex. */
+	bool mapped;
 #if IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT)
 	u32 divisor;
 #endif