From 02a5f66ace4ec7e3f689dcd8268f05b51ad1e53e Mon Sep 17 00:00:00 2001
From: Matthew Auld <matthew.auld@intel.com>
Date: Tue, 26 Sep 2023 16:25:39 +0100
Subject: [PATCH] mmap

Look up the sg entry that backs the faulting page with
i915_gem_object_get_sg(), and hand that entry plus the page offset
within it to remap_io_sg(), rather than always starting the remap from
the first entry of the object's sg table. remap_io_sg() applies the
offset to the first PFN it inserts and then walks the sg list as
before.
---
 drivers/gpu/drm/i915/gem/i915_gem_mman.c | 9 ++++++++-
 drivers/gpu/drm/i915/i915_mm.c           | 9 +++++++--
 drivers/gpu/drm/i915/i915_mm.h           | 3 ++-
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index aa4d842d4c5a8..01ee80df97d41 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -251,7 +251,10 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	struct vm_area_struct *area = vmf->vma;
 	struct i915_mmap_offset *mmo = area->vm_private_data;
 	struct drm_i915_gem_object *obj = mmo->obj;
+	struct scatterlist *sgl;
 	resource_size_t iomap;
+	unsigned int offset;
+	unsigned long n;
 	int err;
 
 	/* Sanity check that we allow writing into this object */
@@ -272,10 +275,14 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 		iomap -= obj->mm.region->region.start;
 	}
 
+	n = ((vmf->address - area->vm_start) >> PAGE_SHIFT) +
+	    area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
+	sgl = i915_gem_object_get_sg(obj, n, &offset);
+
 	/* PTEs are revoked in obj->ops->put_pages() */
 	err = remap_io_sg(area,
 			  area->vm_start, area->vm_end - area->vm_start,
-			  obj->mm.pages->sgl, iomap);
+			  sgl, offset, iomap);
 
 	if (area->vm_flags & VM_WRITE) {
 		GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
diff --git a/drivers/gpu/drm/i915/i915_mm.c b/drivers/gpu/drm/i915/i915_mm.c
index 7998bc74ab49d..5c1d1da41fe1c 100644
--- a/drivers/gpu/drm/i915/i915_mm.c
+++ b/drivers/gpu/drm/i915/i915_mm.c
@@ -35,6 +35,7 @@ struct remap_pfn {
 	pgprot_t prot;
 
 	struct sgt_iter sgt;
+	unsigned int offset;
 	resource_size_t iobase;
 };
 
@@ -57,8 +58,9 @@ static int remap_sg(pte_t *pte, unsigned long addr, void *data)
 
 	/* Special PTE are not associated with any struct page */
 	set_pte_at(r->mm, addr, pte,
-		   pte_mkspecial(pfn_pte(sgt_pfn(r), r->prot)));
+		   pte_mkspecial(pfn_pte(sgt_pfn(r) + r->offset, r->prot)));
 	r->pfn++; /* track insertions in case we need to unwind later */
+	r->offset = 0;
 
 	r->sgt.curr += PAGE_SIZE;
 	if (r->sgt.curr >= r->sgt.max)
@@ -122,18 +124,21 @@ int remap_io_mapping(struct vm_area_struct *vma,
  * @addr: target user address to start at
  * @size: size of map area
  * @sgl: Start sg entry
+ * @offset: Offset within sg entry
  * @iobase: Use stored dma address offset by this address or pfn if -1
  *
  * Note: this is only safe if the mm semaphore is held when called.
  */
 int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
-		struct scatterlist *sgl, resource_size_t iobase)
+		struct scatterlist *sgl, unsigned int offset,
+		resource_size_t iobase)
 {
 	struct remap_pfn r = {
 		.mm = vma->vm_mm,
 		.prot = vma->vm_page_prot,
 		.sgt = __sgt_iter(sgl, use_dma(iobase)),
+		.offset = offset,
 		.iobase = iobase,
 	};
 	int err;
diff --git a/drivers/gpu/drm/i915/i915_mm.h b/drivers/gpu/drm/i915/i915_mm.h
index 04c8974d822bd..7f5b2b5543a85 100644
--- a/drivers/gpu/drm/i915/i915_mm.h
+++ b/drivers/gpu/drm/i915/i915_mm.h
@@ -30,6 +30,7 @@ int remap_io_mapping(struct vm_area_struct *vma,
 
 int remap_io_sg(struct vm_area_struct *vma,
 		unsigned long addr, unsigned long size,
-		struct scatterlist *sgl, resource_size_t iobase);
+		struct scatterlist *sgl, unsigned int offset,
+		resource_size_t iobase);
 
 #endif /* __I915_MM_H__ */
-- 
GitLab
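
As a reader aid (not part of the patch): a minimal, standalone C sketch
of the page-index arithmetic that vm_fault_cpu() performs above. All
numeric values are made-up placeholders; only the formula mirrors the
patch.

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	/* Hypothetical example values, not taken from real hardware. */
	unsigned long fault_address = 0x7f0000005000UL; /* vmf->address */
	unsigned long vm_start      = 0x7f0000000000UL; /* area->vm_start */
	unsigned long vm_pgoff      = 0x100UL;          /* area->vm_pgoff */
	unsigned long node_start    = 0x100UL;          /* drm_vma_node_start(&mmo->vma_node) */

	/*
	 * Pages into the vma, plus the vma's offset into the mmap space,
	 * minus where this mmap offset node starts: the index of the
	 * faulting page within the object's backing store. The patch then
	 * feeds this index to i915_gem_object_get_sg() to find the sg
	 * entry and the page offset within it.
	 */
	unsigned long n = ((fault_address - vm_start) >> PAGE_SHIFT) +
			  vm_pgoff - node_start;

	printf("object page index n = %lu\n", n); /* prints 5 for these values */
	return 0;
}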