From a26b4cdad1a76336a0152f1d6ffb27f9e8b21fb8 Mon Sep 17 00:00:00 2001
From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>
Date: Wed, 26 Apr 2023 16:56:48 -0400
Subject: [PATCH] drm/xe: Rename GPU offset helper to reflect true usage

The _io_offset helper function is returning an offset into the GPU
address space. Using the CPU address offset (io_) is not correct.

Rename to reflect usage.

Update to use GPU offset information.

Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
---
 drivers/gpu/drm/xe/xe_bo.c      | 8 ++++----
 drivers/gpu/drm/xe/xe_bo.h      | 2 +-
 drivers/gpu/drm/xe/xe_migrate.c | 4 ++--
 3 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index c82e995df779..ab2307d0744a 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -1335,7 +1335,7 @@ struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_gt *gt,
  * XXX: This is in the VM bind data path, likely should calculate this once and
  * store, with a recalculation if the BO is moved.
  */
-uint64_t vram_region_io_offset(struct ttm_resource *res)
+uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 {
 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
 	struct xe_gt *gt = mem_type_to_gt(xe, res->mem_type);
@@ -1343,7 +1343,7 @@ uint64_t vram_region_io_offset(struct ttm_resource *res)
 	if (res->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_gpu_offset(xe);
 
-	return gt->mem.vram.io_start - xe->mem.vram.io_start;
+	return xe->mem.vram.base + gt->mem.vram.base;
 }
 
 /**
@@ -1427,7 +1427,7 @@ int xe_bo_pin(struct xe_bo *bo)
 			XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
 
 			place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE, &vram) -
-				       vram_region_io_offset(bo->ttm.resource)) >> PAGE_SHIFT;
+				       vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
 			place->lpfn = place->fpfn + (bo->size >> PAGE_SHIFT);
 
 			spin_lock(&xe->pinned.lock);
@@ -1574,7 +1574,7 @@ dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset,
 
 		xe_res_first(bo->ttm.resource, page << PAGE_SHIFT,
 			     page_size, &cur);
-		return cur.start + offset + vram_region_io_offset(bo->ttm.resource);
+		return cur.start + offset + vram_region_gpu_offset(bo->ttm.resource);
 	}
 }
 
diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h
index 7e111332c35a..65680c312893 100644
--- a/drivers/gpu/drm/xe/xe_bo.h
+++ b/drivers/gpu/drm/xe/xe_bo.h
@@ -226,7 +226,7 @@ void xe_bo_vunmap(struct xe_bo *bo);
 bool mem_type_is_vram(u32 mem_type);
 bool xe_bo_is_vram(struct xe_bo *bo);
 bool xe_bo_is_stolen(struct xe_bo *bo);
-uint64_t vram_region_io_offset(struct ttm_resource *res);
+uint64_t vram_region_gpu_offset(struct ttm_resource *res);
 
 bool xe_bo_can_migrate(struct xe_bo *bo, u32 mem_type);
 
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index f40f47ccb76f..d8401793dd0b 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -419,7 +419,7 @@ static u32 pte_update_size(struct xe_migrate *m,
 	} else {
 		/* Offset into identity map. */
 		*L0_ofs = xe_migrate_vram_ofs(cur->start +
-					      vram_region_io_offset(res));
+					      vram_region_gpu_offset(res));
 		cmds += cmd_size;
 	}
 
@@ -469,7 +469,7 @@ static void emit_pte(struct xe_migrate *m,
 					addr |= XE_PTE_PS64;
 				}
 
-			addr += vram_region_io_offset(bo->ttm.resource);
+			addr += vram_region_gpu_offset(bo->ttm.resource);
 			addr |= XE_PPGTT_PTE_LM;
 		}
 		addr |= PPAT_CACHED | XE_PAGE_PRESENT | XE_PAGE_RW;
-- 
GitLab