Skip to content
Snippets Groups Projects
Commit 4b6736ea authored by Thomas Zimmermann's avatar Thomas Zimmermann
Browse files

shmem: keep pages mapped

TODO: This patch is unfinished. Calls to drm_gem_shmem_{vunmap,unpin} should
only decrement shmem->pages_use_count, but never release any pages. Releasing
pages would be done by the purge or free functions.
parent 7d706fdf
No related branches found
No related tags found
Loading
......@@ -162,6 +162,8 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem)
sg_free_table(shmem->sgt);
kfree(shmem->sgt);
}
if (shmem->vaddr)
vunmap(shmem->vaddr);
if (shmem->pages)
drm_gem_shmem_put_pages(shmem);
}
......@@ -336,6 +338,8 @@ static int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
ret = -ENOMEM;
else
iosys_map_set_vaddr(map, shmem->vaddr);
drm_gem_never_purge(obj);
}
if (ret) {
......@@ -395,11 +399,17 @@ static void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem,
if (--shmem->vmap_use_count > 0)
return;
/*
* For dma-buf imported objects we do the vunmap directly here. For
* natively allocated objects, we keep the pages around until the
* object gets purged or freed. Caching the page mappings improves
* performance for frequently vmapped objects.
*/
if (obj->import_attach) {
dma_buf_vunmap(obj->import_attach->dmabuf, map);
shmem->vaddr = NULL;
} else {
vunmap(shmem->vaddr);
drm_gem_shmem_put_pages(shmem);
drm_gem_may_purge(obj);
}
shmem->vaddr = NULL;
......@@ -469,7 +479,7 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv)
}
EXPORT_SYMBOL(drm_gem_shmem_madvise);
static bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
static bool drm_gem_shmem_is_purgeable_locked(struct drm_gem_shmem_object *shmem)
{
return (shmem->madv > 0) &&
!shmem->vmap_use_count && shmem->sgt &&
......@@ -479,11 +489,21 @@ static bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem)
/*
 * drm_gem_shmem_purgeable_size - Purgeable size of a shmem GEM object
 * @shmem: shmem GEM object
 *
 * Returns the object's size in pages if the object is currently
 * purgeable, or 0 otherwise.
 *
 * Only trylocks are used: this is intended to be callable from shrinker
 * context, where blocking on the object's locks could deadlock. If
 * either lock cannot be taken, the object is conservatively reported
 * as not purgeable (0).
 */
unsigned long drm_gem_shmem_purgeable_size(struct drm_gem_shmem_object *shmem)
{
	struct drm_gem_object *obj = &shmem->base;
	unsigned long count = 0;

	if (!mutex_trylock(&shmem->pages_lock))
		return 0;

	if (!mutex_trylock(&shmem->vmap_lock))
		goto out_mutex_unlock;

	if (drm_gem_shmem_is_purgeable_locked(shmem))
		count = obj->size >> PAGE_SHIFT;

	mutex_unlock(&shmem->vmap_lock);

out_mutex_unlock:
	mutex_unlock(&shmem->pages_lock);

	return count;
}
EXPORT_SYMBOL(drm_gem_shmem_purgeable_size);
......@@ -492,7 +512,12 @@ unsigned long drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem)
struct drm_gem_object *obj = &shmem->base;
struct drm_device *dev = obj->dev;
WARN_ON(!drm_gem_shmem_is_purgeable(shmem));
WARN_ON(!drm_gem_shmem_is_purgeable_locked(shmem));
if (shmem->vaddr) {
vunmap(shmem->vaddr);
shmem->vaddr = NULL;
}
dma_unmap_sgtable(dev->dev, shmem->sgt, DMA_BIDIRECTIONAL, 0);
sg_free_table(shmem->sgt);
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment