Commit bbdf2da6 authored by Tomeu Vizoso

Revert "drm/panfrost: Add support for GPU heap allocations"

This reverts commit 100204de.
parent 989fa70b
--- a/drivers/gpu/drm/panfrost/TODO
+++ b/drivers/gpu/drm/panfrost/TODO
@@ -14,6 +14,8 @@
   The hard part is handling when more address spaces are needed than what
   the h/w provides.
 
+- Support pinning pages on demand (GPU page faults).
+
 - Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
 
 - Support for madvise and a shrinker.
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -50,12 +50,7 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data,
 	struct drm_panfrost_create_bo *args = data;
 
 	if (!args->size || args->pad ||
-	    (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP)))
-		return -EINVAL;
-
-	/* Heaps should never be executable */
-	if ((args->flags & PANFROST_BO_HEAP) &&
-	    !(args->flags & PANFROST_BO_NOEXEC))
+	    (args->flags & ~PANFROST_BO_NOEXEC))
 		return -EINVAL;
 
 	bo = panfrost_gem_create_with_handle(file, dev, args->size, args->flags,
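
For orientation, the check being reverted above is what userspace hit through the CREATE_BO ioctl: PANFROST_BO_HEAP had to be paired with PANFROST_BO_NOEXEC or the kernel returned -EINVAL. A minimal caller sketch (the helper name and error handling are illustrative, not part of this patch):

#include <string.h>
#include <xf86drm.h>
#include <panfrost_drm.h>	/* libdrm copy of the uapi header */

/* Illustrative only: create a heap BO the way the pre-revert UAPI
 * required.  Heap BOs are never executable, so PANFROST_BO_NOEXEC
 * must accompany PANFROST_BO_HEAP. */
static int create_heap_bo(int fd, __u32 size, __u32 *handle)
{
	struct drm_panfrost_create_bo args;

	memset(&args, 0, sizeof(args));
	args.size = size;
	args.flags = PANFROST_BO_HEAP | PANFROST_BO_NOEXEC;

	if (drmIoctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &args))
		return -1;	/* errno holds the kernel's error code */

	*handle = args.handle;
	return 0;
}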
--- a/drivers/gpu/drm/panfrost/panfrost_gem.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.c
@@ -27,20 +27,6 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj)
 	drm_mm_remove_node(&bo->node);
 	spin_unlock(&pfdev->mm_lock);
 
-	if (bo->sgts) {
-		int i;
-		int n_sgt = bo->base.base.size / SZ_2M;
-
-		for (i = 0; i < n_sgt; i++) {
-			if (bo->sgts[i].sgl) {
-				dma_unmap_sg(pfdev->dev, bo->sgts[i].sgl,
-					     bo->sgts[i].nents, DMA_BIDIRECTIONAL);
-				sg_free_table(&bo->sgts[i]);
-			}
-		}
-		kfree(bo->sgts);
-	}
-
 	drm_gem_shmem_free_object(obj);
 }
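
The teardown removed above is the mirror image of the lazy allocations in panfrost_mmu_map_fault_addr() further down in this patch. A summary of the pairing, drawn only from the hunks shown here:

/*
 * fault path (panfrost_mmu_map_fault_addr)      free path (above)
 * ----------------------------------------      -----------------------------
 * bo->sgts = kvmalloc_array(size / SZ_2M, ..)   kfree(bo->sgts)
 * sg_alloc_table_from_pages(sgt, ...)           sg_free_table(&bo->sgts[i])
 * dma_map_sg(pfdev->dev, sgt->sgl, ...)         dma_unmap_sg(pfdev->dev, ...)
 */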
@@ -101,10 +87,7 @@ static int panfrost_gem_map(struct panfrost_device *pfdev, struct panfrost_gem_o
 	if (ret)
 		return ret;
 
-	if (!bo->is_heap)
-		ret = panfrost_mmu_map(bo);
-
-	return ret;
+	return panfrost_mmu_map(bo);
 }
 
 struct panfrost_gem_object *
@@ -118,11 +101,7 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 	struct drm_gem_shmem_object *shmem;
 	struct panfrost_gem_object *bo;
 
-	/* Round up heap allocations to 2MB to keep fault handling simple */
-	if (flags & PANFROST_BO_HEAP)
-		size = roundup(size, SZ_2M);
-	else
-		size = roundup(size, PAGE_SIZE);
+	size = roundup(size, PAGE_SIZE);
 
 	shmem = drm_gem_shmem_create_with_handle(file_priv, dev, size, handle);
 	if (IS_ERR(shmem))
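
The 2 MiB roundup removed here is what kept the fault handler simple: with the BO size a multiple of SZ_2M, every fault maps exactly one chunk, and the sg_table backing any offset is found by plain division. A small sketch under that assumption (the function is illustrative, not from the driver):

#include <linux/sizes.h>

/* Illustrative: which bo->sgts[] entry backs a given byte offset into a
 * heap BO whose size was rounded up to a multiple of SZ_2M.  A 5 MiB
 * request rounds to 6 MiB, i.e. chunks 0..2. */
static unsigned int heap_chunk_index(size_t offset)
{
	return offset / SZ_2M;
}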
@@ -130,7 +109,6 @@ panfrost_gem_create_with_handle(struct drm_file *file_priv,
 	bo = to_panfrost_bo(&shmem->base);
 	bo->noexec = !!(flags & PANFROST_BO_NOEXEC);
-	bo->is_heap = !!(flags & PANFROST_BO_HEAP);
 
 	ret = panfrost_gem_map(pfdev, bo);
 	if (ret)
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -9,12 +9,10 @@
 struct panfrost_gem_object {
 	struct drm_gem_shmem_object base;
-	struct sg_table *sgts;
 
 	struct drm_mm_node node;
 	bool is_mapped		:1;
 	bool noexec		:1;
-	bool is_heap		:1;
 };
 
 static inline
@@ -23,12 +21,6 @@ struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
 	return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
 }
 
-static inline
-struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node)
-{
-	return container_of(node, struct panfrost_gem_object, node);
-}
-
 struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
 
 struct panfrost_gem_object *
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -3,7 +3,6 @@
 /* Copyright (C) 2019 Arm Ltd. */
 #include <linux/bitfield.h>
 #include <linux/delay.h>
-#include <linux/dma-mapping.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/iopoll.h>
@@ -11,7 +10,6 @@
 #include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-#include <linux/shmem_fs.h>
 #include <linux/sizes.h>
 
 #include "panfrost_device.h"
@@ -259,12 +257,12 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
 		size_t unmapped_page;
 		size_t pgsize = get_pgsize(iova, len - unmapped_len);
 
-		if (ops->iova_to_phys(ops, iova)) {
-			unmapped_page = ops->unmap(ops, iova, pgsize);
-			WARN_ON(unmapped_page != pgsize);
-		}
-		iova += pgsize;
-		unmapped_len += pgsize;
+		unmapped_page = ops->unmap(ops, iova, pgsize);
+		if (!unmapped_page)
+			break;
+
+		iova += unmapped_page;
+		unmapped_len += unmapped_page;
 	}
 
 	mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
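
get_pgsize() is called above but not shown in this diff. A sketch of the helper as it exists elsewhere in panfrost_mmu.c (reconstructed from the driver of this era; treat it as an assumption, not part of this patch). It falls back to 4K whenever alignment or remaining length rules out a 2 MiB block, which is why a partially populated heap BO is walked page by page:

/* Sketch of the get_pgsize() helper used above (assumption, not part of
 * this patch): prefer a 2 MiB block when alignment and remaining length
 * allow it, else fall back to a single 4K page. */
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return PAGE_SIZE;

	return SZ_2M;
}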
@@ -300,105 +298,6 @@ static const struct iommu_gather_ops mmu_tlb_ops = {
 	.tlb_sync	= mmu_tlb_sync_context,
 };
 
-static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
-{
-	struct drm_mm_node *node;
-	u64 offset = addr >> PAGE_SHIFT;
-
-	drm_mm_for_each_node(node, &pfdev->mm) {
-		if (offset >= node->start && offset < (node->start + node->size))
-			return node;
-	}
-	return NULL;
-}
-
-#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
-
-int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
-{
-	int ret, i;
-	struct drm_mm_node *node;
-	struct panfrost_gem_object *bo;
-	struct address_space *mapping;
-	pgoff_t page_offset;
-	struct sg_table *sgt;
-	struct page **pages;
-
-	node = addr_to_drm_mm_node(pfdev, as, addr);
-	if (!node)
-		return -ENOENT;
-
-	bo = drm_mm_node_to_panfrost_bo(node);
-	if (!bo->is_heap) {
-		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-			 node->start << PAGE_SHIFT);
-		return -EINVAL;
-	}
-	/* Assume 2MB alignment and size multiple */
-	addr &= ~((u64)SZ_2M - 1);
-	page_offset = addr >> PAGE_SHIFT;
-	page_offset -= node->start;
-
-	mutex_lock(&bo->base.pages_lock);
-
-	if (!bo->base.pages) {
-		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
-					  sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
-		if (!bo->sgts)
-			return -ENOMEM;
-
-		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
-				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
-		if (!pages) {
-			kfree(bo->sgts);
-			bo->sgts = NULL;
-			return -ENOMEM;
-		}
-		bo->base.pages = pages;
-		bo->base.pages_use_count = 1;
-	} else
-		pages = bo->base.pages;
-
-	mapping = bo->base.base.filp->f_mapping;
-	mapping_set_unevictable(mapping);
-
-	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
-		pages[i] = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(pages[i])) {
-			mutex_unlock(&bo->base.pages_lock);
-			ret = PTR_ERR(pages[i]);
-			goto err_pages;
-		}
-	}
-
-	mutex_unlock(&bo->base.pages_lock);
-
-	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
-	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
-					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
-	if (ret)
-		goto err_pages;
-
-	if (!dma_map_sg(pfdev->dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL)) {
-		ret = -EINVAL;
-		goto err_map;
-	}
-
-	mmu_map_sg(pfdev, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);
-
-	bo->is_mapped = true;
-
-	dev_dbg(pfdev->dev, "mapped page fault @ %llx", addr);
-
-	return 0;
-
-err_map:
-	sg_free_table(sgt);
-err_pages:
-	drm_gem_shmem_put_pages(&bo->base);
-	return ret;
-}
-
 static const char *access_type_name(struct panfrost_device *pfdev,
 				    u32 fault_status)
 {
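
The alignment arithmetic in the removed fault handler is terse; here is a worked example with a hypothetical fault address (PAGE_SHIFT assumed to be 12, node->start assumed to be 0x5400):

/*
 * Hypothetical fault at GPU VA 0x05803000 in a heap BO at 0x05400000:
 *
 *   addr &= ~((u64)SZ_2M - 1);                -> 0x05800000 (chunk start)
 *   page_offset = addr >> PAGE_SHIFT;         -> 0x5800
 *   page_offset -= node->start;               -> 0x5800 - 0x5400 = 1024
 *   sgt = &bo->sgts[1024 / NUM_FAULT_PAGES];  -> 1024 / 512 = sgts[2]
 *
 * so this fault reads in and maps the third 2 MiB chunk of the BO.
 */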
@@ -435,7 +334,7 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
 {
 	struct panfrost_device *pfdev = data;
 	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
-	int i, ret;
+	int i;
 
 	dev_err(pfdev->dev, "mmu irq status=%x\n", status);
@@ -459,18 +358,6 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
 		access_type = (fault_status >> 8) & 0x3;
 		source_id = (fault_status >> 16);
 
-		/* Page fault only */
-		if ((status & mask) == BIT(i)) {
-			WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
-
-			ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
-			if (!ret) {
-				mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
-				status &= ~mask;
-				continue;
-			}
-		}
-
 		/* terminal fault, print info about the fault */
 		dev_err(pfdev->dev,
 			"Unhandled Page fault in AS%d at VA 0x%016llX\n"
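For reference, the fault-status decoding used by this handler, with the shift values taken from the hunk above. The 0xC1-0xC4 range accepted by the removed WARN_ON corresponds to translation faults at page-table levels 1-4 (a detail stated here from general Midgard MMU knowledge, not from the patch):

u32 exception_type = fault_status & 0xFF;	/* 0xC1..0xC4: translation fault */
u32 access_type = (fault_status >> 8) & 0x3;	/* atomic/execute/read/write */
u32 source_id = (fault_status >> 16);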
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -83,7 +83,6 @@ struct drm_panfrost_wait_bo {
 };
 
 #define PANFROST_BO_NOEXEC	1
-#define PANFROST_BO_HEAP	2
 
 /**
  * struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.