@@ -209,12 +209,42 @@ Would be great to refactor this all into a set of small common helpers.
Contact: Daniel Vetter
Put a reservation_object into drm_gem_object
Generic fbdev defio support
---------------------------
The defio support code in the fbdev core has some very specific requirements,
which means drivers need to have a special framebuffer for fbdev. This prevents
us from using the generic fbdev emulation code everywhere. The main issue is
that it uses some fields in struct page itself, which breaks shmem gem objects
(and other things).
A possible solution would be to write our own defio mmap code in the drm fbdev
emulation. It would need to fully wrap the existing mmap ops, forwarding
everything after it has done the write-protect/mkwrite trickery (a rough sketch
follows the list below):
- In the drm_fbdev_fb_mmap helper, if we need defio, change the
default page prots to write-protected with something like this::
vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot);
- Set the mkwrite and fsync callbacks with similar implementations to the core
fbdev defio stuff. These should all work on plain ptes; they don't actually
require a struct page.
- Track the dirty pages in a separate structure (bitfield with one bit per page
should work) to avoid clobbering struct page.
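A rough sketch of what that wrapping could look like; everything defio-specific
here is hypothetical (drm_fbdev_driver_mmap, drm_fbdev_use_defio,
drm_fbdev_defio_vm_ops and pgprot_wrprotect do not exist today)::

  static int drm_fbdev_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
  {
          struct drm_fb_helper *fb_helper = info->par;
          int ret;

          /* let the driver's regular mmap path set up the mapping first */
          ret = drm_fbdev_driver_mmap(fb_helper, vma);   /* hypothetical */
          if (ret)
                  return ret;

          if (drm_fbdev_use_defio(fb_helper)) {          /* hypothetical */
                  /* first write to any page now faults into our mkwrite */
                  vma->vm_page_prot = pgprot_wrprotect(vma->vm_page_prot);
                  /* wrapped vm_ops: mkwrite marks the page in a per-helper
                   * dirty bitfield, fsync flushes and re-write-protects */
                  vma->vm_ops = &drm_fbdev_defio_vm_ops; /* hypothetical */
          }
          return 0;
  }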
Might be good to also have some igt testcases for this.
Contact: Daniel Vetter, Noralf Tronnes
Remove the ->gem_prime_res_obj callback
--------------------------------------------
The ->gem_prime_res_obj callback can be removed from drivers by using the
reservation_object embedded in struct drm_gem_object. This also allows us to
implement generic helpers for waiting on a BO, such as
drm_gem_reservation_object_wait, which enables quite a bit of refactoring in
the various driver wait ioctl implementations.
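For illustration, a driver wait ioctl could then shrink to roughly this (the
foo_* names and the uapi struct are made up)::

  static int foo_ioctl_gem_wait(struct drm_device *dev, void *data,
                                struct drm_file *file)
  {
          struct drm_foo_gem_wait *args = data;  /* hypothetical uapi struct */
          long ret;

          ret = drm_gem_reservation_object_wait(file, args->handle,
                                                args->wait_all,
                                                drm_timeout_abs_to_jiffies(args->timeout_ns));
          /* 0 on success, -ETIME on timeout, other negative errno on error */
          return ret;
  }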
Contact: Daniel Vetter
......
@@ -333,6 +333,8 @@ source "drivers/gpu/drm/tve200/Kconfig"
source "drivers/gpu/drm/xen/Kconfig"
source "drivers/gpu/drm/lima/Kconfig"
# Keep legacy drivers last
menuconfig DRM_LEGACY
......
@@ -109,3 +109,4 @@ obj-$(CONFIG_DRM_TINYDRM) += tinydrm/
obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_LIMA) += lima/
@@ -170,6 +170,10 @@ void drm_gem_private_object_init(struct drm_device *dev,
kref_init(&obj->refcount);
obj->handle_count = 0;
obj->size = size;
reservation_object_init(&obj->_resv);
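	/* use the embedded reservation object unless the driver set its own */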
if (!obj->resv)
obj->resv = &obj->_resv;
drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
@@ -657,6 +661,44 @@ drm_gem_object_lookup(struct drm_file *filp, u32 handle)
}
EXPORT_SYMBOL(drm_gem_object_lookup);
/**
 * drm_gem_reservation_object_wait - Wait on a GEM object's reservation
 * object's shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just the exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out,
 * -EINVAL if the GEM handle is invalid, or 0 on success.
 */
long drm_gem_reservation_object_wait(struct drm_file *filep, u32 handle,
bool wait_all, unsigned long timeout)
{
long ret;
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filep, handle);
if (!obj) {
DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
return -EINVAL;
}
ret = reservation_object_wait_timeout_rcu(obj->resv, wait_all,
true, timeout);
if (ret == 0)
ret = -ETIME;
else if (ret > 0)
ret = 0;
drm_gem_object_put_unlocked(obj);
return ret;
}
EXPORT_SYMBOL(drm_gem_reservation_object_wait);
/**
* drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
* @dev: drm_device
@@ -821,6 +863,7 @@ drm_gem_object_release(struct drm_gem_object *obj)
if (obj->filp)
fput(obj->filp);
reservation_object_fini(&obj->_resv);
drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);
......
@@ -504,6 +504,7 @@ struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
.size = obj->size,
.flags = flags,
.priv = obj,
.resv = obj->resv,
};
if (dev->driver->gem_prime_res_obj)
......
@@ -762,7 +762,7 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
*
* Calculate the timeout in jiffies from an absolute time in sec/nsec.
*/
static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
{
ktime_t abs_timeout, now;
u64 timeout_ns, timeout_jiffies64;
@@ -786,6 +786,7 @@ static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
return timeout_jiffies64 + 1;
}
EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
static int drm_syncobj_array_wait(struct drm_device *dev,
struct drm_file *file_private,
......
# SPDX-License-Identifier: GPL-2.0 OR MIT
# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
config DRM_LIMA
tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
select DRM_SCHED
help
DRM driver for ARM Mali 400/450 GPUs.
# SPDX-License-Identifier: GPL-2.0 OR MIT
# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
lima-y := \
lima_drv.o \
lima_device.o \
lima_pmu.o \
lima_l2_cache.o \
lima_mmu.o \
lima_gp.o \
lima_pp.o \
lima_gem.o \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
lima_gem_prime.o \
lima_dlbu.o \
lima_bcast.o \
lima_object.o
obj-$(CONFIG_DRM_LIMA) += lima.o
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/io.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_bcast.h"
#include "lima_regs.h"
#define bcast_write(reg, data) writel(data, ip->iomem + reg)
#define bcast_read(reg) readl(ip->iomem + reg)
void lima_bcast_enable(struct lima_device *dev, int num_pp)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
struct lima_ip *ip = dev->ip + lima_ip_bcast;
int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000;
for (i = 0; i < num_pp; i++) {
struct lima_ip *pp = pipe->processor[i];
mask |= 1 << (pp->id - lima_ip_pp0);
}
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
}
int lima_bcast_init(struct lima_ip *ip)
{
int i, mask = 0;
for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
if (ip->dev->ip[i].present)
mask |= 1 << (i - lima_ip_pp0);
}
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
return 0;
}
void lima_bcast_fini(struct lima_ip *ip)
{
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_BCAST_H__
#define __LIMA_BCAST_H__
struct lima_ip;
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);
void lima_bcast_enable(struct lima_device *dev, int num_pp);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/slab.h>
#include "lima_device.h"
#include "lima_ctx.h"
int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
{
struct lima_ctx *ctx;
int i, err;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
kref_init(&ctx->refcnt);
for (i = 0; i < lima_pipe_num; i++) {
err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
if (err)
goto err_out0;
}
err = xa_alloc(&mgr->handles, id, UINT_MAX, ctx, GFP_KERNEL);
if (err < 0)
goto err_out0;
return 0;
err_out0:
for (i--; i >= 0; i--)
lima_sched_context_fini(dev->pipe + i, ctx->context + i);
kfree(ctx);
return err;
}
static void lima_ctx_do_release(struct kref *ref)
{
struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt);
int i;
for (i = 0; i < lima_pipe_num; i++)
lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i);
kfree(ctx);
}
int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id)
{
	struct lima_ctx *ctx;
	int ret = 0;
	mutex_lock(&mgr->lock);
	ctx = xa_erase(&mgr->handles, id);
	if (ctx)
		kref_put(&ctx->refcnt, lima_ctx_do_release);
	else
		ret = -EINVAL;
	mutex_unlock(&mgr->lock);
	return ret;
}
struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id)
{
struct lima_ctx *ctx;
mutex_lock(&mgr->lock);
ctx = xa_load(&mgr->handles, id);
if (ctx)
kref_get(&ctx->refcnt);
mutex_unlock(&mgr->lock);
return ctx;
}
void lima_ctx_put(struct lima_ctx *ctx)
{
kref_put(&ctx->refcnt, lima_ctx_do_release);
}
void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);
}
void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr)
{
struct lima_ctx *ctx;
unsigned long id;
xa_for_each(&mgr->handles, id, ctx) {
kref_put(&ctx->refcnt, lima_ctx_do_release);
}
xa_destroy(&mgr->handles);
mutex_destroy(&mgr->lock);
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_CTX_H__
#define __LIMA_CTX_H__
#include <linux/xarray.h>
#include "lima_device.h"
struct lima_ctx {
struct kref refcnt;
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
atomic_t guilty;
};
struct lima_ctx_mgr {
struct mutex lock;
struct xarray handles;
};
int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id);
int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id);
struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id);
void lima_ctx_put(struct lima_ctx *ctx);
void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr);
void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_pp.h"
#include "lima_mmu.h"
#include "lima_pmu.h"
#include "lima_l2_cache.h"
#include "lima_dlbu.h"
#include "lima_bcast.h"
#include "lima_vm.h"
struct lima_ip_desc {
char *name;
char *irq_name;
bool must_have[lima_gpu_num];
int offset[lima_gpu_num];
int (*init)(struct lima_ip *);
void (*fini)(struct lima_ip *);
};
#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
[lima_ip_##ipname] = { \
.name = #ipname, \
.irq_name = irq, \
.must_have = { \
[lima_gpu_mali400] = mst0, \
[lima_gpu_mali450] = mst1, \
}, \
.offset = { \
[lima_gpu_mali400] = off0, \
[lima_gpu_mali450] = off1, \
}, \
.init = lima_##func##_init, \
.fini = lima_##func##_fini, \
}
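/*
 * Descriptor table for all possible IP blocks. must_have and offset are
 * indexed by GPU id (Mali-400, Mali-450); an offset of -1 means the block
 * does not exist on that GPU, and a must_have block failing to init is
 * fatal for that GPU (see lima_init_ip()).
 */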
static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"),
LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL),
LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL),
LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL),
LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"),
LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"),
LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"),
LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"),
LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"),
LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"),
LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"),
LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"),
LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"),
LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"),
LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"),
LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"),
LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"),
LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"),
LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"),
LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"),
LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"),
LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"),
LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL),
LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL),
LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"),
LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL),
};
const char *lima_ip_name(struct lima_ip *ip)
{
return lima_ip_desc[ip->id].name;
}
static int lima_clk_init(struct lima_device *dev)
{
int err;
unsigned long bus_rate, gpu_rate;
dev->clk_bus = devm_clk_get(dev->dev, "bus");
if (IS_ERR(dev->clk_bus)) {
dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
return PTR_ERR(dev->clk_bus);
}
dev->clk_gpu = devm_clk_get(dev->dev, "core");
if (IS_ERR(dev->clk_gpu)) {
dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
return PTR_ERR(dev->clk_gpu);
}
bus_rate = clk_get_rate(dev->clk_bus);
dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
gpu_rate = clk_get_rate(dev->clk_gpu);
dev_info(dev->dev, "mod rate = %lu", gpu_rate);
if ((err = clk_prepare_enable(dev->clk_bus)))
return err;
if ((err = clk_prepare_enable(dev->clk_gpu)))
goto error_out0;
dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
goto error_out1;
} else if (dev->reset != NULL) {
if ((err = reset_control_deassert(dev->reset)))
goto error_out1;
}
return 0;
error_out1:
clk_disable_unprepare(dev->clk_gpu);
error_out0:
clk_disable_unprepare(dev->clk_bus);
return err;
}
static void lima_clk_fini(struct lima_device *dev)
{
if (dev->reset != NULL)
reset_control_assert(dev->reset);
clk_disable_unprepare(dev->clk_gpu);
clk_disable_unprepare(dev->clk_bus);
}
static int lima_regulator_init(struct lima_device *dev)
{
int ret;
dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
if (IS_ERR(dev->regulator)) {
ret = PTR_ERR(dev->regulator);
dev->regulator = NULL;
if (ret == -ENODEV)
return 0;
dev_err(dev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
ret = regulator_enable(dev->regulator);
if (ret < 0) {
dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
return ret;
}
return 0;
}
static void lima_regulator_fini(struct lima_device *dev)
{
if (dev->regulator)
regulator_disable(dev->regulator);
}
static int lima_init_ip(struct lima_device *dev, int index)
{
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = dev->ip + index;
int offset = desc->offset[dev->id];
bool must = desc->must_have[dev->id];
int err;
if (offset < 0)
return 0;
ip->dev = dev;
ip->id = index;
ip->iomem = dev->iomem + offset;
if (desc->irq_name) {
err = platform_get_irq_byname(dev->pdev, desc->irq_name);
if (err < 0)
goto out;
ip->irq = err;
}
err = desc->init(ip);
if (!err) {
ip->present = true;
return 0;
}
out:
return must ? err : 0;
}
static void lima_fini_ip(struct lima_device *ldev, int index)
{
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = ldev->ip + index;
if (ip->present)
desc->fini(ip);
}
static int lima_init_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
int err;
if ((err = lima_sched_pipe_init(pipe, "gp")))
return err;
pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;
if ((err = lima_gp_pipe_init(dev))) {
lima_sched_pipe_fini(pipe);
return err;
}
return 0;
}
static void lima_fini_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
lima_gp_pipe_fini(dev);
lima_sched_pipe_fini(pipe);
}
static int lima_init_pp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
int err, i;
if ((err = lima_sched_pipe_init(pipe, "pp")))
return err;
for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
struct lima_ip *l2_cache;
if (dev->id == lima_gpu_mali400)
l2_cache = dev->ip + lima_ip_l2_cache0;
else
l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);
if (pp->present && ppmmu->present && l2_cache->present) {
pipe->mmu[pipe->num_mmu++] = ppmmu;
pipe->processor[pipe->num_processor++] = pp;
if (!pipe->l2_cache[i >> 2])
pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
}
}
if (dev->ip[lima_ip_bcast].present) {
pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
}
if ((err = lima_pp_pipe_init(dev))) {
lima_sched_pipe_fini(pipe);
return err;
}
return 0;
}
static void lima_fini_pp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
lima_pp_pipe_fini(dev);
lima_sched_pipe_fini(pipe);
}
int lima_device_init(struct lima_device *ldev)
{
int err, i;
struct resource *res;
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
err = lima_clk_init(ldev);
if (err) {
dev_err(ldev->dev, "clk init fail %d\n", err);
return err;
}
if ((err = lima_regulator_init(ldev))) {
dev_err(ldev->dev, "regulator init fail %d\n", err);
goto err_out0;
}
ldev->empty_vm = lima_vm_create(ldev);
if (!ldev->empty_vm) {
err = -ENOMEM;
goto err_out1;
}
ldev->va_start = 0;
if (ldev->id == lima_gpu_mali450) {
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
&ldev->dlbu_dma, GFP_KERNEL);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
}
}
else
ldev->va_end = LIMA_VA_RESERVE_END;
res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
ldev->iomem = devm_ioremap_resource(ldev->dev, res);
if (IS_ERR(ldev->iomem)) {
dev_err(ldev->dev, "fail to ioremap iomem\n");
err = PTR_ERR(ldev->iomem);
goto err_out3;
}
for (i = 0; i < lima_ip_num; i++) {
err = lima_init_ip(ldev, i);
if (err)
goto err_out4;
}
err = lima_init_gp_pipe(ldev);
if (err)
goto err_out4;
err = lima_init_pp_pipe(ldev);
if (err)
goto err_out5;
return 0;
err_out5:
lima_fini_gp_pipe(ldev);
err_out4:
while (--i >= 0)
lima_fini_ip(ldev, i);
err_out3:
if (ldev->dlbu_cpu)
dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
ldev->dlbu_cpu, ldev->dlbu_dma);
err_out2:
lima_vm_put(ldev->empty_vm);
err_out1:
lima_regulator_fini(ldev);
err_out0:
lima_clk_fini(ldev);
return err;
}
void lima_device_fini(struct lima_device *ldev)
{
int i;
lima_fini_pp_pipe(ldev);
lima_fini_gp_pipe(ldev);
for (i = lima_ip_num - 1; i >= 0; i--)
lima_fini_ip(ldev, i);
if (ldev->dlbu_cpu)
dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
ldev->dlbu_cpu, ldev->dlbu_dma);
lima_vm_put(ldev->empty_vm);
lima_regulator_fini(ldev);
lima_clk_fini(ldev);
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DEVICE_H__
#define __LIMA_DEVICE_H__
#include <drm/drm_device.h>
#include <linux/delay.h>
#include "lima_sched.h"
enum lima_gpu_id {
lima_gpu_mali400 = 0,
lima_gpu_mali450,
lima_gpu_num,
};
enum lima_ip_id {
lima_ip_pmu,
lima_ip_gpmmu,
lima_ip_ppmmu0,
lima_ip_ppmmu1,
lima_ip_ppmmu2,
lima_ip_ppmmu3,
lima_ip_ppmmu4,
lima_ip_ppmmu5,
lima_ip_ppmmu6,
lima_ip_ppmmu7,
lima_ip_gp,
lima_ip_pp0,
lima_ip_pp1,
lima_ip_pp2,
lima_ip_pp3,
lima_ip_pp4,
lima_ip_pp5,
lima_ip_pp6,
lima_ip_pp7,
lima_ip_l2_cache0,
lima_ip_l2_cache1,
lima_ip_l2_cache2,
lima_ip_dlbu,
lima_ip_bcast,
lima_ip_pp_bcast,
lima_ip_ppmmu_bcast,
lima_ip_num,
};
struct lima_device;
struct lima_ip {
struct lima_device *dev;
enum lima_ip_id id;
bool present;
void __iomem *iomem;
int irq;
union {
/* gp/pp */
bool async_reset;
/* l2 cache */
spinlock_t lock;
} data;
};
enum lima_pipe_id {
lima_pipe_gp,
lima_pipe_pp,
lima_pipe_num,
};
struct lima_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
enum lima_gpu_id id;
int num_pp;
void __iomem *iomem;
struct clk *clk_bus;
struct clk *clk_gpu;
struct reset_control *reset;
struct regulator *regulator;
struct lima_ip ip[lima_ip_num];
struct lima_sched_pipe pipe[lima_pipe_num];
struct lima_vm *empty_vm;
uint64_t va_start;
uint64_t va_end;
u32 *dlbu_cpu;
dma_addr_t dlbu_dma;
};
static inline struct lima_device *
to_lima_dev(struct drm_device *dev)
{
return dev->dev_private;
}
int lima_device_init(struct lima_device *ldev);
void lima_device_fini(struct lima_device *ldev);
const char *lima_ip_name(struct lima_ip *ip);
typedef int (*lima_poll_func_t)(struct lima_ip *);
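/*
 * Poll func(ip) until it returns nonzero or timeout_us expires, optionally
 * sleeping sleep_us between polls; timeout_us == 0 polls forever. Returns 0
 * on success or -ETIMEDOUT on timeout.
 */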
static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
int sleep_us, int timeout_us)
{
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
might_sleep_if(sleep_us);
while (1) {
if (func(ip))
return 0;
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
if (sleep_us)
usleep_range((sleep_us >> 2) + 1, sleep_us);
}
return 0;
}
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/io.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_dlbu.h"
#include "lima_vm.h"
#include "lima_regs.h"
#define dlbu_write(reg, data) writel(data, ip->iomem + reg)
#define dlbu_read(reg) readl(ip->iomem + reg)
void lima_dlbu_enable(struct lima_device *dev, int num_pp)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
struct lima_ip *ip = dev->ip + lima_ip_dlbu;
int i, mask = 0;
for (i = 0; i < num_pp; i++) {
struct lima_ip *pp = pipe->processor[i];
mask |= 1 << (pp->id - lima_ip_pp0);
}
dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask);
}
void lima_dlbu_disable(struct lima_device *dev)
{
struct lima_ip *ip = dev->ip + lima_ip_dlbu;
dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0);
}
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
{
dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]);
dlbu_write(LIMA_DLBU_FB_DIM, reg[1]);
dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]);
dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
}
int lima_dlbu_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
return 0;
}
void lima_dlbu_fini(struct lima_ip *ip)
{
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DLBU_H__
#define __LIMA_DLBU_H__
struct lima_ip;
struct lima_device;
void lima_dlbu_enable(struct lima_device *dev, int num_pp);
void lima_dlbu_disable(struct lima_device *dev);
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
int lima_dlbu_init(struct lima_ip *ip);
void lima_dlbu_fini(struct lima_ip *ip);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/lima_drm.h>
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms = 0;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
struct lima_device *ldev = to_lima_dev(dev);
switch (args->param) {
case DRM_LIMA_PARAM_GPU_ID:
switch (ldev->id) {
case lima_gpu_mali400:
args->value = DRM_LIMA_PARAM_GPU_ID_MALI400;
break;
case lima_gpu_mali450:
args->value = DRM_LIMA_PARAM_GPU_ID_MALI450;
break;
default:
args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN;
break;
}
break;
case DRM_LIMA_PARAM_NUM_PP:
args->value = ldev->pipe[lima_pipe_pp].num_processor;
break;
default:
return -EINVAL;
}
return 0;
}
static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_create *args = data;
if (args->flags)
return -EINVAL;
if (args->size == 0)
return -EINVAL;
return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
}
static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_info *args = data;
return lima_gem_get_info(file, args->handle, &args->va, &args->offset);
}
static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_submit *args = data;
struct lima_device *ldev = to_lima_dev(dev);
struct lima_drm_priv *priv = file->driver_priv;
struct drm_lima_gem_submit_bo *bos;
struct lima_sched_pipe *pipe;
struct lima_sched_task *task;
struct lima_ctx *ctx;
struct lima_submit submit = {0};
size_t size;
int err = 0;
if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
return -EINVAL;
if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE))
return -EINVAL;
pipe = ldev->pipe + args->pipe;
if (args->frame_size != pipe->frame_size)
return -EINVAL;
bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
if (!bos)
return -ENOMEM;
size = args->nr_bos * sizeof(*submit.bos);
if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
err = -EFAULT;
goto out0;
}
task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
if (!task) {
err = -ENOMEM;
goto out0;
}
task->frame = task + 1;
if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
err = -EFAULT;
goto out1;
}
err = pipe->task_validate(pipe, task);
if (err)
goto out1;
ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
if (!ctx) {
err = -ENOENT;
goto out1;
}
submit.pipe = args->pipe;
submit.bos = bos;
submit.lbos = (void *)bos + size;
submit.nr_bos = args->nr_bos;
submit.task = task;
submit.ctx = ctx;
submit.flags = args->flags;
submit.in_sync[0] = args->in_sync[0];
submit.in_sync[1] = args->in_sync[1];
submit.out_sync = args->out_sync;
err = lima_gem_submit(file, &submit);
lima_ctx_put(ctx);
out1:
if (err)
kmem_cache_free(pipe->task_slab, task);
out0:
kvfree(bos);
return err;
}
static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_wait *args = data;
if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE))
return -EINVAL;
return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
}
static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_ctx_create *args = data;
struct lima_drm_priv *priv = file->driver_priv;
struct lima_device *ldev = to_lima_dev(dev);
return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
}
static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_ctx_create *args = data;
struct lima_drm_priv *priv = file->driver_priv;
return lima_ctx_free(&priv->ctx_mgr, args->id);
}
static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
{
int err;
struct lima_drm_priv *priv;
struct lima_device *ldev = to_lima_dev(dev);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->vm = lima_vm_create(ldev);
if (!priv->vm) {
err = -ENOMEM;
goto err_out0;
}
lima_ctx_mgr_init(&priv->ctx_mgr);
file->driver_priv = priv;
return 0;
err_out0:
kfree(priv);
return err;
}
static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct lima_drm_priv *priv = file->driver_priv;
lima_ctx_mgr_fini(&priv->ctx_mgr);
lima_vm_put(priv->vm);
kfree(priv);
}
static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct file_operations lima_drm_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
.mmap = lima_gem_mmap,
};
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
.postclose = lima_drm_driver_postclose,
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
.gem_free_object_unlocked = lima_gem_free_object,
.gem_open_object = lima_gem_object_open,
.gem_close_object = lima_gem_object_close,
.gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
.major = 1,
.minor = 0,
.patchlevel = 0,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
.gem_prime_mmap = lima_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
struct drm_device *ddev;
int err;
ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
if (!ldev)
return -ENOMEM;
ldev->pdev = pdev;
ldev->dev = &pdev->dev;
ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, ldev);
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
ddev->dev_private = ldev;
ldev->ddev = ddev;
err = lima_device_init(ldev);
if (err) {
dev_err(&pdev->dev, "Fatal error during GPU init\n");
goto err_out0;
}
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
goto err_out1;
return 0;
err_out1:
lima_device_fini(ldev);
err_out0:
drm_dev_put(ddev);
return err;
}
static int lima_pdev_remove(struct platform_device *pdev)
{
struct lima_device *ldev = platform_get_drvdata(pdev);
struct drm_device *ddev = ldev->ddev;
drm_dev_unregister(ddev);
lima_device_fini(ldev);
drm_dev_put(ddev);
return 0;
}
static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
{ .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
.remove = lima_pdev_remove,
.driver = {
.name = "lima",
.of_match_table = dt_match,
},
};
static int __init lima_init(void)
{
int ret;
ret = lima_sched_slab_init();
if (ret)
return ret;
ret = platform_driver_register(&lima_platform_driver);
if (ret)
lima_sched_slab_fini();
return ret;
}
module_init(lima_init);
static void __exit lima_exit(void)
{
platform_driver_unregister(&lima_platform_driver);
lima_sched_slab_fini();
}
module_exit(lima_exit);
MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DRV_H__
#define __LIMA_DRV_H__
#include <drm/drm_file.h>
#include "lima_ctx.h"
extern int lima_sched_timeout_ms;
struct lima_vm;
struct lima_bo;
struct lima_sched_task;
struct drm_lima_gem_submit_bo;
struct lima_drm_priv {
struct lima_vm *vm;
struct lima_ctx_mgr ctx_mgr;
};
struct lima_submit {
struct lima_ctx *ctx;
int pipe;
u32 flags;
struct drm_lima_gem_submit_bo *bos;
struct lima_bo **lbos;
u32 nr_bos;
u32 in_sync[2];
u32 out_sync;
struct lima_sched_task *task;
};
static inline struct lima_drm_priv *
to_lima_drm_priv(struct drm_file *file)
{
return file->driver_priv;
}
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/idr.h>
#include <linux/sync_file.h>
#include <linux/pfn_t.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/lima_drm.h>
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_gem_prime.h"
#include "lima_vm.h"
#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
struct lima_bo *bo;
struct lima_device *ldev = to_lima_dev(dev);
bo = lima_bo_create(ldev, size, flags, NULL, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
err = drm_gem_handle_create(file, &bo->gem, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put_unlocked(&bo->gem);
return err;
}
void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
lima_bo_destroy(bo);
}
int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
return lima_vm_bo_add(vm, bo, true);
}
void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
lima_vm_bo_del(vm, bo);
}
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
bo = to_lima_bo(obj);
*va = lima_vm_get_va(vm, bo);
err = drm_gem_create_mmap_offset(obj);
if (!err)
*offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
return err;
}
static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct lima_bo *bo = to_lima_bo(obj);
pfn_t pfn;
pgoff_t pgoff;
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
return vmf_insert_mixed(vma, vmf->address, pfn);
}
const struct vm_operations_struct lima_gem_vm_ops = {
.fault = lima_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
void lima_set_vma_flags(struct vm_area_struct *vma)
{
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_page_prot = pgprot_writecombine(prot);
}
int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
lima_set_vma_flags(vma);
return 0;
}
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
bool write, bool explicit)
{
int err = 0;
if (!write) {
err = reservation_object_reserve_shared(bo->gem.resv, 1);
if (err)
return err;
}
/* explicit sync use user passed dep fence */
if (explicit)
return 0;
/* implicit sync use bo fence in resv obj */
if (write) {
unsigned nr_fences;
struct dma_fence **fences;
int i;
err = reservation_object_get_fences_rcu(
bo->gem.resv, NULL, &nr_fences, &fences);
if (err || !nr_fences)
return err;
for (i = 0; i < nr_fences; i++) {
err = lima_sched_task_add_dep(task, fences[i]);
if (err)
break;
}
/* for error case free remaining fences */
for ( ; i < nr_fences; i++)
dma_fence_put(fences[i]);
kfree(fences);
}
else {
struct dma_fence *fence;
fence = reservation_object_get_excl_rcu(bo->gem.resv);
if (fence) {