Commit 729f80eb authored by Alyssa Rosenzweig's avatar Alyssa Rosenzweig 💜

Merge branch 'miniwrap' into 'develop'

Miniwrap

See merge request !17
parents 6b2a7f2b a0db8722
A simple program written in C to learn more about the ioctls used by the
Bifrost GPU's kernel driver, so we can start drawing some triangles.
Eventually, this code will become historical.
import gdb
import gdb.printing
class MaliPtr:
    """gdb pretty-printer that renders a GPU pointer (mali_ptr) as hex."""

    def __init__(self, val):
        # The wrapped value (a gdb.Value in gdb, any int-like elsewhere).
        self._ptr = val

    def to_string(self):
        # gdb calls this to obtain the display string for the value.
        return hex(self._ptr)
# Register the printer collection with gdb: any value whose type name
# matches ^mali_ptr$ will be rendered by MaliPtr. Passing None registers
# the collection globally rather than for a specific objfile.
pp = gdb.printing.RegexpCollectionPrettyPrinter("panloader")
pp.add_printer('gpu_ptr', '^mali_ptr$', MaliPtr)
gdb.printing.register_pretty_printer(None, pp)
......@@ -548,7 +548,6 @@ struct mali_jd_atom_v2 {
u8 :8;
mali_jd_core_req core_req; /**< core requirements */
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_jd_atom_v2, 48, 48);
/**
* enum mali_error - Mali error codes shared with userspace
......@@ -580,7 +579,6 @@ union mali_ioctl_header {
u64 :64;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(union mali_ioctl_header, 8, 8);
struct mali_ioctl_get_version {
union mali_ioctl_header header;
......@@ -588,7 +586,6 @@ struct mali_ioctl_get_version {
u16 minor; /* [out] */
u32 :32;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_get_version, 16, 16);
struct mali_ioctl_mem_alloc {
union mali_ioctl_header header;
......@@ -605,7 +602,6 @@ struct mali_ioctl_mem_alloc {
u32 :32;
u16 :16;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_mem_alloc, 56, 56);
struct mali_mem_import_user_buffer {
u64 ptr;
......@@ -629,7 +625,6 @@ struct mali_ioctl_mem_import {
u64 gpu_va;
u64 va_pages;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_mem_import, 48, 48);
struct mali_ioctl_mem_commit {
union mali_ioctl_header header;
......@@ -640,7 +635,6 @@ struct mali_ioctl_mem_commit {
u32 result_subcode;
u32 :32;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_mem_commit, 32, 32);
enum mali_ioctl_mem_query_type {
MALI_MEM_QUERY_COMMIT_SIZE = 1,
......@@ -657,7 +651,6 @@ struct mali_ioctl_mem_query {
/* [out] */
u64 value;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_mem_query, 32, 32);
struct mali_ioctl_mem_free {
union mali_ioctl_header header;
......@@ -698,14 +691,12 @@ struct mali_ioctl_sync {
} type :8;
u64 :56;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_sync, 40, 40);
struct mali_ioctl_set_flags {
union mali_ioctl_header header;
u32 create_flags; /* [in] */
u32 :32;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_set_flags, 16, 16);
struct mali_ioctl_stream_create {
union mali_ioctl_header header;
......@@ -715,7 +706,6 @@ struct mali_ioctl_stream_create {
s32 fd;
u32 :32;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_stream_create, 48, 48);
struct mali_ioctl_job_submit {
union mali_ioctl_header header;
......@@ -724,14 +714,12 @@ struct mali_ioctl_job_submit {
u32 nr_atoms;
u32 stride;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_job_submit, 24, 24);
struct mali_ioctl_get_context_id {
union mali_ioctl_header header;
/* [out] */
s64 id;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_get_context_id, 16, 16);
#undef PAD_CPU_PTR
......
This diff is collapsed.
......@@ -258,7 +258,6 @@ struct mali_ioctl_gpu_props_reg_dump {
/** This must be last member of the structure */
struct mali_gpu_coherent_group_info coherency_info;
} __attribute__((packed));
ASSERT_SIZEOF_TYPE(struct mali_ioctl_gpu_props_reg_dump, 536, 536);
#define MALI_IOCTL_GPU_PROPS_REG_DUMP (_IOWR(0x82, 14, struct mali_ioctl_gpu_props_reg_dump))
......
......@@ -31,65 +31,4 @@ typedef int16_t s16;
typedef int32_t s32;
typedef int64_t s64;
/* ASSERT_SIZEOF_TYPE:
*
* Forces compilation to fail if the size of the struct differs from the given
* arch-specific size that was observed during tracing. A size of 0 indicates
that the ioctl has not been observed in a trace yet, and thus its size is
unconfirmed.
*
* Useful for preventing mistakenly extending the length of an ioctl struct and
* thus, causing all members part of said extension to be located at incorrect
* memory locations.
*/
/* On LP64 (64-bit pointer) builds, check against the observed 64-bit size;
 * otherwise check the 32-bit size. A size of 0 disables the check (ioctl
 * not yet observed in a trace). */
#ifdef __LP64__
#define ASSERT_SIZEOF_TYPE(type__, size32__, size64__) \
_Static_assert(size64__ == 0 || sizeof(type__) == size64__, \
#type__ " does not match expected size " #size64__)
#else
#define ASSERT_SIZEOF_TYPE(type__, size32__, size64__) \
_Static_assert(size32__ == 0 || sizeof(type__) == size32__, \
#type__ " does not match expected size " #size32__)
#endif
#define __PASTE_TOKENS(a, b) a ## b
/*
 * PASTE_TOKENS(a, b):
 *
 * Expands a and b, then concatenates the resulting tokens
 */
#define PASTE_TOKENS(a, b) __PASTE_TOKENS(a, b)

/* Element count of a true array (not valid on pointer parameters). */
#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
/* Byte offset of a member within a struct/union type. */
#define OFFSET_OF(type, member) __builtin_offsetof(type, member)
/* Human-readable boolean for log output. */
#define YES_NO(b) ((b) ? "Yes" : "No")

/* Declare a uniquely-named (per-line) function run at library load/unload. */
#define PANLOADER_CONSTRUCTOR \
static void __attribute__((constructor)) PASTE_TOKENS(__panloader_ctor_l, __LINE__)()
#define PANLOADER_DESTRUCTOR \
static void __attribute__((destructor)) PASTE_TOKENS(__panloader_dtor_l, __LINE__)()

/* Sleep for n milliseconds. The argument is parenthesized so expressions
 * scale correctly: previously msleep(a + b) expanded to usleep(a + b*1000). */
#define msleep(n) (usleep((n) * 1000))
/* Semantic logging type.
*
* Raw: for raw messages to be printed as is.
* Message: for helpful information to be commented out in replays.
* Property: for properties of a struct
*
* Use one of panwrap_log, panwrap_msg, or panwrap_prop as syntax sugar.
*/
/* Semantic tag passed to panwrap_log_typed(); controls how a message is
 * rendered in the replay output (raw, commented-out message, or property). */
enum panwrap_log_type {
PANWRAP_RAW,
PANWRAP_MESSAGE,
PANWRAP_PROPERTY
};
/* Syntax sugar: log with a fixed semantic type. */
#define panwrap_log(...) panwrap_log_typed(PANWRAP_RAW, __VA_ARGS__)
#define panwrap_msg(...) panwrap_log_typed(PANWRAP_MESSAGE, __VA_ARGS__)
#define panwrap_prop(...) panwrap_log_typed(PANWRAP_PROPERTY, __VA_ARGS__)
#endif /* __PANLOADER_UTIL_H__ */
......@@ -10,8 +10,6 @@ is_android = cc.get_define('__ANDROID__') == '1'
test_cc_flags = [
'-O3',
# '-pg',
# '-g',
'-Wall',
'-Wno-unused-parameter',
'-Wno-sign-compare',
......
......@@ -2,8 +2,6 @@ srcs = [
'panwrap-syscall.c',
'panwrap-util.c',
'panwrap-mmap.c',
'panwrap-decoder.c',
'panwrap-shader.c'
]
shared_library(
......@@ -12,7 +10,6 @@ shared_library(
include_directories: inc,
dependencies: [
common_dep,
spd_dep
],
install: true,
)
This diff is collapsed.
/*
* © Copyright 2017-2018 The Panfrost Community
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained
* from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.
*
*/
#ifndef PANWRAP_DECODER_H
#define PANWRAP_DECODER_H
#include <mali-ioctl.h>
#include <mali-job.h>
#include "panwrap.h"
/* Replay the job chain rooted at jc_gpu_va; the flag presumably selects the
 * Bifrost (vs Midgard) job layout -- TODO confirm against the decoder. */
int panwrap_replay_jc(mali_ptr jc_gpu_va, bool bifrost);
/* Replay a soft-job chain at jc_gpu_va. */
int panwrap_replay_soft_replay(mali_ptr jc_gpu_va);
#endif /* !PANWRAP_DECODER_H */
......@@ -31,128 +31,23 @@
/* Pending GPU allocations (tracked but not yet mmapped) and live mappings. */
static LIST_HEAD(allocations);
static LIST_HEAD(mmaps);
/* Pair a flag's value with its stringified name for decoded logging. */
#define FLAG_INFO(flag) { flag, #flag }
/* mmap() flag names; list is terminated by the zero entry. */
static const struct panwrap_flag_info mmap_flags_flag_info[] = {
FLAG_INFO(MAP_SHARED),
FLAG_INFO(MAP_PRIVATE),
FLAG_INFO(MAP_ANONYMOUS),
FLAG_INFO(MAP_DENYWRITE),
FLAG_INFO(MAP_FIXED),
FLAG_INFO(MAP_GROWSDOWN),
FLAG_INFO(MAP_HUGETLB),
FLAG_INFO(MAP_LOCKED),
FLAG_INFO(MAP_NONBLOCK),
FLAG_INFO(MAP_NORESERVE),
FLAG_INFO(MAP_POPULATE),
FLAG_INFO(MAP_STACK),
#if MAP_UNINITIALIZED != 0
FLAG_INFO(MAP_UNINITIALIZED),
#endif
{}
};
/* mmap() protection bit names; zero-terminated as above. */
static const struct panwrap_flag_info mmap_prot_flag_info[] = {
FLAG_INFO(PROT_EXEC),
FLAG_INFO(PROT_READ),
FLAG_INFO(PROT_WRITE),
{}
};
#undef FLAG_INFO
/* Render a GPU address as a C expression usable in replay output:
 * "alloc_gpu_va_N + off" when the address falls inside a known mapping or a
 * pending (not-yet-mmapped) allocation, otherwise the raw hex address.
 * Returns a 128-byte heap buffer; the caller owns it and should free it. */
char* pointer_as_memory_reference(mali_ptr ptr)
{
struct panwrap_mapped_memory *mapped;
/* NOTE(review): malloc result is used unchecked by the snprintf calls */
char *out = malloc(128);
/* First check for SAME_VA mappings, then look for non-SAME_VA
* mappings, then for unmapped regions */
/* ptr == (uintptr_t) ptr guards the CPU-pointer cast against truncation
* when the GPU VA does not fit in a host pointer */
if ((ptr == (uintptr_t) ptr && (mapped = panwrap_find_mapped_mem_containing((void*) (uintptr_t) ptr))) ||
(mapped = panwrap_find_mapped_gpu_mem_containing(ptr))) {
snprintf(out, 128, "alloc_gpu_va_%d + %d", mapped->allocation_number, (int) (ptr - mapped->gpu_va));
return out;
}
struct panwrap_allocated_memory *pos, *mem = NULL;
/* Find the pending unmapped allocation for the memory */
list_for_each_entry(pos, &allocations, node) {
if (ptr >= pos->gpu_va && ptr < (pos->gpu_va + pos->length)) {
mem = pos;
break;
}
}
/* NOTE(review): declared but unused in this function -- verify intent */
extern char *log_base;
if (mem) {
snprintf(out, 128, "alloc_gpu_va_%d + %d", mem->allocation_number, (int) (ptr - mem->gpu_va));
return out;
}
/* Just use the raw address if other options are exhausted */
snprintf(out, 128, MALI_PTR_FMT, ptr);
return out;
}
/* On job submission, there will be a -lot- of structures built up in memory.
* While we could decode them, for triangle #1 it's easier to just dump them
* all verbatim, as hex arrays, and memcpy them into the allocated memory
* spaces. The main issue is address fix up, which we also handle here. */
void replay_memory_specific(struct panwrap_mapped_memory *pos, int offset, int len)
void replay_memory()
{
/* If we don't have write access, no replay :) */
if (!(pos->flags & MALI_MEM_PROT_CPU_WR)) return;
/* Tracking these types of mappings would require more
* sophistication to avoid faulting when reading pages that
* haven't been committed yet, so don't try and read them.
*/
if (pos->flags & MALI_MEM_GROW_ON_GPF) return;
if (pos->flags & MALI_MEM_PROT_GPU_EX) {
if (offset)
panwrap_msg("Shader sync not supported!\n");
struct panwrap_mapped_memory *pos;
/* Shader memory get dumped but not replayed, as the
* dis/assembler is setup in-tree as it is. */
list_for_each_entry(pos, &mmaps, node) {
/* If we don't have write access, no replay */
if (!(pos->flags & MALI_MEM_PROT_CPU_WR)) continue;
if (pos->flags & MALI_MEM_GROW_ON_GPF) continue;
/* Dump to file */
char filename[128];
snprintf(filename, 128, "%s.bin", pos->name);
snprintf(filename, 128, "%s/%s.bin", log_base, pos->name);
FILE *fp = fopen(filename, "wb");
fwrite(pos->addr, 1, len, fp);
fwrite(pos->addr, 1, pos->length, fp);
fclose(fp);
const char *prefix = "";
panwrap_log("%s FILE *f_%s = fopen(\"%s\", \"rb\");\n", prefix, pos->name, filename);
panwrap_log("%s fread(%s, 1, %d, f_%s);\n", prefix, pos->name, len, pos->name);
panwrap_log("%s fclose(f_%s);\n", prefix, pos->name);
} else {
/* Fill it with dumped memory, skipping zeroes */
uint32_t *array = (uint32_t *) pos->addr;
for (uint32_t i = offset / sizeof(uint32_t); i < (offset + len) / sizeof(uint32_t); ++i) {
if (array[i])
panwrap_log("%s%s[%d] = 0x%x;\n", pos->touched[i] ? "// " : "", pos->name, i, array[i]);
}
/* Touch what we have written */
/* TODO: Implement correctly */
// memset(pos->touched + (offset / sizeof(uint32_t)), 1, len / sizeof(uint32_t));
}
panwrap_log("\n");
}
void replay_memory()
{
struct panwrap_mapped_memory *pos;
list_for_each_entry(pos, &mmaps, node) {
replay_memory_specific(pos, 0, pos->length);
}
}
......@@ -166,8 +61,6 @@ void panwrap_track_allocation(mali_ptr addr, int flags, int number, size_t lengt
mem->allocation_number = number;
mem->length = length;
panwrap_msg("%llx\n", addr);
list_add(&mem->node, &allocations);
/* XXX: Hacky workaround for cz's board */
......@@ -175,6 +68,8 @@ void panwrap_track_allocation(mali_ptr addr, int flags, int number, size_t lengt
panwrap_track_mmap(addr, (void *) (uintptr_t) addr, length, PROT_READ | PROT_WRITE, MAP_SHARED);
}
extern FILE * log_output;
void panwrap_track_mmap(mali_ptr gpu_va, void *addr, size_t length,
int prot, int flags)
{
......@@ -189,21 +84,13 @@ void panwrap_track_mmap(mali_ptr gpu_va, void *addr, size_t length,
}
}
if (!mem) {
panwrap_msg("Error: Untracked gpu memory " MALI_PTR_FMT " mapped to %p\n",
gpu_va, addr);
panwrap_msg("\tprot = ");
panwrap_log_decoded_flags(mmap_prot_flag_info, prot);
panwrap_log_cont("\n");
panwrap_msg("\tflags = ");
panwrap_log_decoded_flags(mmap_flags_flag_info, flags);
panwrap_log_cont("\n");
printf("// Untracked...\n");
return;
}
mapped_mem = malloc(sizeof(*mapped_mem));
list_init(&mapped_mem->node);
/* Try not to break other systems... there are so many configurations
* of userspaces/kernels/architectures and none of them are compatible,
* ugh. */
......@@ -221,29 +108,20 @@ void panwrap_track_mmap(mali_ptr gpu_va, void *addr, size_t length,
mapped_mem->prot = prot;
mapped_mem->flags = mem->flags;
mapped_mem->allocation_number = mem->allocation_number;
mapped_mem->touched = calloc(length, sizeof(bool));
list_add(&mapped_mem->node, &mmaps);
list_del(&mem->node);
free(mem);
panwrap_msg("va %d mapped to %" PRIx64 "\n", mapped_mem->allocation_number,
mapped_mem->gpu_va);
/* Generate somewhat semantic name for the region */
snprintf(mapped_mem->name, sizeof(mapped_mem->name),
"%s_%d",
mem->flags & MALI_MEM_PROT_GPU_EX ? "shader" : "memory",
mapped_mem->allocation_number);
/* Map region itself */
panwrap_log("uint32_t *%s = mmap64(NULL, %zd, %d, %d, fd, alloc_gpu_va_%d);\n\n",
mapped_mem->name, length, prot, flags, mapped_mem->allocation_number);
panwrap_log("if (%s == MAP_FAILED) printf(\"Error mapping %s\\n\");\n\n",
mapped_mem->name, mapped_mem->name);
/* Track it */
fprintf(log_output, "MMAP %" PRIx64" %s.bin\n", mapped_mem->gpu_va, mapped_mem->name);
}
void panwrap_track_munmap(void *addr)
......@@ -252,7 +130,7 @@ void panwrap_track_munmap(void *addr)
panwrap_find_mapped_mem(addr);
if (!mapped_mem) {
panwrap_msg("Unknown mmap %p unmapped\n", addr);
printf("// Unknown mmap %p unmapped\n", addr);
return;
}
......@@ -272,124 +150,3 @@ struct panwrap_mapped_memory *panwrap_find_mapped_mem(void *addr)
return NULL;
}
/* Find the tracked mapping whose CPU VA range contains addr, or NULL if
 * addr lies in no known mapping. */
struct panwrap_mapped_memory *panwrap_find_mapped_mem_containing(void *addr)
{
	struct panwrap_mapped_memory *mapping;

	list_for_each_entry(mapping, &mmaps, node) {
		bool past_start = addr >= mapping->addr;
		bool before_end = addr < mapping->addr + mapping->length;

		if (past_start && before_end)
			return mapping;
	}

	return NULL;
}
/* Find the tracked mapping whose GPU VA exactly equals addr, or NULL. */
struct panwrap_mapped_memory *panwrap_find_mapped_gpu_mem(mali_ptr addr)
{
	struct panwrap_mapped_memory *candidate;

	list_for_each_entry(candidate, &mmaps, node) {
		if (candidate->gpu_va == addr)
			return candidate;
	}

	return NULL;
}
/* Find the tracked mapping whose GPU VA range contains addr, or NULL. */
struct panwrap_mapped_memory *panwrap_find_mapped_gpu_mem_containing(mali_ptr addr)
{
	struct panwrap_mapped_memory *mapping;

	list_for_each_entry(mapping, &mmaps, node) {
		bool past_start = addr >= mapping->gpu_va;
		bool before_end = addr < mapping->gpu_va + mapping->length;

		if (past_start && before_end)
			return mapping;
	}

	return NULL;
}
/* Assert that `size` bytes of GPU memory at gpu_va equal the expected
 * byte sequence `data`; on the first mismatch, dump both buffers and
 * abort the process. */
void
panwrap_assert_gpu_same(const struct panwrap_mapped_memory *mem,
			mali_ptr gpu_va, size_t size,
			const unsigned char *data)
{
	const char *actual = panwrap_fetch_gpu_mem(mem, gpu_va, size);

	for (size_t i = 0; i < size; i++) {
		if (actual[i] == data[i])
			continue;

		panwrap_msg("At " MALI_PTR_FMT ", expected:\n",
			    gpu_va);
		panwrap_indent++;
		panwrap_log_hexdump_trimmed(data, size);
		panwrap_indent--;
		panwrap_msg("Instead got:\n");
		panwrap_indent++;
		panwrap_log_hexdump_trimmed(actual, size);
		panwrap_indent--;
		abort();
	}
}
/* Assert that `size` bytes of GPU memory at gpu_va are all zero; on the
 * first nonzero byte, dump the buffer and abort the process. */
void
panwrap_assert_gpu_mem_zero(const struct panwrap_mapped_memory *mem,
			    mali_ptr gpu_va, size_t size)
{
	const char *bytes = panwrap_fetch_gpu_mem(mem, gpu_va, size);

	for (size_t i = 0; i < size; i++) {
		if (bytes[i] == '\0')
			continue;

		panwrap_msg("At " MALI_PTR_FMT ", expected all 0 but got:\n",
			    gpu_va);
		panwrap_indent++;
		panwrap_log_hexdump_trimmed(bytes, size);
		panwrap_indent--;
		abort();
	}
}
/* Report an invalid GPU memory fetch (unmapped VA, out-of-bounds length, or
 * not CPU-readable) with as much context as possible, then abort.
 * line/filename identify the call site (supplied by panwrap_fetch_gpu_mem).
 * Never returns. */
void __attribute__((noreturn))
__panwrap_fetch_mem_err(const struct panwrap_mapped_memory *mem,
mali_ptr gpu_va, size_t size,
int line, const char *filename)
{
/* Reset indentation so the report prints flush-left regardless of caller */
panwrap_indent = 0;
panwrap_msg("\n");
panwrap_msg("INVALID GPU MEMORY ACCESS @"
MALI_PTR_FMT " - " MALI_PTR_FMT ":\n",
gpu_va, gpu_va + size);
panwrap_msg("Occurred at line %d of %s\n", line, filename);
if (mem) {
/* The VA hit a known mapping: explain why the access still failed */
panwrap_msg("Mapping information:\n");
panwrap_indent++;
panwrap_msg("CPU VA: %p - %p\n",
mem->addr, mem->addr + mem->length - 1);
panwrap_msg("GPU VA: " MALI_PTR_FMT " - " MALI_PTR_FMT "\n",
mem->gpu_va,
(mali_ptr)(mem->gpu_va + mem->length - 1));
panwrap_msg("Length: %zu bytes\n", mem->length);
panwrap_indent--;
if (!(mem->prot & MALI_MEM_PROT_CPU_RD))
panwrap_msg("Memory is only accessible from GPU\n");
else
panwrap_msg("Access length was out of bounds\n");
} else {
/* Unknown VA: list every mapping we do know about to aid debugging */
panwrap_msg("GPU memory is not contained within known GPU VA mappings\n");
struct panwrap_mapped_memory *pos;
list_for_each_entry(pos, &mmaps, node) {
panwrap_msg(MALI_PTR_FMT " (%p)\n", pos->gpu_va, pos->addr);
}
}
/* Flush before abort so the report is not lost in buffered output */
panwrap_log_flush();
abort();
}
......@@ -41,92 +41,16 @@ struct panwrap_mapped_memory {
int allocation_number;
char name[32];
bool* touched;
struct list node;
};
/* Set this if you don't want your life to be hell while debugging */
#define DISABLE_CPU_CACHING 1
/* Mark the uint32_t words of [addr, addr + sz) (less offset) as touched in
 * mem->touched so later raw dumps can skip already-replayed words. */
#define TOUCH_MEMSET(mem, addr, sz, offset) \
memset((mem)->touched + (((addr) - (mem)->gpu_va) / sizeof(uint32_t)), 1, ((sz) - (offset)) / sizeof(uint32_t)); \
panwrap_log("\n");
/* Touch a region and emit a replay-side pandev_upload() for it; dyn selects
 * a dynamic destination offset (-1) instead of the fixed mapping offset. */
#define TOUCH_LEN(mem, addr, sz, ename, number, dyn) \
TOUCH_MEMSET(mem, addr, sz, 0) \
panwrap_log("mali_ptr %s_%d_p = pandev_upload(%d, NULL, alloc_gpu_va_%d, %s, &%s_%d, sizeof(%s_%d), false);\n\n", ename, number, (dyn) ? -1 : (int) (((addr) - (mem)->gpu_va)), (mem)->allocation_number, (mem)->name, ename, number, ename, number);
/* Job payloads are touched somewhat different than other structures, due to the
 * variable lengths and odd packing requirements */
#define TOUCH_JOB_HEADER(mem, addr, sz, offset, number) \
TOUCH_MEMSET(mem, addr, sz, offset) \
panwrap_log("mali_ptr job_%d_p = pandev_upload(-1, NULL, alloc_gpu_va_%d, %s, &job_%d, sizeof(job_%d) - %d, true);\n\n", number, mem->allocation_number, mem->name, number, number, offset);
/* Touch and upload via pandev_upload_sequential -- presumably places the
 * data at the allocator's next sequential offset (TODO confirm). */
#define TOUCH_SEQUENTIAL(mem, addr, sz, ename, number) \
TOUCH_MEMSET(mem, addr, sz, 0) \
panwrap_log("mali_ptr %s_%d_p = pandev_upload_sequential(alloc_gpu_va_%d, %s, &%s_%d, sizeof(%s_%d));\n\n", ename, number, mem->allocation_number, mem->name, ename, number, ename, number);
/* Syntax sugar for sanely sized objects */
#define TOUCH(mem, addr, obj, ename, number, dyn) \
TOUCH_LEN(mem, addr, sizeof(typeof(obj)), ename, number, dyn)
void replay_memory();
void replay_memory_specific(struct panwrap_mapped_memory *pos, int offset, int len);
char *pointer_as_memory_reference(mali_ptr ptr);
void panwrap_track_allocation(mali_ptr gpu_va, int flags, int number, size_t length);
void panwrap_track_mmap(mali_ptr gpu_va, void *addr, size_t length,
int prot, int flags);
void panwrap_track_munmap(void *addr);
struct panwrap_mapped_memory *panwrap_find_mapped_mem(void *addr);
struct panwrap_mapped_memory *panwrap_find_mapped_mem_containing(void *addr);
struct panwrap_mapped_memory *panwrap_find_mapped_gpu_mem(mali_ptr addr);
struct panwrap_mapped_memory *panwrap_find_mapped_gpu_mem_containing(mali_ptr addr);
void panwrap_assert_gpu_same(const struct panwrap_mapped_memory *mem,
mali_ptr gpu_va, size_t size,
const unsigned char *data);
void panwrap_assert_gpu_mem_zero(const struct panwrap_mapped_memory *mem,
mali_ptr gpu_va, size_t size);
void __attribute__((noreturn))
__panwrap_fetch_mem_err(const struct panwrap_mapped_memory *mem,
mali_ptr gpu_va, size_t size,
int line, const char *filename);
/* Translate a GPU VA into its CPU-side pointer, validating that the whole
 * [gpu_va, gpu_va + size) range lies inside a CPU-readable mapping. On any
 * violation, reports via __panwrap_fetch_mem_err (which aborts).
 * line/filename identify the call site for the error report. */
static inline void *
__panwrap_fetch_gpu_mem(const struct panwrap_mapped_memory *mem,
			mali_ptr gpu_va, size_t size,
			int line, const char *filename)
{
	if (!mem)
		mem = panwrap_find_mapped_gpu_mem_containing(gpu_va);

	if (mem) {
		size_t offset = gpu_va - mem->gpu_va;
		bool in_bounds = size + offset <= mem->length;
		bool readable = (mem->prot & MALI_MEM_PROT_CPU_RD) != 0;

		if (in_bounds && readable)
			return mem->addr + offset;
	}

	/* Does not return */
	__panwrap_fetch_mem_err(mem, gpu_va, size, line, filename);
}
/* Fetch a validated CPU pointer for GPU memory, recording the caller's
 * file/line so invalid accesses can be attributed in the error report. */
#define panwrap_fetch_gpu_mem(mem, gpu_va, size) \
__panwrap_fetch_gpu_mem(mem, gpu_va, size, __LINE__, __FILE__)
/* Returns a validated pointer to mapped GPU memory with the given pointer type,
 * size automatically determined from the pointer type
 */
#define PANWRAP_PTR(mem, gpu_va, type) \
((type*)(__panwrap_fetch_gpu_mem(mem, gpu_va, sizeof(type), \
__LINE__, __FILE__)))
/* Usage: <variable type> PANWRAP_PTR_VAR(name, mem, gpu_va) */
#define PANWRAP_PTR_VAR(name, mem, gpu_va) \
name = __panwrap_fetch_gpu_mem(mem, gpu_va, sizeof(*name), \
__LINE__, __FILE__)
void replay_memory(void);
#endif /* __MMAP_TRACE_H__ */
/*
* © Copyright 2018 The Panfrost Community
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained
* from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA 02110-1301, USA.