Commit 90b4045f authored by Keith Whitwell

wip

parent a80e33f4
@@ -34,6 +34,7 @@
#include "pipe/p_screen.h"
#include "util/u_debug.h"
#include "util/u_atomic.h"
#include "util/u_box.h"
#ifdef __cplusplus
@@ -95,8 +96,8 @@ pipe_buffer_reference(struct pipe_buffer **ptr, struct pipe_buffer *buf)
assert(ptr);
old_buf = *ptr;
if (pipe_reference(&(*ptr)->reference, &buf->reference))
old_buf->screen->buffer_destroy(old_buf);
if (pipe_reference(&(*ptr)->base.reference, &buf->base.reference))
old_buf->base.screen->resource_destroy(&old_buf->base);
*ptr = buf;
}
@@ -106,7 +107,7 @@ pipe_surface_reference(struct pipe_surface **ptr, struct pipe_surface *surf)
struct pipe_surface *old_surf = *ptr;
if (pipe_reference(&(*ptr)->reference, &surf->reference))
old_surf->texture->screen->tex_surface_destroy(old_surf);
old_surf->resource->screen->tex_surface_destroy(old_surf);
*ptr = surf;
}
@@ -115,8 +116,18 @@ pipe_texture_reference(struct pipe_texture **ptr, struct pipe_texture *tex)
{
struct pipe_texture *old_tex = *ptr;
if (pipe_reference(&(*ptr)->base.reference, &tex->base.reference))
old_tex->base.screen->resource_destroy(&old_tex->base);
*ptr = tex;
}
static INLINE void
pipe_resource_reference(struct pipe_resource **ptr, struct pipe_resource *tex)
{
struct pipe_resource *old_tex = *ptr;
if (pipe_reference(&(*ptr)->reference, &tex->reference))
old_tex->screen->texture_destroy(old_tex);
old_tex->screen->resource_destroy(old_tex);
*ptr = tex;
}
@@ -129,87 +140,116 @@ static INLINE struct pipe_buffer *
pipe_buffer_create( struct pipe_screen *screen,
unsigned alignment, unsigned usage, unsigned size )
{
return screen->buffer_create(screen, alignment, usage, size);
struct pipe_buffer buffer;
memset(&buffer, 0, sizeof buffer);
buffer.base.target = PIPE_BUFFER;
buffer.base.format = PIPE_FORMAT_R8_UNORM; /* want TYPELESS or similar */
buffer.base.usage = usage;
buffer.base.width0 = size;
buffer.base.height0 = 1;
buffer.base.depth0 = 1;
return (struct pipe_buffer *)screen->resource_create(screen, &buffer.base);
}
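For illustration only, a hedged sketch of what the wrapper above amounts to when a caller fills the resource template itself; the function name, usage flag and size are made-up values, and PIPE_BUFFER comes from the p_defines change later in this commit:

static INLINE struct pipe_resource *
example_create_buffer(struct pipe_screen *screen, unsigned size)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof templ);           /* zero every field first */
   templ.target  = PIPE_BUFFER;               /* buffers are now a resource target */
   templ.format  = PIPE_FORMAT_R8_UNORM;      /* placeholder until a typeless format exists */
   templ.usage   = PIPE_BUFFER_USAGE_VERTEX;  /* illustrative usage flag */
   templ.width0  = size;                      /* buffer size in bytes maps to width0 */
   templ.height0 = 1;
   templ.depth0  = 1;

   return screen->resource_create(screen, &templ);
}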
#if 0
static INLINE struct pipe_buffer *
pipe_user_buffer_create( struct pipe_screen *screen, void *ptr, unsigned size )
{
return screen->user_buffer_create(screen, ptr, size);
}
#endif
static INLINE void *
pipe_buffer_map(struct pipe_screen *screen,
struct pipe_buffer *buf,
unsigned usage)
pipe_buffer_map_range(struct pipe_context *pipe,
struct pipe_buffer *buffer,
unsigned offset,
unsigned length,
unsigned usage,
struct pipe_transfer **transfer)
{
if(screen->buffer_map_range) {
unsigned offset = 0;
unsigned length = buf->size;
return screen->buffer_map_range(screen, buf, offset, length, usage);
}
else
return screen->buffer_map(screen, buf, usage);
struct pipe_box box;
assert(offset < buffer->base.width0);
assert(offset + length <= buffer->base.width0);
assert(length);
u_box_1d(offset, length, &box);
*transfer = pipe->get_transfer( pipe,
&buffer->base,
u_subresource(0, 0),
usage,
&box);
if (*transfer == NULL)
return NULL;
return pipe->transfer_map( pipe, *transfer );
}
static INLINE void
pipe_buffer_unmap(struct pipe_screen *screen,
struct pipe_buffer *buf)
static INLINE void *
pipe_buffer_map(struct pipe_context *pipe,
struct pipe_buffer *buffer,
unsigned usage,
struct pipe_transfer **transfer)
{
screen->buffer_unmap(screen, buf);
return pipe_buffer_map_range(pipe, buffer, 0, buffer->base.width0, usage, transfer);
}
static INLINE void *
pipe_buffer_map_range(struct pipe_screen *screen,
struct pipe_buffer *buf,
unsigned offset,
unsigned length,
unsigned usage)
static INLINE void
pipe_buffer_unmap(struct pipe_context *pipe,
struct pipe_buffer *buf,
struct pipe_transfer *transfer)
{
assert(offset < buf->size);
assert(offset + length <= buf->size);
assert(length);
if(screen->buffer_map_range)
return screen->buffer_map_range(screen, buf, offset, length, usage);
else
return screen->buffer_map(screen, buf, usage);
if (transfer) {
pipe->transfer_unmap(pipe, transfer);
pipe->transfer_destroy(pipe, transfer);
}
}
static INLINE void
pipe_buffer_flush_mapped_range(struct pipe_screen *screen,
struct pipe_buffer *buf,
pipe_buffer_flush_mapped_range(struct pipe_context *pipe,
struct pipe_transfer *transfer,
unsigned offset,
unsigned length)
{
assert(offset < buf->size);
assert(offset + length <= buf->size);
struct pipe_box box;
assert(length);
if(screen->buffer_flush_mapped_range)
screen->buffer_flush_mapped_range(screen, buf, offset, length);
u_box_1d(offset, length, &box);
pipe->transfer_flush_region(pipe, transfer, &box);
}
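To show how the context-based helpers above chain together, a usage sketch that is not part of this change; the function name is hypothetical, and it assumes the mapped pointer addresses the start of the requested range and that the flush offsets are buffer-relative:

static INLINE void
example_upload_range(struct pipe_context *pipe,
                     struct pipe_buffer *buf,
                     unsigned offset,
                     unsigned size,
                     const void *data)
{
   struct pipe_transfer *transfer;
   void *map;

   map = pipe_buffer_map_range(pipe, buf, offset, size,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_FLUSH_EXPLICIT,
                               &transfer);
   if (!map)
      return;

   memcpy(map, data, size);   /* write into the mapped range */

   /* With FLUSH_EXPLICIT, only flushed regions are guaranteed to reach
    * the resource.
    */
   pipe_buffer_flush_mapped_range(pipe, transfer, offset, size);
   pipe_buffer_unmap(pipe, buf, transfer);
}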
static INLINE void
pipe_buffer_write(struct pipe_screen *screen,
pipe_buffer_write(struct pipe_context *pipe,
struct pipe_buffer *buf,
unsigned offset, unsigned size,
unsigned offset,
unsigned size,
const void *data)
{
void *map;
assert(offset < buf->size);
assert(offset + size <= buf->size);
assert(size);
map = pipe_buffer_map_range(screen, buf, offset, size,
PIPE_BUFFER_USAGE_CPU_WRITE |
PIPE_BUFFER_USAGE_FLUSH_EXPLICIT |
PIPE_BUFFER_USAGE_DISCARD);
assert(map);
if(map) {
memcpy((uint8_t *)map + offset, data, size);
pipe_buffer_flush_mapped_range(screen, buf, offset, size);
pipe_buffer_unmap(screen, buf);
}
struct pipe_box box;
struct pipe_subresource subresource;
subresource.face = 0;
subresource.level = 0;
box.x = offset;
box.y = 0;
box.z = 0;
box.w = size;
box.h = 1;
box.d = 1;
pipe->transfer_inline_write( pipe,
&buf->base,
subresource,
PIPE_TRANSFER_WRITE,
&box,
data);
}
/**
@@ -219,86 +259,82 @@ pipe_buffer_write(struct pipe_screen *screen,
* been written before.
*/
static INLINE void
pipe_buffer_write_nooverlap(struct pipe_screen *screen,
pipe_buffer_write_nooverlap(struct pipe_context *pipe,
struct pipe_buffer *buf,
unsigned offset, unsigned size,
const void *data)
{
void *map;
assert(offset < buf->size);
assert(offset + size <= buf->size);
assert(size);
map = pipe_buffer_map_range(screen, buf, offset, size,
PIPE_BUFFER_USAGE_CPU_WRITE |
PIPE_BUFFER_USAGE_FLUSH_EXPLICIT |
PIPE_BUFFER_USAGE_DISCARD |
PIPE_BUFFER_USAGE_UNSYNCHRONIZED);
assert(map);
if(map) {
memcpy((uint8_t *)map + offset, data, size);
pipe_buffer_flush_mapped_range(screen, buf, offset, size);
pipe_buffer_unmap(screen, buf);
}
struct pipe_box box;
struct pipe_subresource subresource;
subresource.face = 0;
subresource.level = 0;
box.x = offset;
box.y = 0;
box.z = 0;
box.w = size;
box.h = 1;
box.d = 1;
pipe->transfer_inline_write(pipe,
&buf->base,
subresource,
(PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_NOOVERWRITE),
&box,
data);
}
static INLINE void
pipe_buffer_read(struct pipe_screen *screen,
pipe_buffer_read(struct pipe_context *pipe,
struct pipe_buffer *buf,
unsigned offset, unsigned size,
void *data)
{
void *map;
assert(offset < buf->size);
assert(offset + size <= buf->size);
assert(size);
map = pipe_buffer_map_range(screen, buf, offset, size, PIPE_BUFFER_USAGE_CPU_READ);
assert(map);
if(map) {
memcpy(data, (const uint8_t *)map + offset, size);
pipe_buffer_unmap(screen, buf);
}
struct pipe_box box;
struct pipe_subresource subresource;
subresource.face = 0;
subresource.level = 0;
box.x = offset;
box.y = 0;
box.z = 0;
box.w = size;
box.h = 1;
box.d = 1;
pipe->transfer_inline_read( pipe,
&buf->base,
subresource,
PIPE_TRANSFER_READ,
&box,
data);
}
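A short sketch (editorial, not in the diff) of the one-shot helpers above doing a write followed by a read-back; the function name is hypothetical:

static INLINE void
example_roundtrip(struct pipe_context *pipe, struct pipe_buffer *buf)
{
   const float in[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
   float out[4];

   pipe_buffer_write(pipe, buf, 0, sizeof in, in);    /* inline write, no explicit map */
   pipe_buffer_read(pipe, buf, 0, sizeof out, out);   /* inline read-back */

   (void)out;   /* silence unused-variable warnings in this sketch */
}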
static INLINE void *
pipe_transfer_map( struct pipe_context *context,
struct pipe_transfer *transf )
struct pipe_transfer *transfer )
{
return context->transfer_map(context, transf);
return context->transfer_map( context, transfer );
}
static INLINE void
pipe_transfer_unmap( struct pipe_context *context,
struct pipe_transfer *transf )
struct pipe_transfer *transfer )
{
context->transfer_unmap(context, transf);
context->transfer_unmap( context, transfer );
}
static INLINE void
pipe_transfer_destroy( struct pipe_context *context,
struct pipe_transfer *transfer )
pipe_transfer_destroy( struct pipe_context *context,
struct pipe_transfer *transfer )
{
context->tex_transfer_destroy(context, transfer);
context->transfer_destroy(context, transfer);
}
static INLINE unsigned
pipe_transfer_buffer_flags( struct pipe_transfer *transf )
{
switch (transf->usage & PIPE_TRANSFER_READ_WRITE) {
case PIPE_TRANSFER_READ_WRITE:
return PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
case PIPE_TRANSFER_READ:
return PIPE_BUFFER_USAGE_CPU_READ;
case PIPE_TRANSFER_WRITE:
return PIPE_BUFFER_USAGE_CPU_WRITE;
default:
debug_assert(0);
return 0;
}
}
#ifdef __cplusplus
}
@@ -32,7 +32,7 @@
struct pipe_transfer;
#if 0
/**
* Clip tile against transfer dims.
* \return TRUE if tile is totally clipped, FALSE otherwise
@@ -50,6 +50,7 @@ pipe_clip_tile(uint x, uint y, uint *w, uint *h, const struct pipe_transfer *pt)
*h = pt->height - y;
return FALSE;
}
#endif
#ifdef __cplusplus
extern "C" {
@@ -291,22 +291,9 @@ struct pipe_context {
* \param level mipmap level.
* \return mask of PIPE_REFERENCED_FOR_READ/WRITE or PIPE_UNREFERENCED
*/
unsigned int (*is_texture_referenced)(struct pipe_context *pipe,
struct pipe_texture *texture,
unsigned face, unsigned level);
/**
* Check whether a buffer is referenced by an unflushed hw command.
* The state-tracker uses this function to avoid unnecessary flushes.
* It is safe (but wasteful) to always return
* PIPE_REFERENCED_FOR_READ | PIPE_REFERENCED_FOR_WRITE.
* \param pipe context whose unflushed hw commands will be checked.
* \param buf buffer to check.
* \return mask of PIPE_REFERENCED_FOR_READ/WRITE or PIPE_UNREFERENCED
*/
unsigned int (*is_buffer_referenced)(struct pipe_context *pipe,
struct pipe_buffer *buf);
unsigned int (*is_resource_referenced)(struct pipe_context *pipe,
struct pipe_resource *texture,
unsigned face, unsigned level);
/**
@@ -315,24 +302,50 @@ struct pipe_context {
* Transfers are (by default) context-private and allow uploads to be
* interleaved with rendering.
*/
struct pipe_transfer *(*get_tex_transfer)(struct pipe_context *,
struct pipe_texture *texture,
unsigned face, unsigned level,
unsigned zslice,
enum pipe_transfer_usage usage,
unsigned x, unsigned y,
unsigned w, unsigned h);
void (*tex_transfer_destroy)(struct pipe_context *,
struct pipe_transfer *(*get_transfer)(struct pipe_context *,
struct pipe_resource *resource,
struct pipe_subresource,
enum pipe_transfer_usage,
const struct pipe_box *);
void (*transfer_destroy)(struct pipe_context *,
struct pipe_transfer *);
void *(*transfer_map)( struct pipe_context *,
struct pipe_transfer *transfer );
/* If transfer was created with WRITE|FLUSH_EXPLICIT, only the
* regions specified with this call are guaranteed to be written to
* the resource.
*/
void (*transfer_flush_region)( struct pipe_context *,
struct pipe_transfer *transfer,
const struct pipe_box *);
void (*transfer_unmap)( struct pipe_context *,
struct pipe_transfer *transfer );
/* One-shot transfer operation with data supplied in a user
* pointer. XXX: strides??
*/
void (*transfer_inline_write)( struct pipe_context *,
struct pipe_resource *,
struct pipe_subresource,
enum pipe_transfer_usage,
const struct pipe_box *,
const void *data );
/* One-shot read transfer operation with data returned in a user
* pointer. XXX: strides??
*/
void (*transfer_inline_read)( struct pipe_context *,
struct pipe_resource *,
struct pipe_subresource,
enum pipe_transfer_usage,
const struct pipe_box *,
void *data );
};
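For orientation, a hedged sketch of the full transfer cycle the new interface implies, written against the entry points above; the function name, the per-row stride field on pipe_transfer, and the 4-bytes-per-pixel assumption are illustrative only:

static INLINE void
example_clear_region(struct pipe_context *pipe, struct pipe_resource *tex)
{
   struct pipe_subresource subres;
   struct pipe_box box;
   struct pipe_transfer *transfer;
   uint8_t *map;
   unsigned y;

   subres.face = 0;
   subres.level = 0;

   box.x = 0;  box.y = 0;  box.z = 0;   /* clear a 16x16 region of level 0 */
   box.w = 16; box.h = 16; box.d = 1;

   transfer = pipe->get_transfer(pipe, tex, subres,
                                 PIPE_TRANSFER_WRITE, &box);
   if (!transfer)
      return;

   map = (uint8_t *)pipe->transfer_map(pipe, transfer);
   if (map) {
      for (y = 0; y < 16; y++)
         memset(map + y * transfer->stride, 0, 16 * 4 /* assume 4 bpp */);
      pipe->transfer_unmap(pipe, transfer);
   }
   pipe->transfer_destroy(pipe, transfer);
}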
@@ -137,10 +137,11 @@ enum pipe_error {
/** Texture types */
enum pipe_texture_target {
PIPE_TEXTURE_1D = 0,
PIPE_TEXTURE_2D = 1,
PIPE_TEXTURE_3D = 2,
PIPE_TEXTURE_CUBE = 3,
PIPE_BUFFER = 0,
PIPE_TEXTURE_1D = 1,
PIPE_TEXTURE_2D = 2,
PIPE_TEXTURE_3D = 3,
PIPE_TEXTURE_CUBE = 4,
PIPE_MAX_TEXTURE_TYPES
};
@@ -208,10 +209,23 @@ enum pipe_texture_target {
* Transfer object usage flags
*/
enum pipe_transfer_usage {
/**
* Resource contents read back (or accessed directly) at transfer
* create time.
*/
PIPE_TRANSFER_READ = (1 << 0),
/**
* Resource contents will be written back at transfer_destroy
* time (or modified as a result of being accessed directly).
*/
PIPE_TRANSFER_WRITE = (1 << 1),
/** Read/modify/write */
/**
* Read/modify/write
*/
PIPE_TRANSFER_READ_WRITE = PIPE_TRANSFER_READ | PIPE_TRANSFER_WRITE,
/**
* The transfer should map the texture storage directly. The driver may
* return NULL if that isn't possible, and the state tracker needs to cope
@@ -221,7 +235,54 @@ enum pipe_transfer_usage {
* does read/modify/write cycles on them directly, and a more complicated
* path which uses minimal read and write transfers.
*/
PIPE_TRANSFER_MAP_DIRECTLY = (1 << 2)
PIPE_TRANSFER_MAP_DIRECTLY = (1 << 2),
/**
* Discards the memory within the mapped region.
*
* It should not be used with PIPE_TRANSFER_READ.
*
* See also:
* - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_RANGE_BIT flag.
* - Direct3D's D3DLOCK_DISCARD flag.
*/
PIPE_TRANSFER_DISCARD = (1 << 8),
/**
* Fail if the resource cannot be mapped immediately.
*
* See also:
* - Direct3D's D3DLOCK_DONOTWAIT flag.
* - Mesa3D's MESA_MAP_NOWAIT_BIT flag.
* - WDDM's D3DDDICB_LOCKFLAGS.DonotWait flag.
*/
PIPE_TRANSFER_DONTBLOCK = (1 << 9),
/**
* Do not attempt to synchronize pending operations on the resource when mapping.
*
* It should not be used with PIPE_TRANSFER_READ.
*
* See also:
* - OpenGL's ARB_map_buffer_range extension, MAP_UNSYNCHRONIZED_BIT flag.
* - Direct3D's D3DLOCK_NOOVERWRITE flag.
* - WDDM's D3DDDICB_LOCKFLAGS.IgnoreSync flag.
*/
PIPE_TRANSFER_UNSYNCHRONIZED = (1 << 10),
PIPE_TRANSFER_NOOVERWRITE = (1 << 10), /* are these really the same?? */
/**
* Written ranges will be notified later with
* pipe_context::transfer_flush_region.
*
* It should not be used with PIPE_TRANSFER_READ.
*
* See also:
* - pipe_context::transfer_flush_region
* - OpenGL's ARB_map_buffer_range extension, MAP_FLUSH_EXPLICIT_BIT flag.
*/
PIPE_TRANSFER_FLUSH_EXPLICIT = (1 << 11),
};
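As a quick illustration (not part of the change) of how these flags are intended to combine for buffer streaming; the variable names are made up:

/* Orphan the whole buffer and overwrite it from scratch: */
static const unsigned example_discard_map =
   PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DISCARD;

/* Append into a range known not to be in flight, skipping synchronization
 * and flushing only the ranges actually written:
 */
static const unsigned example_append_map =
   PIPE_TRANSFER_WRITE | PIPE_TRANSFER_UNSYNCHRONIZED |
   PIPE_TRANSFER_FLUSH_EXPLICIT;

/* Fail the map instead of stalling if the resource is still busy: */
static const unsigned example_try_map =
   PIPE_TRANSFER_WRITE | PIPE_TRANSFER_DONTBLOCK;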
@@ -238,73 +299,6 @@ enum pipe_transfer_usage {
#define PIPE_BUFFER_USAGE_INDEX (1 << 6)
#define PIPE_BUFFER_USAGE_CONSTANT (1 << 7)
/*
* CPU access flags.
*
* These flags should only be used for texture transfers or when mapping
* buffers.
*
* Note that the PIPE_BUFFER_USAGE_CPU_xxx flags above are also used for
* mapping. Either PIPE_BUFFER_USAGE_CPU_READ or PIPE_BUFFER_USAGE_CPU_WRITE
* must be set.
*/
/**
* Discards the memory within the mapped region.
*
* It should not be used with PIPE_BUFFER_USAGE_CPU_READ.
*
* See also:
* - OpenGL's ARB_map_buffer_range extension, MAP_INVALIDATE_RANGE_BIT flag.
* - Direct3D's D3DLOCK_DISCARD flag.
*/
#define PIPE_BUFFER_USAGE_DISCARD (1 << 8)
/**
* Fail if the resource cannot be mapped immediately.
*
* See also:
* - Direct3D's D3DLOCK_DONOTWAIT flag.
* - Mesa3D's MESA_MAP_NOWAIT_BIT flag.
* - WDDM's D3DDDICB_LOCKFLAGS.DonotWait flag.
*/
#define PIPE_BUFFER_USAGE_DONTBLOCK (1 << 9)
/**
* Do not attempt to synchronize pending operations on the resource when mapping.
*
* It should not be used with PIPE_BUFFER_USAGE_CPU_READ.
*
* See also:
* - OpenGL's ARB_map_buffer_range extension, MAP_UNSYNCHRONIZED_BIT flag.
* - Direct3D's D3DLOCK_NOOVERWRITE flag.
* - WDDM's D3DDDICB_LOCKFLAGS.IgnoreSync flag.
*/
#define PIPE_BUFFER_USAGE_UNSYNCHRONIZED (1 << 10)
/**
* Written ranges will be notified later with
* pipe_screen::buffer_flush_mapped_range.