Commit e3a6e600 authored by Eric Anholt

[965] Convert the driver to dri_bufmgr interface and enable TTM.

This is currently believed to work, but at a significant performance loss.
Performance recovery should follow soon.

The dri_bo_fake_disable_backing_store() call was added to allow disabling a
buffer's backing store the way bufmgr_fake.c did, which is a significant
performance win (though it is still missing the no-fence-subdata part).
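
As a usage sketch (hypothetical caller code, not part of this commit; only the
dri_bo_fake_disable_backing_store() signature below is real), a driver that can
regenerate a buffer's contents would register a callback to learn when the fake
bufmgr discards them:

    /* Hypothetical caller-side bookkeeping; "my_shadow" is made up. */
    struct my_shadow {
       GLboolean needs_reupload;
    };

    /* Runs when the buffer's contents are discarded.  Note: called with
     * the bufmgr mutex held (see the implementation below), so it must
     * not call back into the bufmgr.
     */
    static void
    my_invalidate_cb(dri_bo *bo, void *ptr)
    {
       struct my_shadow *shadow = ptr;
       shadow->needs_reupload = GL_TRUE;
    }

    ...
    dri_bo_fake_disable_backing_store(bo, my_invalidate_cb, &shadow);

A NULL callback is also accepted, as brw_draw_init does below for the internal
upload VBOs.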

This commit is a squash merge of the 965-ttm branch, whose history I wanted
to avoid pulling in because it was noisy and broken at many points, which
would have made git-bisecting painful.
parent 3ecdae82
@@ -203,6 +203,10 @@ dri_bufmgr *dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
                                                       unsigned int cookie),
                                 void *driver_priv);
 void dri_bufmgr_fake_set_debug(dri_bufmgr *bufmgr, GLboolean enable_debug);
+void dri_bo_fake_disable_backing_store(dri_bo *bo,
+                                       void (*invalidate_cb)(dri_bo *bo,
+                                                             void *ptr),
+                                       void *ptr);
 void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
 dri_bo *dri_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
                                       unsigned int handle);
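dri_ttm_bo_create_from_handle() wraps an already-existing kernel buffer object
rather than allocating a new one; a hedged example of the kind of call site
this enables (the handle variable here is illustrative, not from this diff):

    /* Wrap a shared buffer (e.g. the front buffer) given its TTM handle. */
    dri_bo *bo = dri_ttm_bo_create_from_handle(bufmgr, "front buffer",
                                               front_buffer_handle);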
@@ -181,7 +181,7 @@ typedef struct _dri_bo_fake {
    struct block *block;
    void *backing_store;
-   void (*invalidate_cb)(dri_bufmgr *bufmgr, void * );
+   void (*invalidate_cb)(dri_bo *bo, void *ptr);
    void *invalidate_ptr;
 } dri_bo_fake;
@@ -318,9 +318,9 @@ static void
 free_backing_store(dri_bo *bo)
 {
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
-   assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
 
    if (bo_fake->backing_store) {
+      assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
       ALIGN_FREE(bo_fake->backing_store);
       bo_fake->backing_store = NULL;
    }
@@ -332,8 +332,8 @@ set_dirty(dri_bo *bo)
    dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
    dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
 
-   if (bo_fake->flags & BM_NO_BACKING_STORE)
-      bo_fake->invalidate_cb(&bufmgr_fake->bufmgr, bo_fake->invalidate_ptr);
+   if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
+      bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
 
    assert(!(bo_fake->flags & BM_PINNED));
@@ -677,6 +677,40 @@ dri_fake_bo_unreference(dri_bo *bo)
    _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
 }
 
+/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ */
+void dri_bo_fake_disable_backing_store(dri_bo *bo,
+                                       void (*invalidate_cb)(dri_bo *bo,
+                                                             void *ptr),
+                                       void *ptr)
+{
+   dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+   dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+   _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
+
+   if (bo_fake->backing_store)
+      free_backing_store(bo);
+
+   bo_fake->flags |= BM_NO_BACKING_STORE;
+
+   DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+   bo_fake->dirty = 1;
+   bo_fake->invalidate_cb = invalidate_cb;
+   bo_fake->invalidate_ptr = ptr;
+
+   /* Note that it is invalid right from the start.  Also note
+    * invalidate_cb is called with the bufmgr locked, so cannot
+    * itself make bufmgr calls.
+    */
+   if (invalidate_cb != NULL)
+      invalidate_cb(bo, ptr);
+
+   _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
+}
+
 /**
  * Map a buffer into bo->virtual, allocating either card memory space (If
  * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
@@ -135,6 +135,7 @@ struct intel_context
       void (*assert_not_dirty) (struct intel_context *intel);
       void (*debug_batch)(struct intel_context *intel);
    } vtbl;
 
+   GLint refcount;
@@ -5,11 +5,11 @@ include $(TOP)/configs/current
 LIBNAME = i965_dri.so
 
 DRIVER_SOURCES = \
-        bufmgr_fake.c \
         intel_batchbuffer.c \
         intel_blit.c \
         intel_buffer_objects.c \
         intel_buffers.c \
+        intel_bufmgr_ttm.c \
         intel_context.c \
         intel_decode.c \
         intel_ioctl.c \
@@ -53,6 +53,7 @@ DRIVER_SOURCES = \
         brw_sf_state.c \
         brw_state_batch.c \
         brw_state_cache.c \
+        brw_state_dump.c \
         brw_state_pool.c \
         brw_state_upload.c \
         brw_tex.c \
@@ -80,6 +81,7 @@ DRIVER_SOURCES = \
 C_SOURCES = \
         $(COMMON_SOURCES) \
+        $(COMMON_BM_SOURCES) \
         $(MINIGLX_SOURCES) \
         $(DRIVER_SOURCES)
@@ -242,7 +242,7 @@ struct brw_surface_binding_table {
 struct brw_cache;
 
 struct brw_mem_pool {
-   struct buffer *buffer;
+   dri_bo *buffer;
 
    GLuint size;
    GLuint offset;   /* offset of first free byte */
@@ -310,6 +310,8 @@ struct brw_state_pointers {
 struct brw_tracked_state {
    struct brw_state_flags dirty;
    void (*update)( struct brw_context *brw );
+   void (*emit_reloc)( struct brw_context *brw );
+   GLboolean always_update;
 };
@@ -596,16 +598,17 @@ struct brw_context
    GLuint input_size_masks[4];
 
-   /* State structs
+   /**
+    * Array of sampler state uploaded at sampler_gs_offset of BRW_SAMPLER
+    * cache
     */
    struct brw_sampler_default_color sdc[BRW_MAX_TEX_UNIT];
    struct brw_sampler_state sampler[BRW_MAX_TEX_UNIT];
 
    GLuint render_surf;
    GLuint nr_surfaces;
 
    GLuint max_threads;
-   struct buffer *scratch_buffer;
+   dri_bo *scratch_buffer;
    GLuint scratch_buffer_size;
    GLuint sampler_count;
@@ -659,6 +662,10 @@ void brw_init_state( struct brw_context *brw );
 void brw_destroy_state( struct brw_context *brw );
 
+/*======================================================================
+ * brw_state_dump.c
+ */
+void brw_debug_batch(struct intel_context *intel);
 /*======================================================================
  * brw_tex.c
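The two new brw_tracked_state fields deserve a note: always_update forces an
atom's update() to run on every state validation (used below for the packets
that now emit relocations inline), and emit_reloc is a hook for re-emitting an
atom's relocations. A minimal sketch of how a validation loop might consume
always_update (assumed shape; the actual brw_state_upload.c change is not
among the excerpted hunks):

    /* Hypothetical atom walk honoring the new field. */
    for (i = 0; i < num_atoms; i++) {
       const struct brw_tracked_state *atom = atoms[i];

       if (atom->always_update || check_state(&state, &atom->dirty))
          atom->update(brw);   /* may emit packets and relocations inline */
    }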
@@ -312,11 +312,7 @@ static void upload_constant_buffer(struct brw_context *brw)
       /* Copy data to the buffer:
        */
-      bmBufferSubData(&brw->intel,
-                      pool->buffer,
-                      brw->curbe.gs_offset,
-                      bufsz,
-                      buf);
+      dri_bo_subdata(pool->buffer, brw->curbe.gs_offset, bufsz, buf);
    }
 
    /* Because this provokes an action (ie copy the constants into the
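dri_bo_subdata() is the generic dri_bufmgr upload path that replaces
bmBufferSubData() here. Where a backend has no specialized implementation, it
amounts to a map/copy/unmap sequence, roughly (a sketch in terms of the map
interface used elsewhere in this commit):

    /* Approximate equivalent of dri_bo_subdata(bo, offset, size, data). */
    dri_bo_map(bo, GL_TRUE);              /* GL_TRUE: map for writing */
    memcpy((char *)bo->virtual + offset, data, size);
    dri_bo_unmap(bo);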
@@ -289,7 +289,7 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
    LOCK_HARDWARE(intel);
 
    if (brw->intel.numClipRects == 0) {
-      assert(intel->batch->ptr == intel->batch->map + intel->batch->offset);
+      assert(intel->batch->ptr == intel->batch->map);
       UNLOCK_HARDWARE(intel);
       return GL_TRUE;
    }
@@ -358,14 +358,7 @@ static GLboolean brw_try_draw_prims( GLcontext *ctx,
     * way around this, as not every flush is due to a buffer filling
     * up.
     */
-   if (!intel_batchbuffer_flush( brw->intel.batch )) {
-      DBG("%s intel_batchbuffer_flush failed\n", __FUNCTION__);
-      retval = GL_FALSE;
-   }
-
-   if (retval && intel->thrashing) {
-      bmSetFence(intel);
-   }
+   intel_batchbuffer_flush( brw->intel.batch );
 
    /* Free any old data so it doesn't clog up texture memory - we
    * won't be referencing it again.
@@ -425,7 +418,6 @@ void brw_draw_prims( GLcontext *ctx,
                      GLuint min_index,
                      GLuint max_index )
 {
-   struct intel_context *intel = intel_context(ctx);
    GLboolean retval;
 
    /* Decide if we want to rebase.  If so we end up recursing once
@@ -445,20 +437,6 @@ void brw_draw_prims( GLcontext *ctx,
     */
    retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
 
-   /* This looks like out-of-memory but potentially we have
-    * situation where there is enough memory but it has become
-    * fragmented.  Clear out all heaps and start from scratch by
-    * faking a contended lock event: (done elsewhere)
-    */
-   if (!retval && !intel->Fallback && bmError(intel)) {
-      DBG("retrying\n");
-      /* Then try a second time only to upload textures and draw the
-       * primitives:
-       */
-      retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
-   }
-
    /* Otherwise, we really are out of memory.  Pass the drawing
     * command to the software tnl module and which will in turn call
     * swrast to do the drawing.
@@ -469,13 +447,6 @@ void brw_draw_prims( GLcontext *ctx,
    }
 }
 
-static void brw_invalidate_vbo_cb( struct intel_context *intel, void *ptr )
-{
-   /* nothing to do, we don't rely on the contents being preserved */
-}
-
 void brw_draw_init( struct brw_context *brw )
 {
    GLcontext *ctx = &brw->intel.ctx;
@@ -490,22 +461,25 @@ void brw_draw_init( struct brw_context *brw )
    for (i = 0; i < BRW_NR_UPLOAD_BUFS; i++) {
      brw->vb.upload.vbo[i] = ctx->Driver.NewBufferObject(ctx, 1, GL_ARRAY_BUFFER_ARB);
 
-      /* NOTE:  These are set to no-backing-store.
-       */
-      ctx->Driver.BufferData(ctx,
-                             GL_ARRAY_BUFFER_ARB,
-                             BRW_UPLOAD_INIT_SIZE,
-                             NULL,
-                             GL_DYNAMIC_DRAW_ARB,
-                             brw->vb.upload.vbo[i]);
-      bmBufferSetInvalidateCB(&brw->intel,
-                              intel_bufferobj_buffer(intel_buffer_object(brw->vb.upload.vbo[i])),
-                              brw_invalidate_vbo_cb,
-                              &brw->intel,
-                              GL_TRUE);
+      /* Set the internal VBOs to no-backing-store.  We only use them as a
+       * temporary within a brw_try_draw_prims while the lock is held.
+       */
+      if (!brw->intel.intelScreen->ttm) {
+         struct intel_buffer_object *intel_bo =
+            intel_buffer_object(brw->vb.upload.vbo[i]);
+
+         ctx->Driver.BufferData( ctx,
+                                 GL_ARRAY_BUFFER_ARB,
+                                 BRW_UPLOAD_INIT_SIZE,
+                                 NULL,
+                                 GL_DYNAMIC_DRAW_ARB,
+                                 brw->vb.upload.vbo[0] );
+
+         dri_bo_fake_disable_backing_store(intel_bufferobj_buffer(intel_bo),
+                                           NULL, NULL);
+      }
    }
 }
 
 void brw_draw_destroy( struct brw_context *brw )
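One detail worth noting in the brw_draw_init hunk above: the no-backing-store
setup is now skipped entirely when intelScreen->ttm is set, since
dri_bo_fake_disable_backing_store() is, as the name says, specific to the fake
bufmgr; under TTM the kernel manages the storage behind a dri_bo, so there is
no backing-store allocation to elide.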
@@ -58,7 +58,7 @@ struct brw_array_state {
       GLuint dword;
    } vb0;
 
-   struct buffer *buffer;
+   dri_bo *buffer;
    GLuint offset;
 
    GLuint max_index;
@@ -68,7 +68,7 @@ struct brw_array_state {
 };
 
-static struct buffer *array_buffer( const struct gl_client_array *array )
+static dri_bo *array_buffer( const struct gl_client_array *array )
 {
    return intel_bufferobj_buffer(intel_buffer_object(array->BufferObj));
 }
@@ -621,7 +621,7 @@ void brw_upload_indices( struct brw_context *brw,
    */
   {
      struct brw_indexbuffer ib;
-     struct buffer *buffer = intel_bufferobj_buffer(intel_buffer_object(bufferobj));
+     dri_bo *buffer = intel_bufferobj_buffer(intel_buffer_object(bufferobj));
 
      memset(&ib, 0, sizeof(ib));
@@ -123,21 +123,18 @@ const struct brw_tracked_state brw_drawing_rect = {
    .update = upload_drawing_rect
 };
 
-/***********************************************************************
- * Binding table pointers
+/**
+ * Upload the binding table pointers, which point to each stage's array of
+ * surface state pointers.
+ *
+ * The binding table pointers are relative to the surface state base address,
+ * which is the BRW_SS_POOL cache buffer.
  */
 static void upload_binding_table_pointers(struct brw_context *brw)
 {
    struct brw_binding_table_pointers btp;
    memset(&btp, 0, sizeof(btp));
 
-   /* The binding table has been emitted to the SS pool already, so we
-    * know what its offset is.  When the batch buffer is fired, the
-    * binding table and surface structs will get fixed up to point to
-    * where the textures actually landed, but that won't change the
-    * value of the offsets here:
-    */
    btp.header.opcode = CMD_BINDING_TABLE_PTRS;
    btp.header.length = sizeof(btp)/4 - 2;
    btp.vs = 0;
@@ -159,11 +156,12 @@ const struct brw_tracked_state brw_binding_table_pointers = {
 };
 
-/***********************************************************************
- * Pipelined state pointers.  This is the key state packet from which
- * the hardware chases pointers to all the uploaded state in VRAM.
+/**
+ * Upload pointers to the per-stage state.
+ *
+ * The state pointers in this packet are all relative to the general state
+ * base address set by CMD_STATE_BASE_ADDRESS, which is the BRW_GS_POOL buffer.
  */
 static void upload_pipelined_state_pointers(struct brw_context *brw )
 {
    struct brw_pipelined_state_pointers psp;
@@ -233,71 +231,53 @@ const struct brw_tracked_state brw_psp_urb_cbs = {
    .update = upload_psp_urb_cbs
 };
 
-/***********************************************************************
- * Depthbuffer - currently constant, but rotation would change that.
+/**
+ * Upload the depthbuffer offset and format.
+ *
+ * We have to do this per state validation as we need to emit the relocation
+ * in the batch buffer.
  */
 static void upload_depthbuffer(struct brw_context *brw)
 {
-   /* 0x79050003  Depth Buffer */
    struct intel_context *intel = &brw->intel;
    struct intel_region *region = brw->state.depth_region;
-   struct brw_depthbuffer bd;
-   memset(&bd, 0, sizeof(bd));
-
-   bd.header.bits.opcode = CMD_DEPTH_BUFFER;
-   bd.header.bits.length = sizeof(bd)/4-2;
-   bd.dword1.bits.pitch = (region->pitch * region->cpp) - 1;
+   unsigned int format;
 
    switch (region->cpp) {
    case 2:
-      bd.dword1.bits.format = BRW_DEPTHFORMAT_D16_UNORM;
+      format = BRW_DEPTHFORMAT_D16_UNORM;
      break;
   case 4:
      if (intel->depth_buffer_is_float)
-        bd.dword1.bits.format = BRW_DEPTHFORMAT_D32_FLOAT;
+        format = BRW_DEPTHFORMAT_D32_FLOAT;
      else
-        bd.dword1.bits.format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
+        format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
      break;
   default:
      assert(0);
      return;
   }
 
-   bd.dword1.bits.depth_offset_disable = 0; /* coordinate offset */
-
-   /* The depthbuffer can only use YMAJOR tiling...  This is a bit of
-    * a shame as it clashes with the 2d blitter which only supports
-    * XMAJOR tiling...
-    */
-   bd.dword1.bits.tile_walk = BRW_TILEWALK_YMAJOR;
-   bd.dword1.bits.tiled_surface = intel->depth_region->tiled;
-   bd.dword1.bits.surface_type = BRW_SURFACE_2D;
-
-   /* BRW_NEW_LOCK */
-   bd.dword2_base_addr = bmBufferOffset(intel, region->buffer);
-
-   bd.dword3.bits.mipmap_layout = BRW_SURFACE_MIPMAPLAYOUT_BELOW;
-   bd.dword3.bits.lod = 0;
-   bd.dword3.bits.width = region->pitch - 1; /* XXX: width ? */
-   bd.dword3.bits.height = region->height - 1;
-
-   bd.dword4.bits.min_array_element = 0;
-   bd.dword4.bits.depth = 0;
-
-   BRW_CACHED_BATCH_STRUCT(brw, &bd);
+   BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
+   OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
+   OUT_BATCH(((region->pitch * region->cpp) - 1) |
+             (format << 18) |
+             (BRW_TILEWALK_YMAJOR << 26) |
+             (region->tiled << 27) |
+             (BRW_SURFACE_2D << 29));
+   OUT_RELOC(region->buffer,
+             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
+   OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
+             ((region->pitch - 1) << 6) |
+             ((region->height - 1) << 19));
+   OUT_BATCH(0);
+   ADVANCE_BATCH();
 }
 
 const struct brw_tracked_state brw_depthbuffer = {
    .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT | BRW_NEW_LOCK,
      .cache = 0
    },
-   .update = upload_depthbuffer
+   .update = upload_depthbuffer,
+   .always_update = GL_TRUE,
 };
@@ -494,40 +474,37 @@ const struct brw_tracked_state brw_invarient_state = {
    .update = upload_invarient_state
 };
 
-/* State pool addresses:
+/**
+ * Define the base addresses which some state is referenced from.
+ *
+ * This allows us to avoid having to emit relocations in many places for
+ * cached state, and instead emit pointers inside of large, mostly-static
+ * state pools.  This comes at the expense of memory, and more expensive cache
+ * misses.
  */
 static void upload_state_base_address( struct brw_context *brw )
 {
    struct intel_context *intel = &brw->intel;
-   struct brw_state_base_address sba;
-
-   memset(&sba, 0, sizeof(sba));
-
-   sba.header.opcode = CMD_STATE_BASE_ADDRESS;
-   sba.header.length = 0x4;
-
-   /* BRW_NEW_LOCK */
-   sba.bits0.general_state_address = bmBufferOffset(intel, brw->pool[BRW_GS_POOL].buffer) >> 5;
-   sba.bits0.modify_enable = 1;
-
-   /* BRW_NEW_LOCK */
-   sba.bits1.surface_state_address = bmBufferOffset(intel, brw->pool[BRW_SS_POOL].buffer) >> 5;
-   sba.bits1.modify_enable = 1;
-
-   sba.bits2.modify_enable = 1;
-   sba.bits3.modify_enable = 1;
-   sba.bits4.modify_enable = 1;
-
-   BRW_CACHED_BATCH_STRUCT(brw, &sba);
+   /* Output the structure (brw_state_base_address) directly to the
+    * batchbuffer, so we can emit relocations inline.
+    */
+   BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
+   OUT_RELOC(brw->pool[BRW_GS_POOL].buffer,
+             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+             1); /* General state base address */
+   OUT_RELOC(brw->pool[BRW_SS_POOL].buffer,
+             DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
+             1); /* Surface state base address */
+   OUT_BATCH(1); /* Indirect object base address */
+   OUT_BATCH(1); /* General state upper bound */
+   OUT_BATCH(1); /* Indirect object upper bound */
+   ADVANCE_BATCH();
 }
 
 const struct brw_tracked_state brw_state_base_address = {
    .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT | BRW_NEW_LOCK,
      .cache = 0
    },
+   .always_update = GL_TRUE,
    .update = upload_state_base_address
 };
@@ -107,6 +107,12 @@ GLboolean brw_search_cache( struct brw_cache *cache,
 void brw_init_caches( struct brw_context *brw );
 void brw_destroy_caches( struct brw_context *brw );
 
+static inline dri_bo *brw_cache_buffer(struct brw_context *brw,
+                                       enum brw_cache_id id)
+{
+   return brw->cache[id].pool->buffer;
+}
+
 /***********************************************************************
  * brw_state_batch.c
  */
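The new inline helper maps a cache id to the dri_bo backing its pool.
Hypothetical usage (BRW_SAMPLER is borrowed from the brw_context.h comment
above; the exact enum members are not shown in this diff):

    dri_bo *bo = brw_cache_buffer(brw, BRW_SAMPLER);
    /* e.g. emit a relocation against bo, or map it for debugging */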
@@ -95,8 +95,6 @@ static void clear_batch_cache( struct brw_context *brw )
    brw_clear_all_caches(brw);
 
-   bmReleaseBuffers(&brw->intel);
-
    brw_invalidate_pools(brw);
 }
@@ -187,12 +187,7 @@ GLuint brw_upload_cache( struct brw_cache *cache,
 
    /* Copy data to the buffer:
     */
-   bmBufferSubData(&cache->brw->intel,
-                   cache->pool->buffer,
-                   offset,
-                   data_size,
-                   data);
+   dri_bo_subdata(cache->pool->buffer, offset, data_size, data);
 
    cache->brw->state.dirty.cache |= 1<<cache->id;
    cache->last_addr = offset;
/*
 * Copyright © 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include "mtypes.h"
#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

/**
 * Prints out a header, the contents, and the message associated with
 * the hardware state data given.
 *
 * \param name Name of the state object
 * \param data Pointer to the base of the state object
 * \param hw_offset Hardware offset of the base of the state data.
 * \param index Index of the DWORD being output.
 */
static void
state_out(char *name, uint32_t *data, uint32_t hw_offset, int index,
          char *fmt, ...)
{
   va_list va;

   fprintf(stderr, "%8s: 0x%08x: 0x%08x: ",
           name, hw_offset + index * 4, data[index]);
   va_start(va, fmt);
   vfprintf(stderr, fmt, va);
   va_end(va);
}

/** Generic, undecoded state buffer debug printout */
static void
state_struct_out(char *name, dri_bo *buffer, unsigned int pool_offset,
                 unsigned int state_size)
{
   int i;
   uint32_t *state;

   state = buffer->virtual + pool_offset;
   for (i = 0; i < state_size / 4; i++) {
      state_out(name, state, buffer->offset + pool_offset, i,
                "dword %d\n", i);
   }
}

static void dump_wm_surface_state(struct brw_context *brw, dri_bo *ss_buffer)
{
   int i;

   for (i = 0; i < brw->wm.nr_surfaces; i++) {
      unsigned int surfoff = ss_buffer->offset + brw->wm.bind.surf_ss_offset[i];
      struct brw_surface_state *surf =
         (struct brw_surface_state *)(ss_buffer->virtual +
                                      brw->wm.bind.surf_ss_offset[i]);
      uint32_t *surfvals = (uint32_t *)surf;
      char name[20];

      sprintf(name, "WM SS%d", i);
      state_out(name, surfvals, surfoff, 0, "\n");
      state_out(name, surfvals, surfoff, 1, "offset\n");
      state_out(name, surfvals, surfoff, 2, "%dx%d size, %d mips\n",
                surf->ss2.width + 1, surf->ss2.height + 1, surf->ss2.mip_count);
      state_out(name, surfvals, surfoff, 3, "pitch %d, %stiled\n",
                surf->ss3.pitch + 1, surf->ss3.tiled_surface ? "" : "not ");
      state_out(name, surfvals, surfoff, 4, "mip base %d\n",
                surf->ss4.min_lod);
   }
}

/**
 * Print additional debug information associated with the batchbuffer
 * when DEBUG_BATCH is set.
 *
 * For 965, this means mapping the state buffers that would have been referenced
 * by the batchbuffer and dumping them.
 *
 * The buffer offsets printed rely on the buffer containing the last offset
 * it was validated at.
 */
void brw_debug_batch(struct intel_context *intel)
{
   struct brw_context *brw = brw_context(&intel->ctx);
   dri_bo *ss_buffer, *gs_buffer;

   ss_buffer = brw->pool[BRW_SS_POOL].buffer;
   gs_buffer = brw->pool[BRW_GS_POOL].buffer;

   dri_bo_map(ss_buffer, GL_FALSE);
   dri_bo_map(gs_buffer, GL_FALSE);
   state_struct_out("WM bind", ss_buffer, brw->wm.bind_ss_offset,
                    4 * brw->wm.nr_surfaces);
   dump_wm_surface_state(brw, ss_buffer);
   state_struct_out("VS", gs_buffer, brw->vs.state_gs_offset,
                    sizeof(struct brw_vs_unit_state));
   state_struct_out("SF", gs_buffer, brw->sf.state_gs_offset,
                    sizeof(struct brw_sf_unit_state));
   state_struct_out("SF viewport", gs_buffer, brw->sf.state_gs_offset,
                    sizeof(struct brw_sf_unit_state));
   state_struct_out("WM", gs_buffer, brw->wm.state_gs_offset,
                    sizeof(struct brw_wm_unit_state));
   dri_bo_unmap(gs_buffer);
   dri_bo_unmap(ss_buffer);
}
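
brw_debug_batch() matches the debug_batch vtbl slot shown in the
intel_context.h hunk above, so hooking it up is presumably a one-line
assignment at context creation (sketch; the hookup itself is not among the
excerpted hunks):

    /* Hypothetical hookup in brwCreateContext; batchbuffer code can then
     * call intel->vtbl.debug_batch(intel) when batch debugging is enabled.
     */
    brw->intel.vtbl.debug_batch = brw_debug_batch;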
@@ -34,7 +34,7 @@
 #include "imports.h"
 #include "intel_ioctl.h"
-#include "bufmgr.h"
+#include "dri_bufmgr.h"
 
 GLboolean brw_pool_alloc( struct brw_mem_pool *pool,
                           GLuint size,
@@ -64,28 +64,21 @@ void brw_invalidate_pool( struct intel_context *intel,
 {
    if (INTEL_DEBUG & DEBUG_STATE)
       _mesa_printf("\n\n\n %s \n\n\n", __FUNCTION__);
 
-   bmBufferData(intel,
-                pool->buffer,
-                pool->size,