Commit 5018fd30 authored by Daniel Vetter, committed by Carl Worth

libIntelXvMC: kill ums leftovers

On i965-class hardware, kernel_exec_fencing was always 1 anyway. And on
i945, this patch kills a memory leak (it's unclear how, but it does).
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent cb06aa32
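For context, a minimal sketch (not part of the patch) of the two libdrm
mapping paths the old kernel_exec_fencing flag selected between; map_bo is a
hypothetical helper, and error handling is elided:

#include <intel_bufmgr.h>	/* libdrm: drm_intel_bo and mapping helpers */

/* Hypothetical helper illustrating the change: map a buffer object for
 * CPU writes and return its CPU pointer. */
static void *map_bo(drm_intel_bo *bo)
{
	/* The UMS-era code chose the mapping path at runtime:
	 *
	 *	if (xvmc_driver->kernel_exec_fencing)
	 *		drm_intel_gem_bo_map_gtt(bo);
	 *	else
	 *		drm_intel_bo_map(bo, 1);	(1 enables writes)
	 *
	 * With UMS gone, GEM is always present, so the GTT mapping is
	 * taken unconditionally, exactly as the hunks below do. */
	drm_intel_gem_bo_map_gtt(bo);
	return bo->virtual;	/* valid until the matching unmap call */
}

The unmap side collapses the same way in each hunk: the
drm_intel_bo_unmap() fallback goes away and drm_intel_gem_bo_unmap_gtt()
is called unconditionally.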
@@ -731,11 +731,8 @@ static Status render_surface(Display * display,
 	}
 	if (media_state.indirect_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.
-						   indirect_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.indirect_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.
+					   indirect_data.bo);
 		drm_intel_bo_unreference(media_state.indirect_data.bo);
 	}
@@ -755,10 +752,7 @@ static Status render_surface(Display * display,
 	interface_descriptor(&media_state);
 	vfe_state(&media_state);
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
-	else
-		drm_intel_bo_map(media_state.indirect_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.indirect_data.bo);
 	block_ptr = media_state.indirect_data.bo->virtual;
 	for (i = first_macroblock; i < num_macroblocks + first_macroblock; i++) {
...
@@ -73,10 +73,7 @@ Bool intelInitBatchBuffer(void)
 		return False;
 	}
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_map(xvmc_driver->batch.buf, 1);
+	drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
 	xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
 	xvmc_driver->batch.size = BATCH_SIZE;
@@ -87,10 +84,7 @@ Bool intelInitBatchBuffer(void)
 void intelFiniBatchBuffer(void)
 {
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_unmap(xvmc_driver->batch.buf);
+	drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
 	drm_intel_bo_unreference(xvmc_driver->batch.buf);
 }
@@ -99,10 +93,7 @@ void intelFlushBatch(Bool refill)
 {
 	i965_end_batch();
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_unmap(xvmc_driver->batch.buf);
+	drm_intel_gem_bo_unmap_gtt(xvmc_driver->batch.buf);
 	drm_intel_bo_exec(xvmc_driver->batch.buf,
 			  xvmc_driver->batch.ptr - xvmc_driver->batch.init_ptr,
@@ -118,10 +109,7 @@ void intelFlushBatch(Bool refill)
 		fprintf(stderr, "unable to alloc batch buffer\n");
 	}
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
-	else
-		drm_intel_bo_map(xvmc_driver->batch.buf, 1);
+	drm_intel_gem_bo_map_gtt(xvmc_driver->batch.buf);
 	xvmc_driver->batch.init_ptr = xvmc_driver->batch.buf->virtual;
 	xvmc_driver->batch.size = BATCH_SIZE;
...
@@ -421,8 +421,6 @@ _X_EXPORT Status XvMCCreateContext(Display * display, XvPortID port,
 	XVMC_INFO("decoder type is %s", intel_xvmc_decoder_string(comm->type));
-	xvmc_driver->kernel_exec_fencing = comm->kernel_exec_fencing;
-
 	/* assign local ctx info */
 	intel_ctx = intel_xvmc_new_context(display);
 	if (!intel_ctx) {
...
@@ -131,7 +131,6 @@ typedef struct _intel_xvmc_driver {
 	int fd;			/* drm file handler */
 	dri_bufmgr *bufmgr;
-	unsigned int kernel_exec_fencing:1;
 	struct {
 		unsigned int init_offset;
...
@@ -1010,10 +1010,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
 	q_scale_code = bit_buf >> 27;
 	if (media_state.slice_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.slice_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.slice_data.bo);
 		drm_intel_bo_unreference(media_state.slice_data.bo);
 	}
@@ -1022,10 +1019,7 @@ static Status put_slice2(Display * display, XvMCContext * context,
 				  VLD_MAX_SLICE_SIZE, 64);
 	if (!media_state.slice_data.bo)
 		return BadAlloc;
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
-	else
-		drm_intel_bo_map(media_state.slice_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.slice_data.bo);
 	memcpy(media_state.slice_data.bo->virtual, slice, nbytes);
@@ -1110,10 +1104,7 @@ static Status render_surface(Display * display,
 		return ret;
 	if (media_state.mb_data.bo) {
-		if (xvmc_driver->kernel_exec_fencing)
-			drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
-		else
-			drm_intel_bo_unmap(media_state.mb_data.bo);
+		drm_intel_gem_bo_unmap_gtt(media_state.mb_data.bo);
 		drm_intel_bo_unreference(media_state.mb_data.bo);
 	}
@@ -1125,10 +1116,7 @@ static Status render_surface(Display * display,
 				       surface_size, 64);
 	if (!media_state.mb_data.bo)
 		return BadAlloc;
-	if (xvmc_driver->kernel_exec_fencing)
-		drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
-	else
-		drm_intel_bo_map(media_state.mb_data.bo, 1);
+	drm_intel_gem_bo_map_gtt(media_state.mb_data.bo);
 	block_ptr = media_state.mb_data.bo->virtual;
 	unsigned short *mb_block_ptr;
...