Commit 47afc5ee authored by Thomas Hellstrom

svga: Add an environment variable to force coherent surface memory

The vmwgfx driver supports emulated coherent surface memory as of
version 2.16. Add an environment variable, SVGA_FORCE_COHERENT, to
enable this functionality for texture and buffer maps.

This environment variable should be used for testing only.
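
To enable it, set the variable to any value other than "0" in the
client's environment (per the getenv check in vmw_ioctl_init() below);
for example, glxgears standing in for any GL application:

    SVGA_FORCE_COHERENT=1 glxgears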
Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
parent 1a66ead1
@@ -310,16 +310,18 @@ svga_buffer_transfer_flush_region(struct pipe_context *pipe,
 {
    struct svga_screen *ss = svga_screen(pipe->screen);
    struct svga_buffer *sbuf = svga_buffer(transfer->resource);
+   struct svga_context *svga = svga_context(pipe);
    unsigned offset = transfer->box.x + box->x;
    unsigned length = box->width;
 
    assert(transfer->usage & PIPE_TRANSFER_WRITE);
    assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);
 
-   mtx_lock(&ss->swc_mutex);
-   svga_buffer_add_range(sbuf, offset, offset + length);
-   mtx_unlock(&ss->swc_mutex);
+   if (!svga->swc->force_coherent || sbuf->swbuf) {
+      mtx_lock(&ss->swc_mutex);
+      svga_buffer_add_range(sbuf, offset, offset + length);
+      mtx_unlock(&ss->swc_mutex);
+   }
 }
@@ -359,7 +361,8 @@ svga_buffer_transfer_unmap(struct pipe_context *pipe,
       sbuf->dma.flags.discard = TRUE;
 
-      svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
+      if (!svga->swc->force_coherent || sbuf->swbuf)
+         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
    }
 }
@@ -314,6 +314,15 @@ svga_buffer_hw_storage_unmap(struct svga_context *svga,
             ret = SVGA3D_BindGBSurface(swc, sbuf->handle);
             assert(ret == PIPE_OK);
          }
+         if (swc->force_coherent) {
+            ret = SVGA3D_UpdateGBSurface(swc, sbuf->handle);
+            if (ret != PIPE_OK) {
+               /* flush and retry */
+               svga_context_flush(svga, NULL);
+               ret = SVGA3D_UpdateGBSurface(swc, sbuf->handle);
+               assert(ret == PIPE_OK);
+            }
+         }
       }
    } else
       sws->buffer_unmap(sws, sbuf->hwbuf);
@@ -448,6 +448,9 @@ svga_buffer_upload_gb_command(struct svga_context *svga,
    struct pipe_resource *dummy;
    unsigned i;
 
+   if (swc->force_coherent)
+      return PIPE_OK;
+
    assert(svga_have_gb_objects(svga));
    assert(numBoxes);
    assert(sbuf->dma.updates == NULL);
@@ -645,7 +648,7 @@ svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
    unsigned i;
    struct pipe_resource *dummy;
 
-   if (!sbuf->dma.pending) {
+   if (!sbuf->dma.pending || svga->swc->force_coherent) {
       //debug_printf("no dma pending on buffer\n");
       return;
    }
@@ -659,6 +662,7 @@ svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
     */
    if (svga_have_gb_objects(svga)) {
       struct svga_3d_update_gb_image *update = sbuf->dma.updates;
+      assert(update);
 
       for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
@@ -871,6 +875,9 @@ svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
          memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
       }
 
+      if (svga->swc->force_coherent)
+         sbuf->map.num_ranges = 0;
+
       svga_buffer_hw_storage_unmap(svga, sbuf);
 
       /* This user/malloc buffer is now indistinguishable from a gpu buffer */
@@ -1029,6 +1036,8 @@ svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
    }
 
    assert(sbuf->handle);
+   if (svga->swc->force_coherent)
+      return sbuf->handle;
 
    if (sbuf->map.num_ranges) {
       if (!sbuf->dma.pending) {
@@ -401,21 +401,23 @@ svga_texture_transfer_map_direct(struct svga_context *svga,
       svga_surfaces_flush(svga);
 
-      for (i = 0; i < st->box.d; i++) {
-         if (svga_have_vgpu10(svga)) {
-            ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
-                                        tex->b.b.last_level + 1);
-         } else {
-            ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
-         }
-      }
+      if (!svga->swc->force_coherent || tex->imported) {
+         for (i = 0; i < st->box.d; i++) {
+            if (svga_have_vgpu10(svga)) {
+               ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
+                                           tex->b.b.last_level + 1);
+            } else {
+               ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
+            }
+         }
 
-      svga->hud.num_readbacks++;
-      SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
+         svga->hud.num_readbacks++;
+         SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
 
-      assert(ret == PIPE_OK);
-      (void) ret;
+         assert(ret == PIPE_OK);
+         (void) ret;
 
-      svga_context_flush(svga, NULL);
+         svga_context_flush(svga, NULL);
+      }
 
       /*
        * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
@@ -694,6 +696,15 @@ svga_texture_surface_unmap(struct svga_context *svga,
          ret = SVGA3D_BindGBSurface(swc, surf);
          assert(ret == PIPE_OK);
       }
+      if (swc->force_coherent) {
+         ret = SVGA3D_UpdateGBSurface(swc, surf);
+         if (ret != PIPE_OK) {
+            /* flush and retry */
+            svga_context_flush(svga, NULL);
+            ret = SVGA3D_UpdateGBSurface(swc, surf);
+            assert(ret == PIPE_OK);
+         }
+      }
    }
 }
@@ -816,19 +827,22 @@ svga_texture_transfer_unmap_direct(struct svga_context *svga,
                    box.x, box.y, box.z,
                    box.w, box.h, box.d);
 
-   if (svga_have_vgpu10(svga)) {
-      unsigned i;
+   if (!svga->swc->force_coherent || tex->imported) {
+      if (svga_have_vgpu10(svga)) {
+         unsigned i;
 
-      for (i = 0; i < nlayers; i++) {
-         ret = update_image_vgpu10(svga, surf, &box,
-                                   st->slice + i, transfer->level,
-                                   tex->b.b.last_level + 1);
-         assert(ret == PIPE_OK);
-      }
-   } else {
-      assert(nlayers == 1);
-      ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
-      assert(ret == PIPE_OK);
-   }
+         for (i = 0; i < nlayers; i++) {
+            ret = update_image_vgpu10(svga, surf, &box,
+                                      st->slice + i, transfer->level,
+                                      tex->b.b.last_level + 1);
+            assert(ret == PIPE_OK);
+         }
+      } else {
+         assert(nlayers == 1);
+         ret = update_image_vgpu9(svga, surf, &box, st->slice,
+                                  transfer->level);
+         assert(ret == PIPE_OK);
+      }
+   }
    (void) ret;
 }
@@ -385,6 +385,7 @@ struct svga_winsys_context
     **/
    boolean have_gb_objects;
+   boolean force_coherent;
 
    /**
     * Map a guest-backed surface.
@@ -852,6 +852,7 @@ vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
    vswc->fctx = debug_flush_ctx_create(TRUE, VMW_DEBUG_FLUSH_STACK);
 #endif
 
+   vswc->base.force_coherent = vws->force_coherent;
    return &vswc->base;
 
 out_no_hash:
@@ -90,12 +90,12 @@ vmw_winsys_create( int fd )
    vws->device = stat_buf.st_rdev;
    vws->open_count = 1;
    vws->ioctl.drm_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
-   vws->base.have_gb_dma = TRUE;
-   vws->base.need_to_rebind_resources = FALSE;
+   vws->force_coherent = FALSE;
    if (!vmw_ioctl_init(vws))
       goto out_no_ioctl;
 
+   vws->base.have_gb_dma = !vws->force_coherent;
+   vws->base.need_to_rebind_resources = FALSE;
    vws->base.have_transfer_from_buffer_cmd = vws->base.have_vgpu10;
    vws->fence_ops = vmw_fence_ops_create(vws);
    if (!vws->fence_ops)
@@ -104,6 +104,8 @@ struct vmw_winsys_screen
    cnd_t cs_cond;
    mtx_t cs_mutex;
 
+   boolean force_coherent;
+
 };
@@ -243,6 +243,9 @@ vmw_ioctl_gb_surface_create(struct vmw_winsys_screen *vws,
    if (usage & SVGA_SURFACE_USAGE_SHARED)
       req->base.drm_surface_flags |= drm_vmw_surface_flag_shareable;
+   if (vws->force_coherent)
+      req->base.drm_surface_flags |= drm_vmw_surface_flag_coherent;
+
    req->base.drm_surface_flags |= drm_vmw_surface_flag_create_buffer;
    req->base.base_size.width = size.width;
    req->base.base_size.height = size.height;
@@ -969,6 +972,7 @@ vmw_ioctl_init(struct vmw_winsys_screen *vws)
    drmVersionPtr version;
    boolean drm_gb_capable;
    boolean have_drm_2_5;
+   boolean have_drm_2_16;
    const char *getenv_val;
 
    VMW_FUNC;
@@ -985,6 +989,8 @@ vmw_ioctl_init(struct vmw_winsys_screen *vws)
       (version->version_major == 2 && version->version_minor > 8);
    vws->ioctl.have_drm_2_15 = version->version_major > 2 ||
       (version->version_major == 2 && version->version_minor > 14);
+   have_drm_2_16 = version->version_major > 2 ||
+      (version->version_major == 2 && version->version_minor > 15);
 
    vws->ioctl.drm_execbuf_version = vws->ioctl.have_drm_2_9 ? 2 : 1;
@@ -1108,6 +1114,12 @@ vmw_ioctl_init(struct vmw_winsys_screen *vws)
          vws->ioctl.num_cap_3d = size / sizeof(uint32_t);
       else
          vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;
+
+      if (have_drm_2_16) {
+         getenv_val = getenv("SVGA_FORCE_COHERENT");
+         if (getenv_val && strcmp(getenv_val, "0") != 0)
+            vws->force_coherent = TRUE;
+      }
    } else {
       vws->ioctl.num_cap_3d = SVGA3D_DEVCAP_MAX;