Commit 823d889b authored by Chia-I Wu, committed by Marge Bot

venus: simplify vn_renderer_sync creation



Remove the ability to init/release repeatedly.
Signed-off-by: Chia-I Wu <olvaffe@gmail.com>
Reviewed-by: Yiwei Zhang <zzyiwei@chromium.org>
Part-of: <mesa/mesa!11253>
parent 51782878
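
The caller-visible effect, sketched below using only the ops touched by this diff (error handling trimmed; this is an illustration, not code from the commit): a sync was previously allocated empty, initialized, and could be released back to the empty state and re-initialized; it is now created fully initialized in one call and simply destroyed afterwards.

/* illustration only; renderer is a struct vn_renderer * as in the diff */
struct vn_renderer_sync *sync;

/* before: sync_ops.create() allocated an empty sync, sync_ops.init()
 * initialized it, sync_ops.release() returned it to the empty state,
 * and sync_ops.destroy() freed it
 */

/* after: create is allocation plus initialization, destroy is the only
 * teardown step
 */
VkResult result =
   renderer->sync_ops.create(renderer, 0 /* initial_val */,
                             VN_RENDERER_SYNC_BINARY, &sync);
if (result == VK_SUCCESS)
   renderer->sync_ops.destroy(renderer, sync);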
@@ -177,20 +177,16 @@ enum vn_renderer_sync_flags {
};
struct vn_renderer_sync_ops {
struct vn_renderer_sync *(*create)(struct vn_renderer *renderer);
void (*destroy)(struct vn_renderer *renderer,
struct vn_renderer_sync *sync);
VkResult (*create)(struct vn_renderer *renderer,
uint64_t initial_val,
uint32_t flags,
struct vn_renderer_sync **out_sync);
/* a sync can be initialized/released multiple times */
VkResult (*init)(struct vn_renderer *renderer,
struct vn_renderer_sync *sync,
uint64_t initial_val,
uint32_t flags);
VkResult (*init_syncobj)(struct vn_renderer *renderer,
struct vn_renderer_sync *sync,
int fd,
bool sync_file);
void (*release)(struct vn_renderer *renderer,
VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
int fd,
bool sync_file,
struct vn_renderer_sync **out_sync);
void (*destroy)(struct vn_renderer *renderer,
struct vn_renderer_sync *sync);
int (*export_syncobj)(struct vn_renderer *renderer,
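
Because the hunk above interleaves the removed and added declarations, the creation-related part of the resulting struct is easier to read assembled from the added lines alone (export_syncobj, reset, and read keep their existing declarations):

struct vn_renderer_sync_ops {
   /* create a fully initialized sync; replaces create + init */
   VkResult (*create)(struct vn_renderer *renderer,
                      uint64_t initial_val,
                      uint32_t flags,
                      struct vn_renderer_sync **out_sync);
   /* import a drm_syncobj or sync_file fd; replaces create + init_syncobj */
   VkResult (*create_from_syncobj)(struct vn_renderer *renderer,
                                   int fd,
                                   bool sync_file,
                                   struct vn_renderer_sync **out_sync);
   /* destroy now also covers what release used to do */
   void (*destroy)(struct vn_renderer *renderer,
                   struct vn_renderer_sync *sync);
   /* export_syncobj, reset, read, ... unchanged */
};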
@@ -429,88 +425,22 @@ vn_renderer_bo_invalidate(struct vn_renderer *renderer,
}
static inline VkResult
vn_renderer_sync_create_cpu(struct vn_renderer *renderer,
struct vn_renderer_sync **_sync)
{
struct vn_renderer_sync *sync = renderer->sync_ops.create(renderer);
if (!sync)
return VK_ERROR_OUT_OF_HOST_MEMORY;
const uint64_t initial_val = 0;
const uint32_t flags = 0;
VkResult result =
renderer->sync_ops.init(renderer, sync, initial_val, flags);
if (result != VK_SUCCESS) {
renderer->sync_ops.destroy(renderer, sync);
return result;
}
*_sync = sync;
return VK_SUCCESS;
}
static inline VkResult
vn_renderer_sync_create_fence(struct vn_renderer *renderer,
bool signaled,
VkExternalFenceHandleTypeFlags external_handles,
struct vn_renderer_sync **_sync)
{
struct vn_renderer_sync *sync = renderer->sync_ops.create(renderer);
if (!sync)
return VK_ERROR_OUT_OF_HOST_MEMORY;
const uint64_t initial_val = signaled;
const uint32_t flags = VN_RENDERER_SYNC_BINARY |
(external_handles ? VN_RENDERER_SYNC_SHAREABLE : 0);
VkResult result =
renderer->sync_ops.init(renderer, sync, initial_val, flags);
if (result != VK_SUCCESS) {
renderer->sync_ops.destroy(renderer, sync);
return result;
}
*_sync = sync;
return VK_SUCCESS;
}
static inline VkResult
vn_renderer_sync_create_semaphore(
struct vn_renderer *renderer,
VkSemaphoreType type,
uint64_t initial_val,
VkExternalSemaphoreHandleTypeFlags external_handles,
struct vn_renderer_sync **_sync)
vn_renderer_sync_create(struct vn_renderer *renderer,
uint64_t initial_val,
uint32_t flags,
struct vn_renderer_sync **out_sync)
{
struct vn_renderer_sync *sync = renderer->sync_ops.create(renderer);
if (!sync)
return VK_ERROR_OUT_OF_HOST_MEMORY;
const uint32_t flags =
(external_handles ? VN_RENDERER_SYNC_SHAREABLE : 0) |
(type == VK_SEMAPHORE_TYPE_BINARY ? VN_RENDERER_SYNC_BINARY : 0);
VkResult result =
renderer->sync_ops.init(renderer, sync, initial_val, flags);
if (result != VK_SUCCESS) {
renderer->sync_ops.destroy(renderer, sync);
return result;
}
*_sync = sync;
return VK_SUCCESS;
return renderer->sync_ops.create(renderer, initial_val, flags, out_sync);
}
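
Callers that used the removed type-specific wrappers (vn_renderer_sync_create_cpu, _create_fence, _create_semaphore, _create_empty) now compute the flags themselves and call vn_renderer_sync_create directly; a sketch of the semaphore case, following the flag logic of the removed helper (type, initial_val, and external_handles stand for the caller's values, as in the old wrapper):

/* roughly what vn_renderer_sync_create_semaphore() used to do */
const uint32_t flags =
   (external_handles ? VN_RENDERER_SYNC_SHAREABLE : 0) |
   (type == VK_SEMAPHORE_TYPE_BINARY ? VN_RENDERER_SYNC_BINARY : 0);
struct vn_renderer_sync *sync;
VkResult result =
   vn_renderer_sync_create(renderer, initial_val, flags, &sync);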
static inline VkResult
vn_renderer_sync_create_empty(struct vn_renderer *renderer,
struct vn_renderer_sync **_sync)
vn_renderer_sync_create_from_syncobj(struct vn_renderer *renderer,
int fd,
bool sync_file,
struct vn_renderer_sync **out_sync)
{
struct vn_renderer_sync *sync = renderer->sync_ops.create(renderer);
if (!sync)
return VK_ERROR_OUT_OF_HOST_MEMORY;
/* no init */
*_sync = sync;
return VK_SUCCESS;
return renderer->sync_ops.create_from_syncobj(renderer, fd, sync_file,
out_sync);
}
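
A usage sketch for the import path (fd is assumed to be a valid file descriptor for an external payload; sync_file selects between a sync_file fd and a drm_syncobj fd, as in the signature above):

struct vn_renderer_sync *sync;
VkResult result =
   vn_renderer_sync_create_from_syncobj(renderer, fd, true /* sync_file */,
                                        &sync);
if (result == VK_SUCCESS)
   vn_renderer_sync_destroy(renderer, sync);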
static inline void
@@ -520,31 +450,6 @@ vn_renderer_sync_destroy(struct vn_renderer *renderer,
renderer->sync_ops.destroy(renderer, sync);
}
static inline VkResult
vn_renderer_sync_init_signaled(struct vn_renderer *renderer,
struct vn_renderer_sync *sync)
{
const uint64_t initial_val = 1;
const uint32_t flags = VN_RENDERER_SYNC_BINARY;
return renderer->sync_ops.init(renderer, sync, initial_val, flags);
}
static inline VkResult
vn_renderer_sync_init_syncobj(struct vn_renderer *renderer,
struct vn_renderer_sync *sync,
int fd,
bool sync_file)
{
return renderer->sync_ops.init_syncobj(renderer, sync, fd, sync_file);
}
static inline void
vn_renderer_sync_release(struct vn_renderer *renderer,
struct vn_renderer_sync *sync)
{
renderer->sync_ops.release(renderer, sync);
}
static inline int
vn_renderer_sync_export_syncobj(struct vn_renderer *renderer,
struct vn_renderer_sync *sync,
......
@@ -951,7 +951,7 @@ virtgpu_sync_export_syncobj(struct vn_renderer *renderer,
}
static void
virtgpu_sync_release(struct vn_renderer *renderer,
virtgpu_sync_destroy(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync)
{
struct virtgpu *gpu = (struct virtgpu *)renderer;
@@ -959,18 +959,16 @@ virtgpu_sync_release(struct vn_renderer *renderer,
virtgpu_ioctl_syncobj_destroy(gpu, sync->syncobj_handle);
sync->syncobj_handle = 0;
sync->base.sync_id = 0;
free(sync);
}
static VkResult
virtgpu_sync_init_syncobj(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync,
int fd,
bool sync_file)
virtgpu_sync_create_from_syncobj(struct vn_renderer *renderer,
int fd,
bool sync_file,
struct vn_renderer_sync **out_sync)
{
struct virtgpu *gpu = (struct virtgpu *)renderer;
struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;
uint32_t syncobj_handle;
if (sync_file) {
@@ -987,20 +985,27 @@ virtgpu_sync_init_syncobj(struct vn_renderer *renderer,
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
struct virtgpu_sync *sync = calloc(1, sizeof(*sync));
if (!sync) {
virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
sync->syncobj_handle = syncobj_handle;
sync->base.sync_id = 0; /* TODO */
*out_sync = &sync->base;
return VK_SUCCESS;
}
static VkResult
virtgpu_sync_init(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync,
uint64_t initial_val,
uint32_t flags)
virtgpu_sync_create(struct vn_renderer *renderer,
uint64_t initial_val,
uint32_t flags,
struct vn_renderer_sync **out_sync)
{
struct virtgpu *gpu = (struct virtgpu *)renderer;
struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;
/* TODO */
if (flags & VN_RENDERER_SYNC_SHAREABLE)
@@ -1008,46 +1013,34 @@ virtgpu_sync_init(struct vn_renderer *renderer,
/* always false because we don't use binary drm_syncobjs */
const bool signaled = false;
sync->syncobj_handle = virtgpu_ioctl_syncobj_create(gpu, signaled);
if (!sync->syncobj_handle)
const uint32_t syncobj_handle =
virtgpu_ioctl_syncobj_create(gpu, signaled);
if (!syncobj_handle)
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
/* add a signaled fence chain with seqno initial_val */
const int ret = virtgpu_ioctl_syncobj_timeline_signal(
gpu, sync->syncobj_handle, initial_val);
const int ret =
virtgpu_ioctl_syncobj_timeline_signal(gpu, syncobj_handle, initial_val);
if (ret) {
virtgpu_sync_release(&gpu->base, &sync->base);
virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
struct virtgpu_sync *sync = calloc(1, sizeof(*sync));
if (!sync) {
virtgpu_ioctl_syncobj_destroy(gpu, syncobj_handle);
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
sync->syncobj_handle = syncobj_handle;
/* we will have a sync_id when shareable is true and virtio-gpu associates
* a host sync object with guest drm_syncobj
*/
sync->base.sync_id = 0;
return VK_SUCCESS;
}
static void
virtgpu_sync_destroy(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync)
{
struct virtgpu_sync *sync = (struct virtgpu_sync *)_sync;
if (sync->syncobj_handle)
virtgpu_sync_release(renderer, &sync->base);
*out_sync = &sync->base;
free(sync);
}
static struct vn_renderer_sync *
virtgpu_sync_create(struct vn_renderer *renderer)
{
struct virtgpu_sync *sync = calloc(1, sizeof(*sync));
if (!sync)
return NULL;
return &sync->base;
return VK_SUCCESS;
}
static void
@@ -1567,10 +1560,8 @@ virtgpu_init(struct virtgpu *gpu)
gpu->base.bo_ops.invalidate = virtgpu_bo_invalidate;
gpu->base.sync_ops.create = virtgpu_sync_create;
gpu->base.sync_ops.create_from_syncobj = virtgpu_sync_create_from_syncobj;
gpu->base.sync_ops.destroy = virtgpu_sync_destroy;
gpu->base.sync_ops.init = virtgpu_sync_init;
gpu->base.sync_ops.init_syncobj = virtgpu_sync_init_syncobj;
gpu->base.sync_ops.release = virtgpu_sync_release;
gpu->base.sync_ops.export_syncobj = virtgpu_sync_export_syncobj;
gpu->base.sync_ops.reset = virtgpu_sync_reset;
gpu->base.sync_ops.read = virtgpu_sync_read;
......
@@ -613,7 +613,7 @@ vtest_sync_reset(struct vn_renderer *renderer,
}
static void
vtest_sync_release(struct vn_renderer *renderer,
vtest_sync_destroy(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync)
{
struct vtest *vtest = (struct vtest *)renderer;
@@ -623,47 +623,29 @@ vtest_sync_release(struct vn_renderer *renderer,
vtest_vcmd_sync_unref(vtest, sync->base.sync_id);
mtx_unlock(&vtest->sock_mutex);
sync->base.sync_id = 0;
free(sync);
}
static VkResult
vtest_sync_init(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync,
uint64_t initial_val,
uint32_t flags)
vtest_sync_create(struct vn_renderer *renderer,
uint64_t initial_val,
uint32_t flags,
struct vn_renderer_sync **out_sync)
{
struct vtest *vtest = (struct vtest *)renderer;
struct vtest_sync *sync = (struct vtest_sync *)_sync;
struct vtest_sync *sync = calloc(1, sizeof(*sync));
if (!sync)
return VK_ERROR_OUT_OF_HOST_MEMORY;
mtx_lock(&vtest->sock_mutex);
sync->base.sync_id = vtest_vcmd_sync_create(vtest, initial_val);
mtx_unlock(&vtest->sock_mutex);
*out_sync = &sync->base;
return VK_SUCCESS;
}
static void
vtest_sync_destroy(struct vn_renderer *renderer,
struct vn_renderer_sync *_sync)
{
struct vtest_sync *sync = (struct vtest_sync *)_sync;
if (sync->base.sync_id)
vtest_sync_release(renderer, &sync->base);
free(sync);
}
static struct vn_renderer_sync *
vtest_sync_create(struct vn_renderer *renderer)
{
struct vtest_sync *sync = calloc(1, sizeof(*sync));
if (!sync)
return NULL;
return &sync->base;
}
static void
vtest_bo_invalidate(struct vn_renderer *renderer,
struct vn_renderer_bo *bo,
@@ -1054,10 +1036,8 @@ vtest_init(struct vtest *vtest)
vtest->base.bo_ops.invalidate = vtest_bo_invalidate;
vtest->base.sync_ops.create = vtest_sync_create;
vtest->base.sync_ops.create_from_syncobj = NULL;
vtest->base.sync_ops.destroy = vtest_sync_destroy;
vtest->base.sync_ops.init = vtest_sync_init;
vtest->base.sync_ops.init_syncobj = NULL;
vtest->base.sync_ops.release = vtest_sync_release;
vtest->base.sync_ops.export_syncobj = NULL;
vtest->base.sync_ops.reset = vtest_sync_reset;
vtest->base.sync_ops.read = vtest_sync_read;
......
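
Note that vtest leaves create_from_syncobj and export_syncobj as NULL, so only the virtgpu backend supports the syncobj paths; a sketch of how a caller could guard against the missing op (an assumption about caller behavior, not part of this commit):

/* hypothetical guard; the op is NULL on the vtest backend */
if (!renderer->sync_ops.create_from_syncobj)
   return VK_ERROR_FEATURE_NOT_PRESENT; /* placeholder error code */
return vn_renderer_sync_create_from_syncobj(renderer, fd, sync_file, out_sync);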