Commit b38879f8 authored by Dave Airlie

vallium: initial import of the vulkan frontend



This is the initial import of the vallium frontend for gallium.
This is only good enough to run the triangle and gears demos
(wrongly) from the Sascha Willems demo suite.

Improvements are mostly on the llvmpipe side after this.

It contains an implementation of the Vulkan API which is mapped
onto the gallium API, and is suitable only for SOFTWARE drivers.

Command buffers are recorded into malloc'ed memory, then later
they are played back against the gallium API. The command buffers
are mostly just Vulkan API marshalling, but in some places the
information is processed before being put into the command buffer
(renderpass handling).

Execution happens on a separate "graphics" thread, against the gallium API.

There is only a single queue which wraps a single gallium context.
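
To illustrate that model, here is a minimal, hypothetical sketch of the
record/playback pattern (cmd_entry, record_draw and playback are
illustrative stand-ins, not the actual vallium types):

/* Sketch of the record/playback model described above; hypothetical
 * types, not the actual vallium structures. */
#include <stdlib.h>

struct pipe_context;   /* gallium context, only passed through here */

enum cmd_type { CMD_DRAW /* , ... one value per vkCmd* entrypoint */ };

struct cmd_entry {
   enum cmd_type type;
   struct cmd_entry *next;
   union {
      struct { unsigned vertex_count; } draw;   /* marshalled arguments */
   } u;
};

/* Recording: a vkCmd* call appends a malloc'ed entry to the buffer. */
static struct cmd_entry *record_draw(struct cmd_entry *tail, unsigned verts)
{
   struct cmd_entry *e = calloc(1, sizeof(*e));
   if (!e)
      return NULL;
   e->type = CMD_DRAW;
   e->u.draw.vertex_count = verts;
   if (tail)
      tail->next = e;
   return e;
}

/* Playback: the queue's "graphics" thread walks the list and issues
 * the equivalent gallium calls on the one wrapped context. */
static void playback(struct cmd_entry *head, struct pipe_context *ctx)
{
   (void)ctx;
   for (struct cmd_entry *e = head; e; e = e->next) {
      switch (e->type) {
      case CMD_DRAW:
         /* would translate into the context's draw call here */
         break;
      }
   }
}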

Resources are allocated via the new resource/memory APIs.
Shaders are created via the context and bound/unbound in the
second thread.

(No HW driver support, for several reasons: memory management,
sw paths for lots of operations, and the pointless CPU-side queue.)

v2: drop mesa_icd, drop cpp_args, drop extra flags, change meson config (Eric)
v2.1: use meson-gallium job

meson pieces:
Reviewed-by: Eric Engestrom <eric@engestrom.ch>

overall:
Acked-by: Roland Scheidegger <sroland@vmware.com>
Part-of: <!6082>
parent 8004fa9c
@@ -550,6 +550,7 @@ meson-gallium:
       -D gallium-nine=true
       -D gallium-opencl=disabled
     GALLIUM_DRIVERS: "iris,nouveau,kmsro,r300,r600,freedreno,swr,swrast,svga,v3d,vc4,virgl,etnaviv,panfrost,lima,zink"
+    VULKAN_DRIVERS: swrast
     EXTRA_OPTION: >
       -D osmesa=gallium
       -D tools=all
@@ -243,9 +243,9 @@ _vulkan_drivers = get_option('vulkan-drivers')
 if _vulkan_drivers.contains('auto')
   if system_has_kms_drm
     if host_machine.cpu_family().startswith('x86')
-      _vulkan_drivers = ['amd', 'intel']
+      _vulkan_drivers = ['amd', 'intel', 'swrast']
     elif ['arm', 'aarch64'].contains(host_machine.cpu_family())
-      _vulkan_drivers = []
+      _vulkan_drivers = ['swrast']
     else
       error('Unknown architecture @0@. Please pass -Dvulkan-drivers to set driver options. Patches gladly accepted to fix this.'.format(
         host_machine.cpu_family()))
@@ -262,8 +262,12 @@ endif
 with_intel_vk = _vulkan_drivers.contains('intel')
 with_amd_vk = _vulkan_drivers.contains('amd')
 with_freedreno_vk = _vulkan_drivers.contains('freedreno')
+with_swrast_vk = _vulkan_drivers.contains('swrast')
 with_any_vk = _vulkan_drivers.length() != 0
+if with_swrast_vk and not with_gallium_softpipe
+  error('swrast vulkan requires gallium swrast')
+endif
 if with_dri_swrast and (with_gallium_softpipe or with_gallium_swr)
   error('Only one swrast provider can be built')
 endif
@@ -166,7 +166,7 @@ option(
   'vulkan-drivers',
   type : 'array',
   value : ['auto'],
-  choices : ['auto', 'amd', 'freedreno', 'intel'],
+  choices : ['auto', 'amd', 'freedreno', 'intel', 'swrast'],
   description : 'List of vulkan drivers to build. If this is set to auto all drivers applicable to the target OS/architecture will be built'
 )
 option(
val_entrypoints = custom_target(
  'val_entrypoints.[ch]',
  input : ['val_entrypoints_gen.py', vk_api_xml],
  output : ['val_entrypoints.h', 'val_entrypoints.c'],
  command : [
    prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--outdir',
    meson.current_build_dir()
  ],
  depend_files : files('val_extensions.py'),
)

val_extensions_c = custom_target(
  'val_extensions.c',
  input : ['val_extensions.py', vk_api_xml],
  output : ['val_extensions.c', 'val_extensions.h'],
  command : [
    prog_python, '@INPUT0@', '--xml', '@INPUT1@', '--out-c', '@OUTPUT0@',
    '--out-h', '@OUTPUT1@'
  ],
)

libval_files = files(
  'val_device.c',
  'val_cmd_buffer.c',
  'val_descriptor_set.c',
  'val_execute.c',
  'val_util.c',
  'val_image.c',
  'val_formats.c',
  'val_lower_vulkan_resource.c',
  'val_lower_vulkan_resource.h',
  'val_lower_input_attachments.c',
  'val_pass.c',
  'val_pipeline.c',
  'val_pipeline_cache.c',
  'val_query.c',
  'val_wsi.c')

val_deps = []
val_flags = []

if with_platform_x11
  val_deps += dep_xcb_dri3
  val_flags += [
    '-DVK_USE_PLATFORM_XCB_KHR',
    '-DVK_USE_PLATFORM_XLIB_KHR',
  ]
  libval_files += files('val_wsi_x11.c')
endif

if with_platform_wayland
  val_deps += dep_wayland_client
  val_flags += '-DVK_USE_PLATFORM_WAYLAND_KHR'
  libval_files += files('val_wsi_wayland.c')
endif

libvallium_st = static_library(
  'vallium_st',
  [libval_files, val_entrypoints, val_extensions_c],
  link_with : [libvulkan_wsi],
  c_args : [val_flags],
  gnu_symbol_visibility : 'hidden',
  include_directories : [inc_include, inc_src, inc_util, inc_gallium, inc_compiler, inc_gallium_aux, inc_vulkan_wsi],
  dependencies : [idep_nir, idep_mesautil, idep_vulkan_util]
)
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#pragma once

static inline unsigned vk_cull_to_pipe(uint32_t vk_cull)
{
   /* these correspond */
   return vk_cull;
}

static inline unsigned vk_polygon_mode_to_pipe(uint32_t vk_poly_mode)
{
   /* these correspond */
   return vk_poly_mode;
}

static inline unsigned vk_conv_stencil_op(uint32_t vk_stencil_op)
{
   switch (vk_stencil_op) {
   case VK_STENCIL_OP_KEEP:
      return PIPE_STENCIL_OP_KEEP;
   case VK_STENCIL_OP_ZERO:
      return PIPE_STENCIL_OP_ZERO;
   case VK_STENCIL_OP_REPLACE:
      return PIPE_STENCIL_OP_REPLACE;
   case VK_STENCIL_OP_INCREMENT_AND_CLAMP:
      return PIPE_STENCIL_OP_INCR;
   case VK_STENCIL_OP_DECREMENT_AND_CLAMP:
      return PIPE_STENCIL_OP_DECR;
   case VK_STENCIL_OP_INVERT:
      return PIPE_STENCIL_OP_INVERT;
   case VK_STENCIL_OP_INCREMENT_AND_WRAP:
      return PIPE_STENCIL_OP_INCR_WRAP;
   case VK_STENCIL_OP_DECREMENT_AND_WRAP:
      return PIPE_STENCIL_OP_DECR_WRAP;
   default:
      assert(0);
      return 0;
   }
}

static inline unsigned vk_conv_topology(VkPrimitiveTopology topology)
{
   switch (topology) {
   case VK_PRIMITIVE_TOPOLOGY_POINT_LIST:
      return PIPE_PRIM_POINTS;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST:
      return PIPE_PRIM_LINES;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP:
      return PIPE_PRIM_LINE_STRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST:
      return PIPE_PRIM_TRIANGLES;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP:
      return PIPE_PRIM_TRIANGLE_STRIP;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN:
      return PIPE_PRIM_TRIANGLE_FAN;
   case VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY:
      return PIPE_PRIM_LINES_ADJACENCY;
   case VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY:
      return PIPE_PRIM_LINE_STRIP_ADJACENCY;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY:
      return PIPE_PRIM_TRIANGLES_ADJACENCY;
   case VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY:
      return PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY;
   case VK_PRIMITIVE_TOPOLOGY_PATCH_LIST:
      return PIPE_PRIM_PATCHES;
   default:
      assert(0);
      return 0;
   }
}

static inline unsigned vk_conv_wrap_mode(enum VkSamplerAddressMode addr_mode)
{
   switch (addr_mode) {
   case VK_SAMPLER_ADDRESS_MODE_REPEAT:
      return PIPE_TEX_WRAP_REPEAT;
   case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
      return PIPE_TEX_WRAP_MIRROR_REPEAT;
   case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
      return PIPE_TEX_WRAP_CLAMP_TO_EDGE;
   case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
      return PIPE_TEX_WRAP_CLAMP_TO_BORDER;
   case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
      return PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE;
   default:
      assert(0);
      return 0;
   }
}

static inline unsigned vk_conv_blend_factor(enum VkBlendFactor vk_factor)
{
   switch (vk_factor) {
   case VK_BLEND_FACTOR_ZERO:
      return PIPE_BLENDFACTOR_ZERO;
   case VK_BLEND_FACTOR_ONE:
      return PIPE_BLENDFACTOR_ONE;
   case VK_BLEND_FACTOR_SRC_COLOR:
      return PIPE_BLENDFACTOR_SRC_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR:
      return PIPE_BLENDFACTOR_INV_SRC_COLOR;
   case VK_BLEND_FACTOR_DST_COLOR:
      return PIPE_BLENDFACTOR_DST_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR:
      return PIPE_BLENDFACTOR_INV_DST_COLOR;
   case VK_BLEND_FACTOR_SRC_ALPHA:
      return PIPE_BLENDFACTOR_SRC_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA:
      return PIPE_BLENDFACTOR_INV_SRC_ALPHA;
   case VK_BLEND_FACTOR_DST_ALPHA:
      return PIPE_BLENDFACTOR_DST_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA:
      return PIPE_BLENDFACTOR_INV_DST_ALPHA;
   case VK_BLEND_FACTOR_CONSTANT_COLOR:
      return PIPE_BLENDFACTOR_CONST_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR:
      return PIPE_BLENDFACTOR_INV_CONST_COLOR;
   case VK_BLEND_FACTOR_CONSTANT_ALPHA:
      return PIPE_BLENDFACTOR_CONST_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA:
      return PIPE_BLENDFACTOR_INV_CONST_ALPHA;
   case VK_BLEND_FACTOR_SRC1_COLOR:
      return PIPE_BLENDFACTOR_SRC1_COLOR;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR:
      return PIPE_BLENDFACTOR_INV_SRC1_COLOR;
   case VK_BLEND_FACTOR_SRC1_ALPHA:
      return PIPE_BLENDFACTOR_SRC1_ALPHA;
   case VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA:
      return PIPE_BLENDFACTOR_INV_SRC1_ALPHA;
   case VK_BLEND_FACTOR_SRC_ALPHA_SATURATE:
      return PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE;
   default:
      assert(0);
      return 0;
   }
}

static inline unsigned vk_conv_blend_func(enum VkBlendOp op)
{
   switch (op) {
   case VK_BLEND_OP_ADD:
      return PIPE_BLEND_ADD;
   case VK_BLEND_OP_SUBTRACT:
      return PIPE_BLEND_SUBTRACT;
   case VK_BLEND_OP_REVERSE_SUBTRACT:
      return PIPE_BLEND_REVERSE_SUBTRACT;
   case VK_BLEND_OP_MIN:
      return PIPE_BLEND_MIN;
   case VK_BLEND_OP_MAX:
      return PIPE_BLEND_MAX;
   default:
      assert(0);
      return 0;
   }
}

static inline enum pipe_swizzle vk_conv_swizzle(VkComponentSwizzle swiz)
{
   switch (swiz) {
   case VK_COMPONENT_SWIZZLE_ZERO:
      return PIPE_SWIZZLE_0;
   case VK_COMPONENT_SWIZZLE_ONE:
      return PIPE_SWIZZLE_1;
   case VK_COMPONENT_SWIZZLE_R:
      return PIPE_SWIZZLE_X;
   case VK_COMPONENT_SWIZZLE_G:
      return PIPE_SWIZZLE_Y;
   case VK_COMPONENT_SWIZZLE_B:
      return PIPE_SWIZZLE_Z;
   case VK_COMPONENT_SWIZZLE_A:
      return PIPE_SWIZZLE_W;
   case VK_COMPONENT_SWIZZLE_IDENTITY:
   default:
      return PIPE_SWIZZLE_NONE;
   }
}
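
As a usage illustration, here is a hedged sketch of how helpers like
vk_conv_stencil_op above are applied when translating pipeline state;
sketch_translate_stencil and its wiring are illustrative, not the
actual vallium pipeline code:

/* Illustrative only: converting one Vulkan stencil face to gallium's
 * pipe_stencil_state using the helpers above. Assumes the real
 * VkStencilOpState / pipe_stencil_state definitions are in scope. */
static inline void
sketch_translate_stencil(const VkStencilOpState *vk_face,
                         struct pipe_stencil_state *out)
{
   out->enabled = 1;
   out->func = vk_face->compareOp;   /* VkCompareOp and PIPE_FUNC_* correspond */
   out->fail_op = vk_conv_stencil_op(vk_face->failOp);
   out->zpass_op = vk_conv_stencil_op(vk_face->passOp);
   out->zfail_op = vk_conv_stencil_op(vk_face->depthFailOp);
   out->valuemask = vk_face->compareMask;
   out->writemask = vk_face->writeMask;
}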
/*
* Copyright © 2019 Red Hat.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "val_private.h"
#include "vk_util.h"
#include "u_math.h"
VkResult val_CreateDescriptorSetLayout(
   VkDevice _device,
   const VkDescriptorSetLayoutCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkDescriptorSetLayout* pSetLayout)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_descriptor_set_layout *set_layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO);

   uint32_t max_binding = 0;
   uint32_t immutable_sampler_count = 0;
   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      max_binding = MAX2(max_binding, pCreateInfo->pBindings[j].binding);
      if (pCreateInfo->pBindings[j].pImmutableSamplers)
         immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
   }

   size_t size = sizeof(struct val_descriptor_set_layout) +
                 (max_binding + 1) * sizeof(set_layout->binding[0]) +
                 immutable_sampler_count * sizeof(struct val_sampler *);

   set_layout = vk_zalloc2(&device->alloc, pAllocator, size, 8,
                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set_layout)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &set_layout->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_SET_LAYOUT);

   /* We just allocate all the samplers at the end of the struct */
   struct val_sampler **samplers =
      (struct val_sampler **)&set_layout->binding[max_binding + 1];

   set_layout->binding_count = max_binding + 1;
   set_layout->shader_stages = 0;
   set_layout->size = 0;

   uint32_t dynamic_offset_count = 0;

   for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
      const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
      uint32_t b = binding->binding;

      set_layout->binding[b].array_size = binding->descriptorCount;
      set_layout->binding[b].descriptor_index = set_layout->size;
      set_layout->binding[b].type = binding->descriptorType;
      set_layout->binding[b].valid = true;
      set_layout->size += binding->descriptorCount;

      for (gl_shader_stage stage = MESA_SHADER_VERTEX; stage < MESA_SHADER_STAGES; stage++) {
         set_layout->binding[b].stage[stage].const_buffer_index = -1;
         set_layout->binding[b].stage[stage].shader_buffer_index = -1;
         set_layout->binding[b].stage[stage].sampler_index = -1;
         set_layout->binding[b].stage[stage].sampler_view_index = -1;
         set_layout->binding[b].stage[stage].image_index = -1;
      }

      if (binding->descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
          binding->descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) {
         set_layout->binding[b].dynamic_index = dynamic_offset_count;
         dynamic_offset_count += binding->descriptorCount;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
         val_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_index = set_layout->stage[s].sampler_count;
            set_layout->stage[s].sampler_count += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      switch (binding->descriptorType) {
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
         val_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].const_buffer_index = set_layout->stage[s].const_buffer_count;
            set_layout->stage[s].const_buffer_count += binding->descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         val_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].shader_buffer_index = set_layout->stage[s].shader_buffer_count;
            set_layout->stage[s].shader_buffer_count += binding->descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         val_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].image_index = set_layout->stage[s].image_count;
            set_layout->stage[s].image_count += binding->descriptorCount;
         }
         break;
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         val_foreach_stage(s, binding->stageFlags) {
            set_layout->binding[b].stage[s].sampler_view_index = set_layout->stage[s].sampler_view_count;
            set_layout->stage[s].sampler_view_count += binding->descriptorCount;
         }
         break;
      default:
         break;
      }

      if (binding->pImmutableSamplers) {
         set_layout->binding[b].immutable_samplers = samplers;
         samplers += binding->descriptorCount;
         for (uint32_t i = 0; i < binding->descriptorCount; i++)
            set_layout->binding[b].immutable_samplers[i] =
               val_sampler_from_handle(binding->pImmutableSamplers[i]);
      } else {
         set_layout->binding[b].immutable_samplers = NULL;
      }

      set_layout->shader_stages |= binding->stageFlags;
   }

   set_layout->dynamic_offset_count = dynamic_offset_count;

   *pSetLayout = val_descriptor_set_layout_to_handle(set_layout);

   return VK_SUCCESS;
}
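
The vk_zalloc2 call above packs the layout struct, the per-binding
array, and the immutable-sampler pointers into a single allocation.
A generic sketch of that trailing-array pattern, with hypothetical
layout/item types standing in for the vallium ones:

/* Illustrative trailing-array allocation (hypothetical types). */
#include <stdlib.h>

struct item { int index; };

struct layout {
   unsigned count;
   struct item binding[];            /* flexible array member */
};

static struct layout *alloc_layout(unsigned bindings, unsigned samplers)
{
   /* one block: struct, then binding array, then sampler pointers */
   size_t size = sizeof(struct layout) +
                 bindings * sizeof(struct item) +
                 samplers * sizeof(void *);
   struct layout *l = calloc(1, size);
   if (!l)
      return NULL;
   l->count = bindings;
   /* the sampler pointers start just past the last binding, exactly
    * like the `samplers` pointer computed in the function above */
   void **sampler_ptrs = (void **)&l->binding[bindings];
   (void)sampler_ptrs;
   return l;
}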
void val_DestroyDescriptorSetLayout(
   VkDevice _device,
   VkDescriptorSetLayout _set_layout,
   const VkAllocationCallbacks* pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout, _set_layout);

   if (!_set_layout)
      return;
   vk_object_base_finish(&set_layout->base);
   vk_free2(&device->alloc, pAllocator, set_layout);
}
VkResult val_CreatePipelineLayout(
   VkDevice _device,
   const VkPipelineLayoutCreateInfo* pCreateInfo,
   const VkAllocationCallbacks* pAllocator,
   VkPipelineLayout* pPipelineLayout)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   struct val_pipeline_layout *layout;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);

   layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (layout == NULL)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   vk_object_base_init(&device->vk, &layout->base,
                       VK_OBJECT_TYPE_PIPELINE_LAYOUT);
   layout->num_sets = pCreateInfo->setLayoutCount;

   for (uint32_t set = 0; set < pCreateInfo->setLayoutCount; set++) {
      VAL_FROM_HANDLE(val_descriptor_set_layout, set_layout,
                      pCreateInfo->pSetLayouts[set]);
      layout->set[set].layout = set_layout;
   }

   layout->push_constant_size = 0;
   for (unsigned i = 0; i < pCreateInfo->pushConstantRangeCount; ++i) {
      const VkPushConstantRange *range = pCreateInfo->pPushConstantRanges + i;
      layout->push_constant_size = MAX2(layout->push_constant_size,
                                        range->offset + range->size);
   }
   layout->push_constant_size = align(layout->push_constant_size, 16);
   *pPipelineLayout = val_pipeline_layout_to_handle(layout);
   return VK_SUCCESS;
}
void val_DestroyPipelineLayout(
   VkDevice _device,
   VkPipelineLayout _pipelineLayout,
   const VkAllocationCallbacks* pAllocator)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_pipeline_layout, pipeline_layout, _pipelineLayout);

   if (!_pipelineLayout)
      return;
   vk_object_base_finish(&pipeline_layout->base);
   vk_free2(&device->alloc, pAllocator, pipeline_layout);
}
VkResult
val_descriptor_set_create(struct val_device *device,
                          const struct val_descriptor_set_layout *layout,
                          struct val_descriptor_set **out_set)
{
   struct val_descriptor_set *set;
   size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);

   set = vk_alloc(&device->alloc /* XXX: Use the pool */, size, 8,
                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (!set)
      return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

   /* A descriptor set may not be 100% filled. Clear the set so we can
    * later detect holes in it.
    */
   memset(set, 0, size);

   vk_object_base_init(&device->vk, &set->base,
                       VK_OBJECT_TYPE_DESCRIPTOR_SET);
   set->layout = layout;

   /* Go through and fill out immutable samplers if we have any */
   struct val_descriptor *desc = set->descriptors;
   for (uint32_t b = 0; b < layout->binding_count; b++) {
      if (layout->binding[b].immutable_samplers) {
         for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
            desc[i].sampler = layout->binding[b].immutable_samplers[i];
      }
      desc += layout->binding[b].array_size;
   }

   *out_set = set;

   return VK_SUCCESS;
}
void
val_descriptor_set_destroy(struct val_device *device,
                           struct val_descriptor_set *set)
{
   vk_object_base_finish(&set->base);
   vk_free(&device->alloc, set);
}
VkResult val_AllocateDescriptorSets(
   VkDevice _device,
   const VkDescriptorSetAllocateInfo* pAllocateInfo,
   VkDescriptorSet* pDescriptorSets)
{
   VAL_FROM_HANDLE(val_device, device, _device);
   VAL_FROM_HANDLE(val_descriptor_pool, pool, pAllocateInfo->descriptorPool);
   VkResult result = VK_SUCCESS;
   struct val_descriptor_set *set;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->descriptorSetCount; i++) {
      VAL_FROM_HANDLE(val_descriptor_set_layout, layout,