Commit 18319a23 authored by Ella Stanforth, committed by Marge Bot
Browse files

v3dv: add support for multi-planar formats, enable YCbCr



Original patches written by Ella Stanforth.

Alejandro Piñeiro main changes (skipping the small fixes/typos):
  * Reduced the list of supported formats to
    VK_FORMAT_G8_B8_R8_3PLANE_420_UNORM and
    VK_FORMAT_G8_B8R8_2PLANE_420_UNORM, that are the two only
    mandatory by the spec.
  * Fix format features exposed with YCbCr:
    * Disallow some features not supported with YCbCr (like blitting)
    * Disallow storage image support. Not clear if really useful. Even
      if there are CTS tests, there is an ongoing discussion about the
      possibility to remove them.
    * Expose VK_FORMAT_FEATURE_COSITED_CHROMA_SAMPLES_BIT, that is
      mandatory for the formats supported.
    * Not expose VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT. Some
      CTS tests are failing right now, and it is not mandatory. Likely
    to be revisited later.
    * We are keeping VK_FORMAT_FEATURE_2_DISJOINT_BIT and
      VK_FORMAT_FEATURE_2_MIDPOINT_CHROMA_SAMPLES_BIT. Even if they
      are optional, it is working with the two formats that we are
      exposing. Likely that will need to be refined if we start to
      expose more formats.
  * create_image_view: don't use hardcoded 0x70, but instead doing an
    explicit bit or of VK_IMAGE_ASPECT_PLANE_0/1/2_BIT
  * image_format_plane_features: keep how supported aspects and
    separate stencil check is done. Even if the change introduced was
    correct (not sure about that though), that change is unrelated to
    this work
  * write_image_descriptor: add additional checks for descriptor type,
    to compute properly the offset.
  * Cosmetic changes (don't use // for comments, capital letters, etc)
  * Main changes coming from the review:
     * Not use image aliases. All the info is already on the image
       planes, and some points of the code were confusing as it was
       using always a hardcoded plane 0.
     * Squashed the two original main patches. YCbCr conversion was
       leaking on the multi-planar support, as some support needed
       info coming from the ycbcr structs.
     * Not expose the extension on Android, and explicitly assert that
       we expect plane_count to be 1 always.
  * For a full list of review changes, see MR#19950

Signed-off-by: Ella Stanforth <estanforth@igalia.com>
Signed-off-by: Alejandro Piñeiro <apinheiro@igalia.com>
Part-of: <!19950>
parent 2ef614a2
......@@ -439,7 +439,7 @@ Vulkan 1.1 -- all DONE: anv, lvp, radv, tu, vn
VK_KHR_maintenance3 DONE (anv, lvp, radv, tu, v3dv, vn)
VK_KHR_multiview DONE (anv, lvp, radv, tu, v3dv, vn)
VK_KHR_relaxed_block_layout DONE (anv, lvp, radv, tu, v3dv, vn)
VK_KHR_sampler_ycbcr_conversion DONE (anv, radv, tu, vn)
VK_KHR_sampler_ycbcr_conversion DONE (anv, radv, tu, v3dv, vn)
VK_KHR_shader_draw_parameters DONE (anv, dzn, lvp, radv, tu, vn)
VK_KHR_storage_buffer_storage_class DONE (anv, lvp, panvk, radv, tu, v3dv, vn)
VK_KHR_variable_pointers DONE (anv, lvp, panvk, radv, tu, v3dv, vn)
......
......@@ -256,12 +256,13 @@ v3dv_import_native_buffer_fd(VkDevice device_h,
.fd = os_dupfd_cloexec(native_buffer_fd),
};
assert(image->plane_count == 1);
result =
v3dv_AllocateMemory(device_h,
&(VkMemoryAllocateInfo) {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.pNext = &import_info,
.allocationSize = image->size,
.allocationSize = image->planes[0].size,
.memoryTypeIndex = 0,
},
alloc, &memory_h);
......
......@@ -1157,16 +1157,17 @@ cmd_buffer_state_set_attachment_clear_color(struct v3dv_cmd_buffer *cmd_buffer,
const VkClearColorValue *color)
{
assert(attachment_idx < cmd_buffer->state.pass->attachment_count);
const struct v3dv_render_pass_attachment *attachment =
&cmd_buffer->state.pass->attachments[attachment_idx];
uint32_t internal_type, internal_bpp;
const struct v3dv_format *format =
v3dv_X(cmd_buffer->device, get_format)(attachment->desc.format);
/* We don't allow multi-planar formats for render pass attachments */
assert(format->plane_count == 1);
v3dv_X(cmd_buffer->device, get_internal_type_bpp_for_output_format)
(format->rt_type, &internal_type, &internal_bpp);
(format->planes[0].rt_type, &internal_type, &internal_bpp);
uint32_t internal_size = 4 << internal_bpp;
......
......@@ -28,8 +28,8 @@
/*
* For a given descriptor defined by the descriptor_set it belongs, its
* binding layout, and array_index, it returns the map region assigned to it
* from the descriptor pool bo.
* binding layout, array_index, and plane, it returns the map region assigned
* to it from the descriptor pool bo.
*/
static void *
descriptor_bo_map(struct v3dv_device *device,
......@@ -47,7 +47,7 @@ descriptor_bo_map(struct v3dv_device *device,
return set->pool->bo->map +
set->base_offset + binding_layout->descriptor_offset +
array_index * bo_size;
array_index * binding_layout->plane_stride * bo_size;
}
static bool
......@@ -132,8 +132,11 @@ v3dv_descriptor_map_get_descriptor_bo(struct v3dv_device *device,
const struct v3dv_descriptor_set_binding_layout *binding_layout =
&set->layout->binding[binding_number];
uint32_t bo_size = v3dv_X(device, descriptor_bo_size)(binding_layout->type);
assert(binding_layout->type == VK_DESCRIPTOR_TYPE_INLINE_UNIFORM_BLOCK ||
v3dv_X(device, descriptor_bo_size)(binding_layout->type) > 0);
bo_size > 0);
if (out_type)
*out_type = binding_layout->type;
......@@ -143,7 +146,7 @@ v3dv_descriptor_map_get_descriptor_bo(struct v3dv_device *device,
struct v3dv_cl_reloc reloc = {
.bo = set->pool->bo,
.offset = set->base_offset + binding_layout->descriptor_offset +
array_index * v3dv_X(device, descriptor_bo_size)(binding_layout->type),
array_index * binding_layout->plane_stride * bo_size,
};
return reloc;
......@@ -222,7 +225,7 @@ v3dv_descriptor_map_get_sampler_state(struct v3dv_device *device,
type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER);
if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
reloc.offset += v3dv_X(device, combined_image_sampler_sampler_state_offset)();
reloc.offset += v3dv_X(device, combined_image_sampler_sampler_state_offset)(map->plane[index]);
return reloc;
}
......@@ -250,7 +253,8 @@ v3dv_descriptor_map_get_texture_bo(struct v3dv_descriptor_state *descriptor_stat
assert(descriptor->image_view);
struct v3dv_image *image =
(struct v3dv_image *) descriptor->image_view->vk.image;
return image->mem->bo;
assert(map->plane[index] < image->plane_count);
return image->planes[map->plane[index]].mem->bo;
}
default:
unreachable("descriptor type doesn't has a texture bo");
......@@ -279,16 +283,29 @@ v3dv_descriptor_map_get_texture_shader_state(struct v3dv_device *device,
type == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER);
if (type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
reloc.offset += v3dv_X(device, combined_image_sampler_texture_state_offset)();
reloc.offset += v3dv_X(device, combined_image_sampler_texture_state_offset)(map->plane[index]);
return reloc;
}
#define SHA1_UPDATE_VALUE(ctx, x) _mesa_sha1_update(ctx, &(x), sizeof(x));
static void
sha1_update_ycbcr_conversion(struct mesa_sha1 *ctx,
const struct vk_ycbcr_conversion *conversion)
{
SHA1_UPDATE_VALUE(ctx, conversion->format);
SHA1_UPDATE_VALUE(ctx, conversion->ycbcr_model);
SHA1_UPDATE_VALUE(ctx, conversion->ycbcr_range);
SHA1_UPDATE_VALUE(ctx, conversion->mapping);
SHA1_UPDATE_VALUE(ctx, conversion->chroma_offsets);
SHA1_UPDATE_VALUE(ctx, conversion->chroma_reconstruction);
}
static void
sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
const struct v3dv_descriptor_set_binding_layout *layout)
const struct v3dv_descriptor_set_binding_layout *layout,
const struct v3dv_descriptor_set_layout *set_layout)
{
SHA1_UPDATE_VALUE(ctx, layout->type);
SHA1_UPDATE_VALUE(ctx, layout->array_size);
......@@ -297,6 +314,18 @@ sha1_update_descriptor_set_binding_layout(struct mesa_sha1 *ctx,
SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_index);
SHA1_UPDATE_VALUE(ctx, layout->descriptor_offset);
SHA1_UPDATE_VALUE(ctx, layout->immutable_samplers_offset);
SHA1_UPDATE_VALUE(ctx, layout->plane_stride);
if (layout->immutable_samplers_offset) {
const struct v3dv_sampler *immutable_samplers =
v3dv_immutable_samplers(set_layout, layout);
for (unsigned i = 0; i < layout->array_size; i++) {
const struct v3dv_sampler *sampler = &immutable_samplers[i];
if (sampler->conversion)
sha1_update_ycbcr_conversion(ctx, sampler->conversion);
}
}
}
static void
......@@ -310,7 +339,7 @@ sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
SHA1_UPDATE_VALUE(ctx, layout->dynamic_offset_count);
for (uint16_t i = 0; i < layout->binding_count; i++)
sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i]);
sha1_update_descriptor_set_binding_layout(ctx, &layout->binding[i], layout);
}
......@@ -632,6 +661,13 @@ v3dv_CreateDescriptorSetLayout(VkDevice _device,
uint32_t num_bindings = 0;
uint32_t immutable_sampler_count = 0;
/* for immutable descriptors, the plane stride is the largest plane
* count of all combined image samplers. For mutable descriptors
* this is always 1 since multiplanar images are restricted to
* immutable combined image samplers.
*/
uint8_t plane_stride = 1;
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
num_bindings = MAX2(num_bindings, pCreateInfo->pBindings[j].binding + 1);
......@@ -650,7 +686,15 @@ v3dv_CreateDescriptorSetLayout(VkDevice _device,
if ((desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
desc_type == VK_DESCRIPTOR_TYPE_SAMPLER) &&
pCreateInfo->pBindings[j].pImmutableSamplers) {
immutable_sampler_count += pCreateInfo->pBindings[j].descriptorCount;
uint32_t descriptor_count = pCreateInfo->pBindings[j].descriptorCount;
immutable_sampler_count += descriptor_count;
for (uint32_t i = 0; i < descriptor_count; i++) {
const VkSampler vk_sampler =
pCreateInfo->pBindings[j].pImmutableSamplers[i];
VK_FROM_HANDLE(v3dv_sampler, sampler, vk_sampler);
plane_stride = MAX2(plane_stride, sampler->plane_count);
}
}
}
......@@ -728,6 +772,7 @@ v3dv_CreateDescriptorSetLayout(VkDevice _device,
set_layout->binding[binding_number].array_size = binding->descriptorCount;
set_layout->binding[binding_number].descriptor_index = descriptor_count;
set_layout->binding[binding_number].dynamic_offset_index = dynamic_offset_count;
set_layout->binding[binding_number].plane_stride = plane_stride;
if ((binding->descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ||
binding->descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER) &&
......@@ -740,6 +785,8 @@ v3dv_CreateDescriptorSetLayout(VkDevice _device,
samplers += binding->descriptorCount;
samplers_offset += sizeof(struct v3dv_sampler) * binding->descriptorCount;
set_layout->binding[binding_number].plane_stride = plane_stride;
}
set_layout->shader_stages |= binding->stageFlags;
......@@ -754,7 +801,7 @@ v3dv_CreateDescriptorSetLayout(VkDevice _device,
set_layout->bo_size;
set_layout->bo_size +=
v3dv_X(device, descriptor_bo_size)(set_layout->binding[binding_number].type) *
binding->descriptorCount;
binding->descriptorCount * set_layout->binding[binding_number].plane_stride;
} else {
/* We align all our buffers, inline buffers too. We made sure to take
* this account when calculating total BO size requirements at pool
......@@ -906,16 +953,18 @@ descriptor_set_create(struct v3dv_device *device,
layout->binding[b].immutable_samplers_offset);
for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
uint32_t combined_offset =
layout->binding[b].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ?
v3dv_X(device, combined_image_sampler_sampler_state_offset)() : 0;
void *desc_map = descriptor_bo_map(device, set, &layout->binding[b], i);
desc_map += combined_offset;
memcpy(desc_map,
samplers[i].sampler_state,
sizeof(samplers[i].sampler_state));
assert(samplers[i].plane_count <= V3DV_MAX_PLANE_COUNT);
for (uint8_t plane = 0; plane < samplers[i].plane_count; plane++) {
uint32_t combined_offset =
layout->binding[b].type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ?
v3dv_X(device, combined_image_sampler_sampler_state_offset)(plane) : 0;
void *desc_map =
descriptor_bo_map(device, set, &layout->binding[b], i);
desc_map += combined_offset;
memcpy(desc_map, samplers[i].sampler_state,
sizeof(samplers[i].sampler_state));
}
}
}
......@@ -994,11 +1043,16 @@ descriptor_bo_copy(struct v3dv_device *device,
uint32_t src_array_index)
{
assert(dst_binding_layout->type == src_binding_layout->type);
assert(src_binding_layout->plane_stride == dst_binding_layout->plane_stride);
void *dst_map = descriptor_bo_map(device, dst_set, dst_binding_layout, dst_array_index);
void *src_map = descriptor_bo_map(device, src_set, src_binding_layout, src_array_index);
void *dst_map = descriptor_bo_map(device, dst_set, dst_binding_layout,
dst_array_index);
void *src_map = descriptor_bo_map(device, src_set, src_binding_layout,
src_array_index);
memcpy(dst_map, src_map, v3dv_X(device, descriptor_bo_size)(src_binding_layout->type));
memcpy(dst_map, src_map,
v3dv_X(device, descriptor_bo_size)(src_binding_layout->type) *
src_binding_layout->plane_stride);
}
static void
......@@ -1033,26 +1087,39 @@ write_image_descriptor(struct v3dv_device *device,
descriptor->sampler = sampler;
descriptor->image_view = iview;
assert(iview || sampler);
uint8_t plane_count = iview ? iview->plane_count : sampler->plane_count;
void *desc_map = descriptor_bo_map(device, set,
binding_layout, array_index);
if (iview) {
const uint32_t tex_state_index =
iview->vk.view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
desc_type != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? 0 : 1;
memcpy(desc_map,
iview->texture_shader_state[tex_state_index],
sizeof(iview->texture_shader_state[0]));
desc_map += v3dv_X(device, combined_image_sampler_sampler_state_offset)();
}
for (uint8_t plane = 0; plane < plane_count; plane++) {
if (iview) {
uint32_t offset = desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ?
v3dv_X(device, combined_image_sampler_texture_state_offset)(plane) : 0;
if (sampler && !binding_layout->immutable_samplers_offset) {
/* For immutable samplers this was already done as part of the
* descriptor set create, as that info can't change later
*/
memcpy(desc_map,
sampler->sampler_state,
sizeof(sampler->sampler_state));
void *plane_desc_map = desc_map + offset;
const uint32_t tex_state_index =
iview->vk.view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
desc_type != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? 0 : 1;
memcpy(plane_desc_map,
iview->planes[plane].texture_shader_state[tex_state_index],
sizeof(iview->planes[plane].texture_shader_state[0]));
}
if (sampler && !binding_layout->immutable_samplers_offset) {
uint32_t offset = desc_type == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER ?
v3dv_X(device, combined_image_sampler_sampler_state_offset)(plane) : 0;
void *plane_desc_map = desc_map + offset;
/* For immutable samplers this was already done as part of the
* descriptor set create, as that info can't change later
*/
memcpy(plane_desc_map,
sampler->sampler_state,
sizeof(sampler->sampler_state));
}
}
}
......@@ -1146,12 +1213,11 @@ v3dv_UpdateDescriptorSets(VkDevice _device,
break;
}
case VK_DESCRIPTOR_TYPE_SAMPLER: {
/* If we are here we shouldn't be modifying a immutable sampler,
* so we don't ensure that would work or not crash. But let the
* validation layers check that
*/
/* If we are here we shouldn't be modifying an immutable sampler */
assert(!binding_layout->immutable_samplers_offset);
const VkDescriptorImageInfo *image_info = writeset->pImageInfo + j;
V3DV_FROM_HANDLE(v3dv_sampler, sampler, image_info->sampler);
write_image_descriptor(device, descriptor, writeset->descriptorType,
set, binding_layout, NULL, sampler,
writeset->dstArrayElement + j);
......@@ -1163,6 +1229,7 @@ v3dv_UpdateDescriptorSets(VkDevice _device,
case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
const VkDescriptorImageInfo *image_info = writeset->pImageInfo + j;
V3DV_FROM_HANDLE(v3dv_image_view, iview, image_info->imageView);
write_image_descriptor(device, descriptor, writeset->descriptorType,
set, binding_layout, iview, NULL,
writeset->dstArrayElement + j);
......@@ -1172,7 +1239,17 @@ v3dv_UpdateDescriptorSets(VkDevice _device,
case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER: {
const VkDescriptorImageInfo *image_info = writeset->pImageInfo + j;
V3DV_FROM_HANDLE(v3dv_image_view, iview, image_info->imageView);
V3DV_FROM_HANDLE(v3dv_sampler, sampler, image_info->sampler);
struct v3dv_sampler *sampler = NULL;
if (!binding_layout->immutable_samplers_offset) {
/* In general we ignore the sampler when updating a combined
* image sampler, but for YCbCr we know that we must use
* immutable combined image samplers
*/
assert(iview->plane_count == 1);
V3DV_FROM_HANDLE(v3dv_sampler, _sampler, image_info->sampler);
sampler = _sampler;
}
write_image_descriptor(device, descriptor, writeset->descriptorType,
set, binding_layout, iview, sampler,
writeset->dstArrayElement + j);
......@@ -1447,23 +1524,3 @@ v3dv_UpdateDescriptorSetWithTemplate(
}
}
}
VKAPI_ATTR VkResult VKAPI_CALL
v3dv_CreateSamplerYcbcrConversion(
VkDevice _device,
const VkSamplerYcbcrConversionCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator,
VkSamplerYcbcrConversion *pYcbcrConversion)
{
unreachable("Ycbcr sampler conversion is not supported");
return VK_SUCCESS;
}
VKAPI_ATTR void VKAPI_CALL
v3dv_DestroySamplerYcbcrConversion(
VkDevice _device,
VkSamplerYcbcrConversion YcbcrConversion,
const VkAllocationCallbacks *pAllocator)
{
unreachable("Ycbcr sampler conversion is not supported");
}
......@@ -151,6 +151,9 @@ get_device_extensions(const struct v3dv_physical_device *device,
.KHR_shader_float_controls = true,
.KHR_shader_non_semantic_info = true,
.KHR_sampler_mirror_clamp_to_edge = true,
#ifndef ANDROID
.KHR_sampler_ycbcr_conversion = true,
#endif
.KHR_spirv_1_4 = true,
.KHR_storage_buffer_storage_class = true,
.KHR_timeline_semaphore = true,
......@@ -1239,7 +1242,11 @@ v3dv_GetPhysicalDeviceFeatures2(VkPhysicalDevice physicalDevice,
/* FIXME: this needs support for non-constant index on UBO/SSBO */
.variablePointers = false,
.protectedMemory = false,
#ifdef ANDROID
.samplerYcbcrConversion = false,
#else
.samplerYcbcrConversion = true,
#endif
.shaderDrawParameters = false,
};
......@@ -2596,14 +2603,28 @@ v3dv_InvalidateMappedMemoryRanges(VkDevice _device,
static void
get_image_memory_requirements(struct v3dv_image *image,
VkImageAspectFlagBits planeAspect,
VkMemoryRequirements2 *pMemoryRequirements)
{
pMemoryRequirements->memoryRequirements = (VkMemoryRequirements) {
.memoryTypeBits = 0x1,
.alignment = image->alignment,
.size = image->size
.alignment = image->planes[0].alignment,
.size = image->non_disjoint_size
};
if (planeAspect != VK_IMAGE_ASPECT_NONE) {
assert(image->format->plane_count > 1);
/* Disjoint images should have a 0 non_disjoint_size */
assert(!pMemoryRequirements->memoryRequirements.size);
uint8_t plane = v3dv_image_aspect_to_plane(image, planeAspect);
VkMemoryRequirements *mem_reqs =
&pMemoryRequirements->memoryRequirements;
mem_reqs->alignment = image->planes[plane].alignment;
mem_reqs->size = image->planes[plane].size;
}
vk_foreach_struct(ext, pMemoryRequirements->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS: {
......@@ -2626,7 +2647,23 @@ v3dv_GetImageMemoryRequirements2(VkDevice device,
VkMemoryRequirements2 *pMemoryRequirements)
{
V3DV_FROM_HANDLE(v3dv_image, image, pInfo->image);
get_image_memory_requirements(image, pMemoryRequirements);
VkImageAspectFlagBits planeAspect = VK_IMAGE_ASPECT_NONE;
vk_foreach_struct_const(ext, pInfo->pNext) {
switch (ext->sType) {
case VK_STRUCTURE_TYPE_IMAGE_PLANE_MEMORY_REQUIREMENTS_INFO: {
VkImagePlaneMemoryRequirementsInfo *req =
(VkImagePlaneMemoryRequirementsInfo *) ext;
planeAspect = req->planeAspect;
break;
}
default:
v3dv_debug_ignored_stype(ext->sType);
break;
}
}
get_image_memory_requirements(image, planeAspect, pMemoryRequirements);
}
VKAPI_ATTR void VKAPI_CALL
......@@ -2644,7 +2681,23 @@ v3dv_GetDeviceImageMemoryRequirementsKHR(
v3dv_image_init(device, pInfo->pCreateInfo, NULL, &image);
assert(result == VK_SUCCESS);
get_image_memory_requirements(&image, pMemoryRequirements);
/* From VkDeviceImageMemoryRequirements spec:
*
* " planeAspect is a VkImageAspectFlagBits value specifying the aspect
* corresponding to the image plane to query. This parameter is ignored
* unless pCreateInfo::tiling is
* VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT, or pCreateInfo::flags has
* VK_IMAGE_CREATE_DISJOINT_BIT set"
*
* We need to explicitly ignore that flag, or following asserts could be
* triggered.
*/
VkImageAspectFlagBits planeAspect =
pInfo->pCreateInfo->tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT ||
pInfo->pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT ?
pInfo->planeAspect : 0;
get_image_memory_requirements(&image, planeAspect, pMemoryRequirements);
}
static void
......@@ -2659,11 +2712,43 @@ bind_image_memory(const VkBindImageMemoryInfo *info)
* the VkMemoryRequirements structure returned from a call to
* vkGetImageMemoryRequirements with image"
*/
assert(info->memoryOffset % image->alignment == 0);
assert(info->memoryOffset < mem->bo->size);
image->mem = mem;
image->mem_offset = info->memoryOffset;
uint64_t offset = info->memoryOffset;
if (image->non_disjoint_size) {
/* We only check for plane 0 as it is the only one that actually starts
* at that offset
*/
assert(offset % image->planes[0].alignment == 0);
for (uint8_t plane = 0; plane < image->plane_count; plane++) {
image->planes[plane].mem = mem;
image->planes[plane].mem_offset = offset;
}
} else {
const VkBindImagePlaneMemoryInfo *plane_mem_info =
vk_find_struct_const(info->pNext, BIND_IMAGE_PLANE_MEMORY_INFO);
assert(plane_mem_info);
/*
* From VkBindImagePlaneMemoryInfo spec:
*
* "If the image’s tiling is VK_IMAGE_TILING_LINEAR or
* VK_IMAGE_TILING_OPTIMAL, then planeAspect must be a single valid
* format plane for the image"
*
* <skip>
*
* "If the image’s tiling is VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT,
* then planeAspect must be a single valid memory plane for the
* image"
*
* So planeAspect should only refer to one plane.
*/
uint8_t plane = v3dv_plane_from_aspect(plane_mem_info->planeAspect);
assert(offset % image->planes[plane].alignment == 0);
image->planes[plane].mem = mem;
image->planes[plane].mem_offset = offset;
}
}
VKAPI_ATTR VkResult VKAPI_CALL
......@@ -2680,11 +2765,13 @@ v3dv_BindImageMemory2(VkDevice _device,
struct v3dv_image *swapchain_image =
v3dv_wsi_get_image_from_swapchain(swapchain_info->swapchain,
swapchain_info->imageIndex);
/* Making the assumption that swapchain images are a single plane */
assert(swapchain_image->plane_count == 1);
VkBindImageMemoryInfo swapchain_bind = {
.sType = VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,
.image = pBindInfos[i].image,
.memory = v3dv_device_memory_to_handle(swapchain_image->mem),
.memoryOffset = swapchain_image->mem_offset,
.memory = v3dv_device_memory_to_handle(swapchain_image->planes[0].mem),
.memoryOffset = swapchain_image->planes[0].mem_offset,
};
bind_image_memory(&swapchain_bind);
} else
......@@ -2957,6 +3044,8 @@ v3dv_CreateSampler(VkDevice _device,
if (!sampler)
return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);
sampler->plane_count = 1;
sampler->compare_enable = pCreateInfo->compareEnable;
sampler->unnormalized_coordinates = pCreateInfo->unnormalizedCoordinates;
......@@ -2964,6 +3053,20 @@ v3dv_CreateSampler(VkDevice _device,
vk_find_struct_const(pCreateInfo->pNext,
SAMPLER_CUSTOM_BORDER_COLOR_CREATE_INFO_EXT);
const VkSamplerYcbcrConversionInfo *ycbcr_conv_info =
vk_find_struct_const(pCreateInfo->pNext, SAMPLER_YCBCR_CONVERSION_INFO);
const struct vk_format_ycbcr_info *ycbcr_info = NULL;
if (ycbcr_conv_info) {
VK_FROM_HANDLE(vk_ycbcr_conversion, conversion, ycbcr_conv_info->conversion);
ycbcr_info = vk_format_get_ycbcr_info(conversion->format);
if (ycbcr_info) {
sampler->plane_count = ycbcr_info->n_planes;
sampler->conversion = conversion;
}
}
v3dv_X(device, pack_sampler_state)(sampler, pCreateInfo, bc_info);
*pSampler = v3dv_sampler_to_handle(sampler);
......
......@@ -30,7 +30,7 @@
#include "vulkan/wsi/wsi_common.h"
const uint8_t *
v3dv_get_format_swizzle(struct v3dv_device *device, VkFormat f)
v3dv_get_format_swizzle(struct v3dv_device *device, VkFormat f, uint8_t plane)
{
const struct v3dv_format *vf = v3dv_X(device, get_format)(f);
static const uint8_t fallback[] = {0, 1, 2, 3};
......@@ -38,7 +38,7 @@ v3dv_get_format_swizzle(struct v3dv_device *device, VkFormat f)
if (!vf)
return fallback;
return vf->swizzle;
return vf->planes[plane].swizzle;
}
bool
......@@ -82,6 +82,9 @@ v3dv_format_swizzle_needs_reverse(const uint8_t *swizzle)
* involved). In these cases, it is safe to choose any format supported by