Commit 9585b28b authored by Joseph Jenner-Bailey's avatar Joseph Jenner-Bailey Committed by Matteo Franchin
Browse files

Implement a vector utility that uses a custom allocator



Add util::vector to provide functionality similar to std::vector,
i.e. growable arrays. util::vector does its allocations via a custom
allocator provided via VkAllocationCallbacks and provides methods to
check whether an allocation has failed, e.g. try_push_back() returns false
when the allocation fails; it replaces std::vector's push_back() method,
which would instead throw an exception to report an allocation failure.

Note that this commit switches on exceptions in the layer by removing
the flag -fno-exceptions.

util::vector is also used to store swapchain images.

Change-Id: I2bf2b24bd06e198c198c4f4aedd8f7fced96a346
Signed-off-by: Joe Jenner-Bailey <joe.jenner-bailey@arm.com>
Signed-off-by: Matteo Franchin <matteo.franchin@arm.com>
parent e5f77f73
Pipeline #238300 passed with stage
in 1 minute and 23 seconds
# Copyright (c) 2019 Arm Limited.
# Copyright (c) 2019-2020 Arm Limited.
#
# SPDX-License-Identifier: MIT
#
......@@ -26,8 +26,10 @@ project(VkLayer_window_system_integration CXX)
find_package(PkgConfig REQUIRED)
pkg_check_modules(VULKAN_PKG_CONFIG vulkan)
# Note: C++ exceptions are enabled (the -fno-exceptions flag was removed);
# util::vector relies on catching std::bad_alloc from allocation failures.
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions -Wall")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall")
if (DEFINED DEBUG)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0")
endif()
if(NOT DEFINED VULKAN_CXX_INCLUDE)
set(VULKAN_CXX_INCLUDE ${VULKAN_PKG_CONFIG_INCLUDEDIR})
......@@ -46,6 +48,7 @@ add_library(${PROJECT_NAME} SHARED
layer/surface_api.cpp
layer/swapchain_api.cpp
util/timed_semaphore.cpp
util/custom_allocator.cpp
wsi/swapchain_base.cpp
wsi/wsi_factory.cpp
wsi/headless/surface_properties.cpp
......
/*
* Copyright (c) 2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "custom_allocator.hpp"
extern "C" {

/* Default allocation callbacks, used when the application does not provide its
 * own VkAllocationCallbacks. They simply forward to the C heap.
 *
 * NOTE(review): the alignment argument is ignored; malloc()/realloc() only
 * guarantee alignment suitable for max_align_t, so types with a larger
 * alignment requirement would not be handled correctly — confirm no such
 * types are allocated through these callbacks.
 */

/* Allocate 'size' bytes; returns nullptr on failure, as per the
 * PFN_vkAllocationFunction contract. */
static void *default_allocation(void *, size_t size, size_t, VkSystemAllocationScope)
{
   return malloc(size);
}

/* Resize a previous allocation; returns nullptr on failure, leaving the
 * original block valid, as per the PFN_vkReallocationFunction contract. */
static void *default_reallocation(void *, void *pOriginal, size_t size, size_t, VkSystemAllocationScope)
{
   return realloc(pOriginal, size);
}

/* Free a block previously returned by the two callbacks above. */
static void default_free(void *, void *pMemory)
{
   free(pMemory);
}

}
namespace util
{
/* If callbacks is already populated by vulkan then use those specified as default. */
/**
 * @brief Construct the wrapper from optional Vulkan allocation callbacks.
 *
 * If the client supplied a VkAllocationCallbacks structure through Vulkan,
 * a copy of it is adopted; otherwise the default malloc/realloc/free based
 * callbacks declared above are installed.
 *
 * @param callbacks Client-provided callbacks, or @c nullptr for defaults.
 * @param scope     Allocation scope passed to every allocation made through
 *                  this wrapper.
 */
allocator::allocator(const VkAllocationCallbacks *callbacks, VkSystemAllocationScope scope)
{
   m_scope = scope;
   if (callbacks == nullptr)
   {
      /* No client callbacks: fall back to the plain C heap. */
      m_callbacks = {};
      m_callbacks.pfnAllocation = default_allocation;
      m_callbacks.pfnReallocation = default_reallocation;
      m_callbacks.pfnFree = default_free;
   }
   else
   {
      m_callbacks = *callbacks;
   }
}
} /* namespace util */
/*
* Copyright (c) 2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <cstdint>
#include <new>
#include <string>
#include <vector>

#include <vulkan/vulkan.h>
#pragma once
namespace util
{
/**
* @brief Minimalistic wrapper of VkAllocationCallbacks.
*/
class allocator
{
public:
   /* Construct from optional Vulkan callbacks; when 'callbacks' is nullptr,
    * default malloc/realloc/free based callbacks are used instead. 'scope' is
    * forwarded to every allocation made through this object. */
   allocator(const VkAllocationCallbacks *callbacks, VkSystemAllocationScope scope);

   /* Allocate and construct a T, forwarding 'args' to its constructor.
    * Returns nullptr on allocation or construction failure (no exception
    * escapes). Defined below, after custom_allocator. */
   template <typename T, typename... arg_types>
   T *create(arg_types &&... args) const;

   /* Destroy and deallocate an object previously obtained from create(). */
   template <typename T>
   void destroy(T *obj) const;

   /* Public so that custom_allocator (and other helpers) can reach the raw
    * callbacks and scope directly. */
   VkAllocationCallbacks m_callbacks;
   VkSystemAllocationScope m_scope;
};
/**
* @brief Implementation of an allocator that can be used with STL containers.
*/
/**
 * @brief Implementation of an allocator that can be used with STL containers.
 *
 * Forwards every allocation to the VkAllocationCallbacks wrapped by the given
 * util::allocator and throws std::bad_alloc on failure, so it satisfies the
 * Allocator named requirements and can be used as the Allocator template
 * argument of standard containers.
 */
template <typename T>
class custom_allocator
{
public:
   using value_type = T;
   using pointer = T *;

   /* @brief Wrap the given util::allocator; a copy of it is stored. */
   custom_allocator(const allocator &alloc)
      : m_alloc(alloc)
   {
   }

   /* @brief Rebinding constructor, required by the Allocator named
    * requirements (e.g. for container node types). */
   template <typename U>
   custom_allocator(const custom_allocator<U> &other)
      : m_alloc(other.get_data())
   {
   }

   /* @brief Access the wrapped util::allocator. */
   const allocator &get_data() const
   {
      return m_alloc;
   }

   /* @brief Allocate uninitialized storage for @p n objects of type T.
    * @throws std::bad_alloc if the byte count would overflow or the
    *   underlying callback returns @c nullptr.
    */
   pointer allocate(size_t n) const
   {
      /* Guard against size_t overflow in n * sizeof(T); report it like any
       * other allocation failure. */
      if (n > SIZE_MAX / sizeof(T))
         throw std::bad_alloc();
      size_t size = n * sizeof(T);
      auto &cb = m_alloc.m_callbacks;
      void *ret = cb.pfnAllocation(cb.pUserData, size, alignof(T), m_alloc.m_scope);
      if (ret == nullptr)
         throw std::bad_alloc();
      return reinterpret_cast<pointer>(ret);
   }

   /* @brief Resize a previous allocation to hold @p n objects of type T.
    * @throws std::bad_alloc if the byte count would overflow or the
    *   underlying callback returns @c nullptr.
    */
   pointer allocate(size_t n, void *ptr) const
   {
      if (n > SIZE_MAX / sizeof(T))
         throw std::bad_alloc();
      size_t size = n * sizeof(T);
      auto &cb = m_alloc.m_callbacks;
      void *ret = cb.pfnReallocation(cb.pUserData, ptr, size, alignof(T), m_alloc.m_scope);
      if (ret == nullptr)
         throw std::bad_alloc();
      return reinterpret_cast<pointer>(ret);
   }

   /* @brief Free storage previously obtained from allocate(). */
   void deallocate(void *ptr, size_t) const noexcept
   {
      m_alloc.m_callbacks.pfnFree(m_alloc.m_callbacks.pUserData, ptr);
   }

private:
   /* Deliberately not const: the Allocator named requirements demand that
    * allocators be copy-assignable (used by container assignment and swap);
    * a const data member would implicitly delete custom_allocator's
    * copy-assignment operator. */
   allocator m_alloc;
};
/* All custom_allocator instances compare equal, i.e. memory allocated by one
 * may be deallocated by any other.
 *
 * NOTE(review): this assumes every custom_allocator in the program wraps
 * compatible VkAllocationCallbacks (or the defaults) — confirm that
 * containers are never mixed across different callback sets, since freeing
 * through a different pfnFree than the allocating pfnAllocation would be
 * incorrect. */
template <typename T, typename U>
bool operator==(const custom_allocator<T> &, const custom_allocator<U> &)
{
   return true;
}

template <typename T, typename U>
bool operator!=(const custom_allocator<T> &, const custom_allocator<U> &)
{
   return false;
}
/**
* @brief Helper method to allocate and construct objects with a custom allocator.
* @return The new object or @c nullptr if allocation failed.
*/
template <typename T, typename... arg_types>
T *allocator::create(arg_types &&... args) const
{
custom_allocator<T> allocator(*this);
T *ptr;
try
{
ptr = allocator.allocate(1);
}
catch (...)
{
return nullptr;
}
try
{
new (ptr) T(std::forward<arg_types>(args)...);
}
catch (...)
{
/* We catch all exceptions thrown while constructing the object, not just
* std::bad_alloc.
*/
allocator.deallocate(ptr, 1);
return nullptr;
}
return ptr;
}
/**
* @brief Helper method to destroy and deallocate objects constructed with create_custom().
*/
template <typename T>
void allocator::destroy(T *obj) const
{
obj->~T();
custom_allocator<T> allocator(*this);
allocator.deallocate(obj, 1);
}
/* @brief Helper that forwards destruction to a static T::destroy() method.
 *
 * NOTE(review): the contract of T::destroy is established elsewhere — nothing
 * in this file defines such a method; presumably types destroyed this way
 * release themselves via their own allocator. */
template <typename T>
void destroy_custom(T *obj)
{
   T::destroy(obj);
}
/**
* @brief Vector using a Vulkan custom allocator to allocate its elements.
* @note The vector must be passed a custom_allocator during construction and it takes a copy
* of it, meaning that the user is free to destroy the custom_allocator after constructing the
* vector.
*/
/**
 * @brief Vector using a Vulkan custom allocator to allocate its elements.
 *
 * @note The vector must be passed a custom_allocator during construction and it takes a copy
 *   of it, meaning that the user is free to destroy the custom_allocator after constructing the
 *   vector.
 * @note This derives publicly from std::vector but adds no data members; do
 *   not delete an instance through a pointer to the std::vector base, which
 *   has no virtual destructor.
 */
template <typename T>
class vector : public std::vector<T, custom_allocator<T>>
{
public:
   using base = std::vector<T, custom_allocator<T>>;
   using base::base;

   /* Delete all methods that can cause allocation failure, i.e. can throw std::bad_alloc.
    *
    * Rationale: we want to force users to use our corresponding try_... method instead:
    * this makes the API slightly more annoying to use, but hopefully safer as it encourages
    * users to check for allocation failures, which is important for Vulkan.
    *
    * Note: deleting each of these methods (below) deletes all its overloads from the base class,
    * to be precise: the deleted method covers the methods (all overloads) in the base class.
    * Note: clear() is already noexcept since C++11.
    */
   void insert() = delete;
   void emplace() = delete;
   void emplace_back() = delete;
   void push_back() = delete;
   void resize() = delete;
   void reserve() = delete;
   /* Note pop_back(), erase(), clear() do not throw std::bad_alloc exceptions. */

   /* @brief Like std::vector::push_back, but non throwing.
    * @return @c false iff the operation could not be performed due to an allocation failure.
    *
    * NOTE(review): only std::bad_alloc is caught here; if T's copy/move
    * constructor throws any other exception type, it escapes the catch and,
    * because this method is noexcept, std::terminate() is called — confirm
    * that element types stored in util::vector cannot throw anything else.
    */
   template <typename... arg_types>
   bool try_push_back(arg_types &&... args) noexcept
   {
      try
      {
         base::push_back(std::forward<arg_types>(args)...);
         return true;
      }
      catch (const std::bad_alloc &e)
      {
         return false;
      }
   }

   /* @brief push back multiple elements at once
    * @param begin Pointer to the first element to copy.
    * @param end   Pointer one past the last element to copy.
    * @return @c false iff the operation could not be performed due to an allocation failure.
    *
    * Note: on failure, elements already pushed before the failing one remain
    * in the vector (no rollback is performed).
    */
   bool try_push_back_many(const T *begin, const T *end) noexcept
   {
      for (const T *it = begin; it != end; ++it)
      {
         if (!try_push_back(*it))
         {
            return false;
         }
      }
      return true;
   }

   /* @brief Like std::vector::resize, but non throwing.
    * @return @c false iff the operation could not be performed due to an allocation failure.
    *
    * NOTE(review): as with try_push_back, exceptions other than
    * std::bad_alloc thrown by element constructors would hit the noexcept
    * boundary and terminate — confirm element types only throw bad_alloc.
    */
   template <typename... arg_types>
   bool try_resize(arg_types &&... args) noexcept
   {
      try
      {
         base::resize(std::forward<arg_types>(args)...);
         return true;
      }
      catch (const std::bad_alloc &e)
      {
         return false;
      }
   }
};
} /* namespace util */
\ No newline at end of file
/*
* Copyright (c) 2017-2019 Arm Limited.
* Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
......@@ -53,7 +53,7 @@ namespace wsi
void swapchain_base::page_flip_thread()
{
wsi::swapchain_image *sc_images = m_swapchain_images;
auto &sc_images = m_swapchain_images;
VkResult vk_res = VK_SUCCESS;
uint64_t timeout = UINT64_MAX;
constexpr uint64_t SEMAPHORE_TIMEOUT = 250000000; /* 250 ms. */
......@@ -140,9 +140,8 @@ swapchain_base::swapchain_base(layer::device_private_data &dev_data, const VkAll
, m_thread_sem_defined(false)
, m_first_present(true)
, m_pending_buffer_pool{ nullptr, 0, 0, 0 }
, m_num_swapchain_images(0)
, m_swapchain_images(nullptr)
, m_alloc_callbacks(allocator)
, m_swapchain_images(util::allocator(m_alloc_callbacks, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT))
, m_surface(VK_NULL_HANDLE)
, m_present_mode(VK_PRESENT_MODE_IMMEDIATE_KHR)
, m_descendant(VK_NULL_HANDLE)
......@@ -180,51 +179,20 @@ VkResult swapchain_base::init(VkDevice device, const VkSwapchainCreateInfoKHR *s
return VK_ERROR_INITIALIZATION_FAILED;
}
m_num_swapchain_images = swapchain_create_info->minImageCount;
size_t images_alloc_size = sizeof(swapchain_image) * m_num_swapchain_images;
if (m_alloc_callbacks != nullptr)
{
m_swapchain_images = static_cast<swapchain_image *>(m_alloc_callbacks->pfnAllocation(
m_alloc_callbacks->pUserData, images_alloc_size, alignof(swapchain_image), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
}
else
{
m_swapchain_images = static_cast<swapchain_image *>(malloc(images_alloc_size));
}
if (m_swapchain_images == nullptr)
{
m_num_swapchain_images = 0;
/* Init image to invalid values. */
if (!m_swapchain_images.try_resize(swapchain_create_info->minImageCount))
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
/* We have allocated images, we can call the platform init function if something needs to be done. */
result = init_platform(device, swapchain_create_info);
if (result != VK_SUCCESS)
{
return result;
}
for (uint32_t i = 0; i < m_num_swapchain_images; ++i)
{
/* Init image to invalid values. */
m_swapchain_images[i].image = VK_NULL_HANDLE;
m_swapchain_images[i].present_fence = VK_NULL_HANDLE;
m_swapchain_images[i].status = swapchain_image::INVALID;
m_swapchain_images[i].data = nullptr;
}
/* Initialize ring buffer. */
if (m_alloc_callbacks != nullptr)
{
m_pending_buffer_pool.ring = static_cast<uint32_t *>(
m_alloc_callbacks->pfnAllocation(m_alloc_callbacks->pUserData, sizeof(uint32_t) * m_num_swapchain_images,
m_alloc_callbacks->pfnAllocation(m_alloc_callbacks->pUserData, sizeof(uint32_t) * m_swapchain_images.size(),
alignof(uint32_t), VK_SYSTEM_ALLOCATION_SCOPE_OBJECT));
}
else
{
m_pending_buffer_pool.ring = static_cast<uint32_t *>(malloc(sizeof(uint32_t) * m_num_swapchain_images));
m_pending_buffer_pool.ring = static_cast<uint32_t *>(malloc(sizeof(uint32_t) * m_swapchain_images.size()));
}
if (m_pending_buffer_pool.ring == nullptr)
......@@ -234,7 +202,14 @@ VkResult swapchain_base::init(VkDevice device, const VkSwapchainCreateInfoKHR *s
m_pending_buffer_pool.head = 0;
m_pending_buffer_pool.tail = 0;
m_pending_buffer_pool.size = m_num_swapchain_images;
m_pending_buffer_pool.size = m_swapchain_images.size();
/* We have allocated images, we can call the platform init function if something needs to be done. */
result = init_platform(device, swapchain_create_info);
if (result != VK_SUCCESS)
{
return result;
}
VkImageCreateInfo image_create_info = {};
image_create_info.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO;
......@@ -253,16 +228,16 @@ VkResult swapchain_base::init(VkDevice device, const VkSwapchainCreateInfoKHR *s
image_create_info.pQueueFamilyIndices = swapchain_create_info->pQueueFamilyIndices;
image_create_info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
result = m_free_image_semaphore.init(m_num_swapchain_images);
result = m_free_image_semaphore.init(m_swapchain_images.size());
if (result != VK_SUCCESS)
{
assert(result == VK_ERROR_OUT_OF_HOST_MEMORY);
return result;
}
for (unsigned i = 0; i < m_num_swapchain_images; i++)
for (auto& img : m_swapchain_images)
{
result = create_image(image_create_info, m_swapchain_images[i]);
result = create_image(image_create_info, img);
if (result != VK_SUCCESS)
{
return result;
......@@ -330,10 +305,10 @@ void swapchain_base::teardown()
if (m_descendant != VK_NULL_HANDLE)
{
auto *desc = reinterpret_cast<swapchain_base *>(m_descendant);
for (uint32_t i = 0; i < desc->m_num_swapchain_images; ++i)
for (auto& img : desc->m_swapchain_images)
{
if (desc->m_swapchain_images[i].status == swapchain_image::PRESENTED ||
desc->m_swapchain_images[i].status == swapchain_image::PENDING)
if (img.status == swapchain_image::PRESENTED ||
img.status == swapchain_image::PENDING)
{
/* Here we wait for the start_present_semaphore, once this semaphore is up,
* the descendant has finished waiting, we don't want to delete vkImages and vkFences
......@@ -389,28 +364,13 @@ void swapchain_base::teardown()
auto *sc = reinterpret_cast<swapchain_base *>(m_ancestor);
sc->clear_descendant();
}
/* Release the images array. */
if (m_swapchain_images != nullptr)
for (auto& img : m_swapchain_images)
{
for (uint32_t i = 0; i < m_num_swapchain_images; ++i)
{
/* Call implementation specific release */
destroy_image(m_swapchain_images[i]);
}
if (m_alloc_callbacks != nullptr)
{
m_alloc_callbacks->pfnFree(m_alloc_callbacks->pUserData, m_swapchain_images);
}
else
{
free(m_swapchain_images);
}
/* Call implementation specific release */
destroy_image(img);
}
/* Free ring buffer. */
if (m_pending_buffer_pool.ring != nullptr)
{
if (m_alloc_callbacks != nullptr)
......@@ -438,7 +398,7 @@ VkResult swapchain_base::acquire_next_image(uint64_t timeout, VkSemaphore semaph
}
uint32_t i;
for (i = 0; i < m_num_swapchain_images; ++i)
for (i = 0; i < m_swapchain_images.size(); ++i)
{
if (m_swapchain_images[i].status == swapchain_image::FREE)
{
......@@ -448,7 +408,7 @@ VkResult swapchain_base::acquire_next_image(uint64_t timeout, VkSemaphore semaph
}
}
assert(i < m_num_swapchain_images);
assert(i < m_swapchain_images.size());
if (VK_NULL_HANDLE != semaphore || VK_NULL_HANDLE != fence)
{
......@@ -474,13 +434,13 @@ VkResult swapchain_base::get_swapchain_images(uint32_t *swapchain_image_count, V
if (swapchain_images == nullptr)
{
/* Return the number of swapchain images. */
*swapchain_image_count = m_num_swapchain_images;
*swapchain_image_count = m_swapchain_images.size();
return VK_SUCCESS;
}
else
{
assert(m_num_swapchain_images > 0);
assert(m_swapchain_images.size() > 0);
assert(*swapchain_image_count > 0);
/* Populate array, write actual number of images returned. */
......@@ -492,7 +452,7 @@ VkResult swapchain_base::get_swapchain_images(uint32_t *swapchain_image_count, V
current_image++;
if (current_image == m_num_swapchain_images)
if (current_image == m_swapchain_images.size())
{
*swapchain_image_count = current_image;
......@@ -517,10 +477,10 @@ VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *pr
if (m_descendant != VK_NULL_HANDLE)
{
auto *desc = reinterpret_cast<swapchain_base *>(m_descendant);
for (uint32_t i = 0; i < desc->m_num_swapchain_images; ++i)
for (auto& img : desc->m_swapchain_images)
{
if (desc->m_swapchain_images[i].status == swapchain_image::PRESENTED ||
desc->m_swapchain_images[i].status == swapchain_image::PENDING)
if (img.status == swapchain_image::PRESENTED ||
img.status == swapchain_image::PENDING)
{
descendent_started_presenting = true;
break;
......@@ -583,11 +543,11 @@ VkResult swapchain_base::queue_present(VkQueue queue, const VkPresentInfoKHR *pr
void swapchain_base::deprecate(VkSwapchainKHR descendant)
{
for (unsigned i = 0; i < m_num_swapchain_images; i++)
for (auto& img : m_swapchain_images)
{
if (m_swapchain_images[i].status == swapchain_image::FREE)
if (img.status == swapchain_image::FREE)
{
destroy_image(m_swapchain_images[i]);
destroy_image(img);
}
}
......@@ -600,9 +560,9 @@ void swapchain_base::wait_for_pending_buffers()
int num_acquired_images = 0;
int wait;
for (uint32_t i = 0; i < m_num_swapchain_images; ++i)
for (auto& img : m_swapchain_images)
{
if (m_swapchain_images[i].status == swapchain_image::ACQUIRED)
if (img.status == swapchain_image::ACQUIRED)
{
++num_acquired_images;
}
......@@ -611,7 +571,7 @@ void swapchain_base::wait_for_pending_buffers()
/* Once all the pending buffers are flipped, the swapchain should have images
* in ACQUIRED (application fails to queue them back for presentation), FREE
* and one and only one in PRESENTED. */
wait = m_num_swapchain_images - num_acquired_images - 1;
wait = m_swapchain_images.size() - num_acquired_images - 1;
while (wait > 0)
{
......
/*
* Copyright (c) 2017-2019 Arm Limited.
* Copyright (c) 2017-2020 Arm Limited.
*
* SPDX-License-Identifier: MIT
*
......@@ -37,6 +37,7 @@
#include <layer/private_data.hpp>
#include <util/timed_semaphore.hpp>
#include <util/custom_allocator.hpp>
namespace wsi
{
......@@ -52,12 +53,12 @@ struct swapchain_image
};
/* Implementation specific data */
void *data;
void *data{nullptr};
VkImage image;
status status;
VkImage image{VK_NULL_HANDLE};
status status{swapchain_image::INVALID};
VkFence present_fence;