/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrDirectContext.h>
#include <GrTypes.h>
#include <android/sync.h>
#include <ui/FatVector.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

#include <cstring>

#include <gui/TraceUtils.h>
#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

namespace android {
namespace uirenderer {
namespace renderthread {

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

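// Skia resolves all of its Vulkan entry points through this getter. vkQueueSubmit and
// vkQueueWaitIdle are intercepted so that Skia's queue access can be coordinated with
// HWUI's own use of the graphics queue (see mGraphicsQueueMutex); the intercepted
// implementations live elsewhere in VulkanManager.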
GrVkGetProc VulkanManager::sSkiaGetProp = [](const char* proc_name, VkInstance instance,
                                             VkDevice device) {
    if (device != VK_NULL_HANDLE) {
        if (strcmp("vkQueueSubmit", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueSubmit;
        } else if (strcmp("vkQueueWaitIdle", proc_name) == 0) {
            return (PFN_vkVoidFunction)VulkanManager::interceptedVkQueueWaitIdle;
        }
        return vkGetDeviceProcAddr(device, proc_name);
    }
    return vkGetInstanceProcAddr(instance, proc_name);
};

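// Entry points are resolved at three scopes: GET_PROC goes through the loader before an
// instance exists, GET_INST_PROC resolves instance-level functions, and GET_DEV_PROC
// resolves device-level functions directly, avoiding per-call dispatch through the loader.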
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

sp<VulkanManager> VulkanManager::getInstance() {
    // Cache a weak pointer to the context so that a second thread can share the same
    // Vulkan state.
    static wp<VulkanManager> sWeakInstance = nullptr;
    static std::mutex sLock;

    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}

VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

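    // Standard two-call enumeration: query the count, size the storage, then fetch the
    // properties. mInstanceExtensions holds bare char* pointers into mInstanceExtensionsOwner,
    // so the owner vector must outlive any use of the name list.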
    {
        GET_PROC(EnumerateInstanceExtensionProperties);

        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array, since
    // there should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    grExtensions.init(sSkiaGetProp, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;

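    // Each candidate feature struct below is heap-allocated and appended at *tailPNext so
    // that vkGetPhysicalDeviceFeatures2 can fill in the whole chain; the chain is freed
    // later by free_features_extensions_structs().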
    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and we can't
    // depend on it being supported on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[1] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

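    // Properties::contextPriority stores an EGL priority constant; when the
    // VK_EXT_global_priority extension is available it is mapped onto the equivalent
    // Vulkan queue priority below.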
    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            queueNextPtr,                                // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            1,                                           // queueCount
            queuePriorities,                             // pQueuePriorities
    };

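    // Chaining the queried VkPhysicalDeviceFeatures2 into pNext (with pEnabledFeatures
    // left null) enables exactly the features the query reported when the device is created.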
    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            1,                                     // queueCreateInfoCount
            &queueInfo,                            // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // pEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::lock_guard _lock{mInitializeLock};

    if (mDevice != VK_NULL_HANDLE) {
        return;
    }

    GET_PROC(EnumerateInstanceVersion);
    uint32_t instanceVersion;
    LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
    LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

    this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

    mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);

    if (Properties::enablePartialUpdates && Properties::useBufferAge) {
        mSwapBehavior = SwapBehavior::BufferAge;
    }
}

sk_sp<GrDirectContext> VulkanManager::createContext(const GrContextOptions& options,
                                                    ContextType contextType) {
    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue = mGraphicsQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = sSkiaGetProp;

    return GrDirectContext::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

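    // If the dequeued buffer's release fence has not signaled yet, import it as a
    // sync-fd-backed VkSemaphore and make the GPU wait on it; if any step fails, fall
    // back to a CPU-side sync_wait on the fence.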
    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore backendSemaphore;
                        backendSemaphore.initVulkan(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &backendSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why backendSemaphore is not working.
                        bufferInfo->skSurface->flushAndSubmit();
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

struct DestroySemaphoreInfo {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    // We need to make sure we don't delete the VkSemaphore until it is done being used by both Skia
    // (including by the GPU) and inside the VulkanManager. So we always start with two refs, one
    // owned by Skia and one owned by the VulkanManager. The refs are decremented each time
    // destroy_semaphore is called with this object. Skia will call destroy_semaphore once it is
    // done with the semaphore and the GPU has finished work on the semaphore. The VulkanManager
    // calls destroy_semaphore after sending the semaphore to Skia and exporting it if need be.
    int mRefs = 2;

    DestroySemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                         VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {}
};

static void destroy_semaphore(void* context) {
    DestroySemaphoreInfo* info = reinterpret_cast<DestroySemaphoreInfo*>(context);
    --info->mRefs;
    if (!info->mRefs) {
        info->mDestroyFunction(info->mDevice, info->mSemaphore, nullptr);
        delete info;
    }
}

void VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");
    ALOGE_IF(mSwapSemaphore != VK_NULL_HANDLE || mDestroySemaphoreContext != nullptr,
             "finishFrame already has an outstanding semaphore");

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    ALOGE_IF(VK_SUCCESS != err, "VulkanManager::finishFrame(): Failed to create semaphore");

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    GrFlushInfo flushInfo;
    if (err == VK_SUCCESS) {
        mDestroySemaphoreContext = new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
        flushInfo.fNumSemaphores = 1;
        flushInfo.fSignalSemaphores = &backendSemaphore;
        flushInfo.fFinishedProc = destroy_semaphore;
        flushInfo.fFinishedContext = mDestroySemaphoreContext;
    } else {
        semaphore = VK_NULL_HANDLE;
    }
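    // Flushing with kPresent prepares the backbuffer for presentation and asks Skia to
    // signal the export semaphore once the work is submitted; swapBuffers() then exports
    // that semaphore as the frame's present fence.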
    GrSemaphoresSubmitted submitted =
            surface->flush(SkSurface::BackendSurfaceAccess::kPresent, flushInfo);
    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by gpu");
    context->submit();
    if (semaphore != VK_NULL_HANDLE) {
        if (submitted == GrSemaphoresSubmitted::kYes) {
            mSwapSemaphore = semaphore;
            if (mFrameBoundaryANDROID) {
                // retrieve VkImage used as render target
                VkImage image = VK_NULL_HANDLE;
                GrBackendRenderTarget backendRenderTarget =
                        surface->getBackendRenderTarget(SkSurface::kFlushRead_BackendHandleAccess);
                if (backendRenderTarget.isValid()) {
                    GrVkImageInfo info;
                    if (backendRenderTarget.getVkImageInfo(&info)) {
                        image = info.fImage;
                    } else {
                        ALOGE("Frame boundary: backend is not vulkan");
                    }
                } else {
                    ALOGE("Frame boundary: invalid backend render target");
                }
                // frameBoundaryANDROID needs to know about mSwapSemaphore, but
                // it won't wait on it.
                mFrameBoundaryANDROID(mDevice, mSwapSemaphore, image);
            }
        } else {
            destroy_semaphore(mDestroySemaphoreContext);
            mDestroySemaphoreContext = nullptr;
        }
    }
    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);
}

void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

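    // Export the swap semaphore as a sync fd so the window system can use it as this
    // frame's present fence.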
    int fenceFd = -1;
    if (mSwapSemaphore != VK_NULL_HANDLE) {
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = mSwapSemaphore;
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::swapBuffers(): Failed to get semaphore Fd");
    } else {
        ALOGE("VulkanManager::swapBuffers(): Semaphore submission failed");

        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    if (mDestroySemaphoreContext) {
        destroy_semaphore(mDestroySemaphoreContext);
    }

    surface->presentCurrentBuffer(dirtyRect, fenceFd);
    mSwapSemaphore = VK_NULL_HANDLE;
    mDestroySemaphoreContext = nullptr;
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        std::lock_guard<std::mutex> lock(mGraphicsQueueMutex);
        mQueueWaitIdle(mGraphicsQueue);
    }
    mDeviceWaitIdle(mDevice);

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
                                            ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
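    // This is a temporary import: the semaphore adopts the fence's payload for a single
    // wait. On success the semaphore owns fenceFd and will close it, so it must not be
    // closed here.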
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore;
    beSemaphore.initVulkan(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    GrBackendSemaphore backendSemaphore;
    backendSemaphore.initVulkan(semaphore);

    DestroySemaphoreInfo* destroyInfo =
            new DestroySemaphoreInfo(mDestroySemaphore, mDevice, semaphore);
    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore
    // callback, which will remove its ref to the semaphore. The VulkanManager must still release
    // its own ref when it is done with the semaphore.
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = &backendSemaphore;
    flushInfo.fFinishedProc = destroy_semaphore;
    flushInfo.fFinishedContext = destroyInfo;
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        destroy_semaphore(destroyInfo);
        return INVALID_OPERATION;
    }

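    // Exporting the signaled semaphore as a sync fd produces a fence fd that fires once
    // the queued GPU work completes; ownership of the fd passes to the caller.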
    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    destroy_semaphore(destroyInfo);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */