diff --git a/.vscode/settings.json b/.vscode/settings.json index 89d56cba4..f820c1d25 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -105,5 +105,10 @@ "cmake.buildDirectory": "${workspaceFolder}/build/targets/darwin", "C_Cpp.default.cppStandard": "c++17", "C_Cpp.default.cStandard": "c17", - "C_Cpp.errorSquiggles": "enabledIfIncludesResolve" + "C_Cpp.errorSquiggles": "enabledIfIncludesResolve", + "clangd.arguments": [ + "--compile-commands-dir=${workspaceFolder}/build/targets/darwin", + "--background-index", + "--query-driver=/usr/bin/clang*;${env:ANDROID_NDK_HOME}/toolchains/llvm/prebuilt/darwin-x86_64/bin/*" + ] } \ No newline at end of file diff --git a/cmake/TransmuteCommon.cmake b/cmake/TransmuteCommon.cmake index 5116c1e37..482e28345 100644 --- a/cmake/TransmuteCommon.cmake +++ b/cmake/TransmuteCommon.cmake @@ -1,7 +1,6 @@ # Set the common source files. -file(GLOB TR_COMMON_SOURCE +file(GLOB_RECURSE TR_COMMON_SOURCE "src/common/*.cpp" - "src/common/**/*.cpp" ) # Set the common include directories diff --git a/cmake/TransmuteCore.cmake b/cmake/TransmuteCore.cmake index f2013e2ac..ed97eb50e 100644 --- a/cmake/TransmuteCore.cmake +++ b/cmake/TransmuteCore.cmake @@ -11,7 +11,7 @@ if(NOT TR_ENABLE_INSPECTOR) endif() if(APPLE) - file(GLOB TR_CORE_SOURCE_MM "src/renderer/*.mm") + file(GLOB TR_CORE_SOURCE_MM "src/renderer/*.mm" "src/renderer/metal/*.mm") list(APPEND TR_CORE_SOURCE ${TR_CORE_SOURCE_MM}) endif() diff --git a/src/client/script_bindings/webgl/active_info.cpp b/src/client/script_bindings/webgl/active_info.cpp index c0f9d3e81..803ffe763 100644 --- a/src/client/script_bindings/webgl/active_info.cpp +++ b/src/client/script_bindings/webgl/active_info.cpp @@ -60,5 +60,5 @@ namespace endor } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/active_info.hpp b/src/client/script_bindings/webgl/active_info.hpp index b125c893c..c2d6c9080 100644 
--- a/src/client/script_bindings/webgl/active_info.hpp +++ b/src/client/script_bindings/webgl/active_info.hpp @@ -38,5 +38,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/client/script_bindings/webgl/framebuffer.cpp b/src/client/script_bindings/webgl/framebuffer.cpp index dbe790869..d38e0687c 100644 --- a/src/client/script_bindings/webgl/framebuffer.cpp +++ b/src/client/script_bindings/webgl/framebuffer.cpp @@ -31,5 +31,5 @@ namespace endor { } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/client/script_bindings/webgl/framebuffer.hpp b/src/client/script_bindings/webgl/framebuffer.hpp index c54189e65..fd3ab4ec2 100644 --- a/src/client/script_bindings/webgl/framebuffer.hpp +++ b/src/client/script_bindings/webgl/framebuffer.hpp @@ -37,5 +37,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/object.cpp b/src/client/script_bindings/webgl/object.cpp index b03f846b5..78779213d 100644 --- a/src/client/script_bindings/webgl/object.cpp +++ b/src/client/script_bindings/webgl/object.cpp @@ -28,5 +28,5 @@ namespace endor // WebGLObject is a base class, typically not instantiated directly } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/object.hpp b/src/client/script_bindings/webgl/object.hpp index cd48b93ae..f69bb78c8 100644 --- a/src/client/script_bindings/webgl/object.hpp +++ b/src/client/script_bindings/webgl/object.hpp @@ -45,5 +45,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git 
a/src/client/script_bindings/webgl/program.cpp b/src/client/script_bindings/webgl/program.cpp index e4a68a6f8..8c21d7e26 100644 --- a/src/client/script_bindings/webgl/program.cpp +++ b/src/client/script_bindings/webgl/program.cpp @@ -20,5 +20,5 @@ namespace endor } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/client/script_bindings/webgl/program.hpp b/src/client/script_bindings/webgl/program.hpp index ede29c134..0971e7271 100644 --- a/src/client/script_bindings/webgl/program.hpp +++ b/src/client/script_bindings/webgl/program.hpp @@ -32,5 +32,5 @@ namespace endor WebGLProgram(v8::Isolate *isolate, const v8::FunctionCallbackInfo &args); }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/renderbuffer.cpp b/src/client/script_bindings/webgl/renderbuffer.cpp index 7f512b63d..d858602e8 100644 --- a/src/client/script_bindings/webgl/renderbuffer.cpp +++ b/src/client/script_bindings/webgl/renderbuffer.cpp @@ -19,5 +19,5 @@ namespace endor // WebGLRenderbuffer objects are created by WebGL context, not by user code } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/renderbuffer.hpp b/src/client/script_bindings/webgl/renderbuffer.hpp index a9651c800..581ff1b2e 100644 --- a/src/client/script_bindings/webgl/renderbuffer.hpp +++ b/src/client/script_bindings/webgl/renderbuffer.hpp @@ -33,5 +33,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/client/script_bindings/webgl/texture.cpp b/src/client/script_bindings/webgl/texture.cpp index 3b51f5dfa..3d034839c 100644 --- a/src/client/script_bindings/webgl/texture.cpp +++ 
b/src/client/script_bindings/webgl/texture.cpp @@ -27,5 +27,5 @@ namespace endor { } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/texture.hpp b/src/client/script_bindings/webgl/texture.hpp index b40acad08..9272d305c 100644 --- a/src/client/script_bindings/webgl/texture.hpp +++ b/src/client/script_bindings/webgl/texture.hpp @@ -45,5 +45,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/client/script_bindings/webgl/uniform_location.cpp b/src/client/script_bindings/webgl/uniform_location.cpp index 597a73e97..ce71bd5d6 100644 --- a/src/client/script_bindings/webgl/uniform_location.cpp +++ b/src/client/script_bindings/webgl/uniform_location.cpp @@ -47,5 +47,5 @@ namespace endor } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/uniform_location.hpp b/src/client/script_bindings/webgl/uniform_location.hpp index a5e7de1f2..a1f98b66d 100644 --- a/src/client/script_bindings/webgl/uniform_location.hpp +++ b/src/client/script_bindings/webgl/uniform_location.hpp @@ -44,5 +44,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/client/script_bindings/webgl/vertex_array.cpp b/src/client/script_bindings/webgl/vertex_array.cpp index 3e6ed5f0a..48e8e644b 100644 --- a/src/client/script_bindings/webgl/vertex_array.cpp +++ b/src/client/script_bindings/webgl/vertex_array.cpp @@ -20,5 +20,5 @@ namespace endor { } } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor diff --git a/src/client/script_bindings/webgl/vertex_array.hpp b/src/client/script_bindings/webgl/vertex_array.hpp 
index 7ba162aa3..a4a03cc1d 100644 --- a/src/client/script_bindings/webgl/vertex_array.hpp +++ b/src/client/script_bindings/webgl/vertex_array.hpp @@ -33,5 +33,5 @@ namespace endor }; } // namespace webgl - } // namespace script_bindings + } // namespace script_bindings } // namespace endor \ No newline at end of file diff --git a/src/common/assert.cpp b/src/common/assert.cpp new file mode 100644 index 000000000..9b2c46261 --- /dev/null +++ b/src/common/assert.cpp @@ -0,0 +1,57 @@ +#include +#include +#include +#include + +namespace transmute::common +{ +#if TR_COMPILER_IS(CLANG) || TR_COMPILER_IS(GCC) + void BreakPoint() + { +#if TR_PLATFORM_IS(X86) + __asm__ __volatile__("int $3\n\t"); +#elif TR_PLATFORM_IS(ARM32) + __asm__ __volatile__("bkpt 0"); +#elif TR_PLATFORM_IS(ARM64) + __asm__ __volatile__("brk 0xf000"); +#elif TR_PLATFORM_IS(LOONGARCH) + __asm__ __volatile__("break 0"); +#elif TR_PLATFORM_IS(RISCV) + __asm__ __volatile__("ebreak"); +#elif TR_PLATFORM_IS(MIPS) + __asm__ __volatile__("break"); +#elif TR_PLATFORM_IS(S390) || TR_PLATFORM_IS(S390X) + __asm__ __volatile__(".word 0x0001"); +#elif TR_PLATFORM_IS(PPC) || TR_PLATFORM_IS(PPC64) + __asm__ __volatile__("twge 2,2"); +#elif TR_PLATFORM_IS(WASM32) || TR_PLATFORM_IS(WASM64) + EM_ASM(debugger;); +#else +#error "Unsupported platform" +#endif + } + +#elif TR_COMPILER_IS(MSVC) + void BreakPoint() + { + __debugbreak(); + } + +#else +#error "Unsupported compiler" +#endif + + void HandleAssertionFailure(const char *file, + const char *function, + int line, + const char *condition) + { + std::cerr << "Assertion failure at " << file << ":" << line << " (" << function + << "): " << condition; +#if defined(TR_ABORT_ON_ASSERT) + abort(); +#else + BreakPoint(); +#endif + } +} diff --git a/src/common/assert.hpp b/src/common/assert.hpp new file mode 100644 index 000000000..f30888b82 --- /dev/null +++ b/src/common/assert.hpp @@ -0,0 +1,10 @@ +#pragma once + +namespace transmute::common +{ + void BreakPoint(); + void 
HandleAssertionFailure(const char *file, + const char *function, + int line, + const char *condition); +} diff --git a/src/common/command_buffers/gpu/attachment_set.hpp b/src/common/command_buffers/gpu/attachment_set.hpp new file mode 100644 index 000000000..973a2b2b9 --- /dev/null +++ b/src/common/command_buffers/gpu/attachment_set.hpp @@ -0,0 +1,13 @@ +#pragma once + +namespace commandbuffers +{ + class GPUDeviceBase; + + namespace gpu + { + class AttachmentState + { + }; + } +} diff --git a/src/common/command_buffers/gpu/backend_connection.cpp b/src/common/command_buffers/gpu/backend_connection.cpp new file mode 100644 index 000000000..a2eeb0815 --- /dev/null +++ b/src/common/command_buffers/gpu/backend_connection.cpp @@ -0,0 +1,20 @@ +#include + +namespace commandbuffers::gpu +{ + BackendConnection::BackendConnection(GPUInstance *instance, GPUBackendType type) + : instance_(instance) + , type_(type) + { + } + + GPUBackendType BackendConnection::type() const + { + return type_; + } + + GPUInstance *BackendConnection::getInstance() const + { + return instance_; + } +} diff --git a/src/common/command_buffers/gpu/backend_connection.hpp b/src/common/command_buffers/gpu/backend_connection.hpp new file mode 100644 index 000000000..46f1130b5 --- /dev/null +++ b/src/common/command_buffers/gpu/backend_connection.hpp @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace commandbuffers::gpu +{ + class BackendConnection : public NonMovable + { + public: + BackendConnection(GPUInstance *instance, GPUBackendType type); + virtual ~BackendConnection() = default; + + GPUBackendType type() const; + GPUInstance *getInstance() const; + + virtual std::vector> discoverPhysicalDevices(const RequestAdapterOptions &) = 0; + + private: + GPUInstance *instance_ = nullptr; + GPUBackendType type_; + }; +} diff --git a/src/common/command_buffers/gpu/command_allocator.cpp b/src/common/command_buffers/gpu/command_allocator.cpp new 
file mode 100644 index 000000000..02174770b --- /dev/null +++ b/src/common/command_buffers/gpu/command_allocator.cpp @@ -0,0 +1,281 @@ +// Copyright 2017 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include +#include +#include +#include +#include + +#include + +using namespace std; + +namespace commandbuffers::gpu +{ + // TODO(cwallez@chromium.org): figure out a way to have more type safety for the iterator + + CommandIterator::CommandIterator() + { + reset(); + } + + CommandIterator::~CommandIterator() + { + assert(isEmpty()); + } + + CommandIterator::CommandIterator(CommandIterator &&other) + { + if (!other.isEmpty()) + { + blocks_ = std::move(other.blocks_); + other.reset(); + } + reset(); + } + + CommandIterator &CommandIterator::operator=(CommandIterator &&other) + { + assert(isEmpty()); + if (!other.isEmpty()) + { + blocks_ = std::move(other.blocks_); + other.reset(); + } + reset(); + return *this; + } + + CommandIterator::CommandIterator(CommandAllocator allocator) + : blocks_(allocator.acquireBlocks()) + { + reset(); + } + + void CommandIterator::acquireCommandBlocks(std::vector allocators) + { + assert(isEmpty()); + blocks_.clear(); + + size_t totalBlocksCount = 0; + for (CommandAllocator &allocator : allocators) + { + totalBlocksCount += allocator.getCommandBlocksCount(); + } + + blocks_.reserve(totalBlocksCount); + for (CommandAllocator &allocator : allocators) + { + CommandBlocks blocks = allocator.acquireBlocks(); + if (!blocks.empty()) + { + for (BlockDef &block : blocks) + { + blocks_.push_back(std::move(block)); + } + } + } + reset(); + } + + bool CommandIterator::nextCommandIdInNewBlock(uint32_t *commandId) + { + current_block_++; + if (current_block_ >= blocks_.size()) + { + reset(); + *commandId = detail::kEndOfBlock; + return false; + } + current_ptr_ = transmute::common::AlignPtr(blocks_[current_block_].block.get(), alignof(uint32_t)); + return nextCommandId(commandId); + } + + void CommandIterator::reset() + { + current_block_ = 0; + + if (blocks_.empty()) + { + // This will case the first NextCommandId call to try to move to the next block and stop + // the iteration immediately, without special casing the initialization. 
+ current_ptr_ = reinterpret_cast(&end_of_block_); + } + else + { + current_ptr_ = transmute::common::AlignPtr(blocks_[0].block.get(), alignof(uint32_t)); + } + } + + void CommandIterator::makeEmptyAsDataWasDestroyed() + { + if (isEmpty()) + { + return; + } + + current_ptr_ = reinterpret_cast(&end_of_block_); + blocks_.clear(); + reset(); + assert(isEmpty()); + } + + bool CommandIterator::isEmpty() const + { + return blocks_.empty(); + } + + // Potential TODO(crbug.com/dawn/835): + // - Host the size and pointer to next block in the block itself to avoid having an allocation + // in the vector + // - Assume T's alignof is, say 64bits, static assert it, and make commandAlignment a constant + // in Allocate + // - Be able to optimize allocation to one block, for command buffers expected to live long to + // avoid cache misses + // - Better block allocation, maybe have Dawn API to say command buffer is going to have size + // close to another + + CommandAllocator::CommandAllocator() + { + resetPointers(); + } + + CommandAllocator::~CommandAllocator() + { + reset(); + } + + CommandAllocator::CommandAllocator(CommandAllocator &&other) + : blocks_(std::move(other.blocks_)) + , last_allocation_size_(other.last_allocation_size_) + { + other.blocks_.clear(); + if (!other.isEmpty()) + { + current_ptr_ = other.current_ptr_; + end_ptr_ = other.end_ptr_; + } + else + { + resetPointers(); + } + other.reset(); + } + + CommandAllocator &CommandAllocator::operator=(CommandAllocator &&other) + { + reset(); + if (!other.isEmpty()) + { + std::swap(blocks_, other.blocks_); + last_allocation_size_ = other.last_allocation_size_; + current_ptr_ = other.current_ptr_; + end_ptr_ = other.end_ptr_; + } + other.reset(); + return *this; + } + + void CommandAllocator::reset() + { + resetPointers(); + blocks_.clear(); + last_allocation_size_ = kDefaultBaseAllocationSize; + } + + bool CommandAllocator::isEmpty() const + { + return current_ptr_ == reinterpret_cast(&placeholder_space_[0]); + } + + 
size_t CommandAllocator::getCommandBlocksCount() const + { + return blocks_.size(); + } + + CommandBlocks &&CommandAllocator::acquireBlocks() + { + assert(current_ptr_ != nullptr && end_ptr_ != nullptr); + assert(transmute::common::IsPtrAligned(current_ptr_, alignof(uint32_t))); + assert(current_ptr_ + sizeof(uint32_t) <= end_ptr_); + *reinterpret_cast(current_ptr_) = detail::kEndOfBlock; + + current_ptr_ = nullptr; + end_ptr_ = nullptr; + return std::move(blocks_); + } + + char *CommandAllocator::allocateInNewBlock(uint32_t commandId, + size_t commandSize, + size_t commandAlignment) + { + // When there is not enough space, we signal the kEndOfBlock, so that the iterator knows + // to move to the next one. kEndOfBlock on the last block means the end of the commands. + uint32_t *idAlloc = reinterpret_cast(current_ptr_); + *idAlloc = detail::kEndOfBlock; + + // We'll request a block that can contain at least the command ID, the command and an + // additional ID to contain the kEndOfBlock tag. + size_t requestedBlockSize = commandSize + kWorstCaseAdditionalSize; + + // The computation of the request could overflow. + if (requestedBlockSize <= commandSize) [[unlikely]] + { + return nullptr; + } + + if (!getNewBlock(requestedBlockSize)) [[unlikely]] + { + return nullptr; + } + return allocate(commandId, commandSize, commandAlignment); + } + + bool CommandAllocator::getNewBlock(size_t minimumSize) + { + // Allocate blocks doubling sizes each time, to a maximum of 16k (or at least minimumSize). 
+ last_allocation_size_ = std::max(minimumSize, std::min(last_allocation_size_ * 2, size_t(16384))); + + auto block = std::unique_ptr(new (std::nothrow) char[last_allocation_size_]); + if (block == nullptr) [[unlikely]] + { + return false; + } + + current_ptr_ = transmute::common::AlignPtr(block.get(), alignof(uint32_t)); + end_ptr_ = block.get() + last_allocation_size_; + blocks_.push_back({last_allocation_size_, std::move(block)}); + return true; + } + + void CommandAllocator::resetPointers() + { + current_ptr_ = reinterpret_cast(&placeholder_space_[0]); + end_ptr_ = reinterpret_cast(&placeholder_space_[1]); + } +} diff --git a/src/common/command_buffers/gpu/command_allocator.hpp b/src/common/command_buffers/gpu/command_allocator.hpp new file mode 100644 index 000000000..49be244c5 --- /dev/null +++ b/src/common/command_buffers/gpu/command_allocator.hpp @@ -0,0 +1,270 @@ +// Copyright 2017 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace commandbuffers::gpu +{ + // These are the lists of blocks, should not be used directly, only through CommandAllocator + // and CommandIterator + struct BlockDef + { + size_t size; + std::unique_ptr block; + }; + using CommandBlocks = std::vector; + + namespace detail + { + constexpr uint32_t kEndOfBlock = std::numeric_limits::max(); + constexpr uint32_t kAdditionalData = std::numeric_limits::max() - 1; + } + + class CommandAllocator; + + class CommandIterator : public NonCopyable + { + public: + CommandIterator(); + ~CommandIterator(); + + CommandIterator(CommandIterator &&other); + CommandIterator &operator=(CommandIterator &&other); + + // Shorthand constructor for acquiring CommandBlocks from a single CommandAllocator. + explicit CommandIterator(CommandAllocator allocator); + + void acquireCommandBlocks(std::vector allocators); + + template + bool nextCommandId(E *commandId) + { + return nextCommandId(reinterpret_cast(commandId)); + } + template + T *nextCommand() + { + return static_cast(nextCommand(sizeof(T), alignof(T))); + } + template + T *nextData(size_t count) + { + return static_cast(nextData(sizeof(T) * count, alignof(T))); + } + + // Sets iterator to the beginning of the commands without emptying the list. This method can + // be used if iteration was stopped early and the iterator needs to be restarted. 
+ void reset(); + + // This method must to be called after commands have been deleted. This indicates that the + // commands have been submitted and they are no longer valid. + void makeEmptyAsDataWasDestroyed(); + + private: + bool isEmpty() const; + + inline bool nextCommandId(uint32_t *commandId) + { + char *idPtr = transmute::common::AlignPtr(current_ptr_, alignof(uint32_t)); + assert(idPtr == reinterpret_cast(&end_of_block_) || + idPtr + sizeof(uint32_t) <= blocks_[current_block_].block.get() + blocks_[current_block_].size); + + uint32_t id = *reinterpret_cast(idPtr); + if (id != detail::kEndOfBlock) + { + current_ptr_ = idPtr + sizeof(uint32_t); + *commandId = id; + return true; + } + return nextCommandIdInNewBlock(commandId); + } + + bool nextCommandIdInNewBlock(uint32_t *commandId); + + inline void *nextCommand(size_t commandSize, size_t commandAlignment) + { + char *commandPtr = transmute::common::AlignPtr(current_ptr_, commandAlignment); + assert(commandPtr + sizeof(commandSize) <= + blocks_[current_block_].block.get() + blocks_[current_block_].size); + + current_ptr_ = commandPtr + commandSize; + return commandPtr; + } + + inline void *nextData(size_t dataSize, size_t dataAlignment) + { + uint32_t id; + bool hasId = nextCommandId(&id); + assert(hasId); + assert(id == detail::kAdditionalData); + + return nextCommand(dataSize, dataAlignment); + } + + CommandBlocks blocks_; + char *current_ptr_ = nullptr; + size_t current_block_ = 0; + // Used to avoid a special case for empty iterators. + uint32_t end_of_block_ = detail::kEndOfBlock; + }; + + class CommandAllocator : public NonCopyable + { + public: + CommandAllocator(); + ~CommandAllocator(); + + // NOTE: A moved-from CommandAllocator is reset to its initial empty state. + CommandAllocator(CommandAllocator &&); + CommandAllocator &operator=(CommandAllocator &&); + + // Frees all blocks held by the allocator and restores it to its initial empty state. 
+ void reset(); + + bool isEmpty() const; + + template + T *allocate(E commandId) + { + static_assert(sizeof(E) == sizeof(uint32_t)); + static_assert(alignof(E) == alignof(uint32_t)); + static_assert(alignof(T) <= kMaxSupportedAlignment); + T *result = reinterpret_cast(allocate(static_cast(commandId), sizeof(T), alignof(T))); + if (!result) + { + return nullptr; + } + new (result) T; + return result; + } + + template + T *allocateData(size_t count) + { + static_assert(alignof(T) <= kMaxSupportedAlignment); + T *result = reinterpret_cast(allocateData(sizeof(T) * count, alignof(T))); + if (!result) + { + return nullptr; + } + for (size_t i = 0; i < count; i++) + { + new (result + i) T; + } + return result; + } + + size_t getCommandBlocksCount() const; + + private: + // This is used for some internal computations and can be any power of two as long as code + // using the CommandAllocator passes the static_asserts. + static constexpr size_t kMaxSupportedAlignment = 8; + + // To avoid checking for overflows at every step of the computations we compute an upper + // bound of the space that will be needed in addition to the command data. + static constexpr size_t kWorstCaseAdditionalSize = + sizeof(uint32_t) + kMaxSupportedAlignment + alignof(uint32_t) + sizeof(uint32_t); + + // The default value of mLastAllocationSize. 
+ static constexpr size_t kDefaultBaseAllocationSize = 2048; + + friend CommandIterator; + CommandBlocks &&acquireBlocks(); + + inline char *allocate(uint32_t commandId, + size_t commandSize, + size_t commandAlignment) + { + assert(current_ptr_ != nullptr); + assert(end_ptr_ != nullptr); + assert(commandId != detail::kEndOfBlock); + + // It should always be possible to allocate one id, for kEndOfBlock tagging, + assert(transmute::common::IsPtrAligned(current_ptr_, alignof(uint32_t))); + assert(end_ptr_ >= current_ptr_); + assert(static_cast(end_ptr_ - current_ptr_) >= sizeof(uint32_t)); + + // The memory after the ID will contain the following: + // - the current ID + // - padding to align the command, maximum kMaxSupportedAlignment + // - the command of size commandSize + // - padding to align the next ID, maximum alignof(uint32_t) + // - the next ID of size sizeof(uint32_t) + + // This can't overflow because by construction current_ptr_ always has space for the next + // ID. + size_t remainingSize = static_cast(end_ptr_ - current_ptr_); + + // The good case were we have enough space for the command data and upper bound of the + // extra required space. 
+ if ((remainingSize >= kWorstCaseAdditionalSize) && + (remainingSize - kWorstCaseAdditionalSize >= commandSize)) + { + uint32_t *idAlloc = reinterpret_cast(current_ptr_); + *idAlloc = commandId; + + char *commandAlloc = transmute::common::AlignPtr(current_ptr_ + sizeof(uint32_t), commandAlignment); + current_ptr_ = transmute::common::AlignPtr(commandAlloc + commandSize, alignof(uint32_t)); + + return commandAlloc; + } + return allocateInNewBlock(commandId, commandSize, commandAlignment); + } + + char *allocateInNewBlock(uint32_t commandId, size_t commandSize, size_t commandAlignment); + + inline char *allocateData(size_t commandSize, size_t commandAlignment) + { + return allocate(detail::kAdditionalData, commandSize, commandAlignment); + } + + bool getNewBlock(size_t minimumSize); + + void resetPointers(); + + CommandBlocks blocks_; + size_t last_allocation_size_ = kDefaultBaseAllocationSize; + + // Data used for the block range at initialization so that the first call to Allocate sees + // there is not enough space and calls GetNewBlock. This avoids having to special case the + // initialization in Allocate. 
+ uint32_t placeholder_space_[1] = {0}; + char *current_ptr_ = nullptr; + char *end_ptr_ = nullptr; + }; +} diff --git a/src/common/command_buffers/gpu/compilation_messages.hpp b/src/common/command_buffers/gpu/compilation_messages.hpp new file mode 100644 index 000000000..c1029cb25 --- /dev/null +++ b/src/common/command_buffers/gpu/compilation_messages.hpp @@ -0,0 +1,23 @@ +#pragma once + +#include +#include +#include +#include + +namespace commandbuffers::gpu +{ + struct CompilationMessageContent + { + std::string message; + GPUCompilationMessageType type; + uint64_t line; + uint64_t column; + }; + + struct ParsedCompilationMessages + { + std::vector messages; + std::vector formattedTintMessages; + }; +} diff --git a/src/common/command_buffers/gpu/encoding_context.cpp b/src/common/command_buffers/gpu/encoding_context.cpp new file mode 100644 index 000000000..7cf412b7b --- /dev/null +++ b/src/common/command_buffers/gpu/encoding_context.cpp @@ -0,0 +1,129 @@ +#include +#include + +using namespace std; + +namespace commandbuffers::gpu +{ + EncodingContext::EncodingContext(Ref device, const GPUHandle *initialEncoder) + : device_(device.get()) + , top_level_encoder_(initialEncoder) + , current_encoder_(initialEncoder) + , status_(Status::kOpen) + { + assert(!initialEncoder->isError()); + } + + EncodingContext::EncodingContext(Ref device, GPUHandle::ErrorTag tag) + : device_(device.get()) + , top_level_encoder_(nullptr) + , current_encoder_(nullptr) + , status_(Status::kErrorAtCreation) + { + } + + EncodingContext::~EncodingContext() + { + destroy(); + } + + void EncodingContext::destroy() + { + debug_group_labels_.clear(); + + if (!were_commands_acquired_) + { + // mIndirectDrawMetadata.clear(); + } + // if (!mWereCommandsAcquired) + // { + // CommandIterator commands = AcquireCommands(); + // FreeCommands(&commands); + // } + + closeWithStatus(Status::kDestroyed); + } + + CommandIterator EncodingContext::acquireCommands() + { + assert(!were_commands_acquired_); + 
were_commands_acquired_ = true; + + commitCommands(std::move(pending_commands_)); + + CommandIterator commands; + commands.acquireCommandBlocks(std::move(allocators_)); + return commands; + } + + void EncodingContext::handleError(unique_ptr error) + { + // TODO + } + + void EncodingContext::willBeginRenderPass() + { + assert(current_encoder_ == top_level_encoder_); + if (device_->isValidationEnabled() || device_->mayRequireDuplicationOfIndirectParameters()) + { + // When validation is enabled or indirect parameters require duplication, we are going + // to want to capture all commands encoded between and including BeginRenderPassCmd and + // EndRenderPassCmd, and defer their sequencing util after we have a chance to insert + // any necessary validation or duplication commands. To support this we commit any + // current commands now, so that the impending BeginRenderPassCmd starts in a fresh + // CommandAllocator. + commitCommands(std::move(pending_commands_)); + } + } + + void EncodingContext::enterPass(const GPUHandle *passEncoder) + { + // Assert we're at the top level. + assert(current_encoder_ == top_level_encoder_); + assert(passEncoder != nullptr); + + current_encoder_ = passEncoder; + } + + void EncodingContext::exitComputePass(const GPUHandle *passEncoder, + ComputePassResourceUsage usages) + { + assert(current_encoder_ != top_level_encoder_); + assert(current_encoder_ == passEncoder); + + current_encoder_ = top_level_encoder_; + // mComputePassUsages.push_back(std::move(usages)); + } + + bool EncodingContext::finish() + { + return false; + } + + void EncodingContext::ensurePassExited(const GPUHandle *passEncoder) + { + if (current_encoder_ != top_level_encoder_ && current_encoder_ == passEncoder) + { + // The current pass encoder is being deleted. Implicitly end the pass with an error. 
+ current_encoder_ = top_level_encoder_; + // HandleError(DAWN_VALIDATION_ERROR("Command buffer recording ended before %s was ended.", + // passEncoder)); + } + } + + void EncodingContext::pushDebugGroupLabel(string_view groupLabel) + { + } + + void EncodingContext::popDebugGroupLabel() + { + } + + void EncodingContext::commitCommands(CommandAllocator allocator) + { + } + + void EncodingContext::closeWithStatus(Status status) + { + } +} diff --git a/src/common/command_buffers/gpu/encoding_context.hpp b/src/common/command_buffers/gpu/encoding_context.hpp new file mode 100644 index 000000000..e9c74345f --- /dev/null +++ b/src/common/command_buffers/gpu/encoding_context.hpp @@ -0,0 +1,124 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + class GPUCommandEncoder; + + namespace gpu + { + class EncodingContext + { + public: + EncodingContext(Ref device, const GPUHandle *initialEncoder); + EncodingContext(Ref device, GPUHandle::ErrorTag tag); + ~EncodingContext(); + + // Marks the encoding context as destroyed so that any future encodes will fail, and all + // encoded commands are released. + void destroy(); + CommandIterator acquireCommands(); + + void handleError(std::unique_ptr error); + + inline bool consumedError(MaybeError maybeError) + { + if (maybeError.IsError()) [[unlikely]] + { + handleError(maybeError.AcquireError()); + return true; + } + return false; + } + + inline MaybeError validateCanEncodeOn(const GPUHandle *encoder) + { + if (encoder != current_encoder_) [[unlikely]] + { + switch (status_) + { + case Status::kErrorAtCreation: + case Status::kErrorInRecording: + case Status::kDestroyed: + case Status::kFinished: + case Status::kOpen: + break; + } + } + // TODO: validate encoder is valid. 
+ return {}; + } + + template + inline bool tryEncode(const GPUHandle *encoder, EncodeFunction &&encodeFunction) + { + if (consumedError(validateCanEncodeOn(encoder))) + { + return false; + } + assert(!were_commands_acquired_); + return !consumedError(encodeFunction(&pending_commands_)); + } + + // Must be called prior to encoding a BeginRenderPassCmd. Note that it's OK to call this + // and then not actually call EnterPass+ExitRenderPass, for example if some other pass setup + // failed validation before the BeginRenderPassCmd could be encoded. + void willBeginRenderPass(); + + // Functions to set current encoder state + void enterPass(const GPUHandle *passEncoder); + bool exitRenderPass(const GPUHandle *passEncoder, + RenderPassResourceUsageTracker usageTracker, + GPUCommandEncoder *commandEncoder, + gpu::IndirectDrawMetadata indirectDrawMetadata); + void exitComputePass(const GPUHandle *passEncoder, ComputePassResourceUsage usages); + bool finish(); + + // Called when a pass encoder is deleted. Provides an opportunity to clean up if it's the + // mCurrentEncoder. + void ensurePassExited(const GPUHandle *passEncoder); + + void pushDebugGroupLabel(std::string_view groupLabel); + void popDebugGroupLabel(); + + private: + enum class Status + { + kOpen, + kFinished, + kErrorAtCreation, + kErrorInRecording, + kDestroyed, + }; + + void commitCommands(CommandAllocator allocator); + void closeWithStatus(Status status); + + GPUDeviceBase *device_; + const GPUHandle *top_level_encoder_; + const GPUHandle *current_encoder_; + + CommandAllocator pending_commands_; + std::vector allocators_; + bool were_commands_acquired_ = false; + + // Contains pointers to strings allocated inside the command allocators. 
+ std::vector debug_group_labels_; + + Status status_; + std::unique_ptr error_; + }; + } +} diff --git a/src/common/command_buffers/gpu/error.cpp b/src/common/command_buffers/gpu/error.cpp new file mode 100644 index 000000000..e69de29bb diff --git a/src/common/command_buffers/gpu/error.hpp b/src/common/command_buffers/gpu/error.hpp new file mode 100644 index 000000000..30ea0b70b --- /dev/null +++ b/src/common/command_buffers/gpu/error.hpp @@ -0,0 +1,82 @@ +// Copyright 2018 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include +#include + +#include +#include + +namespace commandbuffers::gpu +{ + enum class InternalErrorType : uint32_t + { + kNone = 0, + kValidation = 1, + kDeviceLost = 2, + kInternal = 4, + kOutOfMemory = 8 + }; + + // MaybeError and ResultOrError are meant to be used as return value for function that are not + // expected to, but might fail. The handling of error is potentially much slower than successes. + using MaybeError = jsar::Result; + + template + using ResultOrError = jsar::Result; + + namespace detail + { + template + struct UnwrapResultOrError + { + using type = T; + }; + + template + struct UnwrapResultOrError> + { + using type = T; + }; + + template + struct IsResultOrError + { + static constexpr bool value = false; + }; + + template + struct IsResultOrError> + { + static constexpr bool value = true; + }; + + } +} diff --git a/src/common/command_buffers/gpu/error_data.cpp b/src/common/command_buffers/gpu/error_data.cpp new file mode 100644 index 000000000..191ef39fe --- /dev/null +++ b/src/common/command_buffers/gpu/error_data.cpp @@ -0,0 +1,135 @@ +#include +#include +#include +#include +#include +#include + +using namespace std; + +namespace commandbuffers::gpu +{ + unique_ptr ErrorData::Create(InternalErrorType type, + string message, + const char *file, + const char *function, + int line) + { + unique_ptr error = make_unique(type, move(message)); + error->appendBacktrace(file, function, 
line); + + auto [var, present] = transmute::common::GetEnvironmentVar("DAWN_DEBUG_BREAK_ON_ERROR"); + if (present && !var.empty() && var != "0") + { + cerr << error->message() << endl; + transmute::common::BreakPoint(); + } + return error; + } + + ErrorData::ErrorData(InternalErrorType type, string message) + : type_(type) + , message_(move(message)) + { + } + + void ErrorData::appendBacktrace(const char *file, const char *function, int line) + { + BacktraceRecord record; + record.file = file; + record.function = function; + record.line = line; + + backtrace_.push_back(move(record)); + } + + void ErrorData::appendContext(string context) + { + contexts_.push_back(move(context)); + } + + void ErrorData::appendDebugGroup(string_view label) + { + debug_groups_.push_back(string(label)); + } + + void ErrorData::appendBackendMessage(string message) + { + backend_messages_.push_back(move(message)); + } + + InternalErrorType ErrorData::type() const + { + return type_; + } + + const string &ErrorData::message() const + { + return message_; + } + + const vector &ErrorData::backtrace() const + { + return backtrace_; + } + + const vector &ErrorData::contexts() const + { + return contexts_; + } + + const vector &ErrorData::debugGroups() const + { + return debug_groups_; + } + + const vector &ErrorData::backendMessages() const + { + return backend_messages_; + } + + string ErrorData::getFormattedMessage() const + { + ostringstream ss; + ss << message_ << "\n"; + + if (!contexts_.empty()) + { + for (auto context : contexts_) + { + ss << " - While " << context << "\n"; + } + } + + // For non-validation errors, or errors that lack a context include the + // stack trace for debugging purposes. 
+ if (contexts_.empty() || type_ != InternalErrorType::kValidation) + { + for (const auto &callsite : backtrace_) + { + ss << " at " << callsite.function << " (" << callsite.file << ":" << callsite.line + << ")\n"; + } + } + + if (!debug_groups_.empty()) + { + ss << "\nDebug group stack:\n"; + for (auto label : debug_groups_) + { + ss << " > \"" << label << "\"\n"; + } + } + + if (!backend_messages_.empty()) + { + ss << "\nBackend messages:\n"; + for (auto message : backend_messages_) + { + ss << " * " << message << "\n"; + } + } + + return ss.str(); + } +} \ No newline at end of file diff --git a/src/common/command_buffers/gpu/error_data.hpp b/src/common/command_buffers/gpu/error_data.hpp new file mode 100644 index 000000000..5cb9ddfd9 --- /dev/null +++ b/src/common/command_buffers/gpu/error_data.hpp @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include +#include + +namespace commandbuffers::gpu +{ + enum class InternalErrorType : uint32_t; + + class [[nodiscard]] ErrorData + { + public: + [[nodiscard]] static std::unique_ptr Create(InternalErrorType type, + std::string message, + const char *file, + const char *function, + int line); + ErrorData(InternalErrorType type, std::string message); + + struct BacktraceRecord + { + const char *file; + const char *function; + int line; + }; + void appendBacktrace(const char *file, const char *function, int line); + void appendContext(std::string context); + void appendDebugGroup(std::string_view label); + void appendBackendMessage(std::string message); + + InternalErrorType type() const; + const std::string &message() const; + const std::vector &backtrace() const; + const std::vector &contexts() const; + const std::vector &debugGroups() const; + const std::vector &backendMessages() const; + + std::string getFormattedMessage() const; + + private: + InternalErrorType type_; + std::string message_; + std::vector backtrace_; + std::vector contexts_; + std::vector debug_groups_; + std::vector backend_messages_; + }; +} 
\ No newline at end of file diff --git a/src/common/command_buffers/gpu/execution_queue.cpp b/src/common/command_buffers/gpu/execution_queue.cpp new file mode 100644 index 000000000..a8aee76ff --- /dev/null +++ b/src/common/command_buffers/gpu/execution_queue.cpp @@ -0,0 +1,9 @@ +#include + +namespace commandbuffers::gpu +{ + bool ExecutionQueueBase::hasScheduledCommands() const + { + return false; + } +} diff --git a/src/common/command_buffers/gpu/execution_queue.hpp b/src/common/command_buffers/gpu/execution_queue.hpp new file mode 100644 index 000000000..d1d93fc10 --- /dev/null +++ b/src/common/command_buffers/gpu/execution_queue.hpp @@ -0,0 +1,50 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace commandbuffers::gpu +{ + class ExecutionQueueBase : public GPUHandle + { + public: + using Task = std::function; + + // Whether the execution queue has scheduled commands to be submitted or executing. + bool hasScheduledCommands() const; + + // In the 'Normal' mode, currently recorded commands in the backend submitted in the next Tick. + // However in the 'Passive' mode, the submission will be postponed as late as possible, for + // example, until the client has explictly issued a submission. + enum class SubmitMode + { + Normal, + Passive + }; + + // Tracks whether we are in a submit to avoid submit reentrancy. Reentrancy could otherwise + // happen when allocating resources or staging memory during submission (for workarounds, or + // emulation) and the heuristics ask for an early submit to happen (which would cause a + // submit-in-submit and many issues). + bool inSubmit = false; + + protected: + using GPUHandle::GPUHandle; + + private: + // Backend specific wait for idle function. + virtual MaybeError waitForIdleForDestructionImpl() = 0; + + // Indicates whether the backend has pending commands to be submitted as soon as possible. 
+ virtual bool hasPendingCommands() const = 0; + + std::mutex mMutex; + std::condition_variable mCv; + bool mCallingCallbacks = false; + bool mWaitingForIdle = false; + bool mAssumeCompleted = false; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_adapter.cpp b/src/common/command_buffers/gpu/gpu_adapter.cpp new file mode 100644 index 000000000..3d2a2eb97 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_adapter.cpp @@ -0,0 +1,184 @@ +#include +#include +#include +#include + +using namespace std; + +namespace commandbuffers +{ + bool GPUSupportedLimits::operator==(const GPUSupportedLimits &rhs) const + { + const auto &lhs = tie(maxTextureDimension1D, + maxTextureDimension2D, + maxTextureDimension3D, + maxTextureArrayLayers, + maxBindGroups, + maxBindingsPerBindGroup, + maxDynamicUniformBuffersPerPipelineLayout, + maxDynamicStorageBuffersPerPipelineLayout, + maxSampledTexturesPerShaderStage, + maxSamplersPerShaderStage, + maxStorageBuffersPerShaderStage, + maxStorageTexturesPerShaderStage, + maxUniformBuffersPerShaderStage, + maxUniformBufferBindingSize, + maxStorageBufferBindingSize, + minUniformBufferOffsetAlignment, + minStorageBufferOffsetAlignment, + maxVertexBuffers, + maxBufferSize, + maxVertexAttributes, + maxVertexBufferArrayStride, + maxInterStageShaderVariables, + maxColorAttachments, + maxColorAttachmentBytesPerSample, + maxComputeWorkgroupStorageSize, + maxComputeInvocationsPerWorkgroup, + maxComputeWorkgroupSizeX, + maxComputeWorkgroupSizeY, + maxComputeWorkgroupSizeZ, + maxComputeWorkgroupsPerDimension); + return lhs == tie(rhs.maxTextureDimension1D, + rhs.maxTextureDimension2D, + rhs.maxTextureDimension3D, + rhs.maxTextureArrayLayers, + rhs.maxBindGroups, + rhs.maxBindingsPerBindGroup, + rhs.maxDynamicUniformBuffersPerPipelineLayout, + rhs.maxDynamicStorageBuffersPerPipelineLayout, + rhs.maxSampledTexturesPerShaderStage, + rhs.maxSamplersPerShaderStage, + rhs.maxStorageBuffersPerShaderStage, + rhs.maxStorageTexturesPerShaderStage, + 
rhs.maxUniformBuffersPerShaderStage, + rhs.maxUniformBufferBindingSize, + rhs.maxStorageBufferBindingSize, + rhs.minUniformBufferOffsetAlignment, + rhs.minStorageBufferOffsetAlignment, + rhs.maxVertexBuffers, + rhs.maxBufferSize, + rhs.maxVertexAttributes, + rhs.maxVertexBufferArrayStride, + rhs.maxInterStageShaderVariables, + rhs.maxColorAttachments, + rhs.maxColorAttachmentBytesPerSample, + rhs.maxComputeWorkgroupStorageSize, + rhs.maxComputeInvocationsPerWorkgroup, + rhs.maxComputeWorkgroupSizeX, + rhs.maxComputeWorkgroupSizeY, + rhs.maxComputeWorkgroupSizeZ, + rhs.maxComputeWorkgroupsPerDimension); + } + + GPUAdapterBase::GPUAdapterBase(Ref instance, + Ref physicalDevice, + GPUFeatureLevel level, + GPUPowerPreference powerPreference) + : ErrorMonad() + , instance_(instance) + , physical_device_(physicalDevice) + , feature_level_(level) + , power_preference_(powerPreference) + { + } + + GPUInstance *GPUAdapterBase::instance() const + { + return instance_.get(); + } + + const GPUAdapterInfo &GPUAdapterBase::info() const + { + return info_; + } + + bool GPUAdapterBase::hasFeature(GPUFeatureName feature) const + { + return false; + } + + void GPUAdapterBase::requestDevice(const GPUDeviceDescriptor *descriptor, + function callback) + { + auto device = createDevice(descriptor); + callback(*device); + } + + Ref GPUAdapterBase::createDevice(const GPUDeviceDescriptor *descriptor) + { + return physical_device_->createDevice(shared_from_this(), *descriptor); + } + + gpu::PhysicalDeviceBase *GPUAdapterBase::physicalDevice() + { + return physical_device_.get(); + } + + const gpu::PhysicalDeviceBase *GPUAdapterBase::physicalDevice() const + { + return physical_device_.get(); + } + + GPUFeatureLevel GPUAdapterBase::featureLevel() const + { + return feature_level_; + } + + const string &GPUAdapterBase::name() const + { + return physical_device_->name(); + } + + vector> SortAdapters(vector> adapters, + const RequestAdapterOptions &options) + { + const bool highPerformance = 
options.powerPreference == GPUPowerPreference::kHighPerformance; + const auto ComputeAdapterTypeRank = [&](const Ref &a) + { + switch (a->physicalDevice()->adapterType()) + { + case GPUAdapterType::kDiscreteGPU: + return highPerformance ? 0 : 1; + case GPUAdapterType::kIntegratedGPU: + return highPerformance ? 1 : 0; + case GPUAdapterType::kCPU: + return 2; + case GPUAdapterType::kUnknown: + return 3; + } + assert(false && "Unhandled GPUAdapterType"); + }; + const auto ComputeBackendTypeRank = [](const Ref &a) + { + switch (a->physicalDevice()->backendType()) + { + // Sort backends generally in order of Core -> Compat -> Testing, + // while preferring OS-specific backends like Metal/D3D. + case GPUBackendType::kMetal: + case GPUBackendType::kD3D12: + return 0; + case GPUBackendType::kVulkan: + return 1; + case GPUBackendType::kD3D11: + return 2; + case GPUBackendType::kOpenGLES: + return 3; + case GPUBackendType::kOpenGL: + return 4; + case GPUBackendType::kWebGPU: + return 5; + case GPUBackendType::kNull: + return 6; + case GPUBackendType::kUndefined: + break; + } + assert(false && "Unhandled GPUBackendType"); + }; + + sort(adapters.begin(), adapters.end(), [&](const Ref &a, const Ref &b) -> bool + { return tuple(ComputeAdapterTypeRank(a), ComputeBackendTypeRank(a)) < + tuple(ComputeAdapterTypeRank(b), ComputeBackendTypeRank(b)); }); + return adapters; + } +} diff --git a/src/common/command_buffers/gpu/gpu_adapter.hpp b/src/common/command_buffers/gpu/gpu_adapter.hpp index 8e37a08e7..50412f540 100644 --- a/src/common/command_buffers/gpu/gpu_adapter.hpp +++ b/src/common/command_buffers/gpu/gpu_adapter.hpp @@ -1,14 +1,31 @@ #pragma once +#include +#include +#include #include #include -#include -#include -#include "./gpu_base.hpp" +#include +#include namespace commandbuffers { + class GPUInstance; + struct GPUDeviceDescriptor; + namespace gpu + { + class PhysicalDeviceBase; + } + + struct RequestAdapterOptions + { + GPUFeatureLevel featureLevel = 
GPUFeatureLevel::kCore; + GPUPowerPreference powerPreference = GPUPowerPreference::kUndefined; + bool forceFallbackAdapter = false; + GPUBackendType backendType = GPUBackendType::kUndefined; + }; + class GPUAdapterInfo { public: @@ -40,52 +57,41 @@ namespace commandbuffers } }; - class GPUSupportedFeatures : public std::unordered_set + class GPUAdapterBase : ErrorMonad, public std::enable_shared_from_this { public: - GPUSupportedFeatures() - { - // TODO(yorkie): add required features - } - }; + GPUAdapterBase(Ref instance, + Ref physicalDevice, + GPUFeatureLevel level, + GPUPowerPreference powerPreference); - class GPUSupportedLimits : public std::unordered_map - { - public: - GPUSupportedLimits() - { - insert({"maxTextureDimension1D", 8192}); - insert({"maxTextureDimension2D", 8192}); - insert({"maxTextureDimension3D", 2048}); - insert({"maxTextureArrayLayers", 256}); - insert({"maxBindGroups", 4}); - insert({"maxBindGroupEntries", 640}); - } + GPUInstance *instance() const; + const GPUAdapterInfo &info() const; + bool hasFeature(GPUFeatureName) const; + void requestDevice(const GPUDeviceDescriptor *descriptor, + std::function callback); + Ref createDevice(const GPUDeviceDescriptor *descriptor = nullptr); - public: - uint32_t maxTextureDimension1D() const - { - return at("maxTextureDimension1D"); - } - uint32_t maxTextureDimension2D() const - { - return at("maxTextureDimension2D"); - } - uint32_t maxTextureDimension3D() const - { - return at("maxTextureDimension3D"); - } - uint32_t maxTextureArrayLayers() const - { - return at("maxTextureArrayLayers"); - } - uint32_t maxBindGroups() const - { - return at("maxBindGroups"); - } - uint32_t maxBindGroupEntries() const - { - return at("maxBindGroupEntries"); - } + gpu::PhysicalDeviceBase *physicalDevice(); + const gpu::PhysicalDeviceBase *physicalDevice() const; + GPUFeatureLevel featureLevel() const; + + const std::string &name() const; + + private: + Ref instance_; + Ref physical_device_; + GPUAdapterInfo info_; + 
GPUFeatureLevel feature_level_; + GPUPowerPreference power_preference_; + + bool use_tiered_limits = false; + + // The adapter becomes "consumed" once it has successfully been used to + // create a device. + bool adapter_is_consumed = false; }; + + std::vector> SortAdapters(std::vector> adapters, + const RequestAdapterOptions &options); } diff --git a/src/common/command_buffers/gpu/gpu_base.cpp b/src/common/command_buffers/gpu/gpu_base.cpp new file mode 100644 index 000000000..b0213911b --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_base.cpp @@ -0,0 +1,145 @@ +#include +#include +#include + +using namespace std; + +namespace commandbuffers +{ + bool GPUExtent2D::operator==(const GPUExtent2D &rhs) const + { + const auto &lhs = std::tie(width, height); + return lhs == std::tie(rhs.width, rhs.height); + } + + bool GPUExtent3D::operator==(const GPUExtent3D &rhs) const + { + const auto &lhs = std::tie(width, height, depthOrArrayLayers); + return lhs == std::tie(rhs.width, + rhs.height, + rhs.depthOrArrayLayers); + } + + // + // ErrorMonad Implementation + // + + ErrorMonad::ErrorMonad() + : tag_payload_(kInitializedNoErrorPayload) + { + } + + ErrorMonad::ErrorMonad(ErrorTag) + : tag_payload_(kInitializedErrorPayload) + { + } + ErrorMonad::ErrorMonad(DelayedInitializationTag) + : tag_payload_(kNotInitializedPayload) + { + } + + bool ErrorMonad::initialized() const + { + return (tag_payload_ & kInitializedMask) == kInitialized; + } + + bool ErrorMonad::isError() const + { + assert((tag_payload_ & kInitializedMask) == kInitialized); + return tag_payload_ != kInitializedNoErrorPayload; + } + + void ErrorMonad::setInitializedError() + { + uint64_t previousPayload = fetchAnd(kInitializedErrorPayload); + assert(previousPayload == kNotInitializedPayload); + } + + void ErrorMonad::setInitializedNoError() + { + uint64_t previousPayload = fetchAnd(kInitializedNoErrorPayload); + assert(previousPayload == kNotInitializedPayload); + } + + uint64_t 
ErrorMonad::fetchAnd(uint64_t arg) + { + return tag_payload_.exchange(arg, memory_order_acq_rel); + } + + // + // GPUObject Implementation + // + + GPUObject::GPUObject(Ref device) + : ErrorMonad() + , device_(device) + { + } + + GPUObject::GPUObject(Ref device, ErrorTag) + : ErrorMonad(kError) + , device_(device) + { + } + + GPUObject::GPUObject(Ref device, DelayedInitializationTag) + : ErrorMonad(kDelayedInitialization) + , device_(device) + { + } + + GPUInstance *GPUObject::instance() const + { + return device_->getInstance(); + } + + Ref GPUObject::device() const + { + return device_; + } + + // + // GPUHandle Implementation + // + + GPUHandle::GPUHandle(Ref device, string_view label) + : GPUObject(device) + , id(Ids.get()) + , label_(string(label)) + { + } + + GPUHandle::GPUHandle(Ref device, + ErrorTag tag, + string_view label) + : GPUObject(device, tag) + , id(Ids.get()) + , label_(string(label)) + { + } + + GPUHandle::GPUHandle(Ref device, + DelayedInitializationTag tag, + string_view label) + : GPUObject(device, tag) + , id(Ids.get()) + , label_(string(label)) + { + } + + GPUHandle::GPUHandle(Ref device, LabelNotImplementedTag tag) + : GPUObject(device) + , id(Ids.get()) + { + } + + void GPUHandle::setLabel(std::string label) + { + label_ = move(label); + } + + const std::string &GPUHandle::getLabel() const + { + return label_; + } +} diff --git a/src/common/command_buffers/gpu/gpu_base.hpp b/src/common/command_buffers/gpu/gpu_base.hpp index 9ed377959..1ee91e666 100644 --- a/src/common/command_buffers/gpu/gpu_base.hpp +++ b/src/common/command_buffers/gpu/gpu_base.hpp @@ -1,59 +1,826 @@ +// Copyright 2018 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ #pragma once +#include #include +#include +#include +#include + #include +#include +#include namespace commandbuffers { - enum class GPUShaderStage + using GPUBindingNumber = uint32_t; + constexpr GPUBindingNumber kMaxBindingsPerBindGroupTyped = GPUBindingNumber(gpu_constants::kMaxBindingsPerBindGroup); + + using GPUClientBindingIndex = uint32_t; + using GPUBindingIndex = uint32_t; + + using GPUBindGroupIndex = uint32_t; + constexpr GPUBindGroupIndex kMaxBindGroupsTyped = GPUBindGroupIndex(gpu_constants::kMaxBindGroups); + + struct GPUConstantEntry { - kVertex, - kFragment, - kCompute, + std::string_view key; + double value; }; - enum class GPUBufferType + struct GPUExtent2D { - kReadOnlyStorage, - kStorage, - kUniform, + uint32_t width; + uint32_t height; + + // Equality operators, mostly for testing. Note that this tests + // strict pointer-pointer equality if the struct contains member pointers. + bool operator==(const GPUExtent2D &rhs) const; + }; + + struct GPUExtent3D + { + uint32_t width; + uint32_t height = 1; + uint32_t depthOrArrayLayers = 1; + + // Equality operators, mostly for testing. Note that this tests + // strict pointer-pointer equality if the struct contains member pointers. 
+ bool operator==(const GPUExtent3D &rhs) const; + }; + + enum class GPUPrimitiveTopology : uint32_t + { + kUndefined, + kPointList, + kLineList, + kLineStrip, + kTriangleList, + kTriangleStrip, }; - enum class GPUIndexFormat + enum class GPUIndexFormat : uint32_t { + kUndefined, kUint16, kUint32, }; - enum class GPUStorageAccess + enum class GPUFrontFace : uint32_t { - kReadOnly, - kReadWrite, - kWriteOnly, + kUndefined, + kCCW, + kCW, }; - enum class GPUSamplerType + enum class GPUCullMode : uint32_t { - kComparison, + kUndefined, + kNone, + kFront, + kBack, + }; + + enum class GPUVertexStepMode : uint32_t + { + kUndefined, + kVertex, + kInstance, + }; + + enum class GPUVertexFormat : uint32_t + { + kUint8, + kUint8x2, + kUint8x4, + kSint8, + kSint8x2, + kSint8x4, + kUnorm8, + kUnorm8x2, + kUnorm8x4, + kSnorm8, + kSnorm8x2, + kSnorm8x4, + kUint16, + kUint16x2, + kUint16x4, + kSint16, + kSint16x2, + kSint16x4, + kUnorm16, + kUnorm16x2, + kUnorm16x4, + kSnorm16, + kSnorm16x2, + kSnorm16x4, + kFloat16, + kFloat16x2, + kFloat16x4, + kFloat32, + kFloat32x2, + kFloat32x3, + kFloat32x4, + kUint32, + kUint32x2, + kUint32x3, + kUint32x4, + kSint32, + kSint32x2, + kSint32x3, + kSint32x4, + kUnorm10_10_10_2, + kUnorm8x4BGRA, + }; + + enum class GPUComponentSwizzle : uint32_t + { + kUndefined, + kZero, + kOne, + kR, + kG, + kB, + kA, + }; + + enum class GPUCompositeAlphaMode : uint32_t + { + kAuto, + kOpaque, + kPremultiplied, + kUnpremultiplied, + kInherit, + }; + + enum class GPUColorWriteMask : uint64_t + { + kNone, + kRed, + kGreen, + kBlue, + kAlpha, + kAll, + }; + + enum class GPUBlendFactor : uint32_t + { + kUndefined, + kZero, + kOne, + kSrc, + kOneMinusSrc, + kSrcAlpha, + kOneMinusSrcAlpha, + kDst, + kOneMinusDst, + kDstAlpha, + kOneMinusDstAlpha, + kSrcAlphaSaturated, + kConstant, + kOneMinusConstant, + kSrc1, + kOneMinusSrc1, + kSrc1Alpha, + kOneMinusSrc1Alpha, + }; + + enum class GPUBlendOperation : uint32_t + { + kUndefined, + kAdd, + kSubtract, + 
kReverseSubtract, + kMin, + kMax, + }; + + enum class GPUCompareFunction : uint32_t + { + kUndefined, + kNever, + kLess, + kEqual, + kLessEqual, + kGreater, + kNotEqual, + kGreaterEqual, + kAlways, + }; + + enum class GPUStencilOperation : uint32_t + { + kUndefined, + kKeep, + kZero, + kReplace, + kInvert, + kIncrementClamp, + kDecrementClamp, + kIncrementWrap, + kDecrementWrap, + }; + + enum class GPUHandleType : uint32_t + { + kAdapter, + kBindGroup, + kBindGroupLayout, + kBuffer, + kCommandBuffer, + kCommandEncoder, + kComputePassEncoder, + kComputePipeline, + kDevice, + kExternalTexture, + kInstance, + kPipelineLayout, + kQuerySet, + kQueue, + kRenderBundle, + kRenderBundleEncoder, + kRenderPassEncoder, + kRenderPipeline, + kSampler, + kShaderModule, + kSharedBufferMemory, + kSharedFence, + kSharedTextureMemory, + kSurface, + kTexelBufferView, + kTexture, + kTextureView, + + // Additional internal object types. Keep kExtraObjectTypes in sync when updating. + kBindGroupLayoutInternal, + }; + +#define GPU_BACKEND_TYPES(XX) \ + XX(kUndefined, "undefined") \ + XX(kNull, "null") \ + XX(kWebGPU, "webgpu") \ + XX(kD3D11, "d3d11") \ + XX(kD3D12, "d3d12") \ + XX(kMetal, "metal") \ + XX(kVulkan, "vulkan") \ + XX(kOpenGL, "opengl") \ + XX(kOpenGLES, "opengles") + +#define GPU_ADAPTER_TYPES(XX) \ + XX(kDiscreteGPU, "discrete-gpu") \ + XX(kIntegratedGPU, "integrated-gpu") \ + XX(kCPU, "cpu") \ + XX(kUnknown, "unknown") + +#define DECL_GPU_ENUM_ITEM(NAME, _) NAME, +#define ADD_GPU_ENUM(ENUM_TYPE, BASE_TYPE, MAP) \ + enum class ENUM_TYPE : BASE_TYPE \ + { \ + MAP(DECL_GPU_ENUM_ITEM) \ + }; + + ADD_GPU_ENUM(GPUBackendType, uint32_t, GPU_BACKEND_TYPES) + ADD_GPU_ENUM(GPUAdapterType, uint32_t, GPU_ADAPTER_TYPES) + +#undef DECL_GPU_ENUM_ITEM +#undef ADD_GPU_ENUM + + inline std::string to_string(GPUBackendType type) + { +#define XX(NAME, STR) \ + case GPUBackendType::NAME: \ + return STR; + + switch (type) + { + GPU_BACKEND_TYPES(XX) + default: + assert(false && "Unknown 
GPUBackendType"); + return "unknown"; + } +#undef XX + } + + inline std::string to_string(GPUAdapterType type) + { +#define XX(NAME, STR) \ + case GPUAdapterType::NAME: \ + return STR; + + switch (type) + { + GPU_ADAPTER_TYPES(XX) + default: + assert(false && "Unknown GPUAdapterType"); + return "unknown"; + } +#undef XX + } + + enum class GPUCompilationMessageType : uint32_t + { + kError = 1, + kWarning = 2, + kInfo = 3, + }; + + enum class GPUShaderStage : uint64_t + { + kNone = 0x0000000000000000, + kVertex = 0x0000000000000001, + kFragment = 0x0000000000000002, + kCompute = 0x0000000000000004, + }; + + enum class GPUMapMode : uint64_t + { + kNone = 0x0000000000000000, + kRead = 0x0000000000000001, + kWrite = 0x0000000000000002, + }; + + enum class GPUBufferBindingType : uint32_t + { + kBindingNotUsed, + kUndefined, + kUniform, + kStorage, + kReadOnlyStorage, + }; + + enum class GPUSamplerBindingType : uint32_t + { + kBindingNotUsed, + kUndefined, kFiltering, kNonFiltering, + kComparison, + }; + + enum class GPUTextureAspect : uint32_t + { + kUndefined, + kAll, + kStencilOnly, + kDepthOnly, + kPlane0Only, + kPlane1Only, + kPlane2Only, + }; + + enum class GPUTextureUsage : uint64_t + { + kNone, + kCopySrc, + kCopyDst, + kTextureBinding, + kStorageBinding, + kRenderAttachment, + kTransientAttachment, + kStorageAttachment, + }; + + enum class GPUTextureSampleType : uint32_t + { + kBindingNotUsed, + kUndefined, + kFloat, + kUnfilterableFloat, + kDepth, + kSint, + kUint, + }; + + enum class GPUTextureDimension : uint32_t + { + kUndefined, + k1D, + k2D, + k3D, + }; + + enum class GPUTextureViewDimension : uint32_t + { + kUndefined, + k1D, + k2D, + k2DArray, + kCube, + kCubeArray, + k3D, + }; + + enum class GPUTextureFormat : uint32_t + { + kUndefined, + kR8Unorm, + kR8Snorm, + kR8Uint, + kR8Sint, + kR16Unorm, + kR16Snorm, + kR16Uint, + kR16Sint, + kR16Float, + kRG8Unorm, + kRG8Snorm, + kRG8Uint, + kRG8Sint, + kR32Float, + kR32Uint, + kR32Sint, + kRG16Unorm, + 
kRG16Snorm, + kRG16Uint, + kRG16Sint, + kRG16Float, + kRGBA8Unorm, + kRGBA8UnormSrgb, + kRGBA8Snorm, + kRGBA8Uint, + kRGBA8Sint, + kBGRA8Unorm, + kBGRA8UnormSrgb, + kRGB10A2Uint, + kRGB10A2Unorm, + kRG11B10Ufloat, + kRGB9E5Ufloat, + kRG32Float, + kRG32Uint, + kRG32Sint, + kRGBA16Unorm, + kRGBA16Snorm, + kRGBA16Uint, + kRGBA16Sint, + kRGBA16Float, + kRGBA32Float, + kRGBA32Uint, + kRGBA32Sint, + kStencil8, + kDepth16Unorm, + kDepth24Plus, + kDepth24PlusStencil8, + kDepth32Float, + kDepth32FloatStencil8, + kBC1RGBAUnorm, + kBC1RGBAUnormSrgb, + kBC2RGBAUnorm, + kBC2RGBAUnormSrgb, + kBC3RGBAUnorm, + kBC3RGBAUnormSrgb, + kBC4RUnorm, + kBC4RSnorm, + kBC5RGUnorm, + kBC5RGSnorm, + kBC6HRGBUfloat, + kBC6HRGBFloat, + kBC7RGBAUnorm, + kBC7RGBAUnormSrgb, + kETC2RGB8Unorm, + kETC2RGB8UnormSrgb, + kETC2RGB8A1Unorm, + kETC2RGB8A1UnormSrgb, + kETC2RGBA8Unorm, + kETC2RGBA8UnormSrgb, + kEACR11Unorm, + kEACR11Snorm, + kEACRG11Unorm, + kEACRG11Snorm, + kASTC4x4Unorm, + kASTC4x4UnormSrgb, + kASTC5x4Unorm, + kASTC5x4UnormSrgb, + kASTC5x5Unorm, + kASTC5x5UnormSrgb, + kASTC6x5Unorm, + kASTC6x5UnormSrgb, + kASTC6x6Unorm, + kASTC6x6UnormSrgb, + kASTC8x5Unorm, + kASTC8x5UnormSrgb, + kASTC8x6Unorm, + kASTC8x6UnormSrgb, + kASTC8x8Unorm, + kASTC8x8UnormSrgb, + kASTC10x5Unorm, + kASTC10x5UnormSrgb, + kASTC10x6Unorm, + kASTC10x6UnormSrgb, + kASTC10x8Unorm, + kASTC10x8UnormSrgb, + kASTC10x10Unorm, + kASTC10x10UnormSrgb, + kASTC12x10Unorm, + kASTC12x10UnormSrgb, + kASTC12x12Unorm, + kASTC12x12UnormSrgb, + kR8BG8Biplanar420Unorm, + kR10X6BG10X6Biplanar420Unorm, + kR8BG8A8Triplanar420Unorm, + kR8BG8Biplanar422Unorm, + kR8BG8Biplanar444Unorm, + kR10X6BG10X6Biplanar422Unorm, + kR10X6BG10X6Biplanar444Unorm, + kExternal, + }; + + struct GPUTextureComponentSwizzle + { + GPUComponentSwizzle r = GPUComponentSwizzle::kUndefined; + GPUComponentSwizzle g = GPUComponentSwizzle::kUndefined; + GPUComponentSwizzle b = GPUComponentSwizzle::kUndefined; + GPUComponentSwizzle a = GPUComponentSwizzle::kUndefined; + }; + 
+ enum class GPUStorageTextureAccess : uint32_t + { + kBindingNotUsed, + kUndefined, + kWriteOnly, + kReadOnly, + kReadWrite, + }; + + struct GPUPrimitiveState + { + GPUPrimitiveTopology topology = GPUPrimitiveTopology::kTriangleList; + GPUIndexFormat stripIndexFormat = GPUIndexFormat::kUndefined; + GPUFrontFace frontFace = GPUFrontFace::kCCW; + GPUCullMode cullMode = GPUCullMode::kNone; + bool unclippedDepth = false; + }; + + struct GPUStencilFaceState + { + GPUCompareFunction compare = GPUCompareFunction::kUndefined; + GPUStencilOperation failOp = GPUStencilOperation::kUndefined; + GPUStencilOperation depthFailOp = GPUStencilOperation::kUndefined; + GPUStencilOperation passOp = GPUStencilOperation::kUndefined; + }; + + struct GPUDepthStencilState + { + GPUTextureFormat format = GPUTextureFormat::kUndefined; + std::optional depthWriteEnabled = std::nullopt; + GPUCompareFunction depthCompare = GPUCompareFunction::kUndefined; + GPUStencilFaceState stencilFront; + GPUStencilFaceState stencilBack; + uint32_t stencilReadMask = 0xFFFFFFFF; + uint32_t stencilWriteMask = 0xFFFFFFFF; + int32_t depthBias = 0; + float depthBiasSlopeScale = 0.f; + float depthBiasClamp = 0.f; + }; + + struct GPUMultisampleState + { + uint32_t count = 1; + uint32_t mask = 0xFFFFFFFF; + bool alphaToCoverageEnabled = false; + }; + + struct GPUBlendComponent + { + GPUBlendOperation operation = GPUBlendOperation::kUndefined; + GPUBlendFactor srcFactor = GPUBlendFactor::kUndefined; + GPUBlendFactor dstFactor = GPUBlendFactor::kUndefined; + }; + + struct GPUBlendState + { + GPUBlendComponent color = {}; + GPUBlendComponent alpha = {}; + }; + + struct GPUColorTargetState + { + GPUTextureFormat format = GPUTextureFormat::kUndefined; + GPUBlendState const *blend = nullptr; + GPUColorWriteMask writeMask = GPUColorWriteMask::kAll; + }; + + struct GPUFragmentState + { + // ShaderModuleBase *module; + std::string_view entryPoint; + size_t constantCount = 0; + GPUConstantEntry const *constants = nullptr; + 
size_t targetCount; + GPUColorTargetState const *targets = nullptr; + }; + + struct GPUVertexAttribute + { + GPUVertexFormat format = {}; + uint64_t offset; + uint32_t shaderLocation; + }; + + struct GPUVertexBufferLayout + { + GPUVertexStepMode stepMode = GPUVertexStepMode::kUndefined; + uint64_t arrayStride; + size_t attributeCount; + GPUVertexAttribute const *attributes = nullptr; + }; + + struct GPUVertexState + { + // ShaderModuleBase *module; + std::string_view entryPoint; + size_t constantCount = 0; + GPUConstantEntry const *constants = nullptr; + size_t bufferCount = 0; + GPUVertexBufferLayout const *buffers = nullptr; + }; + + enum class GPUDynamicBindingKind : uint32_t + { + kUndefined, + kSampledTexture, + }; + + enum class GPUFeatureName : uint32_t + { + kCoreFeaturesAndLimits, + kDepthClipControl, + }; + + enum class GPUFeatureLevel : uint32_t + { + kUndefined = 0x00000000, + kCompatibility = 0x00000001, + kCore = 0x00000002, + }; + + enum class GPUPowerPreference : uint32_t + { + kUndefined = 0x00000000, + kLowPower = 0x00000001, + kHighPerformance = 0x00000002, + }; + + class GPUSupportedFeatures : public std::unordered_set + { + using std::unordered_set::unordered_set; + }; + + struct GPUSupportedLimits + { + uint32_t maxTextureDimension1D = 8192; + uint32_t maxTextureDimension2D = 8192; + uint32_t maxTextureDimension3D = 2048; + uint32_t maxTextureArrayLayers = 2048; + uint32_t maxBindGroups = 4; + uint32_t maxBindingsPerBindGroup = 640; + uint32_t maxDynamicUniformBuffersPerPipelineLayout = 8; + uint32_t maxDynamicStorageBuffersPerPipelineLayout = 4; + uint32_t maxSampledTexturesPerShaderStage = 16; + uint32_t maxSamplersPerShaderStage = 16; + uint32_t maxStorageBuffersPerShaderStage = 8; + uint32_t maxStorageTexturesPerShaderStage = 8; + uint32_t maxUniformBuffersPerShaderStage = 12; + uint64_t maxUniformBufferBindingSize = 65536; + uint64_t maxStorageBufferBindingSize = 134217728; + uint32_t minUniformBufferOffsetAlignment = 256; + uint32_t 
minStorageBufferOffsetAlignment = 256; + uint32_t maxVertexBuffers = 8; + uint64_t maxBufferSize = 4294967296; + uint32_t maxVertexAttributes = 16; + uint32_t maxVertexBufferArrayStride = 2048; + uint32_t maxInterStageShaderVariables = 16; + uint32_t maxColorAttachments = 8; + uint32_t maxColorAttachmentBytesPerSample = 32; + uint32_t maxComputeWorkgroupStorageSize = 16384; + uint32_t maxComputeInvocationsPerWorkgroup = 256; + uint32_t maxComputeWorkgroupSizeX = 256; + uint32_t maxComputeWorkgroupSizeY = 256; + uint32_t maxComputeWorkgroupSizeZ = 64; + uint32_t maxComputeWorkgroupsPerDimension = 65535; + + bool operator==(const GPUSupportedLimits &rhs) const; + }; + + struct GPUComputeState + { + // ShaderModuleBase *module; + std::string_view entryPoint; + size_t constantCount = 0; + GPUConstantEntry const *constants = nullptr; + }; + + class ErrorMonad + { + public: + struct ErrorTag + { + }; + static constexpr ErrorTag kError = {}; + + struct DelayedInitializationTag + { + }; + static constexpr DelayedInitializationTag kDelayedInitialization = {}; + + ErrorMonad(); + explicit ErrorMonad(ErrorTag tag); + explicit ErrorMonad(DelayedInitializationTag tag); + virtual ~ErrorMonad() = default; + + // Test if the error state is valid yet. It is an error to check the error state before the + // object is initialized. 
+ bool initialized() const; + + bool isError() const; + + protected: + void setInitializedError(); + void setInitializedNoError(); + + private: + std::atomic tag_payload_; + uint64_t fetchAnd(uint64_t arg); + + static constexpr uint64_t kNotInitializedPayload = 0b11; + static constexpr uint64_t kInitializedErrorPayload = 0b00; + static constexpr uint64_t kInitializedNoErrorPayload = 0b10; + + static constexpr uint64_t kInitializedMask = 0b1; + static constexpr uint64_t kInitialized = 0b0; + }; + + class GPUInstance; + class GPUDeviceBase; + + class GPUObject : public ErrorMonad + { + public: + explicit GPUObject(Ref device); + GPUObject(Ref device, ErrorTag tag); + GPUObject(Ref device, DelayedInitializationTag tag); + + GPUInstance *instance() const; + Ref device() const; + + private: + Ref device_; }; typedef uint32_t GPUIdentifier; - class GPUHandle + class GPUHandle : public GPUObject { private: static inline TrIdGeneratorBase Ids = TrIdGeneratorBase(0); public: - GPUHandle(std::string label = "") - : label(std::move(label)) - , id(Ids.get()) + struct LabelNotImplementedTag { - } + }; + static constexpr LabelNotImplementedTag kLabelNotImplemented = {}; + struct UntrackedByDeviceTag + { + }; + static constexpr UntrackedByDeviceTag kUntrackedByDevice = {}; + + GPUHandle(Ref device, std::string_view label); + GPUHandle(Ref device, ErrorTag tag, std::string_view label = {}); + GPUHandle(Ref device, DelayedInitializationTag tag, std::string_view label = {}); + GPUHandle(Ref device, LabelNotImplementedTag tag); + + virtual GPUHandleType type() const = 0; + void setLabel(std::string label); + const std::string &getLabel() const; public: - const std::string label = ""; const GPUIdentifier id; + + private: + std::string label_ = ""; }; } diff --git a/src/common/command_buffers/gpu/gpu_bind_group.cpp b/src/common/command_buffers/gpu/gpu_bind_group.cpp new file mode 100644 index 000000000..1c3e59e39 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group.cpp @@ 
-0,0 +1,20 @@ +#include + +using namespace std; + +namespace commandbuffers +{ + GPUBindGroupBase::GPUBindGroupBase(Ref device, + const GPUBindGroupDescriptor &descriptor, + void *bindingDataStart) + : GPUHandle(device, descriptor.label) + { + } + + GPUBindGroupBase::GPUBindGroupBase(Ref device, + GPUHandle::ErrorTag tag, + string_view label) + : GPUHandle(device, tag, label) + { + } +} diff --git a/src/common/command_buffers/gpu/gpu_bind_group.hpp b/src/common/command_buffers/gpu/gpu_bind_group.hpp index aa14ce744..5a6ea6775 100644 --- a/src/common/command_buffers/gpu/gpu_bind_group.hpp +++ b/src/common/command_buffers/gpu/gpu_bind_group.hpp @@ -4,64 +4,29 @@ #include #include -#include "./gpu_base.hpp" -#include "./gpu_texture.hpp" +#include +#include +#include +#include +#include namespace commandbuffers { - class GPUBindGroupLayout : public GPUHandle + class GPUDeviceBase; + + class GPUBindGroupBase : public GPUHandle { public: - class BufferLayout - { - public: - GPUBufferType type; - bool hasDynamicOffset = false; - uint32_t minBindingSize = 0; // in bytes - }; - - class TextureLayout - { - public: - bool multisampled = false; - }; - - class StorageTextureLayout - { - public: - GPUStorageAccess access; - GPUTextureFormat format; - std::optional viewDimension; - }; - - class ExternalTextureLayout - { - }; + virtual ~GPUBindGroupBase() = default; - class SamplerLayout - { - public: - GPUSamplerType type; - }; - - using ResourceLayout = std::variant; - class Entry - { - public: - uint32_t binding; - GPUShaderStage visibility; - ResourceLayout layout; - }; + protected: + GPUBindGroupBase(Ref device, + const GPUBindGroupDescriptor &descriptor, + void *bindingDataStart); private: - std::vector entries_; - }; + GPUBindGroupBase(Ref device, GPUHandle::ErrorTag tag, std::string_view label); - class GPUBindGroup : public GPUHandle - { + Ref *layout_ = nullptr; }; } diff --git a/src/common/command_buffers/gpu/gpu_bind_group_base.hpp 
b/src/common/command_buffers/gpu/gpu_bind_group_base.hpp new file mode 100644 index 000000000..e7dd82d9a --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group_base.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + struct GPUBindGroupEntry + { + uint32_t binding; + GPUBufferBase *buffer = nullptr; + uint64_t offset = 0; + uint64_t size = gpu_constants::kWholeSize; + GPUSamplerBase *sampler = nullptr; + GPUTextureViewBase *textureView = nullptr; + }; + + struct GPUBindGroupDescriptor + { + std::string_view label; + GPUBindGroupLayoutBase *layout; + size_t entryCount = 0; + GPUBindGroupEntry const *entries = nullptr; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_bind_group_layout.cpp b/src/common/command_buffers/gpu/gpu_bind_group_layout.cpp new file mode 100644 index 000000000..456800ab3 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group_layout.cpp @@ -0,0 +1,37 @@ +#include + +using namespace std; + +namespace commandbuffers +{ + GPUBindGroupLayoutBase::GPUBindGroupLayoutBase(Ref device, + string_view label, + Ref internal) + : GPUHandle(device, label) + , internal_layout_(internal) + { + } + + GPUBindGroupLayoutBase::GPUBindGroupLayoutBase(Ref device, + GPUHandle::ErrorTag tag, + string_view label) + : GPUHandle(device, tag, label) + { + } + + GPUBindGroupLayoutInternalBase *GPUBindGroupLayoutBase::getInternalBindGroupLayout() const + { + return internal_layout_.get(); + } + + bool GPUBindGroupLayoutBase::equal(const GPUBindGroupLayoutBase *other, + bool excludePipelineCompatibiltyToken) const + { + return getInternalBindGroupLayout() == other->getInternalBindGroupLayout(); + } + + bool GPUBindGroupLayoutBase::isEmpty() const + { + return internal_layout_ == nullptr || internal_layout_->isEmpty(); + } +} diff --git a/src/common/command_buffers/gpu/gpu_bind_group_layout.hpp b/src/common/command_buffers/gpu/gpu_bind_group_layout.hpp new file mode 
100644 index 000000000..9ead60a1b --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group_layout.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUBindGroupLayoutBase : public GPUHandle + { + public: + GPUBindGroupLayoutBase(Ref device, + std::string_view label, + Ref internal); + virtual ~GPUBindGroupLayoutBase() = default; + + GPUHandleType type() const override + { + return GPUHandleType::kBindGroupLayout; + } + + GPUBindGroupLayoutInternalBase *getInternalBindGroupLayout() const; + bool equal(const GPUBindGroupLayoutBase *other, + bool excludePipelineCompatibiltyToken = false) const; + + bool isEmpty() const; + + private: + GPUBindGroupLayoutBase(Ref device, + GPUHandle::ErrorTag tag, + std::string_view label); + + private: + Ref internal_layout_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_bind_group_layout_base.hpp b/src/common/command_buffers/gpu/gpu_bind_group_layout_base.hpp new file mode 100644 index 000000000..af1c5b326 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group_layout_base.hpp @@ -0,0 +1,58 @@ +#pragma once + +#include +#include +#include +#include + +namespace commandbuffers +{ + struct GPUBufferBindingLayout + { + GPUBufferBindingType type = GPUBufferBindingType::kUniform; + bool hasDynamicOffset = false; + uint64_t minBindingSize = 0; + }; + + struct GPUSamplerBindingLayout + { + GPUSamplerBindingType type = GPUSamplerBindingType::kFiltering; + }; + + struct GPUTextureBindingLayout + { + GPUTextureSampleType sampleType = GPUTextureSampleType::kFloat; + GPUTextureViewDimension viewDimension = GPUTextureViewDimension::k2D; + bool multisampled = false; + }; + + struct GPUStorageTextureBindingLayout + { + GPUStorageTextureAccess access = GPUStorageTextureAccess::kWriteOnly; + GPUTextureFormat format = GPUTextureFormat::kUndefined; + GPUTextureViewDimension viewDimension = GPUTextureViewDimension::k2D; + }; + + struct 
GPUBindGroupLayoutEntry + { + uint32_t binding; + GPUShaderStage visibility = GPUShaderStage::kNone; + uint32_t bindingArraySize = 0; + + GPUBufferBindingLayout buffer = {GPUBufferBindingType::kBindingNotUsed, false, 0}; + GPUSamplerBindingLayout sampler = {GPUSamplerBindingType::kBindingNotUsed}; + GPUTextureBindingLayout texture = {GPUTextureSampleType::kBindingNotUsed, + GPUTextureViewDimension::k2D, + false}; + GPUStorageTextureBindingLayout storageTexture = {GPUStorageTextureAccess::kBindingNotUsed, + GPUTextureFormat::kUndefined, + GPUTextureViewDimension::k2D}; + }; + + struct GPUBindGroupLayoutDescriptor + { + std::string_view label; + size_t entryCount = 0; + GPUBindGroupLayoutEntry const *entries = nullptr; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_bind_group_layout_internal.cpp b/src/common/command_buffers/gpu/gpu_bind_group_layout_internal.cpp new file mode 100644 index 000000000..597c635b2 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group_layout_internal.cpp @@ -0,0 +1,96 @@ +#include + +using namespace std; + +namespace commandbuffers +{ + GPUBindGroupLayoutInternalBase::GPUBindGroupLayoutInternalBase(Ref device, + const GPUBindGroupLayoutDescriptor &descriptor) + : GPUHandle(device, descriptor.label) + { + } + + GPUBindGroupLayoutInternalBase::~GPUBindGroupLayoutInternalBase() = default; + + const GPUBindingInfo &GPUBindGroupLayoutInternalBase::getBindingInfo(GPUBindingIndex bindingIndex) const + { + assert(!isError()); + // Assert that this is an internal binding. + assert(bindingIndex < getBindingCount()); + return binding_info_.at(bindingIndex); + } + + const GPUBindingInfo &GPUBindGroupLayoutInternalBase::getClientBindingInfo(GPUClientBindingIndex bindingIndex) const + { + assert(!isError()); + GPUBindingIndex index = GPUBindingIndex(uint32_t(bindingIndex)); + assert(index < binding_info_.size()); + + // Assert this is a user-facing binding and not an private internal binding. 
+ assert(binding_map_.contains(binding_info_.at(index).binding)); + return binding_info_.at(index); + } + + const GPUBindGroupLayoutInternalBase::BindingMap &GPUBindGroupLayoutInternalBase::getBindingMap() const + { + assert(!isError()); + return binding_map_; + } + + GPUBindingIndex GPUBindGroupLayoutInternalBase::asBindingIndex(GPUClientBindingIndex bindingIndex) const + { + assert(!isError()); + // Assert this is a user-facing binding and not a private internal binding, and that it + // represents an internal bindings. + GPUBindingIndex index = GPUBindingIndex(uint32_t(bindingIndex)); + assert(index < getBindingCount()); + assert(binding_map_.contains(binding_info_.at(index).binding)); + return index; + } + + GPUClientBindingIndex GPUBindGroupLayoutInternalBase::getClientBindingIndex(GPUBindingNumber bindingNumber) const + { + assert(!isError()); + const auto &it = binding_map_.find(bindingNumber); + assert(it != binding_map_.end()); + return it->second; + } + + GPUBindingIndex GPUBindGroupLayoutInternalBase::getBindingCount() const + { + assert(!isError()); + return getBindingTypeStart(GPUBindingTypeOrder_ExternalTexture); + } + + + bool GPUBindGroupLayoutInternalBase::isEmpty() const + { + assert(!isError()); + return binding_info_.empty() && !has_dynamic_array_; + } + + string GPUBindGroupLayoutInternalBase::entriesToString() const + { + string entries = "["; + string sep = ""; + const GPUBindGroupLayoutInternalBase::BindingMap &bindingMap = getBindingMap(); + for (const auto [bindingNumber, bindingIndex] : bindingMap) + { + const GPUBindingInfo &bindingInfo = getClientBindingInfo(bindingIndex); + entries += sep + bindingInfo.toString(); + sep = ", "; + } + entries += "]"; + return entries; + } + + GPUBindingIndex GPUBindGroupLayoutInternalBase::getBindingTypeStart(GPUBindingTypeOrder type) const + { + return binding_type_start_[type]; + } + + GPUBindingIndex GPUBindGroupLayoutInternalBase::getBindingTypeEnd(GPUBindingTypeOrder type) const + { + return 
binding_type_start_[GPUBindingTypeOrder(static_cast(type) + 1)]; + } +} diff --git a/src/common/command_buffers/gpu/gpu_bind_group_layout_internal.hpp b/src/common/command_buffers/gpu/gpu_bind_group_layout_internal.hpp new file mode 100644 index 000000000..a3915fda8 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_bind_group_layout_internal.hpp @@ -0,0 +1,87 @@ +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + + // In the BindGroupLayout, entries are sorted by type for more efficient lookup and iteration. + // This enum is the order that's used and can also be used to index various ranges of entries. + // The enum is public so that helper function can use it during creation of the BindGroupLayout, + // but the order is not meant to be used anywhere else. Use the accessors on the BindGroupLayout for + // logic that relies on the packing or the order. + enum GPUBindingTypeOrder : uint32_t + { + // Buffers + GPUBindingTypeOrder_DynamicBuffer, + GPUBindingTypeOrder_RegularBuffer, + // Textures + GPUBindingTypeOrder_SampledTexture, + GPUBindingTypeOrder_StorageTexture, + GPUBindingTypeOrder_InputAttachment, + // Samplers + GPUBindingTypeOrder_StaticSampler, + GPUBindingTypeOrder_RegularSampler, + // Texel Buffers + GPUBindingTypeOrder_TexelBuffer, + // Start of entries that are expanded in the frontend and aren't actually stored in the bind + // groups. 
+ GPUBindingTypeOrder_ExternalTexture, + GPUBindingTypeOrder_Count, + }; + + class GPUBindGroupLayoutInternalBase : public GPUHandle + { + using BindingMap = std::map; + + public: + GPUBindGroupLayoutInternalBase(Ref device, + const GPUBindGroupLayoutDescriptor &descriptor); + virtual ~GPUBindGroupLayoutInternalBase(); + + GPUHandleType type() const override + { + return GPUHandleType::kBindGroupLayoutInternal; + } + + // Getters for static bindings + const GPUBindingInfo &getBindingInfo(GPUBindingIndex bindingIndex) const; + const GPUBindingInfo &getClientBindingInfo(GPUClientBindingIndex bindingIndex) const; + const BindingMap &getBindingMap() const; + GPUBindingIndex asBindingIndex(GPUClientBindingIndex bindingIndex) const; + GPUClientBindingIndex getClientBindingIndex(GPUBindingNumber bindingNumber) const; + GPUBindingIndex getBindingCount() const; + + public: + bool isEmpty() const; + + std::string entriesToString() const; + + private: + std::unordered_map binding_info_; + GPUBindingCounts validation_binding_counts_ = {}; + + // Keep a list of the start indices for each kind of binding. Then (exclusive) end of a range + // of bindings is the start of the next range. (that's why we use count + 1 entry, to have the + // "end" of the last binding type) + GPUBindingIndex getBindingTypeStart(GPUBindingTypeOrder type) const; + GPUBindingIndex getBindingTypeEnd(GPUBindingTypeOrder type) const; + std::array binding_type_start_; + + BindingMap binding_map_; + + // Information about the dynamic binding array part of the BGL. 
+ bool has_dynamic_array_ = false; + GPUBindingNumber client_dynamic_array_start{0}; + GPUBindingIndex dynamic_array_start{0}; + GPUBindingIndex dynamic_array_metadata_binding{0}; + GPUDynamicBindingKind dynamic_array_kind = GPUDynamicBindingKind::kUndefined; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_binding_info.cpp b/src/common/command_buffers/gpu/gpu_binding_info.cpp new file mode 100644 index 000000000..c63edf3cf --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_binding_info.cpp @@ -0,0 +1,11 @@ +#include + +using namespace std; + +namespace commandbuffers +{ + string GPUBindingInfo::toString() const + { + return "GPUBindingInfo {}"; + } +} diff --git a/src/common/command_buffers/gpu/gpu_binding_info.hpp b/src/common/command_buffers/gpu/gpu_binding_info.hpp new file mode 100644 index 000000000..9c24df985 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_binding_info.hpp @@ -0,0 +1,151 @@ +#pragma once + +#include +#include +#include +#include + +namespace commandbuffers +{ + enum class GPUBindingInfoType + { + Buffer, + Sampler, + Texture, + StorageTexture, + TexelBuffer, + ExternalTexture, + StaticSampler, + // Internal to vulkan only. 
+ InputAttachment, + }; + + struct GPUBufferBindingInfo + { + GPUBufferBindingType type; + uint64_t minBindingSize{0u}; + bool hasDynamicOffset{false}; + + static GPUBufferBindingInfo From(const GPUBufferBindingLayout &); + }; + + struct GPUSamplerBindingInfo + { + GPUSamplerBindingType type; + + static GPUSamplerBindingInfo From(const GPUSamplerBindingLayout &); + }; + + struct GPUTextureBindingInfo + { + GPUTextureSampleType sampleType; + GPUTextureViewDimension viewDimension; + bool multisampled{false}; + + static GPUTextureBindingInfo From(const GPUTextureBindingLayout &); + }; + + struct GPUTexelBufferBindingInfo + { + // TODO: Add fields if needed + }; + + struct GPUStorageTextureBindingInfo + { + GPUTextureFormat format; + GPUTextureViewDimension viewDimension; + GPUStorageTextureAccess access; + + static GPUStorageTextureBindingInfo From(const GPUStorageTextureBindingLayout &); + }; + + struct GPUExternalTextureBindingInfo + { + GPUBindingIndex metadata; + GPUBindingIndex plane0; + GPUBindingIndex plane1; + }; + + struct GPUStaticSamplerBindingInfo + { + // No fields needed for static sampler + }; + + struct GPUInputAttachmentBindingInfo + { + GPUTextureSampleType type; + }; + + struct GPUBindingInfo + { + GPUBindingNumber binding; + GPUShaderStage visibility; + GPUBindingIndex arraySize{1u}; + GPUBindingIndex indexInArray{0u}; + + using BindingLayout = std::variant; + BindingLayout bindingLayout; + + bool operator==(const GPUBindingInfo &rhs) const = default; + std::string toString() const; + }; + + GPUBindingInfoType GetBindingInfoType(const GPUBindingInfo &); + + struct GPUBindingSlot + { + GPUBindGroupIndex group; + GPUBindingNumber binding; + + constexpr bool operator==(const GPUBindingSlot &rhs) const + { + return group == rhs.group && binding == rhs.binding; + } + constexpr bool operator!=(const GPUBindingSlot &rhs) const + { + return !(*this == rhs); + } + constexpr bool operator<(const GPUBindingSlot &rhs) const + { + if (group < rhs.group) + { + 
return true; + } + if (group > rhs.group) + { + return false; + } + return binding < rhs.binding; + } + }; + + struct GPUBindingCountsPerStage + { + uint32_t sampledTextureCount; + uint32_t samplerCount; + uint32_t storageBufferCount; + uint32_t storageTextureCount; + uint32_t texelBufferCount; + uint32_t uniformBufferCount; + uint32_t externalTextureCount; + uint32_t staticSamplerCount; + }; + + struct GPUBindingCounts + { + uint32_t totalCount; + uint32_t bufferCount; + uint32_t unverifiedBufferCount; + uint32_t dynamicUniformBufferCount; + uint32_t dynamicStorageBufferCount; + uint32_t staticSamplerCount; + GPUPerStage perStage; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_buffer.cpp b/src/common/command_buffers/gpu/gpu_buffer.cpp new file mode 100644 index 000000000..dbc9fbf96 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_buffer.cpp @@ -0,0 +1,15 @@ +#include + +namespace commandbuffers +{ + gpu::MaybeError GPUBufferBase::uploadData(uint64_t bufferOffset, const void *data, size_t size) + { + if (size == 0) + { + return {}; + } + + // TODO: implement uploadData + return {}; + } +} diff --git a/src/common/command_buffers/gpu/gpu_buffer.hpp b/src/common/command_buffers/gpu/gpu_buffer.hpp index e969351f4..2b067d1db 100644 --- a/src/common/command_buffers/gpu/gpu_buffer.hpp +++ b/src/common/command_buffers/gpu/gpu_buffer.hpp @@ -1,12 +1,106 @@ #pragma once -#include "./gpu_base.hpp" +#include +#include +#include +#include +#include namespace commandbuffers { - class GPUBuffer : public GPUHandle + enum class GPUBufferUsage : uint64_t { + kNone, + kMapRead, + kMapWrite, + kCopySrc, + kCopyDst, + kIndex, + kVertex, + kUniform, + kStorage, + kIndirect, + kQueryResolve, + kTexelBuffer, + }; + + struct GPUBufferDescriptor + { + std::string_view label; + GPUBufferUsage usage = GPUBufferUsage::kNone; + uint64_t size; + bool mappedAtCreation = false; + + // Equality operators, mostly for testing. 
Note that this tests + // strict pointer-pointer equality if the struct contains member pointers. + bool operator==(const GPUBufferDescriptor &rhs) const; + }; + + class GPUBufferBase : public GPUHandle + { + public: + enum class BufferState + { + kUnmapped, + kPendingMap, + kMapped, + kMappedAtCreation, + kHostMappedPersistent, + kSharedMemoryNoAccess, + kDestroyed, + }; + static bool IsMappedState(BufferState state) + { + return state == BufferState::kMapped || + state == BufferState::kMappedAtCreation || + state == BufferState::kHostMappedPersistent; + } + public: - ~GPUBuffer() = default; + virtual ~GPUBufferBase() = default; + + GPUHandleType type() const override final + { + return GPUHandleType::kBuffer; + } + + public: + size_t size() const; + GPUBufferUsage usage() const; + BufferState mapState() const; + void unmap(); + + virtual gpu::MaybeError uploadData(uint64_t bufferOffset, const void *data, size_t size); + + protected: + GPUBufferBase(Ref device, const GPUBufferDescriptor &descriptor); + GPUBufferBase(Ref device, + const GPUBufferDescriptor *descriptor, + GPUHandle::ErrorTag tag); + + bool mapAtCreationInternal(); + + BufferState state() const; + GPUMapMode mapMode() const; + size_t mapOffset() const; + size_t mapSize() const; + + uint64_t mAllocatedSize = 0; + + private: + size_t size_ = 0; + bool is_data_initialized = false; + + GPUBufferUsage usage_ = GPUBufferUsage::kNone; + GPUBufferUsage internal_usage_ = GPUBufferUsage::kNone; + std::atomic map_state_ = BufferState::kUnmapped; + + // A recursive buffer used to implement mappedAtCreation for buffers with non-mappable usage. + Ref staging_buffer_ = nullptr; + + // Mapping specific states. 
+ GPUMapMode mMapMode = GPUMapMode::kNone; + size_t mMapOffset = 0; + size_t mMapSize = 0; }; } diff --git a/src/common/command_buffers/gpu/gpu_command_buffer.cpp b/src/common/command_buffers/gpu/gpu_command_buffer.cpp new file mode 100644 index 000000000..de4402059 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_command_buffer.cpp @@ -0,0 +1,38 @@ +#include +#include + +using namespace std; + +namespace commandbuffers +{ + GPUCommandBufferBase::GPUCommandBufferBase(GPUCommandEncoder *encoder, + const GPUCommandBufferDescriptor *descriptor) + : GPUHandle(encoder->device(), descriptor->label) + { + } + + GPUCommandBufferBase::GPUCommandBufferBase(Ref device, GPUHandle::ErrorTag tag, string_view label) + : GPUHandle(device, tag, label) + { + } + + const string &GPUCommandBufferBase::getEncoderLabel() const + { + return encoder_label_; + } + + void GPUCommandBufferBase::setEncoderLabel(string encoderLabel) + { + encoder_label_ = move(encoderLabel); + } + + const gpu::CommandBufferResourceUsage &GPUCommandBufferBase::getResourceUsages() const + { + return resource_usages_; + } + + const vector &GPUCommandBufferBase::getIndirectDrawMetadata() + { + return indirect_draw_metadata_; + } +} diff --git a/src/common/command_buffers/gpu/gpu_command_buffer.hpp b/src/common/command_buffers/gpu/gpu_command_buffer.hpp index fd3d0a280..96bbf97ec 100644 --- a/src/common/command_buffers/gpu/gpu_command_buffer.hpp +++ b/src/common/command_buffers/gpu/gpu_command_buffer.hpp @@ -2,246 +2,63 @@ #include #include - -#include "./gpu_base.hpp" -#include "./gpu_buffer.hpp" -#include "./gpu_pipeline.hpp" +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include namespace commandbuffers { - class GPUCommand - { - public: - enum GPUCommandType : uint8_t - { - kDraw = 0, - kDrawIndexed, - kDrawIndirect, - kDrawIndexedIndirect, - kSetViewport, - kSetScissor, - kSetRenderPipeline, - kSetIndexBuffer, - kSetVertexBuffer, - kSetBindGroup, - 
kSetBlendConstant, - kSetStencilReference, - }; - - public: - const GPUCommandType type; - - GPUCommand(GPUCommandType type) - : type(type) - { - } - virtual ~GPUCommand() = default; - }; - - class GPUDrawCommand : public GPUCommand - { - public: - const uint32_t vertexCount; - const uint32_t instanceCount; - const uint32_t firstVertex; - const uint32_t firstInstance; - - GPUDrawCommand(uint32_t vertex_count, uint32_t instance_count, uint32_t first_vertex, uint32_t first_instance) - : GPUCommand(kDraw) - , vertexCount(vertex_count) - , instanceCount(instance_count) - , firstVertex(first_vertex) - , firstInstance(first_instance) - { - } - }; - - class GPUDrawIndexedCommand : public GPUCommand + class GPUCommandEncoder; + struct GPUCommandBufferDescriptor { - public: - const uint32_t indexCount; - const uint32_t instanceCount; - const uint32_t firstIndex; - const int32_t baseVertex; - const uint32_t firstInstance; - - GPUDrawIndexedCommand(uint32_t index_count, - uint32_t instance_count, - uint32_t first_index, - int32_t base_vertex, - uint32_t first_instance) - : GPUCommand(kDrawIndexed) - , indexCount(index_count) - , instanceCount(instance_count) - , firstIndex(first_index) - , baseVertex(base_vertex) - , firstInstance(first_instance) - { - } + std::string_view label; }; - class GPUSetViewportCommand : public GPUCommand + class GPUCommandBufferBase : public GPUHandle { - public: - const float x; - const float y; - const float width; - const float height; - const float minDepth; - const float maxDepth; - - GPUSetViewportCommand(float x, float y, float width, float height, float min_depth, float max_depth) - : GPUCommand(kSetViewport) - , x(x) - , y(y) - , width(width) - , height(height) - , minDepth(min_depth) - , maxDepth(max_depth) - { - } - }; + friend class GPUComputePassEncoderBase; + friend class GPURenderPassEncoderBase; - class GPUSetScissorCommand : public GPUCommand - { public: - const float x; - const float y; - const float width; - const float height; + 
GPUCommandBufferBase(GPUCommandEncoder *encoder, + const GPUCommandBufferDescriptor *descriptor); + virtual ~GPUCommandBufferBase() = default; - GPUSetScissorCommand(float x, float y, float width, float height) - : GPUCommand(kSetScissor) - , x(x) - , y(y) - , width(width) - , height(height) + GPUHandleType type() const override final { + return GPUHandleType::kCommandBuffer; } - }; - - class GPUSetRenderPipelineCommand : public GPUCommand - { - public: - const GPUIdentifier pipelineId; - GPUSetRenderPipelineCommand(const GPURenderPipeline &pipeline) - : GPUCommand(kSetRenderPipeline) - , pipelineId(pipeline.id) - { - } - }; + const std::string &getEncoderLabel() const; + void setEncoderLabel(std::string encoderLabel); - class GPUSetIndexBufferCommand : public GPUCommand - { - public: - const GPUIdentifier bufferId; - const GPUIndexFormat indexFormat; - const uint32_t offset; - const uint32_t size; + const gpu::CommandBufferResourceUsage &getResourceUsages() const; + const std::vector &getIndirectDrawMetadata(); - GPUSetIndexBufferCommand(const GPUBuffer &buffer, GPUIndexFormat index_format, uint32_t offset, uint32_t size) - : GPUCommand(kSetIndexBuffer) - , bufferId(buffer.id) - , indexFormat(index_format) - , offset(offset) - , size(size) + template + auto useCommands(F func) -> auto { + auto result = func(commands_); + commands_.reset(); + return result; } - }; - class GPUSetVertexBufferCommand : public GPUCommand - { - public: - const uint32_t slot; - const GPUIdentifier bufferId; - const uint32_t offset; - const uint32_t size; - - GPUSetVertexBufferCommand(const uint32_t slot, const GPUBuffer &buffer, uint64_t offset, uint32_t size) - : GPUCommand(kSetVertexBuffer) - , slot(slot) - , bufferId(buffer.id) - , offset(offset) - , size(size) - { - } - }; - - class GPUSetBindGroupCommand : public GPUCommand - { - public: - const GPUIdentifier bindGroupId; - const uint32_t index; - - GPUSetBindGroupCommand(const GPUBindGroup &bindGroup, uint32_t index) - : 
GPUCommand(kSetBindGroup) - , bindGroupId(bindGroup.id) - , index(index) - { - } - }; - - class GPUSetBlendConstantCommand : public GPUCommand - { - public: - const float r; - const float g; - const float b; - const float a; - - GPUSetBlendConstantCommand(float r, float g, float b, float a) - : GPUCommand(kSetBlendConstant) - , r(r) - , g(g) - , b(b) - , a(a) - { - } - }; - - class GPUSetStencilReferenceCommand : public GPUCommand - { - public: - const uint32_t reference; - - GPUSetStencilReferenceCommand(uint32_t reference) - : GPUCommand(kSetStencilReference) - , reference(reference) - { - } - }; - - class GPUCommandBuffer : public GPUHandle - { - friend class GPUComputePassEncoder; - friend class GPURenderPassEncoder; - - public: - GPUCommandBuffer(std::optional label) - : GPUHandle(label.value_or("GPUCommandBuffer")) - , commands_() - { - } - GPUCommandBuffer(std::optional label, const GPUCommandBuffer &source) - : GPUHandle(label.value_or("GPUCommandBuffer")) - , commands_(source.commands_) - { - } - virtual ~GPUCommandBuffer() = default; - - public: - virtual void execute() = 0; + protected: + gpu::CommandIterator commands_; private: - template - void addCommand(Args &&...args) - { - auto command = std::make_shared(std::forward(args)...); - commands_.push_back(command); - } + GPUCommandBufferBase(Ref device, GPUHandle::ErrorTag tag, std::string_view label); - protected: - std::vector> commands_; + gpu::CommandBufferResourceUsage resource_usages_; + std::vector indirect_draw_metadata_; + std::string encoder_label_; }; } diff --git a/src/common/command_buffers/gpu/gpu_command_encoder.cpp b/src/common/command_buffers/gpu/gpu_command_encoder.cpp new file mode 100644 index 000000000..2d949caf3 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_command_encoder.cpp @@ -0,0 +1,170 @@ +#include +#include +#include +#include +#include + +using namespace std; + +namespace commandbuffers +{ + class RenderPassValidationState final : NonMovable + { + public: + 
RenderPassValidationState() = default; + ~RenderPassValidationState() = default; + + + uint32_t getRenderWidth() const + { + return render_width_; + } + uint32_t getRenderHeight() const + { + return render_height_; + } + uint32_t getSampleCount() const + { + return sample_count_; + } + uint32_t getImplicitSampleCount() const + { + return implicit_sample_count_; + } + + private: + uint32_t render_width_ = 0; + uint32_t render_height_ = 0; + uint32_t sample_count_ = 0; + // The implicit multisample count used by MSAA render to single sampled. + uint32_t implicit_sample_count_ = 0; + }; + + Ref GPUCommandEncoder::Create(Ref device, + const GPUCommandEncoderDescriptor &descriptor) + { + return AcquireRef(new GPUCommandEncoder(device, descriptor)); + } + + Ref GPUCommandEncoder::MakeError(Ref device, string_view label) + { + return AcquireRef(new GPUCommandEncoder(device, GPUHandle::kError, label)); + } + + GPUCommandEncoder::GPUCommandEncoder(Ref device, const GPUCommandEncoderDescriptor &descriptor) + : GPUHandle(device, descriptor.label) + , encoding_context_(device, this) + , usage_validation_mode_(GPUUsageValidationMode::kDefault) + { + } + + GPUCommandEncoder::GPUCommandEncoder(Ref device, + GPUHandle::ErrorTag tag, + string_view label) + : GPUHandle(device, tag, label) + , encoding_context_(device, this) + , usage_validation_mode_(GPUUsageValidationMode::kDefault) + { + } + + Ref GPUCommandEncoder::beginRenderPass(GPURenderPassDescriptor &descriptor) + { + Ref device_ = device(); + bool depthReadOnly = false; + bool stencilReadOnly = false; + + RenderPassValidationState validationState; + + auto MakeError = [&]() + { + return GPURenderPassEncoder::MakeError(this, + &encoding_context_, + descriptor.label.value_or("")); + }; + + bool success = encoding_context_.tryEncode( + this, + [&](gpu::CommandAllocator *allocator) -> gpu::MaybeError + { + GPUBeginRenderPassCommand *cmd = allocator->allocate(GPUCommand::kBeginRenderPass); + return {}; + }); + + if (success) + { 
+ Ref passEncoder = GPURenderPassEncoder::Create(descriptor, + this, + &encoding_context_, + validationState.getRenderWidth(), + validationState.getRenderHeight(), + depthReadOnly, + stencilReadOnly); + + encoding_context_.enterPass(passEncoder.get()); + return passEncoder; + } + + return MakeError(); + } + + void GPUCommandEncoder::clearBuffer() + { + throw runtime_error("clearBuffer is not implemented"); + } + + void GPUCommandEncoder::copyBufferToBuffer() + { + throw runtime_error("copyBufferToBuffer is not implemented"); + } + + void GPUCommandEncoder::copyBufferToTexture() + { + throw runtime_error("copyBufferToTexture is not implemented"); + } + + void GPUCommandEncoder::copyTextureToBuffer() + { + throw runtime_error("copyTextureToBuffer is not implemented"); + } + + void GPUCommandEncoder::copyTextureToTexture() + { + throw runtime_error("copyTextureToTexture is not implemented"); + } + + unique_ptr GPUCommandEncoder::finish(optional label) const + { + return nullptr; + } + + void GPUCommandEncoder::insertDebugMarker(std::string_view marker) + { + encoding_context_.tryEncode( + this, + [&](gpu::CommandAllocator *allocator) -> gpu::MaybeError + { + GPUInsertDebugMarkerCommand *cmd = allocator->allocate(GPUCommand::kInsertDebugMarker); + return {}; + }); + } + + void GPUCommandEncoder::pushDebugGroup(std::string_view group) + { + throw runtime_error("pushDebugGroup is not implemented"); + } + + void GPUCommandEncoder::popDebugGroup() + { + throw runtime_error("popDebugGroup is not implemented"); + } + + void GPUCommandEncoder::resolveQuerySet() + { + throw runtime_error("resolveQuerySet is not implemented"); + } + + void GPUCommandEncoder::writeTimestamp() + { + throw runtime_error("writeTimestamp is not implemented"); + } +} diff --git a/src/common/command_buffers/gpu/gpu_command_encoder.hpp b/src/common/command_buffers/gpu/gpu_command_encoder.hpp index 78e1971d0..32242cde2 100644 --- a/src/common/command_buffers/gpu/gpu_command_encoder.hpp +++ 
b/src/common/command_buffers/gpu/gpu_command_encoder.hpp @@ -1,27 +1,71 @@ #pragma once #include +#include -#include "./gpu_base.hpp" -#include "./gpu_command_buffer.hpp" -#include "./gpu_pass_encoder_base.hpp" -#include "./gpu_renderpass_encoder.hpp" +#include +#include +#include +#include +#include +#include +#include namespace commandbuffers { - class GPUCommandEncoder : public GPUHandle + struct GPUCommandEncoderDescriptor { - using GPUHandle::GPUHandle; + std::string_view label; + }; + + enum class GPUUsageValidationMode + { + kDefault, + kInternal, + }; + class GPUCommandEncoder final : public GPUHandle + { public: - virtual ~GPUCommandEncoder() = default; + static Ref Create(Ref device, const GPUCommandEncoderDescriptor &descriptor); + static Ref MakeError(Ref device, std::string_view label); + + GPUHandleType type() const override + { + return GPUHandleType::kCommandEncoder; + } public: // TODO(yorkie): begineComputePass - virtual GPURenderPassEncoder beginRenderPass(GPURenderPassDescriptor &) = 0; - virtual std::unique_ptr finish(std::optional label = std::nullopt) const = 0; + Ref beginRenderPass(GPURenderPassDescriptor &); + void clearBuffer(); + void copyBufferToBuffer(); + void copyBufferToTexture(); + void copyTextureToBuffer(); + void copyTextureToTexture(); + std::unique_ptr finish(std::optional label = std::nullopt) const; + + void insertDebugMarker(std::string_view marker); + void pushDebugGroup(std::string_view group); + void popDebugGroup(); + + void resolveQuerySet(); + void writeTimestamp(); + + private: + GPUCommandEncoder(Ref device, const GPUCommandEncoderDescriptor &descriptor); + GPUCommandEncoder(Ref device, + GPUHandle::ErrorTag tag, + std::string_view label); + + bool validateFinish() const; + + gpu::EncodingContext encoding_context_; + std::unordered_set top_level_buffers_; + std::unordered_set top_level_textures_; + // std::unordered_set used_query_sets_; - protected: - std::shared_ptr current_pass_encoder_; + uint64_t 
debug_group_stack_size_ = 0; + GPUUsageValidationMode usage_validation_mode_; }; } diff --git a/src/common/command_buffers/gpu/gpu_commands.hpp b/src/common/command_buffers/gpu/gpu_commands.hpp new file mode 100644 index 000000000..20ebbd7a9 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_commands.hpp @@ -0,0 +1,158 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + struct GPUCommand + { + enum CommandType + { + kBeginComputePass, + kBeginOcclusionQuery, + kBeginRenderPass, + kClearBuffer, + kCopyBufferToBuffer, + kCopyBufferToTexture, + kCopyTextureToBuffer, + kCopyTextureToTexture, + kDispatch, + kDispatchIndirect, + kDraw, + kDrawIndexed, + kDrawIndirect, + kDrawIndexedIndirect, + kMultiDrawIndirect, + kMultiDrawIndexedIndirect, + kEndComputePass, + kEndOcclusionQuery, + kEndRenderPass, + kExecuteBundles, + kInsertDebugMarker, + kPixelLocalStorageBarrier, + kPopDebugGroup, + kPushDebugGroup, + kResolveQuerySet, + kSetComputePipeline, + kSetRenderPipeline, + kSetStencilReference, + kSetViewport, + kSetScissorRect, + kSetBlendConstant, + kSetBindGroup, + kSetImmediateData, + kSetIndexBuffer, + kSetVertexBuffer, + kWriteBuffer, + kWriteTimestamp, + }; + const CommandType type; + }; + + struct GPUInsertDebugMarkerCommand + { + uint32_t length; + }; + + + struct GPUBeginRenderPassCommand + { + // Ref attachmentState; + // PerColorAttachment colorAttachments; + // RenderPassDepthStencilAttachmentInfo depthStencilAttachment; + + // std::array storageAttachments; + + // Cache the width and height of all attachments for convenience + uint32_t width; + uint32_t height; + // Used for partial resolve + // ResolveRect resolveRect; + + // Ref occlusionQuerySet; + // TimestampWrites timestampWrites; + std::string label; + }; + + struct GPUDrawCommand + { + const uint32_t vertexCount; + const uint32_t instanceCount; + const uint32_t firstVertex; + const uint32_t firstInstance; + }; + + struct 
GPUDrawIndexedCommand + { + const uint32_t indexCount; + const uint32_t instanceCount; + const uint32_t firstIndex; + const int32_t baseVertex; + const uint32_t firstInstance; + }; + + struct GPUSetViewportCommand + { + const float x; + const float y; + const float width; + const float height; + const float minDepth; + const float maxDepth; + }; + + struct GPUSetScissorCommand + { + const float x; + const float y; + const float width; + const float height; + }; + + struct GPUSetRenderPipelineCommand + { + const GPUIdentifier pipelineId; + }; + + struct GPUSetIndexBufferCommand + { + const GPUIdentifier bufferId; + const GPUIndexFormat indexFormat; + const uint32_t offset; + const uint32_t size; + }; + + struct GPUSetVertexBufferCommand + { + const uint32_t slot; + const GPUIdentifier bufferId; + const uint32_t offset; + const uint32_t size; + }; + + struct GPUSetBindGroupCommand + { + const GPUIdentifier bindGroupId; + const uint32_t index; + }; + + struct GPUSetBlendConstantCommand + { + const float r; + const float g; + const float b; + const float a; + }; + + struct GPUSetStencilReferenceCommand + { + const uint32_t reference; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_compute_pipeline.cpp b/src/common/command_buffers/gpu/gpu_compute_pipeline.cpp new file mode 100644 index 000000000..01e4b4686 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_compute_pipeline.cpp @@ -0,0 +1,5 @@ +#include + +namespace commandbuffers +{ +} diff --git a/src/common/command_buffers/gpu/gpu_compute_pipeline.hpp b/src/common/command_buffers/gpu/gpu_compute_pipeline.hpp new file mode 100644 index 000000000..b082b9b33 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_compute_pipeline.hpp @@ -0,0 +1,18 @@ +#pragma once + +#include +#include + +namespace commandbuffers +{ + struct GPUComputePipelineDescriptor + { + std::string_view label; + GPUPipelineLayoutBase *layout = nullptr; + GPUComputeState compute; + }; + + class GPUComputePipelineBase : public 
GPUPipelineBase + { + }; +} diff --git a/src/common/command_buffers/gpu/gpu_constants.hpp b/src/common/command_buffers/gpu/gpu_constants.hpp new file mode 100644 index 000000000..2ca90fa03 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_constants.hpp @@ -0,0 +1,70 @@ +// Copyright 2017 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include + +#define WGPU_ARRAY_LAYER_COUNT_UNDEFINED (UINT32_MAX) +#define WGPU_COPY_STRIDE_UNDEFINED (UINT32_MAX) +#define WGPU_DEPTH_CLEAR_VALUE_UNDEFINED (NAN) +#define WGPU_DEPTH_SLICE_UNDEFINED (UINT32_MAX) +#define WGPU_LIMIT_U32_UNDEFINED (UINT32_MAX) +#define WGPU_LIMIT_U64_UNDEFINED (UINT64_MAX) +#define WGPU_MIP_LEVEL_COUNT_UNDEFINED (UINT32_MAX) +#define WGPU_QUERY_SET_INDEX_UNDEFINED (UINT32_MAX) +#define WGPU_STRLEN (SIZE_MAX) +#define WGPU_WHOLE_MAP_SIZE (SIZE_MAX) +#define WGPU_WHOLE_SIZE (UINT64_MAX) + +namespace commandbuffers::gpu_constants +{ + static constexpr uint32_t kMaxBindGroups = 4u; + static constexpr uint32_t kMaxBindingsPerBindGroup = 1000u; + static constexpr uint8_t kMaxVertexAttributes = 30u; + static constexpr uint8_t kMaxVertexBuffers = 8u; + static constexpr uint32_t kMaxVertexBufferArrayStride = 2048u; + static constexpr uint32_t kMaxBindGroupsPlusVertexBuffers = 24u; + static constexpr uint32_t kNumStages = 3; + static constexpr uint8_t kMaxColorAttachments = 8u; + static constexpr uint32_t kTextureBytesPerRowAlignment = 256u; + static constexpr uint32_t kQueryResolveAlignment = 256u; + static constexpr uint32_t kMaxInterStageShaderVariables = 16u; + static constexpr uint64_t kAssumedMaxBufferSize = 0x80000000u; // Use 2 GB when the limit is unavailable + + static constexpr uint32_t kArrayLayerCountUndefined = WGPU_ARRAY_LAYER_COUNT_UNDEFINED; + static constexpr uint32_t kCopyStrideUndefined = WGPU_COPY_STRIDE_UNDEFINED; + static constexpr float kDepthClearValueUndefined = std::numeric_limits::quiet_NaN(); + static constexpr uint32_t kDepthSliceUndefined = WGPU_DEPTH_SLICE_UNDEFINED; + static constexpr uint32_t kLimitU32Undefined = WGPU_LIMIT_U32_UNDEFINED; + static constexpr uint64_t kLimitU64Undefined = WGPU_LIMIT_U64_UNDEFINED; + static constexpr uint32_t kMipLevelCountUndefined = WGPU_MIP_LEVEL_COUNT_UNDEFINED; + static constexpr uint32_t kQuerySetIndexUndefined = WGPU_QUERY_SET_INDEX_UNDEFINED; + static 
constexpr size_t kStrlen = WGPU_STRLEN; + static constexpr size_t kWholeMapSize = WGPU_WHOLE_MAP_SIZE; + static constexpr uint64_t kWholeSize = WGPU_WHOLE_SIZE; +}; diff --git a/src/common/command_buffers/gpu/gpu_device.cpp b/src/common/command_buffers/gpu/gpu_device.cpp new file mode 100644 index 000000000..6682e32b9 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_device.cpp @@ -0,0 +1,193 @@ +#include + +namespace commandbuffers +{ + GPUDeviceBase::GPUDeviceBase(Ref adapter, const GPUDeviceDescriptor &descriptor) + : adapter_(adapter) + { + } + + const GPUAdapterInfo &GPUDeviceBase::adapterInfo() const + { + return adapter_->info(); + } + + const GPUSupportedFeatures &GPUDeviceBase::features() const + { + return features_; + } + + const GPUSupportedLimits &GPUDeviceBase::limits() const + { + return limits_; + } + + bool GPUDeviceBase::validateHandle(const GPUHandle &handle) const + { + return false; + } + + GPUInstance *GPUDeviceBase::getInstance() const + { + return adapter_->instance(); + } + + GPUAdapterBase *GPUDeviceBase::getAdapter() const + { + return adapter_.get(); + } + + gpu::PhysicalDeviceBase *GPUDeviceBase::getPhysicalDevice() const + { + return adapter_->physicalDevice(); + } + + Ref GPUDeviceBase::getOrCreateBindGroupLayout( + const GPUBindGroupLayoutDescriptor &descriptor) + { + return nullptr; + } + + GPUBindGroupLayoutBase *GPUDeviceBase::getEmptyBindGroupLayout() + { + assert(empty_bind_group_layout_ != nullptr); + return empty_bind_group_layout_.get(); + } + + GPUPipelineLayoutBase *GPUDeviceBase::getEmptyPipelineLayout() + { + assert(empty_pipeline_layout_ != nullptr); + return empty_pipeline_layout_.get(); + } + + // Object creation methods that be used in a reentrant manner. 
+ Ref GPUDeviceBase::createBindGroup( + const GPUBindGroupDescriptor *descriptor, + GPUUsageValidationMode mode) + { + return createBindGroupImpl(*descriptor); + } + + Ref GPUDeviceBase::createBindGroupLayout(const GPUBindGroupLayoutDescriptor *descriptor, + bool allowInternalBinding) + { + return nullptr; + } + + Ref GPUDeviceBase::createBuffer(const GPUBufferDescriptor *descriptor) + { + return nullptr; + } + + Ref GPUDeviceBase::createCommandEncoder(const GPUCommandEncoderDescriptor *descriptor) + { + return nullptr; + } + + Ref GPUDeviceBase::createComputePipeline(const GPUComputePipelineDescriptor *descriptor) + { + return nullptr; + } + + Ref GPUDeviceBase::createShaderModule(const GPUShaderModuleDescriptor *descriptor, + const std::vector &internalExtensions) + { + return nullptr; + } + + Ref GPUDeviceBase::createTexture(const GPUTextureDescriptor *descriptor) + { + return nullptr; + } + + Ref GPUDeviceBase::createTextureView(const GPUTextureViewDescriptor *descriptor) + { + return nullptr; + } + + GPUDeviceBase::State GPUDeviceBase::getState() const + { + return state_; + } + + bool GPUDeviceBase::isLost() const + { + assert(state_ != State::BeingCreated); + return state_ != State::Alive; + } + + gpu::MaybeError GPUDeviceBase::validateIsAlive() const + { + // TODO(yorkie): implement this. 
+ assert(state_ == State::Alive); + return {}; + } + + bool GPUDeviceBase::isValidationEnabled() const + { + // return !IsToggleEnabled(Toggle::SkipValidation); + return false; + } + + bool GPUDeviceBase::isRobustnessEnabled() const + { + // return !IsToggleEnabled(Toggle::DisableRobustness); + return false; + } + + bool GPUDeviceBase::isCompatibilityMode() const + { + // return !HasFeature(Feature::CoreFeaturesAndLimits); + return false; + } + + bool GPUDeviceBase::isImmediateErrorHandlingEnabled() const + { + // return mIsImmediateErrorHandlingEnabled; + return false; + } + + gpu::MaybeError GPUDeviceBase::tick() + { + if (isLost() || !queue_->hasScheduledCommands()) + { + return {}; + } + + tickImpl(); + return {}; + } + + bool GPUDeviceBase::mayRequireDuplicationOfIndirectParameters() const + { + return false; + } + + void GPUDeviceBase::setLabelImpl() + { + } + + bool GPUDeviceBase::reduceMemoryUsageImpl() + { + return false; + } + + void GPUDeviceBase::performIdleTasksImpl() + { + } + + Ref GPUDeviceBase::createEmptyBindGroupLayout() + { + GPUBindGroupLayoutDescriptor desc = {}; + desc.entryCount = 0; + desc.entries = nullptr; + + return getOrCreateBindGroupLayout(desc); + } + + Ref GPUDeviceBase::createEmptyPipelineLayout() + { + // TODO(yorkie): implement this. 
+ return nullptr; + } +} diff --git a/src/common/command_buffers/gpu/gpu_device.hpp b/src/common/command_buffers/gpu/gpu_device.hpp index 991888ce7..b9ba98918 100644 --- a/src/common/command_buffers/gpu/gpu_device.hpp +++ b/src/common/command_buffers/gpu/gpu_device.hpp @@ -2,61 +2,147 @@ #include #include +#include #include +#include -#include "./gpu_base.hpp" -#include "./gpu_adapter.hpp" -#include "./gpu_command_buffer.hpp" -#include "./gpu_command_encoder.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include namespace commandbuffers { - class GPUQueue : public GPUHandle + namespace gpu { - public: - virtual ~GPUQueue() = default; + class PhysicalDeviceBase; + } - public: - virtual void submit(const std::vector> &) = 0; + struct GPUDeviceDescriptor + { + std::string_view label; + size_t requiredFeatureCount = 0; + GPUFeatureName const *requiredFeatures = nullptr; + GPUSupportedLimits *requiredLimits = nullptr; }; - /** - * The `GPUDevice` interface represents a logical GPU device. - * - * In usually, at client-side WebGPU implementation, it implements the `GPUDevice` interface to send - * `GPUCommandBuffer` to the renderer. And at server-side's renderer, it implements the same `GPUDevice` interface to - * execute the `GPUCommandBuffer` on the corresponding graphics API (e.g. OpenGL, Vulkan, etc.) via RHI. 
- */ - class GPUDevice : public GPUHandle + class GPUDeviceBase { public: - virtual ~GPUDevice() = default; + GPUDeviceBase(Ref adapter, const GPUDeviceDescriptor &descriptor); + virtual ~GPUDeviceBase() = default; public: - const GPUAdapterInfo &adapterInfo() const - { - return adapter_info_; - } - const GPUSupportedFeatures &features() const - { - return features_; - } - const GPUSupportedLimits &limits() const - { - return limits_; - } - GPUQueue &queueRef() + const GPUAdapterInfo &adapterInfo() const; + const GPUSupportedFeatures &features() const; + const GPUSupportedLimits &limits() const; + + bool validateHandle(const GPUHandle &handle) const; + + GPUInstance *getInstance() const; + GPUAdapterBase *getAdapter() const; + gpu::PhysicalDeviceBase *getPhysicalDevice() const; + + virtual std::unique_ptr createCommandBuffer( + GPUCommandEncoder &encoder, + const GPUCommandBufferDescriptor *descriptor = nullptr) = 0; + + Ref getOrCreateBindGroupLayout( + const GPUBindGroupLayoutDescriptor &descriptor); + + GPUBindGroupLayoutBase *getEmptyBindGroupLayout(); + GPUPipelineLayoutBase *getEmptyPipelineLayout(); + + // Object creation methods that be used in a reentrant manner. + Ref createBindGroup(const GPUBindGroupDescriptor *, + GPUUsageValidationMode mode = GPUUsageValidationMode::kDefault); + Ref createBindGroupLayout(const GPUBindGroupLayoutDescriptor *, + bool allow_internal_binding = false); + Ref createBuffer(const GPUBufferDescriptor *); + Ref createCommandEncoder(const GPUCommandEncoderDescriptor *); + Ref createComputePipeline(const GPUComputePipelineDescriptor *); + Ref createShaderModule(const GPUShaderModuleDescriptor *, + const std::vector &internal_extensions = {}); + Ref createTexture(const GPUTextureDescriptor *); + Ref createTextureView(const GPUTextureViewDescriptor *); + + // The device state which is a combination of creation state and loss state. 
+ // + // - BeingCreated: the device didn't finish creation yet and the frontend cannot be used + // (both for the application calling WebGPU, or re-entrant calls). No work exists on + // the GPU timeline. + // - Alive: the device is usable and might have work happening on the GPU timeline. + // - BeingDisconnected: the device is no longer usable because we are waiting for all + // work on the GPU timeline to finish. (this is to make validation prevent the + // application from adding more work during the transition from Available to + // Disconnected) + // - Disconnected: there is no longer work happening on the GPU timeline and the CPU data + // structures can be safely destroyed without additional synchronization. + // - Destroyed: the device is disconnected and resources have been reclaimed. + enum class State { - assert(queue_ != nullptr); - return *queue_; - } + BeingCreated, + Alive, + BeingDisconnected, + Disconnected, + Destroyed, + }; + State getState() const; + bool isLost() const; + + gpu::MaybeError validateIsAlive() const; + bool isValidationEnabled() const; + bool isRobustnessEnabled() const; + bool isCompatibilityMode() const; + bool isImmediateErrorHandlingEnabled() const; + + gpu::MaybeError tick(); - virtual std::unique_ptr createCommandEncoder(std::optional label) = 0; + virtual bool mayRequireDuplicationOfIndirectParameters() const; protected: GPUAdapterInfo adapter_info_; GPUSupportedFeatures features_; GPUSupportedLimits limits_; - std::unique_ptr queue_; + + private: + virtual Ref createBindGroupImpl(const GPUBindGroupDescriptor &) = 0; + virtual Ref createBindGroupLayoutImpl(const GPUBindGroupLayoutDescriptor &) = 0; + virtual Ref createBufferImpl(const GPUBufferDescriptor &) = 0; + virtual Ref createPipelineLayoutImpl(const GPUPipelineLayoutDescriptor &) = 0; + virtual Ref createShaderModuleImpl(const GPUShaderModuleDescriptor &, + const std::vector &) = 0; + virtual Ref createTextureImpl(const GPUTextureDescriptor &) = 0; + virtual Ref 
createTextureViewImpl(Ref texture, + const GPUTextureViewDescriptor &) = 0; + virtual Ref createUninitializedComputePipelineImpl(const GPUComputePipelineDescriptor &) = 0; + virtual Ref createUninitializedRenderPipelineImpl(const GPURenderPipelineDescriptor &) = 0; + + virtual bool tickImpl() = 0; + virtual void setLabelImpl(); + virtual bool reduceMemoryUsageImpl(); + virtual void performIdleTasksImpl(); + + Ref createEmptyBindGroupLayout(); + Ref createEmptyPipelineLayout(); + + std::atomic state_ = State::BeingCreated; + Ref adapter_; + Ref queue_; + + Ref empty_bind_group_layout_; + Ref empty_pipeline_layout_; }; } diff --git a/src/common/command_buffers/gpu/gpu_error.cpp b/src/common/command_buffers/gpu/gpu_error.cpp new file mode 100644 index 000000000..c99710cc0 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_error.cpp @@ -0,0 +1,5 @@ +#include + +namespace commandbuffers +{ +} diff --git a/src/common/command_buffers/gpu/gpu_error.hpp b/src/common/command_buffers/gpu/gpu_error.hpp new file mode 100644 index 000000000..16711b32f --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_error.hpp @@ -0,0 +1,10 @@ +#pragma once + +#include + +namespace commandbuffers +{ + class GPUError + { + }; +} diff --git a/src/common/command_buffers/gpu/gpu_info.cpp b/src/common/command_buffers/gpu/gpu_info.cpp new file mode 100644 index 000000000..6d34783ed --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_info.cpp @@ -0,0 +1,900 @@ +#include +#include + +using namespace std; + +namespace commandbuffers::gpu_info +{ + enum class Architecture + { + Unknown, + AMD_GCN1, + AMD_GCN2, + AMD_GCN3, + AMD_GCN4, + AMD_GCN5, + AMD_RDNA1, + AMD_RDNA2, + AMD_RDNA3, + AMD_RDNA4, + AMD_CDNA1, + ARM_Midgard, + ARM_Bifrost, + ARM_Valhall, + ARM_Gen5, + Broadcom_VideoCore, + Google_Swiftshader, + ImgTec_Rogue, + ImgTec_Furian, + ImgTec_Albiorix, + Intel_Gen7, + Intel_Gen8, + Intel_Gen9, + Intel_Gen11, + Intel_Gen12LP, + Intel_Gen12HP, + Intel_XeLPG, + Intel_Xe2LPG, + 
Intel_Xe2HPG, + Intel_Xe3LPG, + Mesa_Software, + Microsoft_WARP, + Nvidia_Fermi, + Nvidia_Kepler, + Nvidia_Maxwell, + Nvidia_Pascal, + Nvidia_Turing, + Nvidia_Ampere, + Nvidia_Lovelace, + Nvidia_Blackwell, + Nvidia_Volta, + QualcommPCI_Adreno4xx, + QualcommPCI_Adreno5xx, + QualcommPCI_Adreno6xx, + QualcommPCI_Adreno7xx, + QualcommPCI_Adreno8xx, + QualcommACPI_Adreno6xx, + QualcommACPI_Adreno7xx, + Samsung_RDNA2, + Samsung_RDNA3, + }; + + Architecture GetArchitecture(PCIVendorID vendorId, PCIDeviceID deviceId) + { + switch (vendorId) + { + case kVendorID_AMD: + { + switch (deviceId & 0xFFF0) + { + case 0x1300: + case 0x1310: + case 0x6600: + case 0x6610: + case 0x6660: + case 0x6790: + case 0x6800: + case 0x6810: + case 0x6820: + case 0x6830: + return Architecture::AMD_GCN1; + case 0x6640: + case 0x6650: + case 0x67A0: + case 0x67B0: + case 0x9830: + case 0x9850: + return Architecture::AMD_GCN2; + case 0x6900: + case 0x6920: + case 0x6930: + case 0x7300: + case 0x9870: + case 0x98E0: + return Architecture::AMD_GCN3; + case 0x67C0: + case 0x67D0: + case 0x67E0: + case 0x67F0: + case 0x6980: + case 0x6990: + case 0x6FD0: + case 0x9920: + return Architecture::AMD_GCN4; + case 0x66A0: + case 0x6860: + case 0x6870: + case 0x6940: + case 0x69A0: + case 0x15D0: + case 0x1630: + return Architecture::AMD_GCN5; + case 0x7310: + case 0x7340: + case 0x7360: + return Architecture::AMD_RDNA1; + case 0x73A0: + case 0x73B0: + case 0x73D0: + case 0x73E0: + case 0x73F0: + case 0x7400: + case 0x7420: + case 0x7430: + case 0x1430: + case 0x1500: + case 0x15E0: + case 0x1640: + case 0x1680: + case 0x13c0: + case 0x13f0: + return Architecture::AMD_RDNA2; + case 0x7440: + case 0x7470: + case 0x7480: + case 0x15B0: + case 0x7450: + case 0x1900: + case 0x1580: + case 0x1110: + return Architecture::AMD_RDNA3; + case 0x7550: + case 0x7590: + return Architecture::AMD_RDNA4; + case 0x7380: + return Architecture::AMD_CDNA1; + } + } + break; + case kVendorID_ARM: + { + switch (deviceId & 
0xF0000000) + { + case 0x00000000: + return Architecture::ARM_Midgard; + case 0x60000000: + case 0x70000000: + return Architecture::ARM_Bifrost; + case 0x90000000: + case 0xA0000000: + case 0xB0000000: + return Architecture::ARM_Valhall; + case 0xC0000000: + case 0xD0000000: + return Architecture::ARM_Gen5; + } + } + break; + case kVendorID_Broadcom: + { + switch (deviceId & 0x00000000) + { + case 0x00000000: + return Architecture::Broadcom_VideoCore; + } + } + break; + case kVendorID_Google: + { + switch (deviceId) + { + case 0xC0DE: + return Architecture::Google_Swiftshader; + } + } + break; + case kVendorID_ImgTec: + { + switch (deviceId & 0xFF000000) + { + case 0x00000000: + case 0x22000000: + case 0x24000000: + return Architecture::ImgTec_Rogue; + case 0x1b000000: + return Architecture::ImgTec_Furian; + case 0x35000000: + case 0x36000000: + return Architecture::ImgTec_Albiorix; + } + } + break; + case kVendorID_Intel: + { + switch (deviceId & 0xFF00) + { + case 0x0100: + case 0x0400: + case 0x0A00: + case 0x0D00: + case 0x0F00: + return Architecture::Intel_Gen7; + case 0x1600: + case 0x2200: + return Architecture::Intel_Gen8; + case 0x1900: + case 0x3100: + case 0x3E00: + case 0x5A00: + case 0x5900: + case 0x8700: + case 0x9B00: + return Architecture::Intel_Gen9; + case 0x8A00: + case 0x4E00: + case 0x9800: + return Architecture::Intel_Gen11; + case 0x4600: + case 0x4C00: + case 0x4900: + case 0x9A00: + case 0xA700: + return Architecture::Intel_Gen12LP; + case 0x4F00: + case 0x5600: + return Architecture::Intel_Gen12HP; + case 0x7D00: + case 0xB600: + return Architecture::Intel_XeLPG; + case 0x6400: + return Architecture::Intel_Xe2LPG; + case 0xE200: + return Architecture::Intel_Xe2HPG; + case 0xB000: + return Architecture::Intel_Xe3LPG; + } + } + break; + case kVendorID_Mesa: + { + switch (deviceId) + { + case 0x0000: + return Architecture::Mesa_Software; + } + } + break; + case kVendorID_Microsoft: + { + switch (deviceId) + { + case 0x8c: + return 
Architecture::Microsoft_WARP; + } + } + break; + case kVendorID_Nvidia: + { + switch (deviceId & 0xFFFFFF00) + { + case 0x0D00: + return Architecture::Nvidia_Fermi; + case 0x0F00: + case 0x1000: + case 0x1100: + case 0x1200: + return Architecture::Nvidia_Kepler; + case 0x1300: + case 0x1400: + case 0x1600: + case 0x1700: + return Architecture::Nvidia_Maxwell; + case 0x1500: + case 0x1B00: + case 0x1C00: + case 0x1D00: + return Architecture::Nvidia_Pascal; + case 0x1E00: + case 0x1F00: + case 0x2100: + return Architecture::Nvidia_Turing; + case 0x2200: + case 0x2400: + case 0x2500: + case 0x2000: + return Architecture::Nvidia_Ampere; + case 0x2600: + case 0x2700: + case 0x2800: + return Architecture::Nvidia_Lovelace; + case 0x2b00: + case 0x2c00: + case 0x2d00: + case 0x2f00: + return Architecture::Nvidia_Blackwell; + } + switch (deviceId & 0xFF000000) + { + case 0x1e000000: + return Architecture::Nvidia_Kepler; + case 0x92000000: + return Architecture::Nvidia_Maxwell; + case 0x93000000: + return Architecture::Nvidia_Pascal; + case 0x97000000: + return Architecture::Nvidia_Ampere; + case 0xa5000000: + return Architecture::Nvidia_Volta; + } + } + break; + case kVendorID_QualcommPCI: + { + switch (deviceId & 0xFF000000) + { + case 0x04000000: + return Architecture::QualcommPCI_Adreno4xx; + case 0x05000000: + return Architecture::QualcommPCI_Adreno5xx; + case 0x06000000: + return Architecture::QualcommPCI_Adreno6xx; + case 0x07000000: + case 0x43000000: + case 0x36000000: + case 0x37000000: + return Architecture::QualcommPCI_Adreno7xx; + case 0x44000000: + return Architecture::QualcommPCI_Adreno8xx; + } + } + break; + case kVendorID_QualcommACPI: + { + switch (deviceId & 0xFFFFFF00) + { + case 0x41333800: + case 0x36334100: + case 0x41333400: + case 0x36333600: + return Architecture::QualcommACPI_Adreno6xx; + case 0x37314400: + case 0x36334300: + return Architecture::QualcommACPI_Adreno7xx; + } + } + break; + case kVendorID_Samsung: + { + switch (deviceId & 0xFFFFFFFF) 
+ { + case 0x000073A0: + case 0x01300100: + return Architecture::Samsung_RDNA2; + case 0x02600200: + return Architecture::Samsung_RDNA3; + } + } + break; + case kVendorID_Huawei: + { + switch (deviceId & 0xFFFFFFFF) + { + } + } + break; + } + + return Architecture::Unknown; + } + + bool IsAMD(PCIVendorID vendorId) + { + return vendorId == kVendorID_AMD; + } + bool IsApple(PCIVendorID vendorId) + { + return vendorId == kVendorID_Apple; + } + bool IsARM(PCIVendorID vendorId) + { + return vendorId == kVendorID_ARM; + } + bool IsBroadcom(PCIVendorID vendorId) + { + return vendorId == kVendorID_Broadcom; + } + bool IsGoogle(PCIVendorID vendorId) + { + return vendorId == kVendorID_Google; + } + bool IsImgTec(PCIVendorID vendorId) + { + return vendorId == kVendorID_ImgTec; + } + bool IsIntel(PCIVendorID vendorId) + { + return vendorId == kVendorID_Intel; + } + bool IsMesa(PCIVendorID vendorId) + { + return vendorId == kVendorID_Mesa; + } + bool IsMicrosoft(PCIVendorID vendorId) + { + return vendorId == kVendorID_Microsoft; + } + bool IsNvidia(PCIVendorID vendorId) + { + return vendorId == kVendorID_Nvidia; + } + bool IsQualcommPCI(PCIVendorID vendorId) + { + return vendorId == kVendorID_QualcommPCI; + } + bool IsQualcommACPI(PCIVendorID vendorId) + { + return vendorId == kVendorID_QualcommACPI; + } + bool IsSamsung(PCIVendorID vendorId) + { + return vendorId == kVendorID_Samsung; + } + bool IsHuawei(PCIVendorID vendorId) + { + return vendorId == kVendorID_Huawei; + } + + // Architecture checks + + // AMD architectures + bool IsAMDGCN1(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_GCN1; + } + bool IsAMDGCN2(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_GCN2; + } + bool IsAMDGCN3(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_GCN3; + } + bool IsAMDGCN4(PCIVendorID vendorId, 
PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_GCN4; + } + bool IsAMDGCN5(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_GCN5; + } + bool IsAMDRDNA1(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_RDNA1; + } + bool IsAMDRDNA2(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_RDNA2; + } + bool IsAMDRDNA3(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_RDNA3; + } + bool IsAMDRDNA4(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_RDNA4; + } + bool IsAMDCDNA1(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::AMD_CDNA1; + } + // ARM architectures + bool IsARMMidgard(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ARM_Midgard; + } + bool IsARMBifrost(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ARM_Bifrost; + } + bool IsARMValhall(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ARM_Valhall; + } + bool IsARMGen5(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ARM_Gen5; + } + // Broadcom architectures + bool IsBroadcomVideoCore(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Broadcom_VideoCore; + } + // Google architectures + bool IsGoogleSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Google_Swiftshader; + } + // Img Tec architectures + bool IsImgTecRogue(PCIVendorID 
vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ImgTec_Rogue; + } + bool IsImgTecFurian(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ImgTec_Furian; + } + bool IsImgTecAlbiorix(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::ImgTec_Albiorix; + } + // Intel architectures + bool IsIntelGen7(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Gen7; + } + bool IsIntelGen8(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Gen8; + } + bool IsIntelGen9(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Gen9; + } + bool IsIntelGen11(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Gen11; + } + bool IsIntelGen12LP(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Gen12LP; + } + bool IsIntelGen12HP(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Gen12HP; + } + bool IsIntelXeLPG(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_XeLPG; + } + bool IsIntelXe2LPG(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Xe2LPG; + } + bool IsIntelXe2HPG(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Xe2HPG; + } + bool IsIntelXe3LPG(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Intel_Xe3LPG; + } + // Mesa architectures + bool IsMesaSoftware(PCIVendorID vendorId, 
PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Mesa_Software; + } + // Microsoft architectures + bool IsMicrosoftWARP(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Microsoft_WARP; + } + // Nvidia architectures + bool IsNvidiaFermi(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Fermi; + } + bool IsNvidiaKepler(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Kepler; + } + bool IsNvidiaMaxwell(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Maxwell; + } + bool IsNvidiaPascal(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Pascal; + } + bool IsNvidiaTuring(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Turing; + } + bool IsNvidiaAmpere(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Ampere; + } + bool IsNvidiaLovelace(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Lovelace; + } + bool IsNvidiaBlackwell(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Blackwell; + } + bool IsNvidiaVolta(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Nvidia_Volta; + } + // QualcommPCI architectures + bool IsQualcommPCIAdreno4xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::QualcommPCI_Adreno4xx; + } + bool IsQualcommPCIAdreno5xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, 
deviceId) == Architecture::QualcommPCI_Adreno5xx; + } + bool IsQualcommPCIAdreno6xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::QualcommPCI_Adreno6xx; + } + bool IsQualcommPCIAdreno7xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::QualcommPCI_Adreno7xx; + } + bool IsQualcommPCIAdreno8xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::QualcommPCI_Adreno8xx; + } + // QualcommACPI architectures + bool IsQualcommACPIAdreno6xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::QualcommACPI_Adreno6xx; + } + bool IsQualcommACPIAdreno7xx(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::QualcommACPI_Adreno7xx; + } + // Samsung architectures + bool IsSamsungRDNA2(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Samsung_RDNA2; + } + bool IsSamsungRDNA3(PCIVendorID vendorId, PCIDeviceID deviceId) + { + return GetArchitecture(vendorId, deviceId) == Architecture::Samsung_RDNA3; + } + + // GPUAdapterInfo fields + string GetVendorName(PCIVendorID vendorId) + { + switch (vendorId) + { + case kVendorID_AMD: + return "amd"; + case kVendorID_Apple: + return "apple"; + case kVendorID_ARM: + return "arm"; + case kVendorID_Broadcom: + return "broadcom"; + case kVendorID_Google: + return "google"; + case kVendorID_ImgTec: + return "img-tec"; + case kVendorID_Intel: + return "intel"; + case kVendorID_Mesa: + return "mesa"; + case kVendorID_Microsoft: + return "microsoft"; + case kVendorID_Nvidia: + return "nvidia"; + case kVendorID_QualcommPCI: + return "qualcomm"; + case kVendorID_QualcommACPI: + return "qualcomm"; + case kVendorID_Samsung: + return "samsung"; + case kVendorID_Huawei: + return "huawei"; + } + + return ""; 
+ } + + string GetArchitectureName(PCIVendorID vendorId, PCIDeviceID deviceId) + { + Architecture arch = GetArchitecture(vendorId, deviceId); + switch (arch) + { + case Architecture::Unknown: + return ""; + case Architecture::AMD_GCN1: + return "gcn-1"; + case Architecture::AMD_GCN2: + return "gcn-2"; + case Architecture::AMD_GCN3: + return "gcn-3"; + case Architecture::AMD_GCN4: + return "gcn-4"; + case Architecture::AMD_GCN5: + return "gcn-5"; + case Architecture::AMD_RDNA1: + return "rdna-1"; + case Architecture::AMD_RDNA2: + return "rdna-2"; + case Architecture::AMD_RDNA3: + return "rdna-3"; + case Architecture::AMD_RDNA4: + return "rdna-4"; + case Architecture::AMD_CDNA1: + return "cdna-1"; + case Architecture::ARM_Midgard: + return "midgard"; + case Architecture::ARM_Bifrost: + return "bifrost"; + case Architecture::ARM_Valhall: + return "valhall"; + case Architecture::ARM_Gen5: + return "gen-5"; + case Architecture::Broadcom_VideoCore: + return "videocore"; + case Architecture::Google_Swiftshader: + return "swiftshader"; + case Architecture::ImgTec_Rogue: + return "rogue"; + case Architecture::ImgTec_Furian: + return "furian"; + case Architecture::ImgTec_Albiorix: + return "albiorix"; + case Architecture::Intel_Gen7: + return "gen-7"; + case Architecture::Intel_Gen8: + return "gen-8"; + case Architecture::Intel_Gen9: + return "gen-9"; + case Architecture::Intel_Gen11: + return "gen-11"; + case Architecture::Intel_Gen12LP: + return "gen-12lp"; + case Architecture::Intel_Gen12HP: + return "gen-12hp"; + case Architecture::Intel_XeLPG: + return "xe-lpg"; + case Architecture::Intel_Xe2LPG: + return "xe-2lpg"; + case Architecture::Intel_Xe2HPG: + return "xe-2hpg"; + case Architecture::Intel_Xe3LPG: + return "xe-3lpg"; + case Architecture::Mesa_Software: + return "software"; + case Architecture::Microsoft_WARP: + return "warp"; + case Architecture::Nvidia_Fermi: + return "fermi"; + case Architecture::Nvidia_Kepler: + return "kepler"; + case 
Architecture::Nvidia_Maxwell: + return "maxwell"; + case Architecture::Nvidia_Pascal: + return "pascal"; + case Architecture::Nvidia_Turing: + return "turing"; + case Architecture::Nvidia_Ampere: + return "ampere"; + case Architecture::Nvidia_Lovelace: + return "lovelace"; + case Architecture::Nvidia_Blackwell: + return "blackwell"; + case Architecture::Nvidia_Volta: + return "volta"; + case Architecture::QualcommPCI_Adreno4xx: + return "adreno-4xx"; + case Architecture::QualcommPCI_Adreno5xx: + return "adreno-5xx"; + case Architecture::QualcommPCI_Adreno6xx: + return "adreno-6xx"; + case Architecture::QualcommPCI_Adreno7xx: + return "adreno-7xx"; + case Architecture::QualcommPCI_Adreno8xx: + return "adreno-8xx"; + case Architecture::QualcommACPI_Adreno6xx: + return "adreno-6xx"; + case Architecture::QualcommACPI_Adreno7xx: + return "adreno-7xx"; + case Architecture::Samsung_RDNA2: + return "rdna-2"; + case Architecture::Samsung_RDNA3: + return "rdna-3"; + } + + return ""; + } + + DriverVersion::DriverVersion() = default; + + DriverVersion::DriverVersion(const std::initializer_list<uint16_t> &version) + { + assert(version.size() <= kMaxVersionFields); + driver_version_.assign(version.begin(), version.end()); + } + + uint16_t &DriverVersion::operator[](size_t i) + { + return driver_version_.operator[](i); + } + + const uint16_t &DriverVersion::operator[](size_t i) const + { + return driver_version_.operator[](i); + } + + uint32_t DriverVersion::size() const + { + return driver_version_.size(); + } + + std::string DriverVersion::toString() const + { + std::ostringstream oss; + if (!driver_version_.empty()) + { + // Convert all but the last element to avoid a trailing "." 
+ std::copy(driver_version_.begin(), driver_version_.end() - 1, std::ostream_iterator<uint16_t>(oss, ".")); + // Add the last element + oss << driver_version_.back(); + } + return oss.str(); + } + + + IntelGen GetIntelGen(PCIVendorID venderId, PCIDeviceID deviceId) + { + assert(gpu_info::IsIntel(venderId)); + + if (gpu_info::IsIntelGen7(venderId, deviceId)) + { + return IntelGen::Gen7; + } + else if (gpu_info::IsIntelGen8(venderId, deviceId)) + { + return IntelGen::Gen8; + } + else if (gpu_info::IsIntelGen9(venderId, deviceId)) + { + return IntelGen::Gen9; + } + else if (gpu_info::IsIntelGen11(venderId, deviceId)) + { + return IntelGen::Gen11; + } + else if (gpu_info::IsIntelGen12LP(venderId, deviceId)) + { + return IntelGen::Xe; + } + else if (gpu_info::IsIntelGen12HP(venderId, deviceId)) + { + return IntelGen::Xe; + } + else if (gpu_info::IsIntelXeLPG(venderId, deviceId)) + { + return IntelGen::Xe; + } + else if (gpu_info::IsIntelXe2LPG(venderId, deviceId)) + { + return IntelGen::Xe2; + } + else if (gpu_info::IsIntelXe2HPG(venderId, deviceId)) + { + return IntelGen::Xe2; + } + else if (gpu_info::IsIntelXe3LPG(venderId, deviceId)) + { + return IntelGen::Xe3; + } + else + { + return IntelGen::Unknown; + } + } + + QualcommACPIGen GetQualcommACPIGen(PCIVendorID venderId, PCIDeviceID deviceId) + { + assert(gpu_info::IsQualcommACPI(venderId)); + + if (gpu_info::IsQualcommACPIAdreno6xx(venderId, deviceId)) + { + return QualcommACPIGen::Adreno6xx; + } + else if (gpu_info::IsQualcommACPIAdreno7xx(venderId, deviceId)) + { + return QualcommACPIGen::Adreno7xx; + } + else + { + return QualcommACPIGen::Unknown; + } + } +} diff --git a/src/common/command_buffers/gpu/gpu_info.hpp b/src/common/command_buffers/gpu/gpu_info.hpp new file mode 100644 index 000000000..933afa1a5 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_info.hpp @@ -0,0 +1,192 @@ +// Copyright 2019 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, 
are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include <cstdint> +#include <string> +#include <vector> + +namespace commandbuffers::gpu_info +{ + using PCIVendorID = uint32_t; + using PCIDeviceID = uint32_t; + + // Vendor IDs + static constexpr PCIVendorID kVendorID_AMD = 0x1002; + static constexpr PCIVendorID kVendorID_Apple = 0x106b; + static constexpr PCIVendorID kVendorID_ARM = 0x13B5; + static constexpr PCIVendorID kVendorID_Broadcom = 0x14e4; + static constexpr PCIVendorID kVendorID_Google = 0x1AE0; + static constexpr PCIVendorID kVendorID_ImgTec = 0x1010; + static constexpr PCIVendorID kVendorID_Intel = 0x8086; + static constexpr PCIVendorID kVendorID_Mesa = 0x10005; + static constexpr PCIVendorID kVendorID_Microsoft = 0x1414; + static constexpr PCIVendorID kVendorID_Nvidia = 0x10DE; + static constexpr PCIVendorID kVendorID_QualcommPCI = 0x5143; + static constexpr PCIVendorID kVendorID_QualcommACPI = 0x4D4F4351; + static constexpr PCIVendorID kVendorID_Samsung = 0x144d; + static constexpr PCIVendorID kVendorID_Huawei = 0x19e5; + static constexpr uint32_t kMaxVersionFields = 4; + + // Vendor checks + bool IsAMD(PCIVendorID vendorId); + bool IsApple(PCIVendorID vendorId); + bool IsARM(PCIVendorID vendorId); + bool IsBroadcom(PCIVendorID vendorId); + bool IsGoogle(PCIVendorID vendorId); + bool IsImgTec(PCIVendorID vendorId); + bool IsIntel(PCIVendorID vendorId); + bool IsMesa(PCIVendorID vendorId); + bool IsMicrosoft(PCIVendorID vendorId); + bool IsNvidia(PCIVendorID vendorId); + bool IsQualcommPCI(PCIVendorID vendorId); + bool IsQualcommACPI(PCIVendorID vendorId); + bool IsSamsung(PCIVendorID vendorId); + bool IsHuawei(PCIVendorID vendorId); + + // Architecture checks + + // AMD architectures + bool IsAMDGCN1(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDGCN2(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDGCN3(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDGCN4(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDGCN5(PCIVendorID vendorId, PCIDeviceID deviceId); + bool 
IsAMDRDNA1(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDRDNA2(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDRDNA3(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDRDNA4(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsAMDCDNA1(PCIVendorID vendorId, PCIDeviceID deviceId); + + // ARM architectures + bool IsARMMidgard(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsARMBifrost(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsARMValhall(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsARMGen5(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Broadcom architectures + bool IsBroadcomVideoCore(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Google architectures + bool IsGoogleSwiftshader(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Img Tec architectures + bool IsImgTecRogue(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsImgTecFurian(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsImgTecAlbiorix(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Intel architectures + bool IsIntelGen7(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelGen8(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelGen9(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelGen11(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelGen12LP(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelGen12HP(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelXeLPG(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelXe2LPG(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelXe2HPG(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsIntelXe3LPG(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Mesa architectures + bool IsMesaSoftware(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Microsoft architectures + bool IsMicrosoftWARP(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Nvidia architectures + bool IsNvidiaFermi(PCIVendorID vendorId, PCIDeviceID deviceId); + bool 
IsNvidiaKepler(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaMaxwell(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaPascal(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaTuring(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaAmpere(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaLovelace(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaBlackwell(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsNvidiaVolta(PCIVendorID vendorId, PCIDeviceID deviceId); + + // QualcommPCI architectures + bool IsQualcommPCIAdreno4xx(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsQualcommPCIAdreno5xx(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsQualcommPCIAdreno6xx(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsQualcommPCIAdreno7xx(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsQualcommPCIAdreno8xx(PCIVendorID vendorId, PCIDeviceID deviceId); + + // QualcommACPI architectures + bool IsQualcommACPIAdreno6xx(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsQualcommACPIAdreno7xx(PCIVendorID vendorId, PCIDeviceID deviceId); + + // Samsung architectures + bool IsSamsungRDNA2(PCIVendorID vendorId, PCIDeviceID deviceId); + bool IsSamsungRDNA3(PCIVendorID vendorId, PCIDeviceID deviceId); + + // GPUAdapterInfo fields + std::string GetVendorName(PCIVendorID vendorId); + std::string GetArchitectureName(PCIVendorID vendorId, PCIDeviceID deviceId); + + class DriverVersion + { + public: + DriverVersion(); + DriverVersion(const std::initializer_list<uint16_t> &version); + + uint16_t &operator[](size_t i); + const uint16_t &operator[](size_t i) const; + + uint32_t size() const; + std::string toString() const; + + private: + std::vector<uint16_t> driver_version_; + }; + + // GPU generation is an internal concept, rather than a value defined by vendors + // Intel generations + enum class IntelGen + { + Unknown = 0, + Gen7 = 7, + Gen8 = 8, + Gen9 = 9, + Gen10 = 10, + Gen11 = 11, + Xe = 12, + Xe2 = 13, + Xe3 = 14, 
+ }; + + // Qualcomm ACPI generations + enum class QualcommACPIGen + { + Unknown = 0, + Adreno6xx = 6, + Adreno7xx = 7, + }; + + IntelGen GetIntelGen(PCIVendorID venderId, PCIDeviceID deviceId); + QualcommACPIGen GetQualcommACPIGen(PCIVendorID venderId, PCIDeviceID deviceId); +} diff --git a/src/common/command_buffers/gpu/gpu_instance.cpp b/src/common/command_buffers/gpu/gpu_instance.cpp new file mode 100644 index 000000000..4c94e32f2 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_instance.cpp @@ -0,0 +1,101 @@ +#include +#include +#include + +using namespace std; + +namespace commandbuffers +{ + // static + Ref GPUInstance::Create(const GPUInstanceDescriptor *descriptor) + { + static constexpr GPUInstanceDescriptor kDefaultDesc = {}; + if (descriptor == nullptr) + { + descriptor = &kDefaultDesc; + } + + auto instance = AcquireRef(new GPUInstance()); + instance->initialize(*descriptor); + return instance; + } + + GPUInstance::GPUInstance() + { + } + + Ref GPUInstance::requestAdapter(const RequestAdapterOptions &options) + { + auto adapters = enumerateAdapters(&options); + if (adapters.empty()) + { + return nullptr; + } + return adapters[0]; + } + + vector> GPUInstance::enumerateAdapters(const RequestAdapterOptions *options) + { + static constexpr RequestAdapterOptions kDefaultOptions = {}; + if (options == nullptr) + { + options = &kDefaultOptions; + } + + vector> adapters; + for (const auto &physicalDevice : enumeratePhysicalDevices(*options)) + { + assert(physicalDevice->supportsFeatureLevel(options->featureLevel, this)); + adapters.push_back(createAdapter(physicalDevice, options->featureLevel, options->powerPreference)); + } + return SortAdapters(std::move(adapters), *options); + } + + void GPUInstance::registerBackend(gpu::BackendConnection *backend) + { + assert(backend != nullptr && "Backend connection cannot be null."); + backend_ = unique_ptr(backend); + } + + void GPUInstance::addDevice(Ref) + { + } + + void GPUInstance::removeDevice(Ref) + { + } + 
+ bool GPUInstance::hasFeature(GPUFeatureName feature) const + { + return instance_features_.find(feature) != instance_features_.end(); + } + + void GPUInstance::initialize(const GPUInstanceDescriptor &descriptor) + { + } + + Ref GPUInstance::createAdapter(Ref physicalDevice, + GPUFeatureLevel featureLevel, + GPUPowerPreference powerPreference) + { + return AcquireRef(new GPUAdapterBase(shared_from_this(), + physicalDevice, + featureLevel, + powerPreference)); + } + + gpu::BackendConnection *GPUInstance::getBackendConnection() const + { + return backend_.get(); + } + + vector> GPUInstance::enumeratePhysicalDevices(const RequestAdapterOptions &options) + { + vector> discoveredPhysicalDevices; + if (backend_) + { + discoveredPhysicalDevices = backend_->discoverPhysicalDevices(options); + } + return discoveredPhysicalDevices; + } +} diff --git a/src/common/command_buffers/gpu/gpu_instance.hpp b/src/common/command_buffers/gpu/gpu_instance.hpp new file mode 100644 index 000000000..9ba3590fe --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_instance.hpp @@ -0,0 +1,66 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + namespace gpu + { + class BackendConnection; + class PhysicalDeviceBase; + } + + struct GPUInstanceDescriptor + { + size_t requiredFeatureCount = 0; + GPUFeatureName const *requiredFeatures = nullptr; + GPUSupportedLimits *requiredLimits = nullptr; + }; + + class GPUInstance final : public std::enable_shared_from_this + { + public: + static Ref Create(const GPUInstanceDescriptor *descriptor = nullptr); + + // Discovers and returns a single adapter based on the `options`. + Ref requestAdapter(const RequestAdapterOptions &options); + + // Discovers and returns a vector of adapters. + // All systems adapters that can be found are returned if no options are passed. + // Otherwise, returns adapters based on the `options`. 
+ std::vector> enumerateAdapters(const RequestAdapterOptions *options = nullptr); + + void registerBackend(gpu::BackendConnection *backend); + void addDevice(Ref); + void removeDevice(Ref); + + bool hasFeature(GPUFeatureName feature) const; + + // Allow AcquireRef to access the private constructor. + friend Ref(::AcquireRef)(GPUInstance *ptr); + + private: + explicit GPUInstance(); + virtual ~GPUInstance() = default; + + void initialize(const GPUInstanceDescriptor &descriptor); + Ref createAdapter(Ref physicalDevice, + GPUFeatureLevel featureLevel, + GPUPowerPreference powerPreference); + + gpu::BackendConnection *getBackendConnection() const; + + // Enumerate physical devices according to options and return them. + std::vector> enumeratePhysicalDevices(const RequestAdapterOptions &options); + + private: + std::unordered_set instance_features_; + std::vector> devices_list_; + std::unique_ptr backend_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_pass_encoder_base.cpp b/src/common/command_buffers/gpu/gpu_pass_encoder_base.cpp new file mode 100644 index 000000000..5474209bf --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_pass_encoder_base.cpp @@ -0,0 +1,27 @@ +#include +#include + +namespace commandbuffers +{ + GPUPassEncoderBase::GPUPassEncoderBase() + : ended_(false) + { + } + + void GPUPassEncoderBase::begin() + { + ended_ = false; + assert(command_buffer_ != nullptr && "Command buffer must be initialized before beginning pass encoder."); + } + + void GPUPassEncoderBase::end() + { + ended_ = true; + } + + const GPUCommandBufferBase &GPUPassEncoderBase::commandBuffer() const + { + assert(command_buffer_ != nullptr && "Command buffer is not initialized."); + return *command_buffer_; + } +} diff --git a/src/common/command_buffers/gpu/gpu_pass_encoder_base.hpp b/src/common/command_buffers/gpu/gpu_pass_encoder_base.hpp index 7e7ec9715..7d3ba02bc 100644 --- a/src/common/command_buffers/gpu/gpu_pass_encoder_base.hpp +++ 
b/src/common/command_buffers/gpu/gpu_pass_encoder_base.hpp @@ -2,17 +2,15 @@ #include #include -#include "./gpu_command_buffer.hpp" namespace commandbuffers { + class GPUCommandBufferBase; + class GPUPassEncoderBase { public: - GPUPassEncoderBase() - : ended_(false) - { - } + GPUPassEncoderBase(); virtual ~GPUPassEncoderBase() = default; public: @@ -25,24 +23,12 @@ namespace commandbuffers return false; } - virtual void begin() - { - ended_ = false; - assert(command_buffer_ != nullptr && "Command buffer must be initialized before beginning pass encoder."); - } - virtual void end() - { - ended_ = true; - } - - const GPUCommandBuffer &commandBuffer() const - { - assert(command_buffer_ != nullptr && "Command buffer is not initialized."); - return *command_buffer_; - } + virtual void begin(); + virtual void end(); + const GPUCommandBufferBase &commandBuffer() const; protected: - std::unique_ptr command_buffer_; + std::unique_ptr command_buffer_; bool ended_ = false; }; } diff --git a/src/common/command_buffers/gpu/gpu_per_stage.cpp b/src/common/command_buffers/gpu/gpu_per_stage.cpp new file mode 100644 index 000000000..d911d073d --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_per_stage.cpp @@ -0,0 +1,21 @@ +#include + +using namespace std; +using namespace commandbuffers::gpu_constants; + +namespace commandbuffers +{ + // static + bitset GPUStages::IterateStages(GPUShaderStage stages) + { + bitset bits(static_cast(stages)); + return bits; + } + + // static + GPUShaderStage GPUStages::StageBit(GPUSingleShaderStage stage) + { + assert(static_cast(stage) < kNumStages); + return static_cast(1 << static_cast(stage)); + } +} diff --git a/src/common/command_buffers/gpu/gpu_per_stage.hpp b/src/common/command_buffers/gpu/gpu_per_stage.hpp new file mode 100644 index 000000000..196586fbd --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_per_stage.hpp @@ -0,0 +1,115 @@ +// Copyright 2017 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, 
with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#pragma once + +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + enum class GPUSingleShaderStage + { + kVertex, + kFragment, + kCompute + }; + + static_assert(static_cast(GPUSingleShaderStage::kVertex) < gpu_constants::kNumStages); + static_assert(static_cast(GPUSingleShaderStage::kFragment) < gpu_constants::kNumStages); + static_assert(static_cast(GPUSingleShaderStage::kCompute) < gpu_constants::kNumStages); + + static_assert(static_cast(GPUShaderStage::kVertex) == + (1 << static_cast(GPUSingleShaderStage::kVertex))); + static_assert(static_cast(GPUShaderStage::kFragment) == + (1 << static_cast(GPUSingleShaderStage::kFragment))); + static_assert(static_cast(GPUShaderStage::kCompute) == + (1 << static_cast(GPUSingleShaderStage::kCompute))); + + class GPUStages + { + public: + static std::bitset IterateStages(GPUShaderStage stages); + static GPUShaderStage StageBit(GPUSingleShaderStage stage); + + static constexpr GPUShaderStage kAllStages = static_cast((1 << gpu_constants::kNumStages) - 1); + }; + + template + class GPUPerStage + { + public: + GPUPerStage() = default; + explicit GPUPerStage(const T &initialValue) + { + data_.fill(initialValue); + } + + T &operator[](GPUSingleShaderStage stage) + { + assert(static_cast(stage) < gpu_constants::kNumStages); + return data_[static_cast(stage)]; + } + + const T &operator[](GPUSingleShaderStage stage) const + { + assert(static_cast(stage) < gpu_constants::kNumStages); + return data_[static_cast(stage)]; + } + + T &operator[](GPUShaderStage stageBit) + { + uint32_t bit = static_cast(stageBit); + assert(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << gpu_constants::kNumStages)); + return data_[Log2(bit)]; + } + const T &operator[](GPUShaderStage stageBit) const + { + uint32_t bit = static_cast(stageBit); + assert(bit != 0 && IsPowerOfTwo(bit) && bit <= (1 << gpu_constants::kNumStages)); + return data_[Log2(bit)]; + } + + private: + static bool IsPowerOfTwo(uint64_t n) + { + assert(n != 0); 
+ return (n & (n - 1)) == 0; + } + static uint32_t Log2(uint32_t value) + { + assert(value != 0); + return 31 - std::countl_zero(value); + } + + private: + std::array data_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_pipeline.cpp b/src/common/command_buffers/gpu/gpu_pipeline.cpp new file mode 100644 index 000000000..5f40f5534 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_pipeline.cpp @@ -0,0 +1,52 @@ +#include +#include +#include + +using namespace std; + +namespace commandbuffers +{ + GPUPipelineBase::GPUPipelineBase(Ref device, + Ref layout, + string_view label) + : GPUHandle(device, label) + { + } + + GPUPipelineBase::GPUPipelineBase(shared_ptr device, + GPUHandle::ErrorTag, + string_view label) + : GPUHandle(device, ErrorTag{}, label) + { + } + + const GPUComputePipelineBase *GPUPipelineBase::getAsComputePipeline() const + { + return nullptr; + } + + GPUComputePipelineBase *GPUPipelineBase::getAsComputePipeline() + { + return nullptr; + } + + const GPURenderPipelineBase *GPUPipelineBase::getAsRenderPipeline() const + { + return nullptr; + } + + GPURenderPipelineBase *GPUPipelineBase::getAsRenderPipeline() + { + return nullptr; + } + + GPUPipelineLayoutBase *GPUPipelineBase::layout() + { + return layout_.get(); + } + + const GPUPipelineLayoutBase *GPUPipelineBase::layout() const + { + return layout_.get(); + } +} diff --git a/src/common/command_buffers/gpu/gpu_pipeline.hpp b/src/common/command_buffers/gpu/gpu_pipeline.hpp index a6bcdc46c..70264a6b8 100644 --- a/src/common/command_buffers/gpu/gpu_pipeline.hpp +++ b/src/common/command_buffers/gpu/gpu_pipeline.hpp @@ -4,36 +4,34 @@ #include #include -#include "./gpu_base.hpp" -#include "./gpu_bind_group.hpp" +#include +#include +#include +#include namespace commandbuffers { - class GPUPipelineLayout : public GPUHandle - { - private: - std::vector bind_group_layouts_; - }; + class GPURenderPipelineBase; + class GPUComputePipelineBase; - class GPURenderPipelineDescriptor + class GPUPipelineBase 
: public GPUHandle { public: - std::optional label; - }; + virtual const GPUComputePipelineBase *getAsComputePipeline() const; + virtual GPUComputePipelineBase *getAsComputePipeline(); - class GPURenderPipeline : public GPUHandle - { - using GPUHandle::GPUHandle; + virtual const GPURenderPipelineBase *getAsRenderPipeline() const; + virtual GPURenderPipelineBase *getAsRenderPipeline(); - public: - GPUBindGroupLayout &getBindGroupLayout(size_t index) - { - if (index < bind_group_layouts_.size()) - return bind_group_layouts_[index]; - throw std::out_of_range("Bind group layout index out of range"); - } + GPUPipelineLayoutBase *layout(); + const GPUPipelineLayoutBase *layout() const; + + protected: + GPUPipelineBase(Ref device, Ref layout, std::string_view label); + GPUPipelineBase(Ref device, GPUHandle::ErrorTag, std::string_view label); private: - std::vector bind_group_layouts_; + GPUShaderStage stage_mask_ = GPUShaderStage::kNone; + Ref layout_; }; } diff --git a/src/common/command_buffers/gpu/gpu_pipeline_layout.hpp b/src/common/command_buffers/gpu/gpu_pipeline_layout.hpp new file mode 100644 index 000000000..571f9adf7 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_pipeline_layout.hpp @@ -0,0 +1,22 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include + +namespace commandbuffers +{ + struct GPUPipelineLayoutDescriptor + { + }; + + class GPUPipelineLayoutBase : public GPUHandle + { + private: + std::vector bind_group_layouts_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_queue.cpp b/src/common/command_buffers/gpu/gpu_queue.cpp new file mode 100644 index 000000000..6d5aba28b --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_queue.cpp @@ -0,0 +1,97 @@ +#include +#include + +namespace commandbuffers +{ + class GPUErrorQueue : public GPUQueueBase + { + public: + explicit GPUErrorQueue(Ref device, std::string_view label) + : GPUQueueBase(device, GPUHandle::kError, label) + { + } + + private: + gpu::MaybeError 
submitImpl(uint32_t commandCount, GPUCommandBufferBase *const *commands) override + { + assert(false); + } + bool hasPendingCommands() const override + { + assert(false); + } + gpu::MaybeError waitForIdleForDestructionImpl() override + { + return {}; + } + }; + + GPUQueueBase::GPUQueueBase(Ref device, const GPUQueueDescriptor &descriptor) + : gpu::ExecutionQueueBase(device, descriptor.label) + { + } + + GPUQueueBase::GPUQueueBase(Ref device, GPUHandle::ErrorTag, std::string_view label) + : gpu::ExecutionQueueBase(device, label) + { + } + + // static + Ref GPUQueueBase::MakeError(Ref device, std::string_view label) + { + return AcquireRef(new GPUErrorQueue(device, label)); + } + + void GPUQueueBase::submit(uint32_t commandCount, GPUCommandBufferBase *const *commands) + { + } + + void GPUQueueBase::writeBuffer(GPUBufferBase *buffer, + uint64_t bufferOffset, + const void *data, + size_t size) + { + } + + gpu::MaybeError GPUQueueBase::writeBufferImpl(GPUBufferBase *buffer, + uint64_t bufferOffset, + const void *data, + size_t size) + { + return buffer->uploadData(bufferOffset, data, size); + } + + gpu::MaybeError GPUQueueBase::validateSubmit(uint32_t commandCount, GPUCommandBufferBase *const *commands) const + { + return {}; + } + + gpu::MaybeError GPUQueueBase::validateOnSubmittedWorkDone() const + { + return {}; + } + + gpu::MaybeError GPUQueueBase::submitInternal(uint32_t commandCount, GPUCommandBufferBase *const *commands) + { + assert(device()->validateIsAlive().IsSuccess()); + + if (device()->isValidationEnabled()) + { + assert(validateSubmit(commandCount, commands).IsSuccess()); + } + assert(!isError()); + + inSubmit = true; + { + if (submitImpl(commandCount, commands).IsError()) + { + assert(false && "submitImpl failed"); + } + } + inSubmit = false; + + // Call tick() to flush pending work. 
+ assert(device()->tick().IsSuccess()); + return {}; + } +} diff --git a/src/common/command_buffers/gpu/gpu_queue.hpp b/src/common/command_buffers/gpu/gpu_queue.hpp new file mode 100644 index 000000000..ea6542284 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_queue.hpp @@ -0,0 +1,51 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + struct GPUQueueDescriptor + { + std::string_view label; + }; + + class GPUQueueBase : public gpu::ExecutionQueueBase + { + public: + static Ref MakeError(Ref device, std::string_view label); + + GPUHandleType type() const override + { + return GPUHandleType::kQueue; + } + + void submit(uint32_t commandCount, GPUCommandBufferBase *const *commands); + void writeBuffer(GPUBufferBase *buffer, + uint64_t bufferOffset, + const void *data, + size_t size); + + protected: + GPUQueueBase(Ref device, const GPUQueueDescriptor &descriptor); + GPUQueueBase(Ref device, GPUHandle::ErrorTag, std::string_view label); + + virtual gpu::MaybeError submitImpl(uint32_t commandCount, GPUCommandBufferBase *const *commands) = 0; + virtual gpu::MaybeError writeBufferImpl(GPUBufferBase *buffer, + uint64_t bufferOffset, + const void *data, + size_t size); + + private: + gpu::MaybeError validateSubmit(uint32_t commandCount, GPUCommandBufferBase *const *commands) const; + gpu::MaybeError validateOnSubmittedWorkDone() const; + + gpu::MaybeError submitInternal(uint32_t commandCount, GPUCommandBufferBase *const *commands); + }; +} diff --git a/src/common/command_buffers/gpu/gpu_render_bundle.cpp b/src/common/command_buffers/gpu/gpu_render_bundle.cpp new file mode 100644 index 000000000..e0f2d4747 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_render_bundle.cpp @@ -0,0 +1,78 @@ +#include +#include +#include +#include +#include + +using namespace std; + +namespace commandbuffers +{ + GPURenderBundle::GPURenderBundle(GPURenderBundleEncoder *encoder, + const 
GPURenderBundleDescriptor *descriptor, + Ref attachment_state, + bool depth_readonly, + bool stencil_readonly, + gpu::RenderPassResourceUsage resource_usage, + gpu::IndirectDrawMetadata indirect_draw_metadata) + : GPUHandle(encoder->device(), kLabelNotImplemented) + , commands_(encoder->acquireCommands()) + , attachment_state_(std::move(attachment_state)) + , indirect_draw_metadata_(std::move(indirect_draw_metadata)) + , depth_readonly_(depth_readonly) + , stencil_readonly_(stencil_readonly) + , resource_usage_(std::move(resource_usage)) + , encoder_label_(encoder->getLabel()) + { + } + + GPURenderBundle::GPURenderBundle(Ref device, ErrorTag error_tag, string_view label) + : GPUHandle(device, error_tag, label) + { + } + + const string &GPURenderBundle::getEncoderLabel() const + { + return encoder_label_; + } + + void GPURenderBundle::setEncoderLabel(string encoder_label) + { + encoder_label_ = encoder_label; + } + + gpu::CommandIterator *GPURenderBundle::getCommands() + { + return &commands_; + } + + const gpu::AttachmentState *GPURenderBundle::getAttachmentState() const + { + return attachment_state_.get(); + } + + bool GPURenderBundle::isDepthReadOnly() const + { + return depth_readonly_; + } + + bool GPURenderBundle::isStencilReadOnly() const + { + return stencil_readonly_; + } + + uint64_t GPURenderBundle::getDrawCount() const + { + return draw_count_; + } + + const gpu::RenderPassResourceUsage &GPURenderBundle::getResourceUsage() const + { + return resource_usage_; + } + + const gpu::IndirectDrawMetadata &GPURenderBundle::getIndirectDrawMetadata() + { + return indirect_draw_metadata_; + } +} diff --git a/src/common/command_buffers/gpu/gpu_render_bundle.hpp b/src/common/command_buffers/gpu/gpu_render_bundle.hpp new file mode 100644 index 000000000..9d30ca561 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_render_bundle.hpp @@ -0,0 +1,60 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ 
+ class GPUDeviceBase; + class GPURenderBundleEncoder; + struct GPURenderBundleDescriptor + { + std::string_view label; + }; + + class GPURenderBundle final : public GPUHandle + { + public: + GPURenderBundle(GPURenderBundleEncoder *encoder, + const GPURenderBundleDescriptor *descriptor, + Ref attachment_state, + bool depth_readonly, + bool stencil_readonly, + gpu::RenderPassResourceUsage resource_usage, + gpu::IndirectDrawMetadata indirect_draw_metadata); + + GPUHandleType type() const override + { + return GPUHandleType::kRenderBundle; + } + + const std::string &getEncoderLabel() const; + void setEncoderLabel(std::string); + + gpu::CommandIterator *getCommands(); + + const gpu::AttachmentState *getAttachmentState() const; + bool isDepthReadOnly() const; + bool isStencilReadOnly() const; + uint64_t getDrawCount() const; + const gpu::RenderPassResourceUsage &getResourceUsage() const; + const gpu::IndirectDrawMetadata &getIndirectDrawMetadata(); + + private: + GPURenderBundle(Ref device, ErrorTag error_tag, std::string_view label); + + gpu::CommandIterator commands_; + gpu::IndirectDrawMetadata indirect_draw_metadata_; + Ref attachment_state_; + bool depth_readonly_; + bool stencil_readonly_; + uint64_t draw_count_; + gpu::RenderPassResourceUsage resource_usage_; + std::string encoder_label_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_render_bundle_encoder.cpp b/src/common/command_buffers/gpu/gpu_render_bundle_encoder.cpp new file mode 100644 index 000000000..cacd52ecb --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_render_bundle_encoder.cpp @@ -0,0 +1,46 @@ +#include +#include +#include + +namespace commandbuffers +{ + GPURenderBundleEncoder::GPURenderBundleEncoder(Ref device, + const GPURenderBundleEncoderDescriptor *descriptor) + : RenderEncoderBase(device, + descriptor->label, + &bundle_encoding_context_, + nullptr, + descriptor->depthReadOnly, + descriptor->stencilReadOnly) + , bundle_encoding_context_(device, this) + { + } + + 
GPURenderBundleEncoder::GPURenderBundleEncoder(Ref device, + ErrorTag error_tag, + std::string_view label) + : RenderEncoderBase(device, &bundle_encoding_context_, error_tag, label) + , bundle_encoding_context_(device, error_tag) + { + } + + Ref GPURenderBundleEncoder::finish(const GPURenderBundleEncoderDescriptor *descriptor) + { + assert(false && "GPURenderBundleEncoder::finish is not implemented"); + } + + gpu::CommandIterator GPURenderBundleEncoder::acquireCommands() + { + return bundle_encoding_context_.acquireCommands(); + } + + gpu::ResultOrError> GPURenderBundleEncoder::finishInternal(const GPURenderBundleEncoderDescriptor *descriptor) + { + assert(false && "GPURenderBundleEncoder::finishInternal is not implemented"); + } + + gpu::MaybeError GPURenderBundleEncoder::validateFinish(const gpu::RenderPassResourceUsage &usages) const + { + return {}; + } +} diff --git a/src/common/command_buffers/gpu/gpu_render_bundle_encoder.hpp b/src/common/command_buffers/gpu/gpu_render_bundle_encoder.hpp new file mode 100644 index 000000000..8f5089a05 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_render_bundle_encoder.hpp @@ -0,0 +1,47 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + class GPURenderBundle; + + struct GPURenderBundleEncoderDescriptor + { + std::string_view label; + size_t colorFormatCount; + GPUTextureFormat const *colorFormats = nullptr; + GPUTextureFormat depthStencilFormat = GPUTextureFormat::kUndefined; + uint32_t sampleCount = 1; + bool depthReadOnly = false; + bool stencilReadOnly = false; + }; + + class GPURenderBundleEncoder final : public gpu::RenderEncoderBase + { + public: + GPUHandleType type() const override + { + return GPUHandleType::kRenderBundleEncoder; + } + + Ref finish(const GPURenderBundleEncoderDescriptor *descriptor); + gpu::CommandIterator acquireCommands(); + + private: + GPURenderBundleEncoder(Ref device, const 
GPURenderBundleEncoderDescriptor *descriptor); + GPURenderBundleEncoder(Ref device, ErrorTag error_tag, std::string_view label); + + gpu::ResultOrError> finishInternal(const GPURenderBundleEncoderDescriptor *descriptor); + gpu::MaybeError validateFinish(const gpu::RenderPassResourceUsage &usages) const; + + gpu::EncodingContext bundle_encoding_context_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_render_pipeline.cpp b/src/common/command_buffers/gpu/gpu_render_pipeline.cpp new file mode 100644 index 000000000..4a2edce24 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_render_pipeline.cpp @@ -0,0 +1,13 @@ +#include +#include + +using namespace std; + +namespace commandbuffers +{ + GPURenderPipelineBase::GPURenderPipelineBase(Ref device, + const GPURenderPipelineDescriptor &descriptor) + : GPUPipelineBase(device, descriptor.layout, descriptor.label) + { + } +} diff --git a/src/common/command_buffers/gpu/gpu_render_pipeline.hpp b/src/common/command_buffers/gpu/gpu_render_pipeline.hpp new file mode 100644 index 000000000..448c1bf2e --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_render_pipeline.hpp @@ -0,0 +1,29 @@ +#pragma once + +#include +#include + +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + struct GPURenderPipelineDescriptor + { + std::string_view label; + Ref layout = nullptr; + GPUVertexState vertex; + GPUPrimitiveState primitive; + GPUDepthStencilState const *depthStencil = nullptr; + GPUMultisampleState multisample; + GPUFragmentState const *fragment = nullptr; + }; + + class GPURenderPipelineBase : public GPUPipelineBase + { + public: + GPURenderPipelineBase(Ref device, const GPURenderPipelineDescriptor &descriptor); + }; +} diff --git a/src/common/command_buffers/gpu/gpu_renderpass_encoder.cpp b/src/common/command_buffers/gpu/gpu_renderpass_encoder.cpp new file mode 100644 index 000000000..a98593b64 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_renderpass_encoder.cpp @@ -0,0 +1,58 @@ 
+#include + +namespace commandbuffers +{ + // static + Ref GPURenderPassEncoder::Create(const GPURenderPassDescriptor &descriptor, + GPUCommandEncoder *encoder, + gpu::EncodingContext *context, + uint32_t renderTargetWidth, + uint32_t renderTargetHeight, + bool depthReadOnly, + bool stencilReadOnly) + { + return AcquireRef(new GPURenderPassEncoder( + nullptr, + descriptor, + encoder, + context, + renderTargetWidth, + renderTargetHeight, + depthReadOnly, + stencilReadOnly)); + } + // static + Ref GPURenderPassEncoder::MakeError(GPUCommandEncoder *encoder, + gpu::EncodingContext *context, + std::string_view label) + { + return AcquireRef( + new GPURenderPassEncoder( + nullptr, + encoder, + context, + GPUHandle::kError, + label)); + } + + GPURenderPassEncoder::GPURenderPassEncoder(Ref device, + const GPURenderPassDescriptor &descriptor, + GPUCommandEncoder *commandEncoder, + gpu::EncodingContext *encodingContext, + uint32_t renderTargetWidth, + uint32_t renderTargetHeight, + bool depthReadOnly, + bool stencilReadOnly) + : GPUHandle(device, descriptor.label.value_or("GPURenderPassEncoder")) + { + } + + GPURenderPassEncoder::GPURenderPassEncoder(Ref device, + GPUCommandEncoder *commandEncoder, + gpu::EncodingContext *encodingContext, + ErrorTag errorTag, + std::string_view label) + : GPUHandle(device, errorTag, label) + { + } +} diff --git a/src/common/command_buffers/gpu/gpu_renderpass_encoder.hpp b/src/common/command_buffers/gpu/gpu_renderpass_encoder.hpp index 04305f4c6..7c4ce24dd 100644 --- a/src/common/command_buffers/gpu/gpu_renderpass_encoder.hpp +++ b/src/common/command_buffers/gpu/gpu_renderpass_encoder.hpp @@ -2,16 +2,20 @@ #include #include +#include -#include "./gpu_base.hpp" -#include "./gpu_pass_encoder_base.hpp" -#include "./gpu_buffer.hpp" -#include "./gpu_command_buffer.hpp" -#include "./gpu_pipeline.hpp" -#include "./gpu_texture_view.hpp" +#include +#include +#include +#include +#include +#include +#include namespace commandbuffers { + class 
GPUCommandEncoder; + class GPURenderPassDescriptor { public: @@ -32,8 +36,8 @@ namespace commandbuffers float clearColor[4] = {0.0f, 0.0f, 0.0f, 0.0f}; LoadOp loadOp; StoreOp storeOp; - std::optional resolveTarget; - GPUTextureView view; + std::weak_ptr resolveTarget; + std::weak_ptr view; }; class DepthStencilAttachment @@ -47,7 +51,7 @@ namespace commandbuffers std::optional stencilReadOnly; std::optional stencilLoadOp; std::optional stencilStoreOp; - GPUTextureView view; + std::weak_ptr view; }; public: @@ -57,80 +61,62 @@ namespace commandbuffers std::optional depthStencilAttachment; }; - class GPURenderPassEncoder : public GPUPassEncoderBase, - public GPUHandle + class GPURenderPassEncoder final : public GPUPassEncoderBase, + public GPUHandle { public: - GPURenderPassEncoder(std::string label) - : GPUPassEncoderBase() - , GPUHandle(label) + static Ref Create(const GPURenderPassDescriptor &descriptor, + GPUCommandEncoder *encoder, + gpu::EncodingContext *context, + uint32_t renderTargetWidth, + uint32_t renderTargetHeight, + bool depthReadOnly, + bool stencilReadOnly); + static Ref MakeError(GPUCommandEncoder *encoder, + gpu::EncodingContext *context, + std::string_view label); + + GPUHandleType type() const override final { + return GPUHandleType::kRenderPassEncoder; } - - public: bool isRenderPassEncoder() const override { return true; } - void draw(uint32_t vertex_count, - uint32_t instance_count = 1, - uint32_t first_vertex = 0, - uint32_t first_instance = 0) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(vertex_count, - instance_count, - first_vertex, - first_instance); - } - void drawIndexed(uint32_t index_count, - uint32_t instance_count = 1, - uint32_t first_index = 0, - int32_t base_vertex = 0, - uint32_t first_instance = 0) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(index_count, - instance_count, - first_index, - base_vertex, - first_instance); - } - void setViewport(float x, float y, float width, float height, float 
min_depth = 0.0f, float max_depth = 1.0f) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(x, y, width, height, min_depth, max_depth); - } - void setScissorRect(float x, float y, float width, float height) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(x, y, width, height); - } - void setPipeline(const GPURenderPipeline &pipeline) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(pipeline); - } - void setIndexBuffer(const GPUBuffer &buffer, GPUIndexFormat index_format, uint32_t offset, uint32_t size) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(buffer, index_format, offset, size); - } - void setVertexBuffer(uint32_t slot, const GPUBuffer &buffer, uint32_t offset = 0, uint32_t size = 0) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(slot, buffer, offset, size); - } - void setBlendConstant(float r, float g, float b, float a) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(r, g, b, a); - } - void setStencilReference(uint32_t ref) - { - if (!ended_) [[likely]] - command_buffer_->addCommand(ref); - } + void draw( + uint32_t vertex_count, + uint32_t instance_count = 1, + uint32_t first_vertex = 0, + uint32_t first_instance = 0); + void drawIndexed( + uint32_t index_count, + uint32_t instance_count = 1, + uint32_t first_index = 0, + int32_t base_vertex = 0, + uint32_t first_instance = 0); + void setViewport(float x, float y, float width, float height, float min_depth = 0.0f, float max_depth = 1.0f); + void setScissorRect(float x, float y, float width, float height); + void setPipeline(const GPURenderPipelineBase &pipeline); + void setIndexBuffer(const GPUBufferBase &buffer, GPUIndexFormat index_format, uint32_t offset, uint32_t size); + void setVertexBuffer(uint32_t slot, const GPUBufferBase &buffer, uint32_t offset = 0, uint32_t size = 0); + void setBlendConstant(float r, float g, float b, float a); + void setStencilReference(uint32_t ref); + + protected: + GPURenderPassEncoder(Ref device, + const 
GPURenderPassDescriptor &descriptor, + GPUCommandEncoder *commandEncoder, + gpu::EncodingContext *encodingContext, + uint32_t renderTargetWidth, + uint32_t renderTargetHeight, + bool depthReadOnly, + bool stencilReadOnly); + GPURenderPassEncoder(Ref device, + GPUCommandEncoder *commandEncoder, + gpu::EncodingContext *encodingContext, + ErrorTag errorTag, + std::string_view label); }; } diff --git a/src/common/command_buffers/gpu/gpu_sampler.cpp b/src/common/command_buffers/gpu/gpu_sampler.cpp new file mode 100644 index 000000000..b940dd017 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_sampler.cpp @@ -0,0 +1,5 @@ +#include + +namespace commandbuffers +{ +} diff --git a/src/common/command_buffers/gpu/gpu_sampler.hpp b/src/common/command_buffers/gpu/gpu_sampler.hpp new file mode 100644 index 000000000..e986ce88a --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_sampler.hpp @@ -0,0 +1,10 @@ +#pragma once + +#include + +namespace commandbuffers +{ + class GPUSamplerBase : public GPUHandle + { + }; +} diff --git a/src/common/command_buffers/gpu/gpu_shader_module.cpp b/src/common/command_buffers/gpu/gpu_shader_module.cpp new file mode 100644 index 000000000..cbc719af3 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_shader_module.cpp @@ -0,0 +1,53 @@ +#include +#include "utility.hpp" + +using namespace std; + +namespace commandbuffers +{ + GPUShaderModuleBase::GPUShaderModuleBase(Ref device, + const GPUShaderModuleDescriptor &descriptor, + vector internal_extensions) + : GPUHandle(device, descriptor.label) + , type_(Type::kGLSL) + , original_spirv_() + , wgsl_code_(descriptor.code) + , internal_extensions_(move(internal_extensions)) + { + } + + GPUShaderModuleBase::GPUShaderModuleBase(Ref device, + GPUHandle::ErrorTag tag, + string_view label, + gpu::ParsedCompilationMessages &&compilation_messages) + : GPUHandle(device, tag, label) + , type_(Type::kUndefined) + { + } + + // static + Ref GPUShaderModuleBase::MakeError(Ref device, + string_view label, + 
gpu::ParsedCompilationMessages &&compilation_messages) + { + return AcquireRef(new GPUShaderModuleBase( + device, + GPUHandle::kError, + label, + move(compilation_messages))); + } + + void GPUShaderModuleBase::initialize() + { + } + + std::unique_ptr GPUShaderModuleBase::getInitializationError() + { + return nullptr; + } + + bool GPUShaderModuleBase::hasEntryPoint(std::string_view entry_point) const + { + return false; + } +} diff --git a/src/common/command_buffers/gpu/gpu_shader_module.hpp b/src/common/command_buffers/gpu/gpu_shader_module.hpp new file mode 100644 index 000000000..2210f44b2 --- /dev/null +++ b/src/common/command_buffers/gpu/gpu_shader_module.hpp @@ -0,0 +1,64 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + + struct GPUShaderModuleDescriptor + { + std::string_view label; + std::string_view code; + std::vector hints; + }; + + class GPUShaderModuleBase : public GPUHandle + { + public: + static Ref MakeError(Ref device, + std::string_view label, + gpu::ParsedCompilationMessages &&compilation_messages); + + GPUShaderModuleBase(Ref device, + const GPUShaderModuleDescriptor &descriptor, + std::vector internal_extensions); + + GPUHandleType type() const override + { + return GPUHandleType::kShaderModule; + } + + void initialize(); + std::unique_ptr getInitializationError(); + + bool hasEntryPoint(std::string_view entry_point) const; + + private: + GPUShaderModuleBase(Ref device, + GPUHandle::ErrorTag tag, + std::string_view label, + gpu::ParsedCompilationMessages &&compilation_messages); + + enum class Type : uint8_t + { + kUndefined, + kGLSL, + kSpirv, + kWGSL, + }; + Type type_; + std::vector original_spirv_; + std::string wgsl_code_; + + const std::vector internal_extensions_; + }; +} diff --git a/src/common/command_buffers/gpu/gpu_texture.hpp b/src/common/command_buffers/gpu/gpu_texture.hpp index 7a30052cd..8a10499f0 100644 --- 
a/src/common/command_buffers/gpu/gpu_texture.hpp +++ b/src/common/command_buffers/gpu/gpu_texture.hpp @@ -1,150 +1,35 @@ #pragma once -#include "./gpu_base.hpp" +#include namespace commandbuffers { - enum class GPUTextureFormat - { - // 8-bit formats - kR8UNORM, - kR8SNORM, - kR8UINT, - kR8SINT, - - // 16-bit formats - kR16UNORM, - kR16SNORM, - kR16UINT, - kR16SINT, - kR16FLOAT, - kRG8UNORM, - kRG8SNORM, - kRG8UINT, - kRG8SINT, - - // 32-bit formats - kR32UINT, - kR32SINT, - kR32FLOAT, - kRG16UNORM, - kRG16SNORM, - kRG16UINT, - kRG16SINT, - kRG16FLOAT, - kRGBA8UNORM, - kRGBA8UNORM_SRGB, - kRGBA8SNORM, - kRGBA8UINT, - kRGBA8SINT, - kBGRA8UNORM, - kBGRA8UNORM_SRGB, - - // Packed 32-bit formats - kRGB9E5UFLOAT, - kRGB10A2UINT, - kRGB10A2UNORM, - kRG11B10UFLOAT, - - // 64-bit formats - kRG32UINT, - kRG32SINT, - kRG32FLOAT, - kRGBA16UNORM, - kRGBA16SNORM, - kRGBA16UINT, - kRGBA16SINT, - kRGBA16FLOAT, - - // 128-bit formats - kRGBA32UINT, - kRGBA32SINT, - kRGBA32FLOAT, - - // Depth/stencil formats - kSTENCIL8, - kDEPTH16UNORM, - kDEPTH24PLUS, - kDEPTH24PLUS_STENCIL8, - kDEPTH32FLOAT, - kDEPTH32FLOAT_STENCIL8, - - // BC compressed formats - kBC1_RGBA_UNORM, - kBC1_RGBA_UNORM_SRGB, - kBC2_RGBA_UNORM, - kBC2_RGBA_UNORM_SRGB, - kBC3_RGBA_UNORM, - kBC3_RGBA_UNORM_SRGB, - kBC4_R_UNORM, - kBC4_R_SNORM, - kBC5_RG_UNORM, - kBC5_RG_SNORM, - kBC6H_RGB_UFLOAT, - kBC6H_RGB_FLOAT, - kBC7_RGBA_UNORM, - kBC7_RGBA_UNORM_SRGB, - - // ETC2 compressed formats - kETC2_RGB8UNORM, - kETC2_RGB8UNORM_SRGB, - kETC2_RGB8A1UNORM, - kETC2_RGB8A1UNORM_SRGB, - kETC2_RGBA8UNORM, - kETC2_RGBA8UNORM_SRGB, - kEAC_R11UNORM, - kEAC_R11SNORM, - kEAC_RG11UNORM, - kEAC_RG11SNORM, - - // ASTC compressed formats - kASTC_4x4_UNORM, - kASTC_4x4_UNORM_SRGB, - kASTC_5x4_UNORM, - kASTC_5x4_UNORM_SRGB, - kASTC_5x5_UNORM, - kASTC_5x5_UNORM_SRGB, - kASTC_6x5_UNORM, - kASTC_6x5_UNORM_SRGB, - kASTC_6x6_UNORM, - kASTC_6x6_UNORM_SRGB, - kASTC_8x5_UNORM, - kASTC_8x5_UNORM_SRGB, - kASTC_8x6_UNORM, - kASTC_8x6_UNORM_SRGB, - 
kASTC_8x8_UNORM, - kASTC_8x8_UNORM_SRGB, - kASTC_10x5_UNORM, - kASTC_10x5_UNORM_SRGB, - kASTC_10x6_UNORM, - kASTC_10x6_UNORM_SRGB, - kASTC_10x8_UNORM, - kASTC_10x8_UNORM_SRGB, - kASTC_10x10_UNORM, - kASTC_10x10_UNORM_SRGB, - kASTC_12x10_UNORM, - kASTC_12x10_UNORM_SRGB, - kASTC_12x12_UNORM, - kASTC_12x12_UNORM_SRGB - }; + class GPUTextureViewBase; - enum class GPUTextureDimension + struct GPUTextureDescriptor { - kTexture1D, - kTexture2D, - kTexture3D, + std::string_view label; + GPUTextureUsage usage = GPUTextureUsage::kNone; + GPUTextureDimension dimension = GPUTextureDimension::k2D; + GPUExtent3D size; + GPUTextureFormat format = GPUTextureFormat::kUndefined; + uint32_t mipLevelCount = 1; + uint32_t sampleCount = 1; + size_t viewFormatCount = 0; + GPUTextureFormat const *viewFormats = nullptr; }; - class GPUTexture : public GPUHandle + class GPUTextureBase : public GPUHandle { public: private: - uint32_t width_; - uint32_t height_; - uint32_t depth_or_array_layers_; + GPUExtent3D base_size_; GPUTextureDimension dimension_; GPUTextureFormat format_; uint32_t mip_level_count_; uint32_t sample_count_; + GPUTextureUsage usage_ = GPUTextureUsage::kNone; + GPUTextureUsage internal_usage_ = GPUTextureUsage::kNone; + GPUTextureViewBase *default_view_ = nullptr; }; } diff --git a/src/common/command_buffers/gpu/gpu_texture_view.hpp b/src/common/command_buffers/gpu/gpu_texture_view.hpp index 557402b71..97c8c8855 100644 --- a/src/common/command_buffers/gpu/gpu_texture_view.hpp +++ b/src/common/command_buffers/gpu/gpu_texture_view.hpp @@ -1,24 +1,54 @@ #pragma once #include +#include #include -#include "./gpu_texture.hpp" +#include +#include +#include namespace commandbuffers { - class GPUTextureView : public GPUHandle + struct GPUTextureViewDescriptor + { + std::string_view label; + GPUTextureFormat format = GPUTextureFormat::kUndefined; + GPUTextureViewDimension dimension = GPUTextureViewDimension::kUndefined; + uint32_t baseMipLevel = 0; + uint32_t mipLevelCount = 
gpu_constants::kMipLevelCountUndefined; + uint32_t baseArrayLayer = 0; + uint32_t arrayLayerCount = gpu_constants::kArrayLayerCountUndefined; + GPUTextureAspect aspect = GPUTextureAspect::kAll; + GPUTextureUsage usage = GPUTextureUsage::kNone; + }; + + class GPUTextureViewBase : public GPUHandle { public: - const GPUTexture &textureRef() const - { - auto texture = texture_.lock(); - if (!texture) - throw std::runtime_error("GPUTextureView: Texture has been destroyed."); - return *texture; - } + GPUTextureViewBase(Ref texture, const GPUTextureViewDescriptor &); + + Ref texture() const; + const GPUTextureBase &textureRef() const; + + GPUTextureViewDimension dimension() const; + uint32_t getBaseMipLevel() const; + uint32_t getLevelCount() const; + uint32_t getBaseArrayLayer() const; + uint32_t getLayerCount() const; + + GPUTextureComponentSwizzle getSwizzle() const; + bool isSwizzleIdentity() const; private: - std::weak_ptr texture_; + Ref texture_; + GPUTextureViewDimension dimension_; + GPUTextureUsage usage_ = GPUTextureUsage::kNone; + GPUTextureUsage internal_usage_ = GPUTextureUsage::kNone; + GPUComponentSwizzle swizzle_red_ = GPUComponentSwizzle::kR; + GPUComponentSwizzle swizzle_green_ = GPUComponentSwizzle::kG; + GPUComponentSwizzle swizzle_blue_ = GPUComponentSwizzle::kB; + GPUComponentSwizzle swizzle_alpha_ = GPUComponentSwizzle::kA; + bool is_swizzle_identity_ = false; }; } diff --git a/src/common/command_buffers/gpu/indirect_draw_metadata.hpp b/src/common/command_buffers/gpu/indirect_draw_metadata.hpp new file mode 100644 index 000000000..3cb5595e3 --- /dev/null +++ b/src/common/command_buffers/gpu/indirect_draw_metadata.hpp @@ -0,0 +1,9 @@ +#pragma once + +namespace commandbuffers::gpu +{ + class IndirectDrawMetadata + { + // TODO(yorkie): Implement GPUIndirectDrawMetadata class. 
+ }; +} diff --git a/src/common/command_buffers/gpu/pass_resource_usage.hpp b/src/common/command_buffers/gpu/pass_resource_usage.hpp new file mode 100644 index 000000000..4a8ffd7d6 --- /dev/null +++ b/src/common/command_buffers/gpu/pass_resource_usage.hpp @@ -0,0 +1,70 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +namespace commandbuffers::gpu +{ + // Info about how a buffer is used and in which shader stages + struct BufferSyncInfo + { + GPUBufferUsage usage = GPUBufferUsage::kNone; + GPUShaderStage shaderStages = GPUShaderStage::kNone; + }; + + struct TextureSyncInfo + { + // GPUTextureUsage usage = GPUTextureUsage::None; + GPUShaderStage shaderStages = GPUShaderStage::kNone; + bool operator==(const TextureSyncInfo &b) const = default; + }; + + struct SyncScopeResourceUsage + { + std::vector buffers; + std::vector bufferSyncInfos; + + std::vector textures; + // std::vector textureSyncInfos; + + // std::vector externalTextures; + std::vector dynamicBindingArrays; + }; + + struct RenderPassResourceUsage : public SyncScopeResourceUsage + { + // Storage to track the occlusion queries used during the pass. + // std::vector querySets; + // std::vector> queryAvailabilities; + }; + + struct ComputePassResourceUsage + { + std::vector dispatchUsages; + + // All the resources referenced by this compute pass for validation in Queue::Submit. + std::unordered_set referencedBuffers; + std::unordered_set referencedTextures; + // std::unordered_set referencedExternalTextures; + std::unordered_set referencedDynamicBindingArrays; + }; + + using RenderPassUsages = std::vector; + using ComputePassUsages = std::vector; + + struct CommandBufferResourceUsage + { + RenderPassUsages renderPasses; + ComputePassUsages computePasses; + + // Resources used in commands that aren't in a pass. 
+ std::unordered_set> topLevelBuffers; + std::unordered_set> topLevelTextures; + }; +} diff --git a/src/common/command_buffers/gpu/pass_resource_usage_tracker.cpp b/src/common/command_buffers/gpu/pass_resource_usage_tracker.cpp new file mode 100644 index 000000000..e6a389e0f --- /dev/null +++ b/src/common/command_buffers/gpu/pass_resource_usage_tracker.cpp @@ -0,0 +1,5 @@ +#include + +namespace commandbuffers::gpu +{ +} diff --git a/src/common/command_buffers/gpu/pass_resource_usage_tracker.hpp b/src/common/command_buffers/gpu/pass_resource_usage_tracker.hpp new file mode 100644 index 000000000..d86099318 --- /dev/null +++ b/src/common/command_buffers/gpu/pass_resource_usage_tracker.hpp @@ -0,0 +1,16 @@ +#pragma once + +namespace commandbuffers::gpu +{ + class SyncScopeUsageTracker + { + }; + + class ComputePassResourceUsageTracker + { + }; + + class RenderPassResourceUsageTracker : public SyncScopeUsageTracker + { + }; +} diff --git a/src/common/command_buffers/gpu/physical_device.cpp b/src/common/command_buffers/gpu/physical_device.cpp new file mode 100644 index 000000000..585d62729 --- /dev/null +++ b/src/common/command_buffers/gpu/physical_device.cpp @@ -0,0 +1,88 @@ +#include + +using namespace std; + +namespace commandbuffers::gpu +{ + PhysicalDeviceBase::PhysicalDeviceBase(GPUBackendType type) + : backend_type_(type) + { + } + + void PhysicalDeviceBase::initialize() + { + initializeImpl(); + initializeVendorArchitectureImpl(); + } + + Ref PhysicalDeviceBase::createDevice(Ref adapter, + const GPUDeviceDescriptor &descriptor) + { + return createDeviceImpl(adapter, descriptor); + } + + uint32_t PhysicalDeviceBase::vendorId() const + { + return vendor_id_; + } + + uint32_t PhysicalDeviceBase::deviceId() const + { + return device_id_; + } + + const string &PhysicalDeviceBase::vendorName() const + { + return vendor_name_; + } + + const string &PhysicalDeviceBase::architectureName() const + { + return arch_name_; + } + + const string &PhysicalDeviceBase::name() 
const + { + return name_; + } + + const gpu_info::DriverVersion &PhysicalDeviceBase::driverVersion() const + { + return driver_version_; + } + + const string &PhysicalDeviceBase::driverDescription() const + { + return driver_description_; + } + + GPUAdapterType PhysicalDeviceBase::adapterType() const + { + return adapter_type_; + } + + GPUBackendType PhysicalDeviceBase::backendType() const + { + return backend_type_; + } + + uint32_t PhysicalDeviceBase::subgroupMinSize() const + { + return subgroup_min_size_; + } + + uint32_t PhysicalDeviceBase::subgroupMaxSize() const + { + return subgroup_max_size_; + } + + void PhysicalDeviceBase::enableFeature(GPUFeatureName feature) + { + } + + void PhysicalDeviceBase::initializeVendorArchitectureImpl() + { + vendor_name_ = gpu_info::GetVendorName(vendor_id_); + arch_name_ = gpu_info::GetArchitectureName(vendor_id_, device_id_); + } +} diff --git a/src/common/command_buffers/gpu/physical_device.hpp b/src/common/command_buffers/gpu/physical_device.hpp new file mode 100644 index 000000000..27355cc6f --- /dev/null +++ b/src/common/command_buffers/gpu/physical_device.hpp @@ -0,0 +1,64 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace commandbuffers::gpu +{ + class PhysicalDeviceBase + { + public: + explicit PhysicalDeviceBase(GPUBackendType); + + void initialize(); + Ref createDevice(Ref adapter, const GPUDeviceDescriptor &descriptor); + + uint32_t vendorId() const; + uint32_t deviceId() const; + const std::string &vendorName() const; + const std::string &architectureName() const; + const std::string &name() const; + const gpu_info::DriverVersion &driverVersion() const; + const std::string &driverDescription() const; + GPUAdapterType adapterType() const; + GPUBackendType backendType() const; + uint32_t subgroupMinSize() const; + uint32_t subgroupMaxSize() const; + + virtual bool supportsExternalImages() const = 0; + virtual bool supportsFeatureLevel(GPUFeatureLevel 
featureLevel, GPUInstance *instance) const = 0; + + protected: + void enableFeature(GPUFeatureName feature); + + private: + virtual Ref createDeviceImpl(Ref adapter, + const GPUDeviceDescriptor &descriptor) = 0; + virtual void initializeImpl() = 0; + virtual void initializeSupportedFeaturesImpl() = 0; + virtual void initializeVendorArchitectureImpl(); + + protected: + uint32_t vendor_id_ = 0xFFFFFFFF; + uint32_t device_id_ = 0xFFFFFFFF; + std::string vendor_name_ = "Unknown"; + std::string arch_name_ = "Unknown"; + std::string name_ = ""; + GPUAdapterType adapter_type_ = GPUAdapterType::kUnknown; + gpu_info::DriverVersion driver_version_; + std::string driver_description_ = ""; + + uint32_t subgroup_min_size_ = 4u; + uint32_t subgroup_max_size_ = 128u; + + private: + GPUBackendType backend_type_ = GPUBackendType::kUndefined; + }; +} diff --git a/src/common/command_buffers/gpu/programmable_encoder.cpp b/src/common/command_buffers/gpu/programmable_encoder.cpp new file mode 100644 index 000000000..da28b56bd --- /dev/null +++ b/src/common/command_buffers/gpu/programmable_encoder.cpp @@ -0,0 +1,52 @@ +#include +#include + +namespace commandbuffers::gpu +{ + using namespace std; + + ProgrammableEncoder::ProgrammableEncoder(Ref device, + string_view label, + EncodingContext *encoding_context) + : GPUHandle(device, label) + , encoding_context_(encoding_context) + , validation_enabled_(device->isValidationEnabled()) + { + } + + ProgrammableEncoder::ProgrammableEncoder(Ref device, + EncodingContext *encodingContext, + ErrorTag errorTag, + string_view label) + : GPUHandle(device, errorTag, label) + , encoding_context_(encodingContext) + , validation_enabled_(device->isValidationEnabled()) + { + } + + void ProgrammableEncoder::insertDebugMarker(string_view group_label) + { + } + + void ProgrammableEncoder::popDebugMarker() + { + } + + void ProgrammableEncoder::pushDebugMarker(string_view group_label) + { + } + + void ProgrammableEncoder::setImmediateData(uint32_t offset, const 
void *data, size_t size) + { + } + + bool ProgrammableEncoder::isValidationEnabled() const + { + return validation_enabled_; + } + + MaybeError ProgrammableEncoder::validateProgrammableEncoderEnd() const + { + return {}; + } +} \ No newline at end of file diff --git a/src/common/command_buffers/gpu/programmable_encoder.hpp b/src/common/command_buffers/gpu/programmable_encoder.hpp new file mode 100644 index 000000000..4f9677447 --- /dev/null +++ b/src/common/command_buffers/gpu/programmable_encoder.hpp @@ -0,0 +1,45 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace commandbuffers +{ + class GPUDeviceBase; + + namespace gpu + { + class ProgrammableEncoder : public GPUHandle + { + public: + ProgrammableEncoder(Ref device, + std::string_view label, + EncodingContext *encoding_context); + + void insertDebugMarker(std::string_view group_label); + void popDebugMarker(); + void pushDebugMarker(std::string_view group_label); + void setImmediateData(uint32_t offset, const void *data, size_t size); + + protected: + bool isValidationEnabled() const; + MaybeError validateProgrammableEncoderEnd() const; + + // Construct an "error" programmable pass encoder. 
+ ProgrammableEncoder(Ref device, + EncodingContext *encodingContext, + ErrorTag errorTag, + std::string_view label); + + EncodingContext *encoding_context_ = nullptr; + uint64_t debug_group_stack_size_ = 0; + bool ended_ = false; + + private: + const bool validation_enabled_; + }; + } +} diff --git a/src/common/command_buffers/gpu/render_encoder_base.cpp b/src/common/command_buffers/gpu/render_encoder_base.cpp new file mode 100644 index 000000000..9f5b13069 --- /dev/null +++ b/src/common/command_buffers/gpu/render_encoder_base.cpp @@ -0,0 +1,121 @@ +#include +#include +#include "command_buffers/gpu/indirect_draw_metadata.hpp" + +namespace commandbuffers::gpu +{ + using namespace std; + + RenderEncoderBase::RenderEncoderBase(Ref device, + string_view label, + EncodingContext *encoding_context, + Ref attachment_state, + bool depth_readonly, + bool stencil_readonly) + : ProgrammableEncoder(device, label, encoding_context) + , attachment_state_(std::move(attachment_state)) + , disable_base_vertex_(true) + , disable_base_instance_(true) + , depth_read_only_(depth_readonly) + , stencil_read_only_(stencil_readonly) + { + } + + RenderEncoderBase::RenderEncoderBase(Ref device, + EncodingContext *encoding_context, + ErrorTag error_tag, + std::string_view label) + : ProgrammableEncoder(device, encoding_context, error_tag, label) + , disable_base_vertex_(true) + , disable_base_instance_(true) + { + } + + void RenderEncoderBase::draw(uint32_t vertex_count, + uint32_t instance_count, + uint32_t first_vertex, + uint32_t first_instance) + { + } + + void RenderEncoderBase::drawIndexed(uint32_t index_count, + uint32_t instance_count, + uint32_t first_index, + int32_t base_vertex, + uint32_t first_instance) + { + } + + void RenderEncoderBase::drawIndirect(GPUBufferBase *indirect_buffer, uint64_t indirect_offset) + { + } + + void RenderEncoderBase::drawIndexedIndirect(GPUBufferBase *indirect_buffer, uint64_t indirect_offset) + { + } + + void RenderEncoderBase::multiDrawIndirect( + GPUBufferBase *indirect_buffer, + uint64_t indirect_offset, + uint32_t
max_draw_count, + GPUBufferBase *draw_count_buffer, + uint64_t draw_count_buffer_offset) + { + } + + void RenderEncoderBase::multiDrawIndexedIndirect( + GPUBufferBase *indirect_buffer, + uint64_t indirect_offset, + uint32_t max_draw_count, + GPUBufferBase *draw_count_buffer, + uint64_t draw_count_buffer_offset) + { + } + + void RenderEncoderBase::setPipeline(GPURenderPipelineBase *pipeline) + { + } + + void RenderEncoderBase::setVertexBuffer(uint32_t slot, GPUBufferBase *buffer, uint64_t offset, uint64_t size) + { + } + + void RenderEncoderBase::setIndexBuffer(GPUBufferBase *buffer, + GPUIndexFormat format, + uint64_t offset, + uint64_t size) + { + } + + void RenderEncoderBase::setBindGroup(uint32_t group_index, + GPUBindGroupBase *group, + uint32_t dynamic_offset_count, + const uint32_t *dynamic_offsets) + { + } + + const AttachmentState *RenderEncoderBase::getAttachmentState() const + { + return attachment_state_.get(); + } + + bool RenderEncoderBase::isDepthReadOnly() const + { + return depth_read_only_; + } + + bool RenderEncoderBase::isStencilReadOnly() const + { + return stencil_read_only_; + } + + uint64_t RenderEncoderBase::getDrawCount() const + { + return draw_count_; + } + + Ref RenderEncoderBase::acquireAttachmentState() + { + return std::move(attachment_state_); + } +} \ No newline at end of file diff --git a/src/common/command_buffers/gpu/render_encoder_base.hpp b/src/common/command_buffers/gpu/render_encoder_base.hpp new file mode 100644 index 000000000..b11bf3c3e --- /dev/null +++ b/src/common/command_buffers/gpu/render_encoder_base.hpp @@ -0,0 +1,92 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "command_buffers/gpu/indirect_draw_metadata.hpp" + +namespace commandbuffers +{ + class GPUDeviceBase; + + namespace gpu + { + class RenderEncoderBase : public ProgrammableEncoder + { + public: + RenderEncoderBase(Ref device, + std::string_view label, + EncodingContext
*encoding_context, + Ref attachment_state, + bool depth_readonly, + bool stencil_readonly); + + void draw(uint32_t vertex_count, + uint32_t instance_count = 1, + uint32_t first_vertex = 0, + uint32_t first_instance = 0); + void drawIndexed(uint32_t index_count, + uint32_t instance_count, + uint32_t first_index, + int32_t base_vertex, + uint32_t first_instance); + + void drawIndirect(GPUBufferBase *indirect_buffer, uint64_t indirect_offset); + void drawIndexedIndirect(GPUBufferBase *indirect_buffer, uint64_t indirect_offset); + + void multiDrawIndirect( + GPUBufferBase *indirect_buffer, + uint64_t indirect_offset, + uint32_t max_draw_count, + GPUBufferBase *draw_count_buffer = nullptr, + uint64_t draw_count_buffer_offset = 0); + void multiDrawIndexedIndirect( + GPUBufferBase *indirect_buffer, + uint64_t indirect_offset, + uint32_t max_draw_count, + GPUBufferBase *draw_count_buffer = nullptr, + uint64_t draw_count_buffer_offset = 0); + + void setPipeline(GPURenderPipelineBase *pipeline); + void setVertexBuffer(uint32_t slot, GPUBufferBase *buffer, uint64_t offset, uint64_t size); + void setIndexBuffer(GPUBufferBase *buffer, + GPUIndexFormat format, + uint64_t offset, + uint64_t size); + + void setBindGroup(uint32_t group_index, + GPUBindGroupBase *group, + uint32_t dynamic_offset_count = 0, + const uint32_t *dynamic_offsets = nullptr); + + const AttachmentState *getAttachmentState() const; + bool isDepthReadOnly() const; + bool isStencilReadOnly() const; + uint64_t getDrawCount() const; + Ref acquireAttachmentState(); + + protected: + // Construct an "error" render encoder base.
+ RenderEncoderBase(Ref device, + EncodingContext *encoding_context, + ErrorTag error_tag, + std::string_view label); + + IndirectDrawMetadata indirect_draw_metadata_; + uint64_t draw_count_ = 0; + + private: + Ref attachment_state_; + const bool disable_base_vertex_; + const bool disable_base_instance_; + bool depth_read_only_ = false; + bool stencil_read_only_ = false; + }; + } +} diff --git a/src/common/command_buffers/webgl_constants.hpp b/src/common/command_buffers/webgl_constants.hpp index a2b30d1b5..0b5ca746b 100644 --- a/src/common/command_buffers/webgl_constants.hpp +++ b/src/common/command_buffers/webgl_constants.hpp @@ -9,6 +9,32 @@ * @see https://developer.mozilla.org/en-US/docs/Web/API/WebGL_API/Constants#standard_webgl_1_constants */ +// Types + +typedef uint32_t WebGLbitfield; +typedef uint8_t WebGLboolean; +typedef int8_t WebGLbyte; +typedef float WebGLclampf; +typedef uint32_t WebGLenum; +typedef float WebGLfloat; +typedef int32_t WebGLint; +typedef int16_t WebGLshort; +typedef int32_t WebGLsizei; +typedef uint8_t WebGLubyte; +typedef uint32_t WebGLuint; +typedef uint16_t WebGLushort; +typedef void WebGLvoid; + +typedef char WebGLchar; +typedef double WebGLdouble; +typedef double WebGLclampd; +typedef uint16_t WebGLhalf; +typedef int64_t WebGLint64; +typedef struct __WebGLsync *WebGLsync; +typedef uint64_t WebGLuint64; +typedef intptr_t WebGLintptr; +typedef intptr_t WebGLsizeiptr; + // Standard WebGL 1 constants // Clearing buffers diff --git a/src/common/compiler.hpp b/src/common/compiler.hpp new file mode 100644 index 000000000..7e9e2586a --- /dev/null +++ b/src/common/compiler.hpp @@ -0,0 +1,124 @@ +#pragma once + +// Copyright 2024 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// TR_COMPILER_IS(CLANG|GCC|MSVC): Compiler detection +// +// Note: clang masquerades as GCC on POSIX and as MSVC on Windows. It must be checked first. +#if defined(__clang__) +#define TR_COMPILER_IS_CLANG 1 +#define TR_COMPILER_IS_GCC 0 +#define TR_COMPILER_IS_MSVC 0 +#elif defined(__GNUC__) +#define TR_COMPILER_IS_CLANG 0 +#define TR_COMPILER_IS_GCC 1 +#define TR_COMPILER_IS_MSVC 0 +#elif defined(_MSC_VER) +#define TR_COMPILER_IS_CLANG 0 +#define TR_COMPILER_IS_GCC 0 +#define TR_COMPILER_IS_MSVC 1 +#else +#error "Unsupported compiler" +#endif + +// Use #if TR_COMPILER_IS(XXX) for compiler specific code. 
+// Do not use #ifdef or the naked macro TR_COMPILER_IS_XXX. +// This can help avoid common mistakes like not including "compiler.h" and falling into unwanted +// code block as usage of undefined macro "function" will be blocked by the compiler. +#define TR_COMPILER_IS(X) (1 == TR_COMPILER_IS_##X) + +// TR_HAS_ATTRIBUTE +// +// A wrapper around `__has_attribute`. This test whether its operand is recognized by the compiler. +#if defined(__has_attribute) +#define TR_HAS_ATTRIBUTE(x) __has_attribute(x) +#else +#define TR_HAS_ATTRIBUTE(x) 0 +#endif + +// TR_HAS_CPP_ATTRIBUTE +// +// A wrapper around `__has_cpp_attribute`. This test whether its operand is recognized by the +// compiler. +#if defined(__has_cpp_attribute) +#define TR_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x) +#else +#define TR_HAS_CPP_ATTRIBUTE(x) 0 +#endif + +// TR_BUILTIN_UNREACHABLE() +// +// Hints the compiler that a code path is unreachable. +#if TR_COMPILER_IS(MSVC) +#define TR_BUILTIN_UNREACHABLE() __assume(false) +#else +#define TR_BUILTIN_UNREACHABLE() __builtin_unreachable() +#endif + +// TR_LIKELY(EXPR) +// +// Where available, hints the compiler that the expression will be true to help it generate code +// that leads to better branch prediction. +#if TR_COMPILER_IS(GCC) || TR_COMPILER_IS(CLANG) +#define TR_LIKELY(x) __builtin_expect(!!(x), 1) +#else +#define TR_LIKELY(x) (x) +#endif + +// TR_UNLIKELY(EXPR) +// +// Where available, hints the compiler that the expression will be false to help it generate code +// that leads to better branch prediction. +#if TR_COMPILER_IS(GCC) || TR_COMPILER_IS(CLANG) +#define TR_UNLIKELY(x) __builtin_expect(!!(x), 0) +#else +#define TR_UNLIKELY(x) (x) +#endif + +// TR_ASAN_ENABLED() +// +// Checks whether ASan is enabled. 
+#if TR_COMPILER_IS(CLANG) +#define TR_ASAN_ENABLED() __has_feature(address_sanitizer) +#elif TR_COMPILER_IS(GCC) || TR_COMPILER_IS(MSVC) +#if defined(__SANITIZE_ADDRESS__) +#define TR_ASAN_ENABLED() 1 +#else +#define TR_ASAN_ENABLED() 0 +#endif +#endif + +// TR_NO_SANITIZE(instrumentation) +// +// Annotate a function or a global variable declaration to specify that a particular instrumentation +// or set of instrumentations should not be applied. +#if TR_HAS_ATTRIBUTE(no_sanitize) +#define TR_NO_SANITIZE(instrumentation) __attribute__((no_sanitize(instrumentation))) +#else +#define TR_NO_SANITIZE(instrumentation) +#endif diff --git a/src/common/math_utils.hpp b/src/common/math_utils.hpp index 533af0b1c..e6bde1b2c 100644 --- a/src/common/math_utils.hpp +++ b/src/common/math_utils.hpp @@ -1,6 +1,7 @@ #pragma once #include +#include #include namespace transmute::common @@ -44,5 +45,51 @@ namespace transmute::common { return std::fabs(a - b) <= epsilon; } + + /** + * Check if a number is a power of two. + * + * @param n The number to check, must be non-zero. + * @returns True if n is a power of two, false otherwise. 
+ */ + static bool IsPowerOfTwo(uint64_t n) + { + assert(n != 0); + return (n & (n - 1)) == 0; + } }; + + inline bool IsPtrAligned(const void *ptr, size_t alignment) + { + assert(math_utils::IsPowerOfTwo(alignment)); + assert(alignment != 0); + return (reinterpret_cast(ptr) & (alignment - 1)) == 0; + } + + inline bool IsAligned(uint32_t value, size_t alignment) + { + assert(alignment <= UINT32_MAX); + assert(math_utils::IsPowerOfTwo(alignment)); + assert(alignment != 0); + uint32_t alignment32 = static_cast(alignment); + return (value & (alignment32 - 1)) == 0; + } + + template + inline T *AlignPtr(T *ptr, size_t alignment) + { + assert(math_utils::IsPowerOfTwo(alignment)); + assert(alignment != 0); + return reinterpret_cast((reinterpret_cast(ptr) + (alignment - 1)) & + ~(alignment - 1)); + } + + template + inline const T *AlignPtr(const T *ptr, size_t alignment) + { + assert(math_utils::IsPowerOfTwo(alignment)); + assert(alignment != 0); + return reinterpret_cast((reinterpret_cast(ptr) + (alignment - 1)) & + ~(alignment - 1)); + } } diff --git a/src/common/non_copyable.hpp b/src/common/non_copyable.hpp new file mode 100644 index 000000000..4ed3943fc --- /dev/null +++ b/src/common/non_copyable.hpp @@ -0,0 +1,44 @@ +// Copyright 2021 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +// A base class to make a class non-copyable. +class NonCopyable +{ +public: + constexpr NonCopyable() = default; + ~NonCopyable() = default; + +protected: + NonCopyable(NonCopyable &&) = default; + NonCopyable &operator=(NonCopyable &&) = default; + +private: + NonCopyable(const NonCopyable &) = delete; + void operator=(const NonCopyable &) = delete; +}; diff --git a/src/common/non_movable.hpp b/src/common/non_movable.hpp new file mode 100644 index 000000000..2ff850cec --- /dev/null +++ b/src/common/non_movable.hpp @@ -0,0 +1,42 @@ +// Copyright 2024 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include + +// A base class to make a class non-movable. +class NonMovable : NonCopyable +{ +protected: + constexpr NonMovable() = default; + ~NonMovable() = default; + +private: + NonMovable(NonMovable &&) = delete; + void operator=(NonMovable &&) = delete; +}; diff --git a/src/common/platform.hpp b/src/common/platform.hpp new file mode 100644 index 000000000..2fa8f1a97 --- /dev/null +++ b/src/common/platform.hpp @@ -0,0 +1,320 @@ +// Copyright 2017 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. 
Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +// Use #if TR_PLATFORM_IS(X) for platform specific code. +// Do not use #ifdef or the naked macro TR_PLATFORM_IS_X. +// This can help avoid common mistakes like not including "Platform.h" and falling into unwanted +// code block as usage of undefined macro "function" will be blocked by the compiler. 
+#define TR_PLATFORM_IS(X) (1 == TR_PLATFORM_IS_##X) + +// Define platform macros for OSes: +// +// - WINDOWS +// - WIN32 +// - WINUWP +// - POSIX +// - LINUX +// - ANDROID +// - CHROMEOS +// - APPLE +// - IOS +// - TVOS +// - MACOS +// - FUCHSIA +// - EMSCRIPTEN +#if defined(_WIN32) || defined(_WIN64) +#include +#define TR_PLATFORM_IS_WINDOWS 1 +#if WINAPI_FAMILY == WINAPI_FAMILY_DESKTOP_APP +#define TR_PLATFORM_IS_WIN32 1 +#elif WINAPI_FAMILY == WINAPI_FAMILY_PC_APP +#define TR_PLATFORM_IS_WINUWP 1 +#else +#error "Unsupported Windows platform." +#endif + +#elif defined(__linux__) +#define TR_PLATFORM_IS_LINUX 1 +#define TR_PLATFORM_IS_POSIX 1 +#if defined(__ANDROID__) +#define TR_PLATFORM_IS_ANDROID 1 +#elif defined(TR_OS_CHROMEOS) +#define TR_PLATFORM_IS_CHROMEOS 1 +#else +#define TR_PLATFORM_IS_LINUX_DESKTOP 1 +#endif + +#elif defined(__APPLE__) +#define TR_PLATFORM_IS_APPLE 1 +#define TR_PLATFORM_IS_POSIX 1 +#include +#if TARGET_OS_IPHONE +#define TR_PLATFORM_IS_IOS 1 +#if TARGET_OS_TV +#define TR_PLATFORM_IS_TVOS 1 +#endif +#elif TARGET_OS_MAC +#define TR_PLATFORM_IS_MACOS 1 +#else +#error "Unsupported Apple platform." +#endif + +#elif defined(__Fuchsia__) +#define TR_PLATFORM_IS_FUCHSIA 1 +#define TR_PLATFORM_IS_POSIX 1 + +#elif defined(__EMSCRIPTEN__) +#define TR_PLATFORM_IS_EMSCRIPTEN 1 +#define TR_PLATFORM_IS_POSIX 1 +#include + +#else +#error "Unsupported platform." 
+#endif + +// Define platform macros for CPU architectures: +// +// - X86 +// - I386 +// - X86_64 +// - ARM +// - ARM32 +// - ARM64 +// - LOONGARCH +// - LOONGARCH32 +// - LOONGARCH64 +// - RISCV +// - RISCV32 +// - RISCV64 +// - MIPS +// - MIPS32 +// - MIPS64 +// - S390 +// - S390X +// - PPC +// - PPC64 +#if defined(__i386__) || defined(_M_IX86) +#define TR_PLATFORM_IS_X86 1 +#define TR_PLATFORM_IS_I386 1 +#elif defined(__x86_64__) || defined(_M_X64) +#define TR_PLATFORM_IS_X86 1 +#define TR_PLATFORM_IS_X86_64 1 + +#elif defined(__arm__) || defined(_M_ARM) +#define TR_PLATFORM_IS_ARM 1 +#define TR_PLATFORM_IS_ARM32 1 +#elif defined(__aarch64__) || defined(_M_ARM64) +#define TR_PLATFORM_IS_ARM 1 +#define TR_PLATFORM_IS_ARM64 1 + +#elif defined(__loongarch__) +#define TR_PLATFORM_IS_LOONGARCH 1 +#if __loongarch_grlen == 64 +#define TR_PLATFORM_IS_LOONGARCH64 1 +#else +#define TR_PLATFORM_IS_LOONGARCH32 1 +#endif + +#elif defined(__riscv) +#define TR_PLATFORM_IS_RISCV 1 +#if __riscv_xlen == 32 +#define TR_PLATFORM_IS_RISCV32 1 +#else +#define TR_PLATFORM_IS_RISCV64 1 +#endif + +#elif defined(__mips__) +#define TR_PLATFORM_IS_MIPS 1 +#if _MIPS_SIM == _ABIO32 +#define TR_PLATFORM_IS_MIPS32 1 +#else +#define TR_PLATFORM_IS_MIPS64 1 +#endif + +#elif defined(__s390__) +#define TR_PLATFORM_IS_S390 1 +#elif defined(__s390x__) +#define TR_PLATFORM_IS_S390X 1 + +#elif defined(__PPC__) +#define TR_PLATFORM_IS_PPC 1 +#elif defined(__PPC64__) +#define TR_PLATFORM_IS_PPC64 1 + +#elif defined(__wasm32__) +#define TR_PLATFORM_IS_WASM32 1 +#elif defined(__wasm64__) +#define TR_PLATFORM_IS_WASM64 1 + +#else +#error "Unsupported platform." 
+#endif + +// Define platform macros for pointer width: +// +// - 64_BIT +// - 32_BIT +#if defined(TR_PLATFORM_IS_X86_64) || defined(TR_PLATFORM_IS_ARM64) || \ + defined(TR_PLATFORM_IS_RISCV64) || defined(TR_PLATFORM_IS_MIPS64) || \ + defined(TR_PLATFORM_IS_S390X) || defined(TR_PLATFORM_IS_PPC64) || \ + defined(TR_PLATFORM_IS_LOONGARCH64) || defined(TR_PLATFORM_IS_WASM64) +#define TR_PLATFORM_IS_64_BIT 1 +static_assert(sizeof(sizeof(char)) == 8, "Expect sizeof(size_t) == 8"); +#elif defined(TR_PLATFORM_IS_I386) || defined(TR_PLATFORM_IS_ARM32) || \ + defined(TR_PLATFORM_IS_RISCV32) || defined(TR_PLATFORM_IS_MIPS32) || \ + defined(TR_PLATFORM_IS_S390) || defined(TR_PLATFORM_IS_PPC32) || \ + defined(TR_PLATFORM_IS_LOONGARCH32) || defined(TR_PLATFORM_IS_WASM32) +#define TR_PLATFORM_IS_32_BIT 1 +static_assert(sizeof(sizeof(char)) == 4, "Expect sizeof(size_t) == 4"); +#else +#error "Unsupported platform" +#endif + +// This section define other platform macros to 0 to avoid undefined macro usage error. 
+#if !defined(TR_PLATFORM_IS_WINDOWS) +#define TR_PLATFORM_IS_WINDOWS 0 +#endif +#if !defined(TR_PLATFORM_IS_WIN32) +#define TR_PLATFORM_IS_WIN32 0 +#endif +#if !defined(TR_PLATFORM_IS_WINUWP) +#define TR_PLATFORM_IS_WINUWP 0 +#endif + +#if !defined(TR_PLATFORM_IS_POSIX) +#define TR_PLATFORM_IS_POSIX 0 +#endif + +#if !defined(TR_PLATFORM_IS_LINUX) +#define TR_PLATFORM_IS_LINUX 0 +#endif +#if !defined(TR_PLATFORM_IS_ANDROID) +#define TR_PLATFORM_IS_ANDROID 0 +#endif +#if !defined(TR_PLATFORM_IS_CHROMEOS) +#define TR_PLATFORM_IS_CHROMEOS 0 +#endif +#if !defined(TR_PLATFORM_IS_LINUX_DESKTOP) +#define TR_PLATFORM_IS_LINUX_DESKTOP 0 +#endif + +#if !defined(TR_PLATFORM_IS_APPLE) +#define TR_PLATFORM_IS_APPLE 0 +#endif +#if !defined(TR_PLATFORM_IS_IOS) +#define TR_PLATFORM_IS_IOS 0 +#endif +#if !defined(TR_PLATFORM_IS_MACOS) +#define TR_PLATFORM_IS_MACOS 0 +#endif + +#if !defined(TR_PLATFORM_IS_FUCHSIA) +#define TR_PLATFORM_IS_FUCHSIA 0 +#endif +#if !defined(TR_PLATFORM_IS_EMSCRIPTEN) +#define TR_PLATFORM_IS_EMSCRIPTEN 0 +#endif + +#if !defined(TR_PLATFORM_IS_X86) +#define TR_PLATFORM_IS_X86 0 +#endif +#if !defined(TR_PLATFORM_IS_I386) +#define TR_PLATFORM_IS_I386 0 +#endif +#if !defined(TR_PLATFORM_IS_X86_64) +#define TR_PLATFORM_IS_X86_64 0 +#endif + +#if !defined(TR_PLATFORM_IS_ARM) +#define TR_PLATFORM_IS_ARM 0 +#endif +#if !defined(TR_PLATFORM_IS_ARM32) +#define TR_PLATFORM_IS_ARM32 0 +#endif +#if !defined(TR_PLATFORM_IS_ARM64) +#define TR_PLATFORM_IS_ARM64 0 +#endif + +#if !defined(TR_PLATFORM_IS_LOONGARCH) +#define TR_PLATFORM_IS_LOONGARCH 0 +#endif +#if !defined(TR_PLATFORM_IS_LOONGARCH32) +#define TR_PLATFORM_IS_LOONGARCH32 0 +#endif +#if !defined(TR_PLATFORM_IS_LOONGARCH64) +#define TR_PLATFORM_IS_LOONGARCH64 0 +#endif + +#if !defined(TR_PLATFORM_IS_RISCV) +#define TR_PLATFORM_IS_RISCV 0 +#endif +#if !defined(TR_PLATFORM_IS_RISCV32) +#define TR_PLATFORM_IS_RISCV32 0 +#endif +#if !defined(TR_PLATFORM_IS_RISCV64) +#define TR_PLATFORM_IS_RISCV64 0 +#endif + +#if 
!defined(TR_PLATFORM_IS_MIPS) +#define TR_PLATFORM_IS_MIPS 0 +#endif +#if !defined(TR_PLATFORM_IS_MIPS32) +#define TR_PLATFORM_IS_MIPS32 0 +#endif +#if !defined(TR_PLATFORM_IS_MIPS64) +#define TR_PLATFORM_IS_MIPS64 0 +#endif + +#if !defined(TR_PLATFORM_IS_S390) +#define TR_PLATFORM_IS_S390 0 +#endif +#if !defined(TR_PLATFORM_IS_S390X) +#define TR_PLATFORM_IS_S390X 0 +#endif + +#if !defined(TR_PLATFORM_IS_PPC) +#define TR_PLATFORM_IS_PPC 0 +#endif +#if !defined(TR_PLATFORM_IS_PPC64) +#define TR_PLATFORM_IS_PPC64 0 +#endif + +#if !defined(TR_PLATFORM_IS_WASM32) +#define TR_PLATFORM_IS_WASM32 0 +#endif +#if !defined(TR_PLATFORM_IS_WASM64) +#define TR_PLATFORM_IS_WASM64 0 +#endif + +#if !defined(TR_PLATFORM_IS_64_BIT) +#define TR_PLATFORM_IS_64_BIT 0 +#endif +#if !defined(TR_PLATFORM_IS_32_BIT) +#define TR_PLATFORM_IS_32_BIT 0 +#endif diff --git a/src/common/result.cpp b/src/common/result.cpp new file mode 100644 index 000000000..68266b773 --- /dev/null +++ b/src/common/result.cpp @@ -0,0 +1,47 @@ +// Copyright 2019 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include + +namespace jsar +{ + // Implementation details of the tagged pointer Results + namespace detail + { + intptr_t MakePayload(const void *pointer, PayloadType type) + { + intptr_t payload = reinterpret_cast(pointer); + assert((payload & 3) == 0); + return payload | type; + } + + PayloadType GetPayloadType(intptr_t payload) + { + return static_cast(payload & 3); + } + } +} diff --git a/src/common/result.hpp b/src/common/result.hpp new file mode 100644 index 000000000..4d1a91cac --- /dev/null +++ b/src/common/result.hpp @@ -0,0 +1,608 @@ +// Copyright 2018 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. 
Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include +#include +#include +#include +#include +#include +#include + +namespace jsar +{ + // Result is the following sum type (Haskell notation): + // + // data Result T E = Success T | Error E | Empty + // + // It is meant to be used as the return type of functions that might fail. The reason for the Empty + // case is that a Result should never be discarded, only destructured (its error or success moved + // out) or moved into a different Result. The Empty case tags Results that have been moved out and + // Result's destructor should assert on it being Empty. + // + // Since C++ doesn't have efficient sum types for the special cases we care about, we provide + // template specializations for them. + + template + class Result; + + // The interface of Result should look like the following. 
+ // public: + // Result(T&& success); + // Result(std::unique_ptr error); + // + // Result(Result&& other); + // Result& operator=(Result&& other); + // + // ~Result(); + // + // bool IsError() const; + // bool IsSuccess() const; + // + // T&& AcquireSuccess(); + // std::unique_ptr AcquireError(); + + // Specialization of Result for returning errors only via pointers. It is basically a pointer + // where nullptr is both Success and Empty. + template + class [[nodiscard]] Result + { + public: + Result(); + Result(std::unique_ptr error); + + Result(Result &&other); + Result &operator=(Result &&other); + + ~Result(); + + bool IsError() const; + bool IsSuccess() const; + + void AcquireSuccess(); + std::unique_ptr AcquireError(); + + private: + std::unique_ptr mError; + }; + + // Uses SFINAE to try to get alignof(T) but fallback to Default if T isn't defined. + template + constexpr size_t alignof_if_defined_else_default = Default; + + template + constexpr size_t alignof_if_defined_else_default = alignof(T); + + // Specialization of Result when both the error an success are pointers. It is implemented as a + // tagged pointer. The tag for Success is 0 so that returning the value is fastest. + + namespace detail + { + // Utility functions to manipulate the tagged pointer. 
Some of them don't need to be templated + // but we really want them inlined so we keep them in the headers + enum PayloadType + { + Success = 0, + Error = 1, + Empty = 2, + }; + + intptr_t MakePayload(const void *pointer, PayloadType type); + PayloadType GetPayloadType(intptr_t payload); + + template + static T *GetSuccessFromPayload(intptr_t payload); + template + static E *GetErrorFromPayload(intptr_t payload); + + constexpr static intptr_t kEmptyPayload = Empty; + } // namespace detail + + template + class [[nodiscard]] Result + { + public: + static_assert(alignof_if_defined_else_default >= 4, + "Result reserves two bits for tagging pointers"); + static_assert(alignof_if_defined_else_default >= 4, + "Result reserves two bits for tagging pointers"); + + Result(T *success); + Result(std::unique_ptr error); + + // Support returning a Result from a Result + template + requires std::same_as || std::derived_from + Result(Result &&other); + template + requires std::same_as || std::derived_from + Result &operator=(Result &&other); + + ~Result(); + + bool IsError() const; + bool IsSuccess() const; + + T *AcquireSuccess(); + std::unique_ptr AcquireError(); + + private: + template + friend class Result; + + intptr_t mPayload = detail::kEmptyPayload; + }; + + template + class [[nodiscard]] Result + { + public: + static_assert(alignof_if_defined_else_default >= 4, + "Result reserves two bits for tagging pointers"); + static_assert(alignof_if_defined_else_default >= 4, + "Result reserves two bits for tagging pointers"); + + Result(const T *success); + Result(std::unique_ptr error); + + Result(Result &&other); + Result &operator=(Result &&other); + + ~Result(); + + bool IsError() const; + bool IsSuccess() const; + + const T *AcquireSuccess(); + std::unique_ptr AcquireError(); + + private: + intptr_t mPayload = detail::kEmptyPayload; + }; + + template + class Ref; + + template + class [[nodiscard]] Result, E> + { + public: + static_assert(alignof_if_defined_else_default >= 4, 
+ "Result, E> reserves two bits for tagging pointers"); + static_assert(alignof_if_defined_else_default >= 4, + "Result, E> reserves two bits for tagging pointers"); + + template + requires std::convertible_to + Result(Ref &&success); + template + requires std::convertible_to + Result(const Ref &success); + Result(std::unique_ptr error); + constexpr Result(std::nullptr_t); + + template + requires std::convertible_to + Result(Result, E> &&other); + template + requires std::convertible_to + Result, E> &operator=(Result, E> &&other); + + ~Result(); + + bool IsError() const; + bool IsSuccess() const; + + Ref AcquireSuccess(); + std::unique_ptr AcquireError(); + + private: + template + friend class Result; + + intptr_t mPayload = detail::kEmptyPayload; + }; + + // Catchall definition of Result implemented as a tagged struct. It could be improved to use + // a tagged union instead if it turns out to be a hotspot. T and E must be movable and default + // constructible. + template + class [[nodiscard]] Result + { + public: + Result(T success); + Result(std::unique_ptr error); + + Result(Result &&other); + Result &operator=(Result &&other); + + ~Result(); + + bool IsError() const; + bool IsSuccess() const; + + T AcquireSuccess(); + std::unique_ptr AcquireError(); + + private: + std::variant> mPayload; + }; + + // Implementation of Result + template + Result::Result() + { + } + + template + Result::Result(std::unique_ptr error) + : mError(std::move(error)) + { + } + + template + Result::Result(Result &&other) + : mError(std::move(other.mError)) + { + } + + template + Result &Result::operator=(Result &&other) + { + assert(mError == nullptr); + mError = std::move(other.mError); + return *this; + } + + template + Result::~Result() + { + assert(mError == nullptr); + } + + template + bool Result::IsError() const + { + return mError != nullptr; + } + + template + bool Result::IsSuccess() const + { + return mError == nullptr; + } + + template + void Result::AcquireSuccess() + { + } 
+ + template + std::unique_ptr Result::AcquireError() + { + return std::move(mError); + } + + // Implementation details of the tagged pointer Results + namespace detail + { + + template + T *GetSuccessFromPayload(intptr_t payload) + { + assert(GetPayloadType(payload) == Success); + return reinterpret_cast(payload); + } + + template + E *GetErrorFromPayload(intptr_t payload) + { + assert(GetPayloadType(payload) == Error); + return reinterpret_cast(payload ^ 1); + } + + } // namespace detail + + // Implementation of Result + template + Result::Result(T *success) + : mPayload(detail::MakePayload(success, detail::Success)) + { + } + + template + Result::Result(std::unique_ptr error) + : mPayload(detail::MakePayload(error.release(), detail::Error)) + { + } + + template + template + requires std::same_as || std::derived_from + Result::Result(Result &&other) + : mPayload(other.mPayload) + { + other.mPayload = detail::kEmptyPayload; + } + + template + template + requires std::same_as || std::derived_from + Result &Result::operator=(Result &&other) + { + assert(mPayload == detail::kEmptyPayload); + mPayload = other.mPayload; + other.mPayload = detail::kEmptyPayload; + return *this; + } + + template + Result::~Result() + { + assert(mPayload == detail::kEmptyPayload); + } + + template + bool Result::IsError() const + { + return detail::GetPayloadType(mPayload) == detail::Error; + } + + template + bool Result::IsSuccess() const + { + return detail::GetPayloadType(mPayload) == detail::Success; + } + + template + T *Result::AcquireSuccess() + { + T *success = detail::GetSuccessFromPayload(mPayload); + mPayload = detail::kEmptyPayload; + return success; + } + + template + std::unique_ptr Result::AcquireError() + { + std::unique_ptr error(detail::GetErrorFromPayload(mPayload)); + mPayload = detail::kEmptyPayload; + return std::move(error); + } + + // Implementation of Result + template + Result::Result(const T *success) + : mPayload(detail::MakePayload(success, detail::Success)) + 
{ + } + + template + Result::Result(std::unique_ptr error) + : mPayload(detail::MakePayload(error.release(), detail::Error)) + { + } + + template + Result::Result(Result &&other) + : mPayload(other.mPayload) + { + other.mPayload = detail::kEmptyPayload; + } + + template + Result &Result::operator=(Result &&other) + { + assert(mPayload == detail::kEmptyPayload); + mPayload = other.mPayload; + other.mPayload = detail::kEmptyPayload; + return *this; + } + + template + Result::~Result() + { + assert(mPayload == detail::kEmptyPayload); + } + + template + bool Result::IsError() const + { + return detail::GetPayloadType(mPayload) == detail::Error; + } + + template + bool Result::IsSuccess() const + { + return detail::GetPayloadType(mPayload) == detail::Success; + } + + template + const T *Result::AcquireSuccess() + { + T *success = detail::GetSuccessFromPayload(mPayload); + mPayload = detail::kEmptyPayload; + return success; + } + + template + std::unique_ptr Result::AcquireError() + { + std::unique_ptr error(detail::GetErrorFromPayload(mPayload)); + mPayload = detail::kEmptyPayload; + return std::move(error); + } + + // Implementation of Result, E> + template + constexpr Result, E>::Result(std::nullptr_t) + : Result(Ref(nullptr)) + { + } + + template + template + requires std::convertible_to + Result, E>::Result(Ref &&success) + : mPayload(detail::MakePayload(success.Detach(), detail::Success)) + { + } + + template + template + requires std::convertible_to + Result, E>::Result(const Ref &success) + : Result(Ref(success)) + { + } + + template + Result, E>::Result(std::unique_ptr error) + : mPayload(detail::MakePayload(error.release(), detail::Error)) + { + } + + template + template + requires std::convertible_to + Result, E>::Result(Result, E> &&other) + : mPayload(other.mPayload) + { + other.mPayload = detail::kEmptyPayload; + } + + template + template + requires std::convertible_to + Result, E> &Result, E>::operator=(Result, E> &&other) + { + assert(mPayload == 
detail::kEmptyPayload); + mPayload = other.mPayload; + other.mPayload = detail::kEmptyPayload; + return *this; + } + + template + Result, E>::~Result() + { + assert(mPayload == detail::kEmptyPayload); + } + + template + bool Result, E>::IsError() const + { + return detail::GetPayloadType(mPayload) == detail::Error; + } + + template + bool Result, E>::IsSuccess() const + { + return detail::GetPayloadType(mPayload) == detail::Success; + } + + template + Ref Result, E>::AcquireSuccess() + { + assert(IsSuccess()); + Ref success = AcquireRef(detail::GetSuccessFromPayload(mPayload)); + mPayload = detail::kEmptyPayload; + return success; + } + + template + std::unique_ptr Result, E>::AcquireError() + { + assert(IsError()); + std::unique_ptr error(detail::GetErrorFromPayload(mPayload)); + mPayload = detail::kEmptyPayload; + return std::move(error); + } + + // Implementation of Result + template + Result::Result(T success) + : mPayload(std::move(success)) + { + } + + template + Result::Result(std::unique_ptr error) + : mPayload(std::move(error)) + { + } + + template + Result::~Result() + { + // Happens if you ignore a result. 
+ assert(std::holds_alternative(mPayload)); + } + + template + Result::Result(Result &&other) + { + *this = std::move(other); + } + + template + Result &Result::operator=(Result &&other) + { + assert(std::holds_alternative(mPayload)); + std::swap(mPayload, other.mPayload); + return *this; + } + + template + bool Result::IsError() const + { + return std::holds_alternative>(mPayload); + } + + template + bool Result::IsSuccess() const + { + return std::holds_alternative(mPayload); + } + + template + T Result::AcquireSuccess() + { + assert(IsSuccess()); + auto payload = std::move(mPayload); + mPayload = {}; + return std::move(std::get(payload)); + } + + template + std::unique_ptr Result::AcquireError() + { + assert(IsError()); + auto payload = std::move(mPayload); + mPayload = {}; + return std::move(std::get>(payload)); + } +} diff --git a/src/common/utility.hpp b/src/common/utility.hpp index 470f07221..fab9e355d 100644 --- a/src/common/utility.hpp +++ b/src/common/utility.hpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #ifndef TR_UNLIKELY @@ -32,6 +33,25 @@ public: \ private: \ void *operator new(size_t) = delete +template +using Ref = std::shared_ptr; + +template +inline Ref AcquireRef(T *ptr) +{ + auto ref = std::shared_ptr(ptr, [](T *p) + { delete p; }); + + // If T derives from std::enable_shared_from_this, set the weak_from_this pointer. + if constexpr (std::derived_from>) + { + ref->weak_from_this() = ref; + } + + // Return the shared pointer. + return ref; +} + /** * Shared reference is a template class that holds the shared pointer of a type. * @@ -264,4 +284,20 @@ namespace transmute::common template concept derived_from = std::derived_from; #endif + + inline std::pair GetEnvironmentVar(const char *variableName) + { + char *value = getenv(variableName); + return value == nullptr ? 
std::make_pair(std::string(), false) + : std::make_pair(std::string(value), true); + } + + inline bool SetEnvironmentVar(const char *variableName, const char *value) + { + if (value == nullptr) + { + return unsetenv(variableName) == 0; + } + return setenv(variableName, value, 1) == 0; + } } diff --git a/src/common/wgsl/enums.h b/src/common/wgsl/enums.h new file mode 100644 index 000000000..36185006e --- /dev/null +++ b/src/common/wgsl/enums.h @@ -0,0 +1,59 @@ +// Copyright 2025 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include + +namespace wgsl +{ + enum class CoreDiagnosticRule : uint8_t + { + kUndefined, + kDerivativeUniformity, + kSubgroupUniformity, + }; + + enum class Extension : uint8_t + { + kUndefined, + kChromiumDisableUniformityAnalysis, + kChromiumExperimentalBarycentricCoord, + kChromiumExperimentalDynamicBinding, + kChromiumExperimentalFramebufferFetch, + kChromiumExperimentalImmediate, + kChromiumExperimentalPixelLocal, + kChromiumExperimentalSubgroupMatrix, + kChromiumInternalGraphite, + kChromiumInternalInputAttachments, + kClipDistances, + kDualSourceBlending, + kF16, + kPrimitiveIndex, + kSubgroups, + }; +} diff --git a/src/renderer/content_renderer.cpp b/src/renderer/content_renderer.cpp index bade263c3..991fcc2b0 100644 --- a/src/renderer/content_renderer.cpp +++ b/src/renderer/content_renderer.cpp @@ -6,6 +6,9 @@ #include "./content_renderer.hpp" #include "./render_api.hpp" +#include "command_buffers/base.hpp" +#include "renderer/gles/context_app.hpp" +#include "utility.hpp" namespace renderer { @@ -47,6 +50,7 @@ namespace renderer , targetFrameRate(constellation->renderer->clientDefaultFrameRate) , glContext(nullptr) , glContextForBackup(nullptr) + , context_webgl_(nullptr) { assert(xrDevice != nullptr); stereoFrameForBackup = make_unique(true, 0xf); @@ -78,6 +82,12 @@ namespace renderer } } + void TrContentRenderer::initialize() + { + Ref self = shared_from_this(); + context_webgl_ = AcquireRef(new 
TrContextWebGL(self)); + } + void TrContentRenderer::onCommandBuffersExecuting() { lastFrameHasOutOfMemoryError = false; @@ -166,6 +176,12 @@ namespace renderer // such as `defaultCommandBufferRequests` or `stereoFramesList`, otherwise it will be deleted in this function. void TrContentRenderer::onCommandBufferRequestReceived(TrCommandBufferBase *req) { + // Send the command buffer to ContextWebGL if it is the WebGL-specific command buffer. + if (!commandbuffers::CommandTypes::IsXRFrameControl(req->type)) + { + context_webgl_->receiveIncomingCall(dynamic_cast(*req)); + } + if (!req->renderingInfo.isValid() && !commandbuffers::CommandTypes::IsXRFrameControl(req->type)) { unique_lock lock(commandBufferRequestsMutex); diff --git a/src/renderer/content_renderer.hpp b/src/renderer/content_renderer.hpp index 4ee88e213..b6460308e 100644 --- a/src/renderer/content_renderer.hpp +++ b/src/renderer/content_renderer.hpp @@ -4,6 +4,7 @@ #include #include #include +#include #include #include #include @@ -12,11 +13,14 @@ #include #include #include + #include #include - -#include "./gles/context_storage.hpp" -#include "./render_api.hpp" +#include +#include +#include +#include +#include using namespace std; using namespace commandbuffers; @@ -64,19 +68,27 @@ namespace renderer * @param constellation The constellation that the content belongs to. * @return The created content renderer. 
*/ - static inline std::shared_ptr Make(std::shared_ptr content, - uint8_t contextId, - TrConstellation *constellation) + static inline Ref Make(Ref content, + uint8_t contextId, + TrConstellation *constellation) { assert(content != nullptr); assert(contextId >= commandbuffers::MinimumContextId); - return std::make_shared(content, contextId, constellation); + + auto renderer = std::make_shared(content, contextId, constellation); + renderer->initialize(); + return renderer; } public: - TrContentRenderer(std::shared_ptr content, uint8_t contextId, TrConstellation *constellation); + TrContentRenderer(Ref content, uint8_t contextId, TrConstellation *constellation); ~TrContentRenderer(); + /** + * Initialize the content renderer, create the ContextWebGL, and initialize the render passes. + */ + void initialize(); + public: // public lifecycle void onCommandBuffersExecuting(); void onCommandBuffersExecuted(); @@ -85,7 +97,7 @@ namespace renderer bool sendCommandBufferResponse(TrCommandBufferResponse &res); // Returns the current using GL context. ContextGLApp *getContextGL() const; - inline shared_ptr getContent() const + inline Ref getContent() const { return content.lock(); } @@ -93,6 +105,19 @@ namespace renderer TrRenderer &getRendererRef() const; public: + Ref renderResource() const + { + return render_resource_; + } + Ref opaqueRenderPass() const + { + return opaque_renderpass_; + } + Ref transparentRenderPass() const + { + return transparent_renderpass_; + } + /** * Dispatch a command buffer request. 
* @@ -206,6 +231,13 @@ namespace renderer std::chrono::milliseconds frameDuration = std::chrono::milliseconds(0); std::chrono::milliseconds maxFrameDuration = std::chrono::milliseconds(0); + private: + Ref context_webgl_; + Ref render_resource_; + Ref opaque_renderpass_; + Ref transparent_renderpass_; + Ref offscreen_renderpass_; + private: // frame rate control uint32_t targetFrameRate; }; diff --git a/src/renderer/context_webgl.cpp b/src/renderer/context_webgl.cpp new file mode 100644 index 000000000..750687470 --- /dev/null +++ b/src/renderer/context_webgl.cpp @@ -0,0 +1,1169 @@ +#include + +#include +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + namespace details + { + ObjectBase::ObjectBase(WebGLuint id) + : id(id) + { + } + + bool ObjectBase::isTexture() const + { + return false; + } + + bool ObjectBase::isBuffer() const + { + return false; + } + + bool ObjectBase::isFramebuffer() const + { + return false; + } + + bool ObjectBase::isRenderbuffer() const + { + return false; + } + + bool ObjectBase::isVertexArrayObject() const + { + return false; + } + + string ObjectBase::toString() const + { + return to_string(id); + } + + void ObjectBase::set(WebGLuint id) + { + this->id = id; + } + + void BindableObject::setTarget(const ObjectTargetBase &target) + { + this->target = target.value(); + } + + Shader::Shader(WebGLuint id, WebGLenum type) + : ObjectBase(id) + , type(type) + { + assert(type == WEBGL_VERTEX_SHADER || + type == WEBGL_FRAGMENT_SHADER); + } + + string Shader::toString() const + { + string type_str = "Unknown"; + if (type == WEBGL_VERTEX_SHADER) + type_str = "Vertex"; + else if (type == WEBGL_FRAGMENT_SHADER) + type_str = "Fragment"; + return "Shader(" + type_str + " id=" + to_string(id) + ")"; + } + + void Uniforms::set(WebGLint loc, WebGLfloat v0) + { + (*this)[loc] = SingleFloatValue{{v0}}; + } + + void Uniforms::set(WebGLint loc, WebGLfloat v0, WebGLfloat v1) + { + 
(*this)[loc] = TwoFloatValue{{v0, v1}}; + } + + void Uniforms::set(WebGLint loc, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2) + { + (*this)[loc] = ThreeFloatValue{{v0, v1, v2}}; + } + + void Uniforms::set(WebGLint loc, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2, WebGLfloat v3) + { + (*this)[loc] = FourFloatValue{{v0, v1, v2, v3}}; + } + + void Uniforms::set(WebGLint loc, const FloatValues &values) + { + (*this)[loc] = values; + } + + void Uniforms::set(WebGLint loc, WebGLint v0) + { + (*this)[loc] = SingleIntValue{{v0}}; + } + + void Uniforms::set(WebGLint loc, WebGLint v0, WebGLint v1) + { + (*this)[loc] = TwoIntValue{{v0, v1}}; + } + + void Uniforms::set(WebGLint loc, WebGLint v0, WebGLint v1, WebGLint v2) + { + (*this)[loc] = ThreeIntValue{{v0, v1, v2}}; + } + + void Uniforms::set(WebGLint loc, WebGLint v0, WebGLint v1, WebGLint v2, WebGLint v3) + { + (*this)[loc] = FourIntValue{{v0, v1, v2, v3}}; + } + + void Uniforms::set(WebGLint loc, const IntValues &values) + { + (*this)[loc] = values; + } + + void Uniforms::set(WebGLint loc, WebGLuint v0) + { + (*this)[loc] = SingleUintValue{{v0}}; + } + + void Uniforms::set(WebGLint loc, WebGLuint v0, WebGLuint v1) + { + (*this)[loc] = TwoUintValue{{v0, v1}}; + } + + void Uniforms::set(WebGLint loc, WebGLuint v0, WebGLuint v1, WebGLuint v2) + { + (*this)[loc] = ThreeUintValue{{v0, v1, v2}}; + } + + void Uniforms::set(WebGLint loc, WebGLuint v0, WebGLuint v1, WebGLuint v2, WebGLuint v3) + { + (*this)[loc] = FourUintValue{{v0, v1, v2, v3}}; + } + + void Uniforms::set(WebGLint loc, const UintValues &values) + { + (*this)[loc] = values; + } + + Program::Program(WebGLuint id) + : ObjectBase(id) + , vertexShader(nullptr) + , fragmentShader(nullptr) + { + } + + string Texture::toString() const + { + string target_str = "Unknown"; + if (target == WEBGL_TEXTURE_2D) + target_str = "2D"; + else if (target == WEBGL2_TEXTURE_2D_ARRAY) + target_str = "2D[]"; + else if (target == WEBGL2_TEXTURE_3D) + target_str = "3D"; + else if 
(target == WEBGL_TEXTURE_CUBE_MAP) + target_str = "CubeMap"; + + return "Texture(" + target_str + " id=" + to_string(id) + ")"; + } + + void Texture::setSize(WebGLsizei width, WebGLsizei height, WebGLsizei depth) + { + size[0] = width; + size[1] = height; + size[2] = depth; + } + } + + TrContextWebGL::TrContextWebGL(Ref content_renderer) + : content_renderer_(content_renderer) + { + } + + TrContextWebGL::~TrContextWebGL() + { + } + + void TrContextWebGL::receiveIncomingCall(const TrCommandBufferRequest &req) + { + switch (req.type) + { + // Textures + case COMMAND_BUFFER_ACTIVE_TEXTURE_REQ: + { + const auto &typed_req = To(req); + glActiveTexture(typed_req.activeUnit); + break; + } + case COMMAND_BUFFER_BIND_TEXTURE_REQ: + { + const auto &typed_req = To(req); + glBindTexture(typed_req.target, typed_req.texture); + break; + } + case COMMAND_BUFFER_COPY_TEXTURE_IMAGE_2D_REQ: + { + const auto &typed_req = To(req); + glCopyTexImage2D( + typed_req.target, + typed_req.internalFormat, + typed_req.level, + typed_req.x, + typed_req.y, + typed_req.width, + typed_req.height, + typed_req.border); + break; + } + case COMMAND_BUFFER_COPY_TEXTURE_SUB_IMAGE_2D_REQ: + { + const auto &typed_req = To(req); + glCopyTexSubImage2D( + typed_req.target, + typed_req.level, + typed_req.xoffset, + typed_req.yoffset, + typed_req.x, + typed_req.y, + typed_req.width, + typed_req.height); + break; + } + case COMMAND_BUFFER_DELETE_TEXTURE_REQ: + { + const auto &typed_req = To(req); + glDeleteTextures(1, (const WebGLuint *)&typed_req.texture); + break; + } + case COMMAND_BUFFER_CREATE_TEXTURE_REQ: + { + glCreateTypedObject(textures_, req); + break; + } + case COMMAND_BUFFER_TEXTURE_IMAGE_2D_REQ: + { + const auto &typed_req = To(req); + glTexImage2D( + typed_req.target, + typed_req.level, + typed_req.internalformat, + typed_req.width, + typed_req.height, + typed_req.border, + typed_req.format, + typed_req.type, + typed_req.pixels); + break; + } + case COMMAND_BUFFER_TEXTURE_IMAGE_3D_REQ: + { + 
const auto &typed_req = To(req); + glTexImage3D( + typed_req.target, + typed_req.level, + typed_req.internalformat, + typed_req.width, + typed_req.height, + typed_req.depth, + typed_req.border, + typed_req.format, + typed_req.type, + typed_req.pixels); + break; + } + case COMMAND_BUFFER_TEXTURE_PARAMETERI_REQ: + { + const auto &typed_req = To(req); + glTexParameteri(typed_req.target, typed_req.pname, typed_req.param); + break; + } + case COMMAND_BUFFER_TEXTURE_PARAMETERF_REQ: + { + const auto &typed_req = To(req); + glTexParameterf(typed_req.target, typed_req.pname, typed_req.param); + break; + } + case COMMAND_BUFFER_TEXTURE_STORAGE_2D_REQ: + { + const auto &typed_req = To(req); + glTexStorage2D( + typed_req.target, + typed_req.levels, + typed_req.internalformat, + typed_req.width, + typed_req.height); + break; + } + case COMMAND_BUFFER_TEXTURE_STORAGE_3D_REQ: + { + const auto &typed_req = To(req); + glTexStorage3D( + typed_req.target, + typed_req.levels, + typed_req.internalformat, + typed_req.width, + typed_req.height, + typed_req.depth); + break; + } + case COMMAND_BUFFER_TEXTURE_SUB_IMAGE_2D_REQ: + { + const auto &typed_req = To(req); + glTexSubImage2D( + typed_req.target, + typed_req.level, + typed_req.xoffset, + typed_req.yoffset, + typed_req.width, + typed_req.height, + typed_req.format, + typed_req.type, + typed_req.pixels); + break; + } + case COMMAND_BUFFER_TEXTURE_SUB_IMAGE_3D_REQ: + { + const auto &typed_req = To(req); + glTexSubImage3D( + typed_req.target, + typed_req.level, + typed_req.xoffset, + typed_req.yoffset, + typed_req.zoffset, + typed_req.width, + typed_req.height, + typed_req.depth, + typed_req.format, + typed_req.type, + typed_req.pixels); + break; + } + + // Rendering + case COMMAND_BUFFER_CLEAR_REQ: + { + const auto &typed_req = To(req); + glClear(typed_req.mask); + break; + } + case COMMAND_BUFFER_CLEAR_BUFFERIV_REQ: + { + const auto &typed_req = To(req); + glClearBufferiv(typed_req.buffer, + typed_req.drawbuffer, + 
typed_req.values.data()); + break; + } + case COMMAND_BUFFER_CLEAR_BUFFERUIV_REQ: + { + const auto &typed_req = To(req); + glClearBufferuiv(typed_req.buffer, + typed_req.drawbuffer, + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_CLEAR_BUFFERFI_REQ: + { + const auto &typed_req = To(req); + glClearBufferfi(typed_req.buffer, + typed_req.drawbuffer, + typed_req.depth, + typed_req.stencil); + break; + } + case COMMAND_BUFFER_CLEAR_BUFFERFV_REQ: + { + const auto &typed_req = To(req); + glClearBufferfv(typed_req.buffer, + typed_req.drawbuffer, + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_CLEAR_COLOR_REQ: + { + const auto &typed_req = To(req); + glClearColor(typed_req.r, typed_req.g, typed_req.b, typed_req.a); + break; + } + case COMMAND_BUFFER_CLEAR_DEPTH_REQ: + { + const auto &typed_req = To(req); + glClearDepth(typed_req.depth); + break; + } + case COMMAND_BUFFER_CLEAR_STENCIL_REQ: + { + const auto &typed_req = To(req); + glClearStencil(typed_req.stencil); + break; + } + + // Frame Buffers + case COMMAND_BUFFER_BIND_FRAMEBUFFER_REQ: + { + const auto &typed_req = To(req); + glBindFramebuffer(typed_req.target, typed_req.framebuffer); + break; + } + case COMMAND_BUFFER_BIND_RENDERBUFFER_REQ: + { + const auto &typed_req = To(req); + glBindRenderbuffer(typed_req.target, typed_req.renderbuffer); + break; + } + case COMMAND_BUFFER_BLIT_FRAMEBUFFER_REQ: + { + const auto &typed_req = To(req); + glBlitFramebuffer( + typed_req.srcX0, + typed_req.srcY0, + typed_req.srcX1, + typed_req.srcY1, + typed_req.dstX0, + typed_req.dstY0, + typed_req.dstX1, + typed_req.dstY1, + typed_req.mask, + typed_req.filter); + break; + } + case COMMAND_BUFFER_CHECK_FRAMEBUFFER_STATUS_REQ: + { + const auto &typed_req = To(req); + glCheckFramebufferStatus(typed_req.target); + break; + } + case COMMAND_BUFFER_DELETE_FRAMEBUFFER_REQ: + { + const auto &typed_req = To(req); + glDeleteFramebuffers(1, &typed_req.framebuffer); + break; + } + case 
COMMAND_BUFFER_DELETE_RENDERBUFFER_REQ: + { + const auto &typed_req = To(req); + glDeleteRenderbuffers(1, &typed_req.renderbuffer); + break; + } + case COMMAND_BUFFER_DRAW_BUFFERS_REQ: + { + const auto &typed_req = To(req); + glDrawBuffers(typed_req.n, typed_req.bufs); + break; + } + case COMMAND_BUFFER_FRAMEBUFFER_RENDERBUFFER_REQ: + { + const auto &typed_req = To(req); + glFramebufferRenderbuffer(typed_req.target, + typed_req.attachment, + typed_req.renderbufferTarget, + typed_req.renderbuffer); + break; + } + case COMMAND_BUFFER_FRAMEBUFFER_TEXTURE2D_REQ: + { + const auto &typed_req = To(req); + glFramebufferTexture2D(typed_req.target, + typed_req.attachment, + typed_req.textarget, + typed_req.texture, + typed_req.level); + break; + } + case COMMAND_BUFFER_FRAMEBUFFER_TEXTURE_LAYER_REQ: + { + const auto &typed_req = To(req); + glFramebufferTextureLayer(typed_req.target, + typed_req.attachment, + typed_req.texture, + typed_req.level, + typed_req.layer); + break; + } + case COMMAND_BUFFER_CREATE_FRAMEBUFFER_REQ: + { + glCreateTypedObject(framebuffers_, req); + break; + } + case COMMAND_BUFFER_CREATE_RENDERBUFFER_REQ: + { + glCreateTypedObject(renderbuffers_, req); + break; + } + case COMMAND_BUFFER_GENERATE_MIPMAP_REQ: + { + const auto &typed_req = To(req); + glGenerateMipmap(typed_req.target); + break; + } + case COMMAND_BUFFER_RENDERBUFFER_STORAGE_REQ: + { + const auto &typed_req = To(req); + glRenderbufferStorage(typed_req.target, + typed_req.internalformat, + typed_req.width, + typed_req.height); + break; + } + case COMMAND_BUFFER_RENDERBUFFER_STORAGE_MULTISAMPLE_REQ: + { + const auto &typed_req = To(req); + glRenderbufferStorageMultisample(typed_req.target, + typed_req.samples, + typed_req.internalformat, + typed_req.width, + typed_req.height); + break; + } + + // Shaders + case COMMAND_BUFFER_BIND_ATTRIB_LOCATION_REQ: + { + const auto &typed_req = To(req); + glBindAttribLocation(typed_req.program, + typed_req.attribIndex, + typed_req.attribName.c_str()); + 
break; + } + case COMMAND_BUFFER_COMPILE_SHADER_REQ: + { + const auto &typed_req = To(req); + glCompileShader(typed_req.shader); + break; + } + case COMMAND_BUFFER_CREATE_PROGRAM_REQ: + { + const auto &typed_req = To(req); + auto index = glCreateProgram(); + programs_[index]->set(req.id); + break; + } + case COMMAND_BUFFER_CREATE_SHADER_REQ: + { + const auto &typed_req = To(req); + auto index = glCreateShader(typed_req.shaderType); + shaders_[index]->set(req.id); + break; + } + case COMMAND_BUFFER_DELETE_PROGRAM_REQ: + { + const auto &typed_req = To(req); + glDeleteProgram(typed_req.clientId); + break; + } + case COMMAND_BUFFER_DELETE_SHADER_REQ: + { + const auto &typed_req = To(req); + glDeleteShader(typed_req.shader); + break; + } + case COMMAND_BUFFER_ATTACH_SHADER_REQ: + { + const auto &typed_req = To(req); + glAttachShader(typed_req.program, typed_req.shader); + break; + } + case COMMAND_BUFFER_DETACH_SHADER_REQ: + { + const auto &typed_req = To(req); + glDetachShader(typed_req.program, typed_req.shader); + break; + } + case COMMAND_BUFFER_LINK_PROGRAM_REQ: + { + const auto &typed_req = To(req); + glLinkProgram(typed_req.clientId); + break; + } + case COMMAND_BUFFER_SHADER_SOURCE_REQ: + { + const auto &typed_req = To(req); + glShaderSource(typed_req.shader, + 1, + (const WebGLchar **)&typed_req.sourceStr, + reinterpret_cast(&typed_req.sourceSize)); + break; + } + case COMMAND_BUFFER_UNIFORM1F_REQ: + { + const auto &typed_req = To(req); + glUniform1f(typed_req.location, typed_req.v0); + break; + } + case COMMAND_BUFFER_UNIFORM2F_REQ: + { + const auto &typed_req = To(req); + glUniform2f(typed_req.location, typed_req.v0, typed_req.v1); + break; + } + case COMMAND_BUFFER_UNIFORM3F_REQ: + { + const auto &typed_req = To(req); + glUniform3f(typed_req.location, typed_req.v0, typed_req.v1, typed_req.v2); + break; + } + case COMMAND_BUFFER_UNIFORM4F_REQ: + { + const auto &typed_req = To(req); + glUniform4f(typed_req.location, typed_req.v0, typed_req.v1, typed_req.v2, 
typed_req.v3); + break; + } + case COMMAND_BUFFER_UNIFORM1I_REQ: + { + const auto &typed_req = To(req); + glUniform1i(typed_req.location, typed_req.v0); + break; + } + case COMMAND_BUFFER_UNIFORM2I_REQ: + { + const auto &typed_req = To(req); + glUniform2i(typed_req.location, typed_req.v0, typed_req.v1); + break; + } + case COMMAND_BUFFER_UNIFORM3I_REQ: + { + const auto &typed_req = To(req); + glUniform3i(typed_req.location, typed_req.v0, typed_req.v1, typed_req.v2); + break; + } + case COMMAND_BUFFER_UNIFORM4I_REQ: + { + const auto &typed_req = To(req); + glUniform4i(typed_req.location, typed_req.v0, typed_req.v1, typed_req.v2, typed_req.v3); + break; + } + case COMMAND_BUFFER_UNIFORM1FV_REQ: + { + const auto &typed_req = To(req); + glUniform1fv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM2FV_REQ: + { + const auto &typed_req = To(req); + glUniform2fv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM3FV_REQ: + { + const auto &typed_req = To(req); + glUniform3fv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM4FV_REQ: + { + const auto &typed_req = To(req); + glUniform4fv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM1IV_REQ: + { + const auto &typed_req = To(req); + glUniform1iv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM2IV_REQ: + { + const auto &typed_req = To(req); + glUniform2iv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM3IV_REQ: + { + const auto &typed_req = To(req); + glUniform3iv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM4IV_REQ: + { + const auto &typed_req = 
To(req); + glUniform4iv(typed_req.location, + typed_req.values.size(), + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM_MATRIX2FV_REQ: + { + const auto &typed_req = To(req); + glUniformMatrix2fv(typed_req.location, + typed_req.values.size(), + typed_req.transpose, + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM_MATRIX3FV_REQ: + { + const auto &typed_req = To(req); + glUniformMatrix3fv(typed_req.location, + typed_req.values.size(), + typed_req.transpose, + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM_MATRIX4FV_REQ: + { + const auto &typed_req = To(req); + glUniformMatrix4fv(typed_req.location, + typed_req.values.size(), + typed_req.transpose, + typed_req.values.data()); + break; + } + case COMMAND_BUFFER_UNIFORM_BLOCK_BINDING_REQ: + { + const auto &typed_req = To(req); + glUniformBlockBinding(typed_req.program, + typed_req.uniformBlockIndex, + typed_req.uniformBlockBinding); + break; + } + case COMMAND_BUFFER_USE_PROGRAM_REQ: + { + const auto &typed_req = To(req); + glUseProgram(typed_req.clientId); + break; + } + case COMMAND_BUFFER_VALIDATE_PROGRAM_REQ: + { + const auto &typed_req = To(req); + glValidateProgram(typed_req.clientId); + break; + } + + // Buffer Objects + case COMMAND_BUFFER_BIND_BUFFER_REQ: + { + const auto &typed_req = To(req); + glBindBuffer(typed_req.target, typed_req.buffer); + break; + } + case COMMAND_BUFFER_BIND_BUFFER_BASE_REQ: + { + const auto &typed_req = To(req); + glBindBufferBase(typed_req.target, + typed_req.index, + typed_req.buffer); + break; + } + case COMMAND_BUFFER_BIND_BUFFER_RANGE_REQ: + { + const auto &typed_req = To(req); + glBindBufferRange(typed_req.target, + typed_req.index, + typed_req.buffer, + typed_req.offset, + typed_req.size); + break; + } + case COMMAND_BUFFER_BUFFER_DATA_REQ: + { + const auto &typed_req = To(req); + glBufferData(typed_req.target, + typed_req.size, + typed_req.data, + typed_req.usage); + break; + } + case 
COMMAND_BUFFER_BUFFER_SUB_DATA_REQ: + { + const auto &typed_req = To(req); + glBufferSubData(typed_req.target, + typed_req.offset, + typed_req.size, + typed_req.data); + break; + } + case COMMAND_BUFFER_DELETE_BUFFER_REQ: + { + const auto &typed_req = To(req); + glDeleteBuffers(1, &typed_req.buffer); + break; + } + case COMMAND_BUFFER_DISABLE_VERTEX_ATTRIB_ARRAY_REQ: + { + const auto &typed_req = To(req); + glDisableVertexAttribArray(typed_req.location); + break; + } + case COMMAND_BUFFER_DRAW_ARRAYS_REQ: + { + const auto &typed_req = To(req); + glDrawArrays(typed_req.mode, + typed_req.first, + typed_req.count); + break; + } + case COMMAND_BUFFER_DRAW_ARRAYS_INSTANCED_REQ: + { + const auto &typed_req = To(req); + glDrawArraysInstanced(typed_req.mode, + typed_req.first, + typed_req.count, + typed_req.instanceCount); + break; + } + case COMMAND_BUFFER_DRAW_ELEMENTS_REQ: + { + const auto &typed_req = To(req); + glDrawElements(typed_req.mode, + typed_req.count, + typed_req.indicesType, + nullptr); + break; + } + case COMMAND_BUFFER_DRAW_ELEMENTS_INSTANCED_REQ: + { + const auto &typed_req = To(req); + glDrawElementsInstanced(typed_req.mode, + typed_req.count, + typed_req.indicesType, + nullptr, + typed_req.instanceCount); + break; + } + case COMMAND_BUFFER_DRAW_RANGE_ELEMENTS_REQ: + { + const auto &typed_req = To(req); + glDrawRangeElements(typed_req.mode, + typed_req.start, + typed_req.end, + typed_req.count, + typed_req.indicesType, + nullptr); + break; + } + case COMMAND_BUFFER_ENABLE_VERTEX_ATTRIB_ARRAY_REQ: + { + const auto &typed_req = To(req); + glEnableVertexAttribArray(typed_req.location); + break; + } + case COMMAND_BUFFER_CREATE_BUFFER_REQ: + { + glCreateTypedObject(buffers_, req); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_1F_REQ: + { + const auto &typed_req = To(req); + glVertexAttrib1f(typed_req.location, + typed_req.v0); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_2F_REQ: + { + const auto &typed_req = To(req); + 
glVertexAttrib2f(typed_req.location, + typed_req.v0, + typed_req.v1); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_3F_REQ: + { + const auto &typed_req = To(req); + glVertexAttrib3f(typed_req.location, + typed_req.v0, + typed_req.v1, + typed_req.v2); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_4F_REQ: + { + const auto &typed_req = To(req); + glVertexAttrib4f(typed_req.location, + typed_req.v0, + typed_req.v1, + typed_req.v2, + typed_req.v3); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_DIVISOR_REQ: + { + const auto &typed_req = To(req); + glVertexAttribDivisor(typed_req.location, + typed_req.divisor); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_POINTER_REQ: + { + const auto &typed_req = To(req); + glVertexAttribPointer(typed_req.location, + typed_req.size, + typed_req.type, + typed_req.normalized, + typed_req.stride, + typed_req.offset); + break; + } + case COMMAND_BUFFER_VERTEX_ATTRIB_IPOINTER_REQ: + { + const auto &typed_req = To(req); + glVertexAttribIPointer(typed_req.location, + typed_req.size, + typed_req.type, + typed_req.stride, + typed_req.offset); + break; + } + + // State Management + case COMMAND_BUFFER_BLEND_COLOR_REQ: + { + const auto &typed_req = To(req); + glBlendColor(typed_req.red, + typed_req.green, + typed_req.blue, + typed_req.alpha); + break; + } + case COMMAND_BUFFER_BLEND_EQUATION_REQ: + { + const auto &typed_req = To(req); + glBlendEquation(typed_req.mode); + break; + } + case COMMAND_BUFFER_BLEND_EQUATION_SEPARATE_REQ: + { + const auto &typed_req = To(req); + glBlendEquationSeparate(typed_req.modeRGB, + typed_req.modeAlpha); + break; + } + case COMMAND_BUFFER_BLEND_FUNC_REQ: + { + const auto &typed_req = To(req); + glBlendFunc(typed_req.sfactor, + typed_req.dfactor); + break; + } + case COMMAND_BUFFER_BLEND_FUNC_SEPARATE_REQ: + { + const auto &typed_req = To(req); + glBlendFuncSeparate(typed_req.srcRGB, + typed_req.dstRGB, + typed_req.srcAlpha, + typed_req.dstAlpha); + break; + } + case COMMAND_BUFFER_COLOR_MASK_REQ: + { + 
const auto &typed_req = To(req); + glColorMask(typed_req.red, + typed_req.green, + typed_req.blue, + typed_req.alpha); + break; + } + case COMMAND_BUFFER_CULL_FACE_REQ: + { + const auto &typed_req = To(req); + glCullFace(typed_req.mode); + break; + } + case COMMAND_BUFFER_DEPTH_FUNC_REQ: + { + const auto &typed_req = To(req); + glDepthFunc(typed_req.func); + break; + } + case COMMAND_BUFFER_DEPTH_MASK_REQ: + { + const auto &typed_req = To(req); + glDepthMask(typed_req.flag); + break; + } + case COMMAND_BUFFER_DEPTH_RANGE_REQ: + { + const auto &typed_req = To(req); + glDepthRange(typed_req.n, typed_req.f); + break; + } + case COMMAND_BUFFER_DISABLE_REQ: + { + const auto &typed_req = To(req); + glDisable(typed_req.cap); + break; + } + case COMMAND_BUFFER_ENABLE_REQ: + { + const auto &typed_req = To(req); + glEnable(typed_req.cap); + break; + } + case COMMAND_BUFFER_FRONT_FACE_REQ: + { + const auto &typed_req = To(req); + glFrontFace(typed_req.mode); + break; + } + case COMMAND_BUFFER_HINT_REQ: + { + const auto &typed_req = To(req); + glHint(typed_req.target, typed_req.mode); + break; + } + case COMMAND_BUFFER_LINE_WIDTH_REQ: + { + const auto &typed_req = To(req); + glLineWidth(typed_req.width); + break; + } + case COMMAND_BUFFER_PIXEL_STOREI_REQ: + { + const auto &typed_req = To(req); + glPixelStorei(typed_req.pname, typed_req.param); + break; + } + case COMMAND_BUFFER_POLYGON_OFFSET_REQ: + { + const auto &typed_req = To(req); + glPolygonOffset(typed_req.factor, typed_req.units); + break; + } + case COMMAND_BUFFER_SET_SCISSOR_REQ: + { + const auto &typed_req = To(req); + glScissor(typed_req.x, typed_req.y, typed_req.width, typed_req.height); + break; + } + case COMMAND_BUFFER_STENCIL_FUNC_REQ: + { + const auto &typed_req = To(req); + glStencilFunc(typed_req.func, typed_req.ref, typed_req.mask); + break; + } + case COMMAND_BUFFER_STENCIL_FUNC_SEPARATE_REQ: + { + const auto &typed_req = To(req); + glStencilFuncSeparate(typed_req.face, + typed_req.func, + typed_req.ref, 
+ typed_req.mask); + break; + } + case COMMAND_BUFFER_STENCIL_MASK_REQ: + { + const auto &typed_req = To(req); + glStencilMask(typed_req.mask); + break; + } + case COMMAND_BUFFER_STENCIL_MASK_SEPARATE_REQ: + { + const auto &typed_req = To(req); + glStencilMaskSeparate(typed_req.face, typed_req.mask); + break; + } + case COMMAND_BUFFER_STENCIL_OP_REQ: + { + const auto &typed_req = To(req); + glStencilOp(typed_req.fail, typed_req.zfail, typed_req.zpass); + break; + } + case COMMAND_BUFFER_STENCIL_OP_SEPARATE_REQ: + { + const auto &typed_req = To(req); + glStencilOpSeparate(typed_req.face, + typed_req.fail, + typed_req.zfail, + typed_req.zpass); + break; + } + case COMMAND_BUFFER_SET_VIEWPORT_REQ: + { + const auto &typed_req = To(req); + glViewport(typed_req.x, typed_req.y, typed_req.width, typed_req.height); + break; + } + + // Vertex Array Objects + case COMMAND_BUFFER_BIND_VERTEX_ARRAY_REQ: + { + const auto &typed_req = To(req); + glBindVertexArray(typed_req.vertexArray); + break; + } + case COMMAND_BUFFER_DELETE_VERTEX_ARRAY_REQ: + { + const auto &typed_req = To(req); + glDeleteVertexArrays(1, &typed_req.vertexArray); + break; + } + case COMMAND_BUFFER_CREATE_VERTEX_ARRAY_REQ: + { + const auto &typed_req = To(req); + WebGLuint vao; + glGenVertexArrays(1, &vao); + break; + } + + default: + break; + } + } + + void TrContextWebGL::debugPrintPrograms(int depth) + { + debugPrintObjects("Programs", programs_, depth); + } + + void TrContextWebGL::debugPrintShaderModules(int depth) + { + debugPrintObjects("Shaders", shaders_, depth); + } + + void TrContextWebGL::debugPrintBuffers(int depth) + { + debugPrintObjects("Buffers", buffers_, depth); + } + + void TrContextWebGL::debugPrintTextures(int depth) + { + debugPrintObjects("Textures", textures_, depth); + } + + void TrContextWebGL::debugPrintFramebuffers(int depth) + { + debugPrintObjects("Framebuffers", framebuffers_, depth); + } + + void TrContextWebGL::debugPrintRenderbuffers(int depth) + { + 
debugPrintObjects("Renderbuffers", renderbuffers_, depth); + } + + void TrContextWebGL::debugPrint() + { + cerr << "[WebGL] Objects Summary:" << endl; + { + debugPrintPrograms(2); + debugPrintShaderModules(2); + debugPrintBuffers(2); + debugPrintTextures(2); + debugPrintFramebuffers(2); + debugPrintRenderbuffers(2); + } + } +} diff --git a/src/renderer/context_webgl.hpp b/src/renderer/context_webgl.hpp new file mode 100644 index 000000000..ad39915e2 --- /dev/null +++ b/src/renderer/context_webgl.hpp @@ -0,0 +1,961 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace renderer +{ + class TrContentRenderer; + + namespace details + { + class ObjectTargetBase + { + public: + ObjectTargetBase(WebGLenum target) + : target_(target) + { + } + + inline WebGLenum value() const + { + return target_; + } + + inline bool operator==(const ObjectTargetBase &rhs) const + { + return target_ == rhs.target_; + } + + struct HashKey + { + size_t operator()(const ObjectTargetBase &t) const noexcept + { + return std::hash{}(t.value()); + } + }; + + protected: + WebGLenum target_; + }; + + class BufferTarget final : public ObjectTargetBase + { + public: + enum + { + kArrayBuffer = WEBGL_ARRAY_BUFFER, + kElementArrayBuffer = WEBGL_ELEMENT_ARRAY_BUFFER, + }; + + BufferTarget(WebGLenum target) + : ObjectTargetBase(target) + { + assert(target_ == kArrayBuffer || + target_ == kElementArrayBuffer); + } + }; + + class TextureTarget final : public ObjectTargetBase + { + public: + enum + { + k2D = WEBGL_TEXTURE_2D, + k3D = WEBGL2_TEXTURE_3D, + k2DArray = WEBGL2_TEXTURE_2D_ARRAY, + }; + + TextureTarget(WebGLenum target) + : ObjectTargetBase(target) + { + assert(target_ == k2D || + target_ == k3D || + target_ == k2DArray); + } + }; + + class FramebufferTarget final : public ObjectTargetBase + { + public: + enum + { + kFramebuffer = WEBGL_FRAMEBUFFER, + kReadFramebuffer = WEBGL2_READ_FRAMEBUFFER, + 
kDrawFramebuffer = WEBGL2_DRAW_FRAMEBUFFER, + }; + + FramebufferTarget(WebGLenum target) + : ObjectTargetBase(target) + { + assert(target_ == kFramebuffer || + target_ == kReadFramebuffer || + target_ == kDrawFramebuffer); + } + }; + + class RenderbufferTarget final : public ObjectTargetBase + { + public: + enum + { + kRenderbuffer = WEBGL_RENDERBUFFER, + }; + + RenderbufferTarget(WebGLenum target) + : ObjectTargetBase(target) + { + assert(target_ == kRenderbuffer); + } + }; + + class ObjectBase + { + public: + ObjectBase() = default; + ObjectBase(WebGLuint id); + virtual ~ObjectBase() = default; + + virtual bool isTexture() const; + virtual bool isBuffer() const; + virtual bool isFramebuffer() const; + virtual bool isRenderbuffer() const; + virtual bool isVertexArrayObject() const; + virtual std::string toString() const; + + void set(WebGLuint id); + WebGLuint id; + }; + + struct BindableObject : public ObjectBase + { + using ObjectBase::ObjectBase; + + public: + void setTarget(const ObjectTargetBase &); + + WebGLenum target; + }; + + class Shader final : public ObjectBase + { + public: + Shader(WebGLuint id, WebGLenum type); + std::string toString() const; + + WebGLenum type; + std::string source; + }; + + using SingleFloatValue = std::array; + using TwoFloatValue = std::array; + using ThreeFloatValue = std::array; + using FourFloatValue = std::array; + using FloatValues = std::vector; + + using SingleIntValue = std::array; + using TwoIntValue = std::array; + using ThreeIntValue = std::array; + using FourIntValue = std::array; + using IntValues = std::vector; + + using SingleUintValue = std::array; + using TwoUintValue = std::array; + using ThreeUintValue = std::array; + using FourUintValue = std::array; + using UintValues = std::vector; + + using UniformValue = std::variant< + SingleFloatValue, + TwoFloatValue, + ThreeFloatValue, + FourFloatValue, + FloatValues, + SingleIntValue, + TwoIntValue, + ThreeIntValue, + FourIntValue, + IntValues, + SingleUintValue, + 
TwoUintValue, + ThreeUintValue, + FourUintValue, + UintValues>; + class Uniforms : public std::unordered_map + { + using std::unordered_map::unordered_map; + + public: + void set(WebGLint loc, WebGLfloat v0); + void set(WebGLint loc, WebGLfloat v0, WebGLfloat v1); + void set(WebGLint loc, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2); + void set(WebGLint loc, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2, WebGLfloat v3); + void set(WebGLint loc, const FloatValues &values); + + void set(WebGLint loc, WebGLint v0); + void set(WebGLint loc, WebGLint v0, WebGLint v1); + void set(WebGLint loc, WebGLint v0, WebGLint v1, WebGLint v2); + void set(WebGLint loc, WebGLint v0, WebGLint v1, WebGLint v2, WebGLint v3); + void set(WebGLint loc, const IntValues &values); + + void set(WebGLint loc, WebGLuint v0); + void set(WebGLint loc, WebGLuint v0, WebGLuint v1); + void set(WebGLint loc, WebGLuint v0, WebGLuint v1, WebGLuint v2); + void set(WebGLint loc, WebGLuint v0, WebGLuint v1, WebGLuint v2, WebGLuint v3); + void set(WebGLint loc, const UintValues &values); + }; + + class Program final : public ObjectBase + { + public: + Program(WebGLuint id); + + Ref vertexShader; + Ref fragmentShader; + Uniforms uniforms; + }; + + class Texture final : public BindableObject + { + using BindableObject::BindableObject; + + public: + bool isTexture() const override + { + return true; + } + std::string toString() const override; + void setSize(WebGLsizei width, WebGLsizei height, WebGLsizei depth = 0); + + WebGLsizei size[3]; + WebGLsizei mipLevels; + WebGLenum internalformat; + WebGLenum compressedInternalformat; + }; + + class Buffer final : public BindableObject + { + using BindableObject::BindableObject; + + public: + bool isBuffer() const override + { + return true; + } + + WebGLsizei size; + }; + + class Renderbuffer final : public BindableObject + { + using BindableObject::BindableObject; + + public: + bool isRenderbuffer() const override + { + return true; + } + }; + + class 
FramebufferAttachment + { + Ref texture; + Ref renderbuffer; + }; + + class Framebuffer final : public BindableObject + { + using BindableObject::BindableObject; + + public: + bool isFramebuffer() const override + { + return true; + } + + Ref colorAttachment; + Ref depthAttachment; + Ref stencilAttachment; + }; + + class VertexArrayObject final : public BindableObject + { + using BindableObject::BindableObject; + + public: + bool isVertexArrayObject() const override + { + return true; + } + }; + + struct Capabilities + { + using Map = std::unordered_map; + + inline void enable(WebGLenum cap) + { + caps_[cap] = true; + } + + inline void disable(WebGLenum cap) + { + caps_[cap] = false; + } + + inline WebGLboolean isEnabled(WebGLenum cap) const + { + auto it = caps_.find(cap); + return it != caps_.end() ? it->second : false; + } + + private: + Map caps_; + }; + } + + class TrContextWebGL + { + public: + TrContextWebGL(Ref content_renderer); + ~TrContextWebGL(); + + void receiveIncomingCall(const commandbuffers::TrCommandBufferRequest &); + + private: + /** + * @brief Convert the request to the given type. + * + * @tparam T The type to convert. + * @param req The request to convert. + * @return const T& The converted request. 
+ */ + template + static const T &To(const commandbuffers::TrCommandBufferRequest &req) + { + return dynamic_cast(req); + } + + // Textures + void glActiveTexture(WebGLenum texture); + void glBindTexture(WebGLenum target, WebGLuint texture); + void glCompressedTexImage2D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei border, + WebGLsizei imageSize, + const WebGLvoid *data); + void glCompressedTexImage3D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLsizei border, + WebGLsizei imageSize, + const WebGLvoid *data); + void glCompressedTexSubImage2D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLsizei width, + WebGLsizei height, + WebGLenum format, + WebGLsizei imageSize, + const WebGLvoid *data); + void glCompressedTexSubImage3D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLint zoffset, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLenum format, + WebGLsizei imageSize, + const WebGLvoid *data); + void glCopyTexImage2D(WebGLenum target, + WebGLenum internalformat, + WebGLint level, + WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height, + WebGLint border); + void glCopyTexSubImage2D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height); + void glDeleteTextures(WebGLsizei n, const WebGLuint *textures); + void glGenTextures(WebGLsizei n, WebGLuint *textures); + void glGetTexParameter(WebGLenum target, WebGLenum pname, WebGLint *params); + WebGLboolean glIsTexture(WebGLuint texture); + void glTexImage2D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei border, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data); + void glTexImage3D(WebGLenum 
target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLsizei border, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data); + void glTexParameterf(WebGLenum target, WebGLenum pname, WebGLfloat param); + void glTexParameteri(WebGLenum target, WebGLenum pname, WebGLint param); + void glTexParameterfv(WebGLenum target, WebGLenum pname, const WebGLfloat *params); + void glTexParameteriv(WebGLenum target, WebGLenum pname, const WebGLint *params); + void glTexStorage2D(WebGLenum target, + WebGLint levels, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height); + void glTexStorage3D(WebGLenum target, + WebGLint levels, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth); + void glTexSubImage2D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLsizei width, + WebGLsizei height, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data); + void glTexSubImage3D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLint zoffset, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data); + + // Rendering + void glClear(WebGLbitfield mask); + void glClearBufferiv(WebGLenum buffer, WebGLint drawbuffer, const WebGLint *value); + void glClearBufferuiv(WebGLenum buffer, WebGLint drawbuffer, const WebGLuint *value); + void glClearBufferfv(WebGLenum buffer, WebGLint drawbuffer, const WebGLfloat *value); + void glClearBufferfi(WebGLenum buffer, WebGLint drawbuffer, WebGLfloat depth, WebGLint stencil); + void glClearColor(WebGLfloat red, WebGLfloat green, WebGLfloat blue, WebGLfloat alpha); + void glClearDepthf(WebGLfloat depth); + void glClearStencil(WebGLint s); + void glFinish(); + void glFlush(); + void glReadBuffer(WebGLenum buffer); + void glReadPixels(WebGLint x, WebGLint y, WebGLsizei width, WebGLsizei height, WebGLenum 
format, WebGLenum type, WebGLvoid *pixels); + + // Frame Buffers + void glBindFramebuffer(WebGLenum target, WebGLuint framebuffer); + void glBindRenderbuffer(WebGLenum target, WebGLuint renderbuffer); + void glBlitFramebuffer(WebGLint srcX0, + WebGLint srcY0, + WebGLint srcX1, + WebGLint srcY1, + WebGLint dstX0, + WebGLint dstY0, + WebGLint dstX1, + WebGLint dstY1, + WebGLbitfield mask, + WebGLenum filter); + void glCheckFramebufferStatus(WebGLenum target); + void glDeleteFramebuffers(WebGLsizei n, const WebGLuint *framebuffers); + void glDeleteRenderbuffers(WebGLsizei n, const WebGLuint *renderbuffers); + void glDrawBuffers(WebGLsizei n, const WebGLenum *buffers); + void glFramebufferRenderbuffer(WebGLenum target, WebGLenum attachment, WebGLenum renderbuffertarget, WebGLuint renderbuffer); + void glFramebufferTexture2D(WebGLenum target, WebGLenum attachment, WebGLenum textarget, WebGLuint texture, WebGLint level); + void glFramebufferTextureLayer(WebGLenum target, WebGLenum attachment, WebGLuint texture, WebGLint level, WebGLint layer); + void glGenFramebuffers(WebGLsizei n, WebGLuint *framebuffers); + void glGenRenderbuffers(WebGLsizei n, WebGLuint *renderbuffers); + void glGenerateMipmap(WebGLenum target); + void glGetFramebufferAttachmentParameteriv(WebGLenum target, WebGLenum attachment, WebGLenum pname, WebGLint *params); + void glGetRenderbufferParameteriv(WebGLenum target, WebGLenum pname, WebGLint *params); + void glInvalidateFramebuffer(WebGLenum target, WebGLsizei n, const WebGLenum *attachments); + void glInvalidateSubFramebuffer(WebGLenum target, + WebGLsizei n, + const WebGLenum *attachments, + WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height); + WebGLboolean glIsFramebuffer(WebGLuint framebuffer); + WebGLboolean glIsRenderbuffer(WebGLuint renderbuffer); + void glRenderbufferStorage(WebGLenum target, WebGLenum internalformat, WebGLsizei width, WebGLsizei height); + void glRenderbufferStorageMultisample(WebGLenum target, + WebGLsizei 
samples, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height); + + // Shaders + void glBindAttribLocation(WebGLuint program, WebGLuint index, const WebGLchar *name); + void glCompileShader(WebGLuint shader); + WebGLuint glCreateProgram(); + WebGLuint glCreateShader(WebGLenum type); + void glDeleteProgram(WebGLuint program); + void glDeleteShader(WebGLuint shader); + void glAttachShader(WebGLuint program, WebGLuint shader); + void glDetachShader(WebGLuint program, WebGLuint shader); + void glGetActiveAttrib(WebGLuint program, + WebGLuint index, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLint *size, + WebGLenum *type, + WebGLchar *name); + void glGetActiveUniform(WebGLuint program, + WebGLuint index, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLint *size, + WebGLenum *type, + WebGLchar *name); + void glGetActiveUniformBlockName(WebGLuint program, + WebGLuint index, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLchar *name); + void glGetActiveUniformBlockiv(WebGLuint program, + WebGLuint index, + WebGLenum pname, + WebGLint *params); + void glGetActiveUniformsiv(WebGLuint program, + WebGLsizei count, + const WebGLuint *uniforms, + WebGLenum pname, + WebGLint *params); + void glGetAttachedShaders(WebGLuint program, WebGLsizei maxCount, WebGLsizei *count, WebGLuint *shaders); + void glGetAttribLocation(WebGLuint program, const WebGLchar *name); + void glGetFragDataLocation(WebGLuint program, const WebGLchar *name); + void glGetProgramBinary(WebGLuint program, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLenum binaryFormat, + WebGLsizei *binaryLength, + WebGLbyte *binary); + void glGetProgramInfoLog(WebGLuint program, WebGLsizei maxLength, WebGLsizei *length, WebGLchar *infoLog); + void glGetProgramiv(WebGLuint program, WebGLenum pname, WebGLint *params); + void glGetShaderInfoLog(WebGLuint shader, WebGLsizei maxLength, WebGLsizei *length, WebGLchar *infoLog); + void glGetShaderPrecisionFormat(WebGLenum shadertype, WebGLenum 
precisiontype, WebGLint *range, WebGLint *precision); + void glGetShaderSource(WebGLuint shader, WebGLsizei maxLength, WebGLsizei *length, WebGLchar *source); + void glGetShaderiv(WebGLuint shader, WebGLenum pname, WebGLint *params); + void glGetUniformfv(WebGLuint program, WebGLuint location, WebGLsizei count, WebGLfloat *params); + void glGetUniformiv(WebGLuint program, WebGLuint location, WebGLsizei count, WebGLint *params); + void glGetUniformuiv(WebGLuint program, WebGLuint location, WebGLsizei count, WebGLuint *params); + void glGetUniformBlockIndex(WebGLuint program, const WebGLchar *name); + void glGetUniformIndices(WebGLuint program, WebGLsizei count, const WebGLchar **names, WebGLuint *indices); + void glGetUniformLocation(WebGLuint program, const WebGLchar *name); + WebGLboolean glIsProgram(WebGLuint program); + WebGLboolean glIsShader(WebGLuint shader); + void glLinkProgram(WebGLuint program); + void glProgramBinary(WebGLuint program, WebGLenum binaryFormat, const WebGLbyte *binary, WebGLsizei binaryLength); + void glProgramParameteri(WebGLuint program, WebGLenum pname, WebGLint param); + void glReleaseShaderCompiler(); + void glShaderBinary(WebGLuint shader, WebGLenum binaryFormat, const WebGLbyte *binary, WebGLsizei binaryLength); + void glShaderSource(WebGLuint shader, WebGLsizei count, const WebGLchar **string, const WebGLint *length); + void glUniform1f(WebGLuint location, WebGLfloat v0); + void glUniform2f(WebGLuint location, WebGLfloat v0, WebGLfloat v1); + void glUniform3f(WebGLuint location, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2); + void glUniform4f(WebGLuint location, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2, WebGLfloat v3); + void glUniform1i(WebGLuint location, WebGLint v0); + void glUniform2i(WebGLuint location, WebGLint v0, WebGLint v1); + void glUniform3i(WebGLuint location, WebGLint v0, WebGLint v1, WebGLint v2); + void glUniform4i(WebGLuint location, WebGLint v0, WebGLint v1, WebGLint v2, WebGLint v3); + void 
glUniform1ui(WebGLuint location, WebGLuint v0); + void glUniform2ui(WebGLuint location, WebGLuint v0, WebGLuint v1); + void glUniform3ui(WebGLuint location, WebGLuint v0, WebGLuint v1, WebGLuint v2); + void glUniform4ui(WebGLuint location, WebGLuint v0, WebGLuint v1, WebGLuint v2, WebGLuint v3); + void glUniform1fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value); + void glUniform2fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value); + void glUniform3fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value); + void glUniform4fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value); + void glUniform1iv(WebGLuint location, WebGLsizei count, const WebGLint *value); + void glUniform2iv(WebGLuint location, WebGLsizei count, const WebGLint *value); + void glUniform3iv(WebGLuint location, WebGLsizei count, const WebGLint *value); + void glUniform4iv(WebGLuint location, WebGLsizei count, const WebGLint *value); + void glUniform1uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value); + void glUniform2uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value); + void glUniform3uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value); + void glUniform4uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value); + void glUniformMatrix2fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix3fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix4fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix2x3fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix3x2fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix2x4fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void 
glUniformMatrix4x2fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix3x4fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformMatrix4x3fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value); + void glUniformBlockBinding(WebGLuint program, WebGLuint uniformBlockIndex, WebGLuint bindingPoint); + void glUseProgram(WebGLuint program); + void glValidateProgram(WebGLuint program); + + // Buffer Objects + void glBindBuffer(WebGLenum target, WebGLuint buffer); + void glBindBufferBase(WebGLenum target, WebGLuint bindingPoint, WebGLuint buffer); + void glBindBufferRange(WebGLenum target, WebGLuint bindingPoint, WebGLuint buffer, WebGLintptr offset, WebGLsizeiptr size); + void glBufferData(WebGLenum target, WebGLsizeiptr size, const WebGLvoid *data, WebGLenum usage); + void glBufferSubData(WebGLenum target, WebGLintptr offset, WebGLsizeiptr size, const WebGLvoid *data); + void glCopyBufferSubData(WebGLenum readTarget, + WebGLenum writeTarget, + WebGLintptr readOffset, + WebGLintptr writeOffset, + WebGLsizeiptr size); + void glDeleteBuffers(WebGLsizei count, const WebGLuint *buffers); + void glDisableVertexAttribArray(WebGLuint index); + void glDrawArrays(WebGLenum mode, WebGLint first, WebGLsizei count); + void glDrawArraysInstanced(WebGLenum mode, WebGLint first, WebGLsizei count, WebGLsizei instanceCount); + void glDrawElements(WebGLenum mode, WebGLsizei count, WebGLenum type, const WebGLvoid *indices); + void glDrawElementsInstanced(WebGLenum mode, WebGLsizei count, WebGLenum type, const WebGLvoid *indices, WebGLsizei instanceCount); + void glDrawRangeElements(WebGLenum mode, WebGLuint start, WebGLuint end, WebGLsizei count, WebGLenum type, const WebGLvoid *indices); + void glEnableVertexAttribArray(WebGLuint index); + void glFlushMappedBufferRange(WebGLenum target, WebGLintptr offset, WebGLsizeiptr size); + void 
glGenBuffers(WebGLsizei n, WebGLuint *buffers); + void glGetBufferParameter(WebGLenum target, WebGLenum pname, WebGLint *params); + void glGetBufferParameteriv(WebGLenum target, WebGLenum pname, WebGLint *params); + void glGetBufferPointerv(WebGLenum target, WebGLenum pname, WebGLvoid **params); + void glGetVertexAttrib(WebGLuint index, WebGLenum pname, WebGLint *params); + void glGetVertexAttribPointerv(WebGLuint index, WebGLenum pname, WebGLvoid **pointer); + WebGLboolean glIsBuffer(WebGLuint buffer); + void glMapBufferRange(WebGLenum target, WebGLintptr offset, WebGLsizeiptr length, WebGLbitfield access); + void glUnmapBuffer(WebGLenum target); + void glVertexAttrib1f(WebGLuint index, WebGLfloat x); + void glVertexAttrib2f(WebGLuint index, WebGLfloat x, WebGLfloat y); + void glVertexAttrib3f(WebGLuint index, WebGLfloat x, WebGLfloat y, WebGLfloat z); + void glVertexAttrib4f(WebGLuint index, WebGLfloat x, WebGLfloat y, WebGLfloat z, WebGLfloat w); + void glVertexAttrib1fv(WebGLuint index, const WebGLfloat *v); + void glVertexAttrib2fv(WebGLuint index, const WebGLfloat *v); + void glVertexAttrib3fv(WebGLuint index, const WebGLfloat *v); + void glVertexAttrib4fv(WebGLuint index, const WebGLfloat *v); + void glVertexAttribDivisor(WebGLuint index, WebGLuint divisor); + void glVertexAttribPointer(WebGLuint index, + WebGLint size, + WebGLenum type, + WebGLboolean normalized, + WebGLsizei stride, + WebGLintptr offset); + void glVertexAttribIPointer(WebGLuint index, + WebGLint size, + WebGLenum type, + WebGLsizei stride, + WebGLintptr offset); + + // State Management + void glBlendColor(WebGLfloat red, WebGLfloat green, WebGLfloat blue, WebGLfloat alpha); + void glBlendEquation(WebGLenum mode); + void glBlendEquationSeparate(WebGLenum modeRGB, WebGLenum modeAlpha); + void glBlendFunc(WebGLenum sfactor, WebGLenum dfactor); + void glBlendFuncSeparate(WebGLenum srcRGB, WebGLenum dstRGB, WebGLenum srcAlpha, WebGLenum dstAlpha); + void glColorMask(WebGLboolean red, 
WebGLboolean green, WebGLboolean blue, WebGLboolean alpha); + void glCullFace(WebGLenum mode); + void glDepthFunc(WebGLenum func); + void glDepthMask(WebGLboolean flag); + void glDepthRangef(WebGLfloat near, WebGLfloat far); + void glDisable(WebGLenum cap); + void glEnable(WebGLenum cap); + void glFrontFace(WebGLenum mode); + void glGet(WebGLenum pname, WebGLint *params); + WebGLenum glGetError(); + void glHint(WebGLenum target, WebGLenum mode); + WebGLboolean glIsEnabled(WebGLenum cap); + void glLineWidth(WebGLfloat width); + void glPixelStorei(WebGLenum pname, WebGLint param); + void glPolygonOffset(WebGLfloat factor, WebGLfloat units); + void glSampleCoverage(WebGLfloat value, WebGLboolean invert); + void glScissor(WebGLint x, WebGLint y, WebGLsizei width, WebGLsizei height); + void glStencilFunc(WebGLenum func, WebGLint ref, WebGLuint mask); + void glStencilFuncSeparate(WebGLenum face, WebGLenum func, WebGLint ref, WebGLuint mask); + void glStencilMask(WebGLuint mask); + void glStencilMaskSeparate(WebGLenum face, WebGLuint mask); + void glStencilOp(WebGLenum fail, WebGLenum zfail, WebGLenum zpass); + void glStencilOpSeparate(WebGLenum face, WebGLenum fail, WebGLenum zfail, WebGLenum zpass); + void glViewport(WebGLint x, WebGLint y, WebGLsizei width, WebGLsizei height); + + // Transform Feedback + void glBeginTransformFeedback(WebGLenum primitiveMode); + void glBindTransformFeedback(WebGLenum target, WebGLuint transformFeedback); + void glDeleteTransformFeedbacks(WebGLsizei n, const WebGLuint *transformFeedbacks); + void glEndTransformFeedback(); + void glGenTransformFeedbacks(WebGLsizei n, WebGLuint *transformFeedbacks); + void glGetTransformFeedbackVarying(WebGLuint program, + WebGLuint index, + WebGLsizei bufSize, + WebGLsizei *length, + WebGLsizei *size, + WebGLenum *type, + WebGLchar *name); + void glIsTransformFeedback(WebGLuint transformFeedback); + void glPauseTransformFeedback(); + void glResumeTransformFeedback(); + void 
glTransformFeedbackVaryings(WebGLuint program, + WebGLsizei count, + const WebGLchar **varyings, + WebGLenum bufferMode); + + // Utility + void glGetInternalformativ(WebGLenum target, WebGLenum internalformat, WebGLsizei propCount, WebGLenum *props, WebGLint *params); + void glGetString(WebGLenum pname, WebGLchar *params); + void glGetStringi(WebGLenum pname, WebGLuint index); + + // Queries + void glBeginQuery(WebGLenum target, WebGLuint id); + void glDeleteQueries(WebGLsizei n, const WebGLuint *ids); + void glEndQuery(WebGLenum target); + void glGenQueries(WebGLsizei n, WebGLuint *ids); + void glGetQueryObjectuiv(WebGLuint id, WebGLenum pname, WebGLuint *params); + void glGetQueryiv(WebGLenum target, WebGLenum pname, WebGLint *params); + void glIsQuery(WebGLuint id); + + // Syncing + void glClientWaitSync(WebGLsync sync, WebGLbitfield flags, WebGLuint64 timeout); + void glDeleteSync(WebGLsync sync); + WebGLsync glFenceSync(WebGLenum condition, WebGLbitfield flags); + void glGetSynciv(WebGLsync sync, WebGLenum pname, WebGLsizei bufSize, WebGLsizei *length, WebGLint *values); + WebGLboolean glIsSync(WebGLsync sync); + void glWaitSync(WebGLsync sync, WebGLbitfield flags, WebGLuint64 timeout); + + // Vertex Array Objects + void glBindVertexArray(WebGLuint array); + void glDeleteVertexArrays(WebGLsizei n, const WebGLuint *arrays); + void glGenVertexArrays(WebGLsizei n, WebGLuint *arrays); + WebGLboolean glIsVertexArray(WebGLuint array); + + // Samplers + void glBindSampler(WebGLuint unit, WebGLuint sampler); + void glDeleteSamplers(WebGLsizei n, const WebGLuint *samplers); + void glGenSamplers(WebGLsizei n, WebGLuint *samplers); + void glGetSamplerParameter(WebGLuint sampler, WebGLenum pname, WebGLint *params); + void glIsSampler(WebGLuint sampler); + void glSamplerParameter(WebGLuint sampler, WebGLenum pname, WebGLint param); + + // Internal Utilities + template + requires std::is_base_of_v + void glGenTypedObjects(std::vector> &source_list, + WebGLsizei n, + 
WebGLuint *generated_list) + { + if (n <= 0 || generated_list == nullptr) + return; + + size_t old_size = source_list.size(); + source_list.resize(old_size + static_cast(n)); + + for (size_t i = 0; i < n; i++) + { + Ref object = AcquireRef(new ObjectType()); + source_list[old_size + i] = object; + generated_list[i] = object->id; + } + } + + /** + * A convenience function to create a WebGL object such as buffer, texture, framebuffer, renderbuffer in this context implementation. + * + * @tparam ReqType + * @param source_list A list of WebGL objects to store the created object. + * @param req A request to create a WebGL object. + */ + template + requires std::is_base_of_v + void glCreateTypedObject(std::vector> &source_list, + const commandbuffers::TrCommandBufferRequest &req) + { + const auto &typed_req = To(req); + size_t size_before = source_list.size(); + { + WebGLint obj; + glGenTypedObjects(source_list, 1, (WebGLuint *)&obj); + assert(obj == 0 && "object must be the initial object"); + } + + Ref created_object = source_list[size_before]; + assert(created_object != nullptr && "object must be created"); + created_object->set(req.id); + debugPrint(); + } + + template + requires((sizeof...(Args) > 0) && (((std::is_same_v) && ...) || + ((std::is_same_v) && ...) || + ((std::is_same_v) && ...))) + void glSetUniform(WebGLuint location, Args... args) + { + if (current_program_ == nullptr) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + current_program_->uniforms.set(location, args...); + } + + // Debug Utilities + template + requires std::is_base_of_v + void debugPrintObjects(const std::string &label, + const std::vector> &list, + int depth = 0) + { + const std::string prefix = std::string(depth, ' '); + + if (depth > 0) // Not printing [WebGL] prefix for nested. 
+ cerr << prefix; + else + cerr << "[WebGL] "; + cerr << label << ": {"; + + if (list.empty()) + { + cerr << " (empty) }"; + } + else + { + cerr << endl; + int n = 0; + for (const auto &obj : list) + cerr << prefix << " ." << n++ << " = " << obj->toString() << endl; + cerr << prefix << "}"; + } + cerr << endl; + } + + void debugPrintPrograms(int depth = 0); + void debugPrintShaderModules(int depth = 0); + void debugPrintBuffers(int depth = 0); + void debugPrintTextures(int depth = 0); + void debugPrintFramebuffers(int depth = 0); + void debugPrintRenderbuffers(int depth = 0); + void debugPrint(); + + Ref content_renderer_; + + template + class ObjectList : public std::vector> + { + using Base = std::vector>; + using Base::Base; + + public: + Ref get(WebGLuint id) + { + for (const auto &obj : *this) + { + if (obj->id == id) + return obj; + } + return nullptr; + } + + bool has(WebGLuint id) + { + return get(id) != nullptr; + } + + bool remove(WebGLuint id) + { + for (auto it = this->begin(); it != this->end(); it++) + { + if ((*it)->id == id) + { + this->erase(it); + return true; + } + } + return false; + } + }; + + ObjectList shaders_; + ObjectList programs_; + + ObjectList buffers_; + ObjectList textures_; + ObjectList framebuffers_; + ObjectList renderbuffers_; + ObjectList vertex_array_objects_; + + template + class BindingMap : public std::unordered_map, typename Target::HashKey> + { + using Base = std::unordered_map, typename Target::HashKey>; + using Base::Base; + }; + using BufferBindingMap = BindingMap; + using TextureBindingMap = BindingMap; + using FramebufferBindingMap = BindingMap; + using RenderbufferBindingMap = BindingMap; + + BufferBindingMap buffer_bindings_; + TextureBindingMap texture_bindings_; + FramebufferBindingMap framebuffer_bindings_; + RenderbufferBindingMap renderbuffer_bindings_; + + Ref current_vertex_array_object_; + Ref current_program_; + + WebGLenum last_error_ = WEBGL_NO_ERROR; + details::Capabilities caps_; + WebGLenum 
active_texture_unit_ = WEBGL_TEXTURE0; + + WebGLfloat clear_color_[4]; + WebGLfloat clear_depth_; + WebGLint clear_stencil_; + + WebGLfloat blend_color_[4]; + WebGLenum blend_equation_rgb_; + WebGLenum blend_equation_alpha_; + WebGLenum blend_sfactor_rgb_; + WebGLenum blend_dfactor_rgb_; + WebGLenum blend_sfactor_alpha_; + WebGLenum blend_dfactor_alpha_; + WebGLboolean color_mask_[4]; + + WebGLenum cull_face_; + WebGLenum front_face_; + WebGLenum depth_func_; + WebGLboolean depth_mask_; + WebGLfloat depth_range_[2]; + + WebGLenum stencil_func_; + WebGLint stencil_ref_; + WebGLuint stencil_mask_; + + WebGLfloat line_width_; + WebGLfloat polygon_offset_factor_; + WebGLfloat polygon_offset_units_; + WebGLfloat sample_coverage_value_; + WebGLboolean sample_coverage_invert_; + WebGLint scissor_box_[4]; + WebGLint viewport_[4]; + }; +} diff --git a/src/renderer/context_webgl_buffers.cpp b/src/renderer/context_webgl_buffers.cpp new file mode 100644 index 000000000..00dacbf86 --- /dev/null +++ b/src/renderer/context_webgl_buffers.cpp @@ -0,0 +1,234 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBindBuffer(WebGLenum target, WebGLuint id) + { + auto buffer = buffers_.get(id); + if (!buffer) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + auto buffer_target = details::BufferTarget(target); + buffer_bindings_[buffer_target] = buffer; + } + + void TrContextWebGL::glBindBufferBase(WebGLenum target, + WebGLuint bindingPoint, + WebGLuint buffer) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glBindBufferRange(WebGLenum target, + WebGLuint bindingPoint, + WebGLuint buffer, + WebGLintptr offset, + WebGLsizeiptr size) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glBufferData(WebGLenum target, + WebGLsizeiptr size, + const WebGLvoid *data, + WebGLenum usage) + { + /* TODO(yorkie): implement */ + } + + void 
TrContextWebGL::glBufferSubData(WebGLenum target, + WebGLintptr offset, + WebGLsizeiptr size, + const WebGLvoid *data) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glCopyBufferSubData(WebGLenum readTarget, + WebGLenum writeTarget, + WebGLintptr readOffset, + WebGLintptr writeOffset, + WebGLsizeiptr size) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteBuffers(WebGLsizei count, const WebGLuint *buffers) + { + for (WebGLsizei i = 0; i < count; i++) + { + for (auto it = buffers_.begin(); it != buffers_.end(); ++it) + { + Ref buffer = *it; + if (buffer->id == buffers[i]) + { + buffers_.erase(it); + break; + } + } + } + } + + void TrContextWebGL::glDisableVertexAttribArray(WebGLuint index) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDrawArrays(WebGLenum mode, WebGLint first, WebGLsizei count) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDrawArraysInstanced(WebGLenum mode, + WebGLint first, + WebGLsizei count, + WebGLsizei instanceCount) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDrawElements(WebGLenum mode, + WebGLsizei count, + WebGLenum type, + const WebGLvoid *indices) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDrawElementsInstanced(WebGLenum mode, WebGLsizei count, WebGLenum type, const WebGLvoid *indices, WebGLsizei instanceCount) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDrawRangeElements(WebGLenum mode, WebGLuint start, WebGLuint end, WebGLsizei count, WebGLenum type, const WebGLvoid *indices) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glEnableVertexAttribArray(WebGLuint index) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glFlushMappedBufferRange(WebGLenum target, WebGLintptr offset, WebGLsizeiptr size) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGenBuffers(WebGLsizei n, WebGLuint *buffers) + { + glGenTypedObjects(buffers_, n, buffers); + 
} + + void TrContextWebGL::glGetBufferParameter(WebGLenum target, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetBufferParameteriv(WebGLenum target, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetBufferPointerv(WebGLenum target, WebGLenum pname, WebGLvoid **params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetVertexAttrib(WebGLuint index, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetVertexAttribPointerv(WebGLuint index, WebGLenum pname, WebGLvoid **pointer) + { + /* TODO(yorkie): implement */ + } + + WebGLboolean TrContextWebGL::glIsBuffer(WebGLuint buffer) + { + return buffers_.has(buffer); + } + + void TrContextWebGL::glMapBufferRange(WebGLenum target, WebGLintptr offset, WebGLsizeiptr length, WebGLbitfield access) + { /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUnmapBuffer(WebGLenum target) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib1f(WebGLuint index, WebGLfloat x) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib2f(WebGLuint index, WebGLfloat x, WebGLfloat y) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib3f(WebGLuint index, WebGLfloat x, WebGLfloat y, WebGLfloat z) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib4f(WebGLuint index, WebGLfloat x, WebGLfloat y, WebGLfloat z, WebGLfloat w) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib1fv(WebGLuint index, const WebGLfloat *v) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib2fv(WebGLuint index, const WebGLfloat *v) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttrib3fv(WebGLuint index, const WebGLfloat *v) + { + /* TODO(yorkie): implement */ + } + + void 
TrContextWebGL::glVertexAttrib4fv(WebGLuint index, const WebGLfloat *v) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttribDivisor(WebGLuint index, WebGLuint divisor) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttribPointer(WebGLuint index, + WebGLint size, + WebGLenum type, + WebGLboolean normalized, + WebGLsizei stride, + WebGLintptr offset) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glVertexAttribIPointer(WebGLuint index, + WebGLint size, + WebGLenum type, + WebGLsizei stride, + WebGLintptr offset) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_framebuffers.cpp b/src/renderer/context_webgl_framebuffers.cpp new file mode 100644 index 000000000..769cd514b --- /dev/null +++ b/src/renderer/context_webgl_framebuffers.cpp @@ -0,0 +1,164 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBindFramebuffer(WebGLenum target, WebGLuint id) + { + auto framebuffer = framebuffers_.get(id); + if (!framebuffer) [[unlikely]] + { + last_error_ = WEBGL_INVALID_FRAMEBUFFER_OPERATION; + return; + } + + if (target != WEBGL_FRAMEBUFFER && + target != WEBGL2_DRAW_FRAMEBUFFER && + target != WEBGL2_READ_FRAMEBUFFER) [[unlikely]] + { + last_error_ = WEBGL_INVALID_ENUM; + return; + } + + auto framebuffer_target = details::FramebufferTarget(target); + framebuffer_bindings_[framebuffer_target] = framebuffer; + } + + void TrContextWebGL::glBindRenderbuffer(WebGLenum target, WebGLuint id) + { + auto renderbuffer = renderbuffers_.get(id); + if (!renderbuffer) [[unlikely]] + { + last_error_ = WEBGL_INVALID_FRAMEBUFFER_OPERATION; + return; + } + + if (target != WEBGL_RENDERBUFFER) [[unlikely]] + { + last_error_ = WEBGL_INVALID_ENUM; + return; + } + + auto renderbuffer_target = details::RenderbufferTarget(target); + renderbuffer_bindings_[renderbuffer_target] = renderbuffer; + } + + 
void TrContextWebGL::glBlitFramebuffer(WebGLint srcX0, + WebGLint srcY0, + WebGLint srcX1, + WebGLint srcY1, + WebGLint dstX0, + WebGLint dstY0, + WebGLint dstX1, + WebGLint dstY1, + WebGLbitfield mask, + WebGLenum filter) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glCheckFramebufferStatus(WebGLenum target) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteFramebuffers(WebGLsizei n, const WebGLuint *framebuffers) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteRenderbuffers(WebGLsizei n, const WebGLuint *renderbuffers) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDrawBuffers(WebGLsizei n, const WebGLenum *buffers) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glFramebufferRenderbuffer(WebGLenum target, WebGLenum attachment, WebGLenum renderbuffertarget, WebGLuint renderbuffer) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glFramebufferTexture2D(WebGLenum target, WebGLenum attachment, WebGLenum textarget, WebGLuint texture, WebGLint level) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glFramebufferTextureLayer(WebGLenum target, WebGLenum attachment, WebGLuint texture, WebGLint level, WebGLint layer) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGenFramebuffers(WebGLsizei n, WebGLuint *framebuffers) + { + glGenTypedObjects(framebuffers_, n, framebuffers); + } + + void TrContextWebGL::glGenRenderbuffers(WebGLsizei n, WebGLuint *renderbuffers) + { + glGenTypedObjects(renderbuffers_, n, renderbuffers); + } + + void TrContextWebGL::glGenerateMipmap(WebGLenum target) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetFramebufferAttachmentParameteriv(WebGLenum target, WebGLenum attachment, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetRenderbufferParameteriv(WebGLenum target, WebGLenum pname, WebGLint *params) + { + /* 
TODO(yorkie): implement */ + } + + void TrContextWebGL::glInvalidateFramebuffer(WebGLenum target, WebGLsizei n, const WebGLenum *attachments) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glInvalidateSubFramebuffer(WebGLenum target, + WebGLsizei n, + const WebGLenum *attachments, + WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height) + { + /* TODO(yorkie): implement */ + } + + WebGLboolean TrContextWebGL::glIsFramebuffer(WebGLuint framebuffer) + { + return framebuffers_.has(framebuffer); + } + + WebGLboolean TrContextWebGL::glIsRenderbuffer(WebGLuint renderbuffer) + { + return renderbuffers_.has(renderbuffer); + } + + void TrContextWebGL::glRenderbufferStorage(WebGLenum target, WebGLenum internalformat, WebGLsizei width, WebGLsizei height) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glRenderbufferStorageMultisample(WebGLenum target, + WebGLsizei samples, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_queries.cpp b/src/renderer/context_webgl_queries.cpp new file mode 100644 index 000000000..b59b2e007 --- /dev/null +++ b/src/renderer/context_webgl_queries.cpp @@ -0,0 +1,45 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBeginQuery(WebGLenum target, WebGLuint id) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteQueries(WebGLsizei n, const WebGLuint *ids) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glEndQuery(WebGLenum target) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGenQueries(WebGLsizei n, WebGLuint *ids) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetQueryObjectuiv(WebGLuint id, WebGLenum pname, WebGLuint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetQueryiv(WebGLenum target, 
WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glIsQuery(WebGLuint id) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_rendering.cpp b/src/renderer/context_webgl_rendering.cpp new file mode 100644 index 000000000..2a75ab407 --- /dev/null +++ b/src/renderer/context_webgl_rendering.cpp @@ -0,0 +1,92 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + // --- Rendering --- + void TrContextWebGL::glClear(WebGLbitfield mask) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glClearBufferiv(WebGLenum buffer, + WebGLint drawbuffer, + const WebGLint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glClearBufferuiv(WebGLenum buffer, + WebGLint drawbuffer, + const WebGLuint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glClearBufferfv(WebGLenum buffer, + WebGLint drawbuffer, + const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glClearBufferfi(WebGLenum buffer, + WebGLint drawbuffer, + WebGLfloat depth, + WebGLint stencil) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glClearColor(WebGLfloat red, + WebGLfloat green, + WebGLfloat blue, + WebGLfloat alpha) + { + clear_color_[0] = red; + clear_color_[1] = green; + clear_color_[2] = blue; + clear_color_[3] = alpha; + } + + void TrContextWebGL::glClearDepthf(WebGLfloat depth) + { + clear_depth_ = depth; + } + + void TrContextWebGL::glClearStencil(WebGLint s) + { + clear_stencil_ = s; + } + + void TrContextWebGL::glFinish() + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glFlush() + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glReadBuffer(WebGLenum buffer) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glReadPixels(WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height, + WebGLenum 
format, + WebGLenum type, + WebGLvoid *pixels) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_samplers.cpp b/src/renderer/context_webgl_samplers.cpp new file mode 100644 index 000000000..ecd5aa024 --- /dev/null +++ b/src/renderer/context_webgl_samplers.cpp @@ -0,0 +1,40 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBindSampler(WebGLuint unit, WebGLuint sampler) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteSamplers(WebGLsizei n, const WebGLuint *samplers) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGenSamplers(WebGLsizei n, WebGLuint *samplers) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetSamplerParameter(WebGLuint sampler, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glIsSampler(WebGLuint sampler) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glSamplerParameter(WebGLuint sampler, WebGLenum pname, WebGLint param) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_shaders.cpp b/src/renderer/context_webgl_shaders.cpp new file mode 100644 index 000000000..c6a2b3140 --- /dev/null +++ b/src/renderer/context_webgl_shaders.cpp @@ -0,0 +1,508 @@ +#include +#include +#include +#include +#include "utility.hpp" + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBindAttribLocation(WebGLuint program, WebGLuint index, const WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glCompileShader(WebGLuint shader) + { + /* TODO(yorkie): implement */ + } + + WebGLuint TrContextWebGL::glCreateProgram() + { + WebGLuint id = programs_.size(); + auto program = AcquireRef(new details::Program(id)); + programs_.push_back(program); + return program->id; + } + + WebGLuint 
TrContextWebGL::glCreateShader(WebGLenum type) + { + WebGLuint id = shaders_.size(); + auto shader = AcquireRef(new details::Shader(id, type)); + shaders_.push_back(shader); + return shader->id; + } + + void TrContextWebGL::glDeleteProgram(WebGLuint program) + { + programs_.remove(program); + } + + void TrContextWebGL::glDeleteShader(WebGLuint shader) + { + shaders_.remove(shader); + } + + void TrContextWebGL::glAttachShader(WebGLuint program_id, WebGLuint shader_id) + { + auto program = programs_.get(program_id); + auto shader = shaders_.get(shader_id); + if (program == nullptr || shader == nullptr) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + if (shader->type == WEBGL_VERTEX_SHADER) + program->vertexShader = shader; + else if (shader->type == WEBGL_FRAGMENT_SHADER) + program->fragmentShader = shader; + else [[unlikely]] + last_error_ = WEBGL_INVALID_OPERATION; + } + + void TrContextWebGL::glDetachShader(WebGLuint program_id, WebGLuint shader_id) + { + auto program = programs_.get(program_id); + auto shader = shaders_.get(shader_id); + if (program == nullptr || shader == nullptr) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + if (shader->type == WEBGL_VERTEX_SHADER) + program->vertexShader = nullptr; + else if (shader->type == WEBGL_FRAGMENT_SHADER) + program->fragmentShader = nullptr; + else [[unlikely]] + last_error_ = WEBGL_INVALID_OPERATION; + } + + void TrContextWebGL::glGetActiveAttrib(WebGLuint program, + WebGLuint index, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLint *size, + WebGLenum *type, + WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetActiveUniform(WebGLuint program, + WebGLuint index, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLint *size, + WebGLenum *type, + WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetActiveUniformBlockName(WebGLuint program, + WebGLuint index, + WebGLsizei maxLength, + 
WebGLsizei *length, + WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetActiveUniformBlockiv(WebGLuint program, + WebGLuint index, + WebGLenum pname, + WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetActiveUniformsiv(WebGLuint program, + WebGLsizei count, + const WebGLuint *uniforms, + WebGLenum pname, + WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetAttachedShaders(WebGLuint program, WebGLsizei maxCount, WebGLsizei *count, WebGLuint *shaders) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetAttribLocation(WebGLuint program, const WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetFragDataLocation(WebGLuint program, const WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetProgramBinary(WebGLuint program, + WebGLsizei maxLength, + WebGLsizei *length, + WebGLenum binaryFormat, + WebGLsizei *binaryLength, + WebGLbyte *binary) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetProgramInfoLog(WebGLuint program, WebGLsizei maxLength, WebGLsizei *length, WebGLchar *infoLog) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetProgramiv(WebGLuint program, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetShaderInfoLog(WebGLuint shader, WebGLsizei maxLength, WebGLsizei *length, WebGLchar *infoLog) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetShaderPrecisionFormat(WebGLenum shadertype, WebGLenum precisiontype, WebGLint *range, WebGLint *precision) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetShaderSource(WebGLuint shader_id, WebGLsizei maxLength, WebGLsizei *length, WebGLchar *source) + { + auto shader = shaders_.get(shader_id); + if (shader == nullptr) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + if (length) + 
*length = 0; + if (source && maxLength > 0) + source[0] = '\0'; + return; + } + + const string &src = shader->source; + + if (maxLength <= 0 || source == nullptr) + { + if (length) + *length = 0; + return; + } + + // Reserve one byte for null-terminator. + size_t total_len = src.size(); + size_t copy_len = std::min(total_len, static_cast(std::max(0, maxLength - 1))); + + if (copy_len > 0) + src.copy(source, copy_len); + source[copy_len] = '\0'; + + if (length) + *length = static_cast(copy_len); + } + + void TrContextWebGL::glGetShaderiv(WebGLuint shader, WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetUniformfv(WebGLuint program, WebGLuint location, WebGLsizei count, WebGLfloat *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetUniformiv(WebGLuint program, WebGLuint location, WebGLsizei count, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetUniformuiv(WebGLuint program, WebGLuint location, WebGLsizei count, WebGLuint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetUniformBlockIndex(WebGLuint program, const WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetUniformIndices(WebGLuint program, WebGLsizei count, const WebGLchar **names, WebGLuint *indices) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetUniformLocation(WebGLuint program, const WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + WebGLboolean TrContextWebGL::glIsProgram(WebGLuint program) + { + return programs_.has(program); + } + + WebGLboolean TrContextWebGL::glIsShader(WebGLuint shader) + { + return shaders_.has(shader); + } + + void TrContextWebGL::glLinkProgram(WebGLuint program) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glProgramBinary(WebGLuint program, WebGLenum binaryFormat, const WebGLbyte *binary, WebGLsizei binaryLength) + { + /* TODO(yorkie): 
implement */ + } + + void TrContextWebGL::glProgramParameteri(WebGLuint program, WebGLenum pname, WebGLint param) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glReleaseShaderCompiler() + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glShaderBinary(WebGLuint shader, + WebGLenum binaryFormat, + const WebGLbyte *binary, + WebGLsizei binaryLength) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glShaderSource(WebGLuint shader_id, + WebGLsizei count, + const WebGLchar **string, + const WebGLint *length) + { + auto shader = shaders_.get(shader_id); + if (shader == nullptr) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + // Concatenate provided strings into a single source, honoring optional lengths. + std::string src; + if (string != nullptr && count > 0) + { + for (WebGLsizei i = 0; i < count; ++i) + { + const WebGLchar *s = string[i]; + if (s == nullptr) + continue; + + if (length != nullptr) + { + WebGLint n = length[i]; + if (n < 0) + src.append(s); // Use full C-string when length is negative + else + src.append(s, static_cast(n)); + } + else + { + src.append(s); + } + } + } + + shader->source = std::move(src); + } + + void TrContextWebGL::glUniform1f(WebGLuint location, WebGLfloat v0) + { + glSetUniform(location, v0); + } + + void TrContextWebGL::glUniform2f(WebGLuint location, WebGLfloat v0, WebGLfloat v1) + { + glSetUniform(location, v0, v1); + } + + void TrContextWebGL::glUniform3f(WebGLuint location, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2) + { + glSetUniform(location, v0, v1, v2); + } + + void TrContextWebGL::glUniform4f(WebGLuint location, WebGLfloat v0, WebGLfloat v1, WebGLfloat v2, WebGLfloat v3) + { + glSetUniform(location, v0, v1, v2, v3); + } + + void TrContextWebGL::glUniform1i(WebGLuint location, WebGLint v0) + { + glSetUniform(location, v0); + } + + void TrContextWebGL::glUniform2i(WebGLuint location, WebGLint v0, WebGLint v1) + { + glSetUniform(location, v0, v1); + 
} + + void TrContextWebGL::glUniform3i(WebGLuint location, WebGLint v0, WebGLint v1, WebGLint v2) + { + glSetUniform(location, v0, v1, v2); + } + + void TrContextWebGL::glUniform4i(WebGLuint location, WebGLint v0, WebGLint v1, WebGLint v2, WebGLint v3) + { + glSetUniform(location, v0, v1, v2, v3); + } + + void TrContextWebGL::glUniform1ui(WebGLuint location, WebGLuint v0) + { + // TODO + } + + void TrContextWebGL::glUniform2ui(WebGLuint location, WebGLuint v0, WebGLuint v1) + { + glSetUniform(location, v0, v1); + } + + void TrContextWebGL::glUniform3ui(WebGLuint location, WebGLuint v0, WebGLuint v1, WebGLuint v2) + { + glSetUniform(location, v0, v1, v2); + } + + void TrContextWebGL::glUniform4ui(WebGLuint location, WebGLuint v0, WebGLuint v1, WebGLuint v2, WebGLuint v3) + { + glSetUniform(location, v0, v1, v2, v3); + } + + void TrContextWebGL::glUniform1fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value) + { + } + + void TrContextWebGL::glUniform2fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform3fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform4fv(WebGLuint location, WebGLsizei count, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform1iv(WebGLuint location, WebGLsizei count, const WebGLint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform2iv(WebGLuint location, WebGLsizei count, const WebGLint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform3iv(WebGLuint location, WebGLsizei count, const WebGLint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform4iv(WebGLuint location, WebGLsizei count, const WebGLint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform1uiv(WebGLuint location, WebGLsizei count, 
const WebGLuint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform2uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform3uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniform4uiv(WebGLuint location, WebGLsizei count, const WebGLuint *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix2fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix3fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix4fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix2x3fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix3x2fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix2x4fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix4x2fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix3x4fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformMatrix4x3fv(WebGLuint location, WebGLsizei count, WebGLboolean transpose, const WebGLfloat *value) + { 
+ /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUniformBlockBinding(WebGLuint program, WebGLuint uniformBlockIndex, WebGLuint bindingPoint) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glUseProgram(WebGLuint program) + { + if (program == 0) + { + current_program_ = nullptr; + } + else + { + current_program_ = programs_.get(program); + } + } + + void TrContextWebGL::glValidateProgram(WebGLuint program) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_states.cpp b/src/renderer/context_webgl_states.cpp new file mode 100644 index 000000000..dc177d89a --- /dev/null +++ b/src/renderer/context_webgl_states.cpp @@ -0,0 +1,193 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + + // --- State Management --- + void TrContextWebGL::glBlendColor(WebGLfloat red, + WebGLfloat green, + WebGLfloat blue, + WebGLfloat alpha) + { + blend_color_[0] = red; + blend_color_[1] = green; + blend_color_[2] = blue; + blend_color_[3] = alpha; + } + + void TrContextWebGL::glBlendEquation(WebGLenum mode) + { + blend_equation_rgb_ = mode; + blend_equation_alpha_ = mode; + } + + void TrContextWebGL::glBlendEquationSeparate(WebGLenum mode_rgb, WebGLenum mode_alpha) + { + blend_equation_rgb_ = mode_rgb; + blend_equation_alpha_ = mode_alpha; + } + + void TrContextWebGL::glBlendFunc(WebGLenum sfactor, WebGLenum dfactor) + { + blend_sfactor_rgb_ = sfactor; + blend_dfactor_rgb_ = dfactor; + blend_sfactor_alpha_ = sfactor; + blend_dfactor_alpha_ = dfactor; + } + + void TrContextWebGL::glBlendFuncSeparate(WebGLenum src_rgb, + WebGLenum dst_rgb, + WebGLenum src_alpha, + WebGLenum dst_alpha) + { + blend_sfactor_rgb_ = src_rgb; + blend_dfactor_rgb_ = dst_rgb; + blend_sfactor_alpha_ = src_alpha; + blend_dfactor_alpha_ = dst_alpha; + } + + void TrContextWebGL::glColorMask(WebGLboolean red, + WebGLboolean green, + WebGLboolean blue, + WebGLboolean alpha) + { + 
color_mask_[0] = red; + color_mask_[1] = green; + color_mask_[2] = blue; + color_mask_[3] = alpha; + } + + void TrContextWebGL::glCullFace(WebGLenum mode) + { + cull_face_ = mode; + } + + void TrContextWebGL::glDepthFunc(WebGLenum func) + { + depth_func_ = func; + } + + void TrContextWebGL::glDepthMask(WebGLboolean flag) + { + depth_mask_ = flag; + } + + void TrContextWebGL::glDepthRangef(WebGLfloat near, WebGLfloat far) + { + depth_range_[0] = near; + depth_range_[1] = far; + } + + void TrContextWebGL::glDisable(WebGLenum cap) + { + caps_.disable(cap); + } + + void TrContextWebGL::glEnable(WebGLenum cap) + { + caps_.enable(cap); + } + + void TrContextWebGL::glFrontFace(WebGLenum mode) + { + front_face_ = mode; + } + + void TrContextWebGL::glGet(WebGLenum pname, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + WebGLenum TrContextWebGL::glGetError() + { + WebGLenum error = last_error_; + last_error_ = WEBGL_NO_ERROR; + return error; + } + + void TrContextWebGL::glHint(WebGLenum target, WebGLenum mode) + { /* TODO(yorkie): implement */ + } + + WebGLboolean TrContextWebGL::glIsEnabled(WebGLenum cap) + { + return caps_.isEnabled(cap); + } + + void TrContextWebGL::glLineWidth(WebGLfloat width) + { + line_width_ = width; + } + + void TrContextWebGL::glPixelStorei(WebGLenum pname, WebGLint param) + { /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glPolygonOffset(WebGLfloat factor, WebGLfloat units) + { + polygon_offset_factor_ = factor; + polygon_offset_units_ = units; + } + + void TrContextWebGL::glSampleCoverage(WebGLfloat value, WebGLboolean invert) + { + sample_coverage_value_ = value; + sample_coverage_invert_ = invert; + } + + void TrContextWebGL::glScissor(WebGLint x, WebGLint y, WebGLsizei width, WebGLsizei height) + { + scissor_box_[0] = x; + scissor_box_[1] = y; + scissor_box_[2] = width; + scissor_box_[3] = height; + } + + void TrContextWebGL::glStencilFunc(WebGLenum func, WebGLint ref, WebGLuint mask) + { + stencil_func_ = func; + 
stencil_ref_ = ref; + stencil_mask_ = mask; + } + + void TrContextWebGL::glStencilFuncSeparate(WebGLenum face, + WebGLenum func, + WebGLint ref, + WebGLuint mask) + { + stencil_func_ = func; + stencil_ref_ = ref; + stencil_mask_ = mask; + } + + void TrContextWebGL::glStencilMask(WebGLuint mask) + { + stencil_mask_ = mask; + } + + void TrContextWebGL::glStencilMaskSeparate(WebGLenum face, WebGLuint mask) + { /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glStencilOp(WebGLenum fail, WebGLenum zfail, WebGLenum zpass) + { /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glStencilOpSeparate(WebGLenum face, WebGLenum fail, WebGLenum zfail, WebGLenum zpass) + { /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glViewport(WebGLint x, WebGLint y, WebGLsizei width, WebGLsizei height) + { + viewport_[0] = x; + viewport_[1] = y; + viewport_[2] = width; + viewport_[3] = height; + } +} diff --git a/src/renderer/context_webgl_syncing.cpp b/src/renderer/context_webgl_syncing.cpp new file mode 100644 index 000000000..7d1c96fe0 --- /dev/null +++ b/src/renderer/context_webgl_syncing.cpp @@ -0,0 +1,42 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glClientWaitSync(WebGLsync sync, WebGLbitfield flags, WebGLuint64 timeout) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteSync(WebGLsync sync) + { + /* TODO(yorkie): implement */ + } + + WebGLsync TrContextWebGL::glFenceSync(WebGLenum condition, WebGLbitfield flags) + { + /* TODO(yorkie): implement */ + return nullptr; + } + + void TrContextWebGL::glGetSynciv(WebGLsync sync, WebGLenum pname, WebGLsizei bufSize, WebGLsizei *length, WebGLint *values) + { + /* TODO(yorkie): implement */ + } + + WebGLboolean TrContextWebGL::glIsSync(WebGLsync sync) + { + /* TODO(yorkie): implement */ + return false; + } + + void TrContextWebGL::glWaitSync(WebGLsync sync, WebGLbitfield flags, WebGLuint64 
timeout) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_texture.cpp b/src/renderer/context_webgl_texture.cpp new file mode 100644 index 000000000..06082f5a1 --- /dev/null +++ b/src/renderer/context_webgl_texture.cpp @@ -0,0 +1,257 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glActiveTexture(WebGLenum unit) + { + active_texture_unit_ = unit; + } + + void TrContextWebGL::glBindTexture(WebGLenum target, WebGLuint id) + { + auto texture = textures_.get(id); + if (!texture) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + auto texture_target = details::TextureTarget(target); + texture->setTarget(texture_target); + texture_bindings_[texture_target] = texture; + } + + void TrContextWebGL::glCopyTexImage2D(WebGLenum target, + WebGLenum internalformat, + WebGLint level, + WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height, + WebGLint border) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glCopyTexSubImage2D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLint x, + WebGLint y, + WebGLsizei width, + WebGLsizei height) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glDeleteTextures(WebGLsizei n, const WebGLuint *textures) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glGenTextures(WebGLsizei n, WebGLuint *textures) + { + glGenTypedObjects(textures_, n, textures); + } + + void TrContextWebGL::glGetTexParameter(WebGLenum target, WebGLenum pname, WebGLint *params) + { + // TODO(yorkie): implement + } + + WebGLboolean TrContextWebGL::glIsTexture(WebGLuint texture) + { + return textures_.has(texture); + } + + void TrContextWebGL::glTexImage2D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei border, + WebGLenum format, + WebGLenum type, + const WebGLvoid 
*data) + { + auto texture = texture_bindings_.at(target); + if (!texture) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + texture->mipLevels = level; + texture->internalformat = internalformat; + texture->setSize(width, height); + } + + void TrContextWebGL::glTexImage3D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLsizei border, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data) + { + auto texture = texture_bindings_.at(target); + if (!texture) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + texture->mipLevels = level; + texture->internalformat = internalformat; + texture->setSize(width, height, depth); + } + + // --- Additional texture APIs --- + void TrContextWebGL::glCompressedTexImage2D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei border, + WebGLsizei imageSize, + const WebGLvoid *data) + { + auto texture = texture_bindings_.at(target); + if (!texture) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + texture->mipLevels = level; + texture->internalformat = internalformat; + texture->setSize(width, height); + } + + void TrContextWebGL::glCompressedTexImage3D(WebGLenum target, + WebGLint level, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLsizei border, + WebGLsizei imageSize, + const WebGLvoid *data) + { + auto texture = texture_bindings_.at(target); + if (!texture) [[unlikely]] + { + last_error_ = WEBGL_INVALID_OPERATION; + return; + } + + texture->mipLevels = level; + texture->internalformat = internalformat; + texture->setSize(width, height, depth); + } + + void TrContextWebGL::glCompressedTexSubImage2D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLsizei width, + WebGLsizei height, + WebGLenum format, + WebGLsizei imageSize, 
+ const WebGLvoid *data) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glCompressedTexSubImage3D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLint zoffset, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLenum format, + WebGLsizei imageSize, + const WebGLvoid *data) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexParameterf(WebGLenum target, WebGLenum pname, WebGLfloat param) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexParameteri(WebGLenum target, WebGLenum pname, WebGLint param) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexParameterfv(WebGLenum target, WebGLenum pname, const WebGLfloat *params) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexParameteriv(WebGLenum target, WebGLenum pname, const WebGLint *params) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexStorage2D(WebGLenum target, + WebGLint levels, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexStorage3D(WebGLenum target, + WebGLint levels, + WebGLenum internalformat, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexSubImage2D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLsizei width, + WebGLsizei height, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data) + { + // TODO(yorkie): implement + } + + void TrContextWebGL::glTexSubImage3D(WebGLenum target, + WebGLint level, + WebGLint xoffset, + WebGLint yoffset, + WebGLint zoffset, + WebGLsizei width, + WebGLsizei height, + WebGLsizei depth, + WebGLenum format, + WebGLenum type, + const WebGLvoid *data) + { + // TODO(yorkie): implement + } +} diff --git a/src/renderer/context_webgl_transform_feedback.cpp b/src/renderer/context_webgl_transform_feedback.cpp new file mode 100644 
index 000000000..ebda623bb --- /dev/null +++ b/src/renderer/context_webgl_transform_feedback.cpp @@ -0,0 +1,69 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBeginTransformFeedback(WebGLenum primitiveMode) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glBindTransformFeedback(WebGLenum target, WebGLuint transformFeedback) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glDeleteTransformFeedbacks(WebGLsizei n, const WebGLuint *transformFeedbacks) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glEndTransformFeedback() + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGenTransformFeedbacks(WebGLsizei n, WebGLuint *transformFeedbacks) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetTransformFeedbackVarying(WebGLuint program, + WebGLuint index, + WebGLsizei bufSize, + WebGLsizei *length, + WebGLsizei *size, + WebGLenum *type, + WebGLchar *name) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glIsTransformFeedback(WebGLuint transformFeedback) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glPauseTransformFeedback() + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glResumeTransformFeedback() + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glTransformFeedbackVaryings(WebGLuint program, + WebGLsizei count, + const WebGLchar **varyings, + WebGLenum bufferMode) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_utility.cpp b/src/renderer/context_webgl_utility.cpp new file mode 100644 index 000000000..a301f54f6 --- /dev/null +++ b/src/renderer/context_webgl_utility.cpp @@ -0,0 +1,25 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glGetInternalformativ(WebGLenum target, WebGLenum 
internalformat, WebGLsizei propCount, WebGLenum *props, WebGLint *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetString(WebGLenum pname, WebGLchar *params) + { + /* TODO(yorkie): implement */ + } + + void TrContextWebGL::glGetStringi(WebGLenum pname, WebGLuint index) + { + /* TODO(yorkie): implement */ + } +} diff --git a/src/renderer/context_webgl_vao.cpp b/src/renderer/context_webgl_vao.cpp new file mode 100644 index 000000000..3f78776ef --- /dev/null +++ b/src/renderer/context_webgl_vao.cpp @@ -0,0 +1,30 @@ +#include +#include +#include +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + void TrContextWebGL::glBindVertexArray(WebGLuint array) + { + current_vertex_array_object_ = vertex_array_objects_.get(array); + } + + void TrContextWebGL::glDeleteVertexArrays(WebGLsizei n, const WebGLuint *arrays) + { + // vertex_array_objects_.remove(arrays, n); + } + + void TrContextWebGL::glGenVertexArrays(WebGLsizei n, WebGLuint *arrays) + { + glGenTypedObjects(vertex_array_objects_, n, arrays); + } + + WebGLboolean TrContextWebGL::glIsVertexArray(WebGLuint array) + { + return vertex_array_objects_.has(array); + } +} diff --git a/src/renderer/gles/context_app.hpp b/src/renderer/gles/context_app.hpp index 0a330467a..ba84297b5 100644 --- a/src/renderer/gles/context_app.hpp +++ b/src/renderer/gles/context_app.hpp @@ -9,7 +9,6 @@ #include "./context_storage.hpp" #include "./framebuffer.hpp" -#include "./gpu_command_encoder_impl.hpp" class ContextGLHost; class ContextGLApp : public ContextGLStorage diff --git a/src/renderer/gles/gpu_backend_impl.cpp b/src/renderer/gles/gpu_backend_impl.cpp new file mode 100644 index 000000000..77e7fcf94 --- /dev/null +++ b/src/renderer/gles/gpu_backend_impl.cpp @@ -0,0 +1,37 @@ +#include +#include +#include +#include + +using namespace std; +using namespace commandbuffers; + +namespace gles +{ + GPUBackend::GPUBackend(GPUInstance *instance, GPUBackendType type) + : 
BackendConnection(instance, type) + { + } + + vector> GPUBackend::discoverPhysicalDevices(const RequestAdapterOptions &options) + { + if (options.forceFallbackAdapter) + return {}; + if (options.featureLevel != GPUFeatureLevel::kCompatibility) + return {}; + + vector> devices; + { + Ref display = DisplayEGL::CreateFromCurrent(); + Ref newDevice = GPUPhysicalDeviceImpl::Create(type(), display, true); + devices.push_back(newDevice); + } + return devices; + } + + gpu::BackendConnection *Connect(GPUInstance *instance, GPUBackendType type) + { + assert(type == GPUBackendType::kOpenGL || type == GPUBackendType::kOpenGLES); + return new GPUBackend(instance, type); + } +} diff --git a/src/renderer/gles/gpu_backend_impl.hpp b/src/renderer/gles/gpu_backend_impl.hpp new file mode 100644 index 000000000..2ecb1a67e --- /dev/null +++ b/src/renderer/gles/gpu_backend_impl.hpp @@ -0,0 +1,18 @@ +#pragma once + +#include +#include +#include +#include + +namespace gles +{ + class GPUBackend final : public commandbuffers::gpu::BackendConnection + { + public: + GPUBackend(commandbuffers::GPUInstance *instance, commandbuffers::GPUBackendType type); + + std::vector> discoverPhysicalDevices( + const commandbuffers::RequestAdapterOptions &options) override; + }; +} diff --git a/src/renderer/gles/gpu_command_buffer_impl.cpp b/src/renderer/gles/gpu_command_buffer_impl.cpp index 2a9692e3d..d399d34a9 100644 --- a/src/renderer/gles/gpu_command_buffer_impl.cpp +++ b/src/renderer/gles/gpu_command_buffer_impl.cpp @@ -7,45 +7,14 @@ namespace gles using namespace std; using namespace commandbuffers; - GPUCommandBufferImpl::GPUCommandBufferImpl(optional label) - : GPUCommandBuffer(move(label)) - , framebuffer_(0) + GPUCommandBufferImpl::GPUCommandBufferImpl(GPUCommandEncoder *encoder, const GPUCommandBufferDescriptor *descriptor) + : GPUCommandBufferBase(encoder, descriptor) { } - GPUCommandBufferImpl::GPUCommandBufferImpl(optional label, const GPURenderPassEncoderImpl &renderpass_encoder) - : 
GPUCommandBuffer(move(label), renderpass_encoder.commandBuffer()) - , framebuffer_(renderpass_encoder.framebuffer_) - { - assert(framebuffer_ > 0 && "Framebuffer must be bound before creating GPUCommandBufferImpl"); - } - void GPUCommandBufferImpl::execute() { - GLFramebuffer framebuffer(framebuffer_, false); - GLFramebufferScope framebuffer_scope(GL_FRAMEBUFFER, framebuffer); - - // TODO(yorkie): execute the commands in the command buffer. - for (const auto &command : commands_) - { - switch (command->type) - { - case GPUCommand::kDraw: - onDraw(*dynamic_pointer_cast(command)); - break; - case GPUCommand::kDrawIndexed: - onDraw(*dynamic_pointer_cast(command)); - break; - case GPUCommand::kSetViewport: - setViewport(*dynamic_pointer_cast(command)); - break; - case GPUCommand::kSetScissor: - setScissorRect(*dynamic_pointer_cast(command)); - break; - default: - break; - } - } + // TODO } void GPUCommandBufferImpl::onDraw(const commandbuffers::GPUDrawCommand &command) diff --git a/src/renderer/gles/gpu_command_buffer_impl.hpp b/src/renderer/gles/gpu_command_buffer_impl.hpp index 5a147498b..2733ee92d 100644 --- a/src/renderer/gles/gpu_command_buffer_impl.hpp +++ b/src/renderer/gles/gpu_command_buffer_impl.hpp @@ -4,20 +4,19 @@ #include #include #include - +#include #include "./common.hpp" -#include "./gpu_renderpass_encoder_impl.hpp" namespace gles { - class GPUCommandBufferImpl : public commandbuffers::GPUCommandBuffer + class GPUCommandBufferImpl : public commandbuffers::GPUCommandBufferBase { public: - GPUCommandBufferImpl(std::optional label); - GPUCommandBufferImpl(std::optional label, const GPURenderPassEncoderImpl &); + GPUCommandBufferImpl(commandbuffers::GPUCommandEncoder *encoder, + const commandbuffers::GPUCommandBufferDescriptor *descriptor); public: - void execute() override; + void execute(); private: void onDraw(const commandbuffers::GPUDrawCommand &); diff --git a/src/renderer/gles/gpu_command_encoder_impl.cpp 
b/src/renderer/gles/gpu_command_encoder_impl.cpp deleted file mode 100644 index fac46a6e4..000000000 --- a/src/renderer/gles/gpu_command_encoder_impl.cpp +++ /dev/null @@ -1,54 +0,0 @@ -#include "./common.hpp" -#include "./gpu_command_encoder_impl.hpp" -#include "./gpu_command_buffer_impl.hpp" -#include "./gpu_renderpass_encoder_impl.hpp" - -namespace gles -{ - using namespace std; - using namespace commandbuffers; - - GPURenderPassEncoder GPUCommandEncoderImpl::beginRenderPass(GPURenderPassDescriptor &) - { - assert(false && "beginRenderPass not implemented in gles::GPUCommandEncoderImpl"); - } - - std::unique_ptr GPUCommandEncoderImpl::finish(std::optional label) const - { - if (current_pass_encoder_ == nullptr) - return nullptr; - - if (current_pass_encoder_->isRenderPassEncoder()) - { - const auto &renderpass_encoder = dynamic_cast(*current_pass_encoder_); - return unique_ptr(new GPUCommandBufferImpl(label, renderpass_encoder)); - } - else - { - return nullptr; - } - } - - GPURenderPassEncoder &GPUCommandEncoderImpl::getOrStartRecordingRenderPass() - { - if (current_pass_encoder_ == nullptr || !current_pass_encoder_->isRenderPassEncoder()) - { - unique_ptr renderpass_encoder = make_unique(); - renderpass_encoder->begin(); - current_pass_encoder_ = move(renderpass_encoder); - } - - auto renderpass_encoder = dynamic_pointer_cast(current_pass_encoder_); - assert(renderpass_encoder != nullptr && "Current pass encoder must be a GPURenderPassEncoderImpl"); - return *renderpass_encoder; - } - - bool GPUCommandEncoderImpl::isRenderPassWith(GLuint target_framebuffer) const - { - if (current_pass_encoder_ == nullptr || !current_pass_encoder_->isRenderPassEncoder()) - return false; - - const auto &renderpass_encoder = dynamic_cast(*current_pass_encoder_); - return renderpass_encoder.targetFramebuffer() == target_framebuffer; - } -} diff --git a/src/renderer/gles/gpu_command_encoder_impl.hpp b/src/renderer/gles/gpu_command_encoder_impl.hpp deleted file mode 100644 index 
188883daa..000000000 --- a/src/renderer/gles/gpu_command_encoder_impl.hpp +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include -#include -#include -#include -#include "./gpu_command_buffer_impl.hpp" - -namespace gles -{ - class GPUCommandEncoderImpl : public commandbuffers::GPUCommandEncoder - { - friend class GPUDeviceImpl; - using commandbuffers::GPUCommandEncoder::GPUCommandEncoder; - - public: - commandbuffers::GPURenderPassEncoder beginRenderPass(commandbuffers::GPURenderPassDescriptor &) override; - std::unique_ptr finish(std::optional label = std::nullopt) const override; - - public: - commandbuffers::GPURenderPassEncoder &getOrStartRecordingRenderPass(); - bool isRenderPassWith(GLuint target_framebuffer) const; - }; -} diff --git a/src/renderer/gles/gpu_device_impl.cpp b/src/renderer/gles/gpu_device_impl.cpp index e453a664d..7b8ceb800 100644 --- a/src/renderer/gles/gpu_device_impl.cpp +++ b/src/renderer/gles/gpu_device_impl.cpp @@ -2,33 +2,25 @@ #include #include -#include "./common.hpp" -#include "./gpu_device_impl.hpp" -#include "./gpu_command_encoder_impl.hpp" +#include +#include namespace gles { using namespace std; using namespace commandbuffers; - GPUQueueImpl::GPUQueueImpl() - : GPUQueue() + // static + Ref GPUDeviceImpl::Create(Ref adapter, const GPUDeviceDescriptor &descriptor) { - // Initialize the queue if needed. - // For GLES, this might not require any specific initialization. 
+ Ref device = AcquireRef(new GPUDeviceImpl(adapter, descriptor)); + device->initialize(descriptor); + return device; } - void GPUQueueImpl::submit(const vector> &command_buffers) + GPUDeviceImpl::GPUDeviceImpl(Ref adapter, const GPUDeviceDescriptor &descriptor) + : GPUDeviceBase(adapter, descriptor) { - for (const auto &command_buffer : command_buffers) - command_buffer->execute(); - } - - GPUDeviceImpl::GPUDeviceImpl() - : GPUDevice() - { - queue_ = make_unique(); - auto get_gl_string = [](GLenum name) -> string { const char *str = (const char *)glGetString(name); @@ -65,8 +57,75 @@ namespace gles DEBUG(LOG_TAG_RENDERER, "GPU Device Info: %s", adapter_info_.toString().c_str()); } - unique_ptr GPUDeviceImpl::createCommandEncoder(optional label) + bool GPUDeviceImpl::initialize(const GPUDeviceDescriptor &descriptor) + { + return true; + } + + unique_ptr GPUDeviceImpl::createCommandBuffer(GPUCommandEncoder &encoder, + const GPUCommandBufferDescriptor *descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createBindGroupImpl( + const GPUBindGroupDescriptor &descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createBindGroupLayoutImpl( + const GPUBindGroupLayoutDescriptor &descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createBufferImpl( + const GPUBufferDescriptor &descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createPipelineLayoutImpl( + const GPUPipelineLayoutDescriptor &descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createShaderModuleImpl( + const GPUShaderModuleDescriptor &descriptor, + const std::vector &internalExtensions) + { + return nullptr; + } + + Ref GPUDeviceImpl::createTextureImpl( + const GPUTextureDescriptor &descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createTextureViewImpl( + Ref texture, + const GPUTextureViewDescriptor &descriptor) + { + return nullptr; + } + + Ref GPUDeviceImpl::createUninitializedComputePipelineImpl( + const GPUComputePipelineDescriptor &descriptor) + { + 
return nullptr; + } + + Ref GPUDeviceImpl::createUninitializedRenderPipelineImpl( + const GPURenderPipelineDescriptor &descriptor) + { + return nullptr; + } + + bool GPUDeviceImpl::tickImpl() { - return unique_ptr(new GPUCommandEncoderImpl(label.value_or(""))); + return true; } } diff --git a/src/renderer/gles/gpu_device_impl.hpp b/src/renderer/gles/gpu_device_impl.hpp index 81d1317e7..5df5a22eb 100644 --- a/src/renderer/gles/gpu_device_impl.hpp +++ b/src/renderer/gles/gpu_device_impl.hpp @@ -1,23 +1,50 @@ #pragma once #include +#include #include namespace gles { - class GPUQueueImpl : public commandbuffers::GPUQueue + class GPUDeviceImpl final : public commandbuffers::GPUDeviceBase { public: - GPUQueueImpl(); - void submit(const std::vector> &) override; - }; + static Ref Create(Ref adapter, + const commandbuffers::GPUDeviceDescriptor &descriptor); - class GPUDeviceImpl : public commandbuffers::GPUDevice - { - public: - GPUDeviceImpl(); + private: + GPUDeviceImpl(Ref adapter, + const commandbuffers::GPUDeviceDescriptor &descriptor); + + bool initialize(const commandbuffers::GPUDeviceDescriptor &descriptor); public: - std::unique_ptr createCommandEncoder(std::optional label) override; + std::unique_ptr createCommandBuffer( + commandbuffers::GPUCommandEncoder &encoder, + const commandbuffers::GPUCommandBufferDescriptor *descriptor = nullptr) override; + + private: + Ref createBindGroupImpl( + const commandbuffers::GPUBindGroupDescriptor &) override; + Ref createBindGroupLayoutImpl( + const commandbuffers::GPUBindGroupLayoutDescriptor &) override; + Ref createBufferImpl( + const commandbuffers::GPUBufferDescriptor &) override; + Ref createPipelineLayoutImpl( + const commandbuffers::GPUPipelineLayoutDescriptor &) override; + Ref createShaderModuleImpl( + const commandbuffers::GPUShaderModuleDescriptor &, + const std::vector &) override; + Ref createTextureImpl( + const commandbuffers::GPUTextureDescriptor &) override; + Ref createTextureViewImpl( + Ref texture, + 
const commandbuffers::GPUTextureViewDescriptor &) override; + Ref createUninitializedComputePipelineImpl( + const commandbuffers::GPUComputePipelineDescriptor &) override; + Ref createUninitializedRenderPipelineImpl( + const commandbuffers::GPURenderPipelineDescriptor &) override; + + bool tickImpl() override; }; } diff --git a/src/renderer/gles/gpu_display_egl.cpp b/src/renderer/gles/gpu_display_egl.cpp new file mode 100644 index 000000000..b5fbf68ea --- /dev/null +++ b/src/renderer/gles/gpu_display_egl.cpp @@ -0,0 +1,23 @@ +#include + +using namespace std; +using namespace commandbuffers; + +namespace gles +{ + // static + Ref DisplayEGL::CreateFromCurrent() + { + Ref display = AcquireRef(new DisplayEGL(GPUBackendType::kOpenGLES)); + display->initializeFromCurrent(); + return display; + } + + DisplayEGL::DisplayEGL(GPUBackendType type) + { + } + + void DisplayEGL::initializeFromCurrent() + { + } +} diff --git a/src/renderer/gles/gpu_display_egl.hpp b/src/renderer/gles/gpu_display_egl.hpp new file mode 100644 index 000000000..72cb4bf7c --- /dev/null +++ b/src/renderer/gles/gpu_display_egl.hpp @@ -0,0 +1,18 @@ +#pragma once + +#include +#include + +namespace gles +{ + class DisplayEGL + { + public: + static Ref CreateFromCurrent(); + + explicit DisplayEGL(commandbuffers::GPUBackendType); + + private: + void initializeFromCurrent(); + }; +} diff --git a/src/renderer/gles/gpu_physical_device_impl.cpp b/src/renderer/gles/gpu_physical_device_impl.cpp new file mode 100644 index 000000000..51651f222 --- /dev/null +++ b/src/renderer/gles/gpu_physical_device_impl.cpp @@ -0,0 +1,147 @@ +// Copyright 2022 The Dawn & Tint Authors +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// +// 2. 
Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// 3. Neither the name of the copyright holder nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +#include +#include +#include + +using namespace std; +using namespace commandbuffers; + +namespace gles +{ + struct Vendor + { + const char *vendorName; + uint32_t vendorId; + }; + + const Vendor kVendors[] = {{"ATI", gpu_info::kVendorID_AMD}, + {"ARM", gpu_info::kVendorID_ARM}, + {"Imagination", gpu_info::kVendorID_ImgTec}, + {"Intel", gpu_info::kVendorID_Intel}, + {"NVIDIA", gpu_info::kVendorID_Nvidia}, + {"Qualcomm", gpu_info::kVendorID_QualcommPCI}}; + + uint32_t GetVendorIdFromVendors(const char *vendor) + { + uint32_t vendorId = 0; + for (const auto &it : kVendors) + { + // Matching vendor name with vendor string + if (strstr(vendor, it.vendorName) != nullptr) + { + vendorId = it.vendorId; + break; + } + } + return vendorId; + } + + uint32_t GetDeviceIdFromRender(string_view render) + { + uint32_t deviceId = 0; + size_t pos = render.find("(0x"); + if (pos == string_view::npos) + { + pos = render.find("(0X"); + } + if (pos == string_view::npos) + { + return deviceId; + } + render.remove_prefix(pos + 3); + + // The first character after the prefix must be hexadecimal, otherwise an invalid argument + // exception is thrown. 
+ if (!render.empty() && isxdigit(static_cast(*render.data()))) + { + deviceId = static_cast(stoul(render.data(), nullptr, 16)); + } + + return deviceId; + } + + // static + Ref GPUPhysicalDeviceImpl::Create(GPUBackendType backendType, + Ref display, + bool forceES31AndMinExtensions) + { + Ref physicalDevice = AcquireRef(new GPUPhysicalDeviceImpl(backendType, display)); + physicalDevice->initialize(); + return physicalDevice; + } + + GPUPhysicalDeviceImpl::GPUPhysicalDeviceImpl(GPUBackendType backendType, Ref display) + : PhysicalDeviceBase(backendType) + { + } + + DisplayEGL *GPUPhysicalDeviceImpl::display() const + { + return display_.get(); + } + + bool GPUPhysicalDeviceImpl::supportsExternalImages() const + { + return backendType() == GPUBackendType::kOpenGLES; + } + + bool GPUPhysicalDeviceImpl::supportsFeatureLevel(GPUFeatureLevel featureLevel, + GPUInstance *instance) const + { + return featureLevel == GPUFeatureLevel::kCompatibility; + } + + void GPUPhysicalDeviceImpl::initializeImpl() + { + name_ = reinterpret_cast(glGetString(GL_RENDERER)); + + const char *vendor = reinterpret_cast(glGetString(GL_VENDOR)); + vendor_id_ = GetVendorIdFromVendors(vendor); + + driver_description_ = string("OpenGL Version ") + + reinterpret_cast(glGetString(GL_VERSION)); + + if (name_.find("SwiftShader") != std::string::npos) + { + adapter_type_ = GPUAdapterType::kCPU; + } + } + + void GPUPhysicalDeviceImpl::initializeSupportedFeaturesImpl() + { + } + + Ref GPUPhysicalDeviceImpl::createDeviceImpl(Ref adapter, + const GPUDeviceDescriptor &descriptor) + { + // TODO(yorkie): handle opengl context? 
+ Ref device = GPUDeviceImpl::Create(adapter, descriptor); + return device; + } +} diff --git a/src/renderer/gles/gpu_physical_device_impl.hpp b/src/renderer/gles/gpu_physical_device_impl.hpp new file mode 100644 index 000000000..5d2f986e3 --- /dev/null +++ b/src/renderer/gles/gpu_physical_device_impl.hpp @@ -0,0 +1,38 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace gles +{ + class GPUPhysicalDeviceImpl final : public commandbuffers::gpu::PhysicalDeviceBase + { + public: + static Ref Create(commandbuffers::GPUBackendType backendType, + Ref display, + bool forceES31AndMinExtensions); + + DisplayEGL *display() const; + + bool supportsExternalImages() const override; + bool supportsFeatureLevel(commandbuffers::GPUFeatureLevel featureLevel, + commandbuffers::GPUInstance *instance) const override; + + private: + GPUPhysicalDeviceImpl(commandbuffers::GPUBackendType backendType, + Ref display); + + void initializeImpl() override; + void initializeSupportedFeaturesImpl() override; + + Ref createDeviceImpl( + Ref adapter, + const commandbuffers::GPUDeviceDescriptor &descriptor) override; + + private: + Ref display_; + }; +} diff --git a/src/renderer/gles/gpu_pipeline_impl.cpp b/src/renderer/gles/gpu_pipeline_impl.cpp new file mode 100644 index 000000000..046aaaf7e --- /dev/null +++ b/src/renderer/gles/gpu_pipeline_impl.cpp @@ -0,0 +1,8 @@ +#include + +namespace gles +{ + GPUPipelineImpl::GPUPipelineImpl() + { + } +} diff --git a/src/renderer/gles/gpu_pipeline_impl.hpp b/src/renderer/gles/gpu_pipeline_impl.hpp index 51001b5a8..65d7d00e6 100644 --- a/src/renderer/gles/gpu_pipeline_impl.hpp +++ b/src/renderer/gles/gpu_pipeline_impl.hpp @@ -6,16 +6,9 @@ namespace gles { - class GPURenderPipelineImpl : public commandbuffers::GPURenderPipeline + class GPUPipelineImpl { public: - GPURenderPipelineImpl(GLuint program) - : commandbuffers::GPURenderPipeline("GPURenderPipeline") - , program_(program) - { - } - - private: - GLuint program_; + 
GPUPipelineImpl(); }; } diff --git a/src/renderer/gles/gpu_queue_impl.cpp b/src/renderer/gles/gpu_queue_impl.cpp new file mode 100644 index 000000000..e8280ecdc --- /dev/null +++ b/src/renderer/gles/gpu_queue_impl.cpp @@ -0,0 +1,44 @@ +#include + +namespace gles +{ + using namespace std; + using namespace commandbuffers; + + GPUQueueImpl::GPUQueueImpl(Ref device, + const GPUQueueDescriptor &descriptor) + : GPUQueueBase(device, descriptor) + { + } + + // static + gpu::ResultOrError> GPUQueueImpl::Create(Ref device, + const GPUQueueDescriptor &descriptor) + { + return AcquireRef(new GPUQueueImpl(device, descriptor)); + } + + gpu::MaybeError GPUQueueImpl::submitImpl(uint32_t commandCount, + GPUCommandBufferBase *const *commands) + { + return {}; + } + + gpu::MaybeError GPUQueueImpl::writeBufferImpl(GPUBufferBase *buffer, + uint64_t bufferOffset, + const void *data, + size_t size) + { + return {}; + } + + bool GPUQueueImpl::hasPendingCommands() const + { + return has_pending_commands_; + } + + gpu::MaybeError GPUQueueImpl::waitForIdleForDestructionImpl() + { + return {}; + } +} diff --git a/src/renderer/gles/gpu_queue_impl.hpp b/src/renderer/gles/gpu_queue_impl.hpp new file mode 100644 index 000000000..5edd50568 --- /dev/null +++ b/src/renderer/gles/gpu_queue_impl.hpp @@ -0,0 +1,37 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace gles +{ + class GPUQueueImpl : public commandbuffers::GPUQueueBase + { + public: + static commandbuffers::gpu::ResultOrError> Create( + Ref device, + const commandbuffers::GPUQueueDescriptor &descriptor); + + private: + GPUQueueImpl(Ref device, + const commandbuffers::GPUQueueDescriptor &descriptor); + + commandbuffers::gpu::MaybeError submitImpl( + uint32_t commandCount, + commandbuffers::GPUCommandBufferBase *const *commands) override; + commandbuffers::gpu::MaybeError writeBufferImpl( + commandbuffers::GPUBufferBase *buffer, + uint64_t bufferOffset, + const void *data, + size_t size) override; + + bool 
hasPendingCommands() const override; + commandbuffers::gpu::MaybeError waitForIdleForDestructionImpl() override; + + uint32_t egl_sync_type_; + bool has_pending_commands_ = false; + }; +} diff --git a/src/renderer/gles/gpu_renderpass_encoder_impl.cpp b/src/renderer/gles/gpu_renderpass_encoder_impl.cpp deleted file mode 100644 index 6885cc5e3..000000000 --- a/src/renderer/gles/gpu_renderpass_encoder_impl.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include "./common.hpp" -#include "./gpu_command_buffer_impl.hpp" -#include "./gpu_renderpass_encoder_impl.hpp" - -namespace gles -{ - using namespace std; - using namespace commandbuffers; - - GPURenderPassEncoderImpl::GPURenderPassEncoderImpl(std::string label) - : commandbuffers::GPURenderPassEncoder(label) - , framebuffer_(0) - { - } - - void GPURenderPassEncoderImpl::begin() - { - glGetIntegerv(GL_FRAMEBUFFER_BINDING, reinterpret_cast(&framebuffer_)); - assert(framebuffer_ > 0 && "Framebuffer must be bound before beginning render pass"); - command_buffer_ = make_unique(label); - - GPURenderPassEncoder::begin(); - } -} diff --git a/src/renderer/gles/gpu_renderpass_encoder_impl.hpp b/src/renderer/gles/gpu_renderpass_encoder_impl.hpp deleted file mode 100644 index edba7a185..000000000 --- a/src/renderer/gles/gpu_renderpass_encoder_impl.hpp +++ /dev/null @@ -1,25 +0,0 @@ -#pragma once - -#include -#include -#include "./common.hpp" - -namespace gles -{ - class GPURenderPassEncoderImpl : public commandbuffers::GPURenderPassEncoder - { - friend class GPUCommandBufferImpl; - - public: - GPURenderPassEncoderImpl(std::string label = "GPURenderPassEncoder"); - - void begin() override; - GLuint targetFramebuffer() const - { - return framebuffer_; - } - - private: - GLuint framebuffer_; - }; -} diff --git a/src/renderer/metal/gpu_backend_mtl.h b/src/renderer/metal/gpu_backend_mtl.h new file mode 100644 index 000000000..6e35dc0ac --- /dev/null +++ b/src/renderer/metal/gpu_backend_mtl.h @@ -0,0 +1,18 @@ +#pragma once + +#include +#include 
+#include +#include + +namespace metal +{ + class GPUBackend final : public commandbuffers::gpu::BackendConnection + { + public: + GPUBackend(commandbuffers::GPUInstance *instance, commandbuffers::GPUBackendType type); + + std::vector> discoverPhysicalDevices( + const commandbuffers::RequestAdapterOptions &options) override; + }; +} diff --git a/src/renderer/metal/gpu_backend_mtl.mm b/src/renderer/metal/gpu_backend_mtl.mm new file mode 100644 index 000000000..1a4aeb674 --- /dev/null +++ b/src/renderer/metal/gpu_backend_mtl.mm @@ -0,0 +1,23 @@ +#include + +using namespace std; +using namespace commandbuffers; + +namespace metal +{ + GPUBackend::GPUBackend(GPUInstance *instance, GPUBackendType type) + : BackendConnection(instance, type) + { + } + + vector> GPUBackend::discoverPhysicalDevices(const RequestAdapterOptions &options) + { + return {}; + } + + gpu::BackendConnection *Connect(GPUInstance *instance, GPUBackendType type) + { + assert(type == GPUBackendType::kMetal); + return new GPUBackend(instance, type); + } +} diff --git a/src/renderer/render_api.cpp b/src/renderer/render_api.cpp index 25754890f..04da9a378 100644 --- a/src/renderer/render_api.cpp +++ b/src/renderer/render_api.cpp @@ -8,18 +8,102 @@ #include "xr/frame.hpp" using namespace std; +using namespace commandbuffers; -void TrRenderHardwareInterface::SubmitGPUCommandBuffer(vector> &commandBuffers) +#if SUPPORT_D3D11 // D3D11 backend +namespace d3d11 { - gpuDevice->queueRef().submit(commandBuffers); + extern gpu::BackendConnection *Connect(GPUInstance *, GPUBackendType); } +#endif -unique_ptr TrRenderHardwareInterface::CreateCommandEncoder() +#if SUPPORT_OPENGL_UNIFIED // OpenGL / OpenGL ES backend +namespace gles { - return gpuDevice->createCommandEncoder(""); + extern gpu::BackendConnection *Connect(GPUInstance *, GPUBackendType); } +#endif -void TrRenderHardwareInterface::AddCommandBuffer(commandbuffers::TrCommandBufferBase *commandBuffer) +#if SUPPORT_METAL // Metal backend +namespace metal +{ + 
extern gpu::BackendConnection *Connect(GPUInstance *, GPUBackendType); +} +#endif + +#if SUPPORT_VULKAN // Vulkan backend +namespace vulkan +{ + extern gpu::BackendConnection *Connect(GPUInstance *, GPUBackendType); +} +#endif + +TrRenderHardwareInterface::TrRenderHardwareInterface(RHIBackendType backend_type) + : backendType(backend_type) + , gpuInstance(GPUInstance::Create()) +{ + RequestAdapterOptions requestOptions; + requestOptions.featureLevel = GPUFeatureLevel::kCompatibility; + requestOptions.powerPreference = GPUPowerPreference::kHighPerformance; + + switch (backend_type) + { +#if SUPPORT_D3D11 + case RHIBackendType::D3D11: + gpuInstance->registerBackend(d3d11::Connect(gpuInstance.get(), GPUBackendType::kD3D11)); + requestOptions.backendType = GPUBackendType::kD3D11; + break; +#endif + +#if SUPPORT_OPENGL_UNIFIED + case RHIBackendType::OpenGLCore: + gpuInstance->registerBackend(gles::Connect(gpuInstance.get(), GPUBackendType::kOpenGL)); + requestOptions.backendType = GPUBackendType::kOpenGL; + break; + + case RHIBackendType::OpenGLESv2: + case RHIBackendType::OpenGLESv3: + gpuInstance->registerBackend(gles::Connect(gpuInstance.get(), GPUBackendType::kOpenGLES)); + requestOptions.backendType = GPUBackendType::kOpenGLES; + break; +#endif + +#if SUPPORT_METAL + case RHIBackendType::Metal: + gpuInstance->registerBackend(metal::Connect(gpuInstance.get(), GPUBackendType::kMetal)); + requestOptions.backendType = GPUBackendType::kMetal; + break; +#endif + +#if SUPPORT_VULKAN + case RHIBackendType::VULKAN: + gpuInstance->registerBackend(vulkan::Connect(gpuInstance.get(), GPUBackendType::kVulkan)); + requestOptions.backendType = GPUBackendType::kVulkan; + break; +#endif + + default: + // Unsupported backend type + assert(false && "Unsupported RHI backend type."); + } + + gpuAdapter = gpuInstance->requestAdapter(requestOptions); + gpuDevice = gpuAdapter->createDevice(); + assert(gpuDevice != nullptr && "Failed to create GPU device"); +} + +void 
TrRenderHardwareInterface::SubmitGPUCommandBuffer(vector> &commandBuffers) +{ + // TODO(yorkie): Handle the submission result and errors. +} + +unique_ptr TrRenderHardwareInterface::CreateCommandEncoder() +{ + // TODO(yorkie): Create and return a GPU command encoder. + return nullptr; +} + +void TrRenderHardwareInterface::AddCommandBuffer(TrCommandBufferBase *commandBuffer) { unique_lock lock(m_CommandBuffersMutex); m_CommandBuffers.push_back(commandBuffer); diff --git a/src/renderer/render_api.hpp b/src/renderer/render_api.hpp index 329f59b56..9cfa82e74 100644 --- a/src/renderer/render_api.hpp +++ b/src/renderer/render_api.hpp @@ -7,11 +7,13 @@ #include #include -#include #include +#include +#include #include #include #include +#include #include #include @@ -104,11 +106,7 @@ class TrRenderHardwareInterface friend class RHIFactory; public: - TrRenderHardwareInterface(RHIBackendType backend_type, std::unique_ptr gpu_device = nullptr) - : backendType(backend_type) - , gpuDevice(std::move(gpu_device)) - { - } + TrRenderHardwareInterface(RHIBackendType backend_type); virtual ~TrRenderHardwareInterface() = default; /** @@ -158,7 +156,7 @@ class TrRenderHardwareInterface /** * Submit a GPUCommandBuffer list to the GPU device for execution. */ - void SubmitGPUCommandBuffer(std::vector> &); + void SubmitGPUCommandBuffer(std::vector> &); std::unique_ptr CreateCommandEncoder(); /** @@ -271,7 +269,7 @@ class TrRenderHardwareInterface /** * @returns the `renderer::TrRenderer` shared pointer to use. */ - inline std::shared_ptr GetRenderer() + inline Ref GetRenderer() { return renderer.lock(); } @@ -325,10 +323,9 @@ class TrRenderHardwareInterface */ bool m_PrintsContext = false; - /** - * The GPU device instance. - */ - std::unique_ptr gpuDevice = nullptr; + Ref gpuInstance = nullptr; + Ref gpuAdapter = nullptr; + Ref gpuDevice = nullptr; /** * The default command buffer queue. 
diff --git a/src/renderer/render_api_opengles.cpp b/src/renderer/render_api_opengles.cpp index 5459a69e5..00acf7f4f 100644 --- a/src/renderer/render_api_opengles.cpp +++ b/src/renderer/render_api_opengles.cpp @@ -60,7 +60,7 @@ class RHI_OpenGL : public TrRenderHardwareInterface public: RHI_OpenGL(RHIBackendType backend_type) - : TrRenderHardwareInterface(backend_type, make_unique()) + : TrRenderHardwareInterface(backend_type) { memset(m_TmpMatrixL, 0, 16); memset(m_TmpMatrixR, 0, 16); @@ -2208,6 +2208,9 @@ class RHI_OpenGL : public TrRenderHardwareInterface auto type = req->indicesType; auto indices = reinterpret_cast(req->indicesOffset); + PrintDebugInfo(req, nullptr, nullptr, options); + DumpDrawCallInfo(DEBUG_TAG, "DrawElements", options.isDefaultQueue(), mode, count, type, indices); + reqContentRenderer->getContextGL()->drawElements(mode, count, type, indices); if (TR_UNLIKELY(CheckError(req, reqContentRenderer) != GL_NO_ERROR || options.printsCall)) { diff --git a/src/renderer/render_pass.cpp b/src/renderer/render_pass.cpp new file mode 100644 index 000000000..060061ca2 --- /dev/null +++ b/src/renderer/render_pass.cpp @@ -0,0 +1,58 @@ +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + TrRenderPass::TrRenderPass(RenderPassType type, const string &name) + : type_(type) + , name_(name) + , active_(false) + , command_encoder_(nullptr) + , renderpass_encoder_(nullptr) + { + } + + TrRenderPass::~TrRenderPass() + { + } + + RenderPassType TrRenderPass::type() const + { + return type_; + } + + const string &TrRenderPass::name() const + { + return name_; + } + + bool TrRenderPass::isActive() const + { + return active_; + } + + void TrRenderPass::receiveIncomingRequest(const TrCommandBufferRequest &request) + { + } + + void TrRenderPass::begin() + { + active_ = true; + { + renderpass_encoder_ = command_encoder_->beginRenderPass(renderpass_descriptor_); + } + } + + void TrRenderPass::end() + { + active_ = false; + 
renderpass_encoder_->end(); + } + + unique_ptr TrRenderPass::finish(optional label) + { + return command_encoder_->finish(label); + } +} diff --git a/src/renderer/render_pass.hpp b/src/renderer/render_pass.hpp new file mode 100644 index 000000000..5ac2b1056 --- /dev/null +++ b/src/renderer/render_pass.hpp @@ -0,0 +1,78 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace renderer +{ + enum class RenderPassType + { + /** + * Opaque render pass for rendering opaque objects. + * This is the default pass for objects without transparency. + */ + kOpaque, + /** + * Transparent render pass for rendering transparent objects. + * Objects with blending enabled should be routed here. + */ + kTransparent, + /** + * Offscreen render pass for rendering to off-screen targets. + * Used when the framebuffer is different from the main render target. + */ + kOffscreen, + }; + + inline const char *RenderPassTypeToString(RenderPassType type) + { + switch (type) + { + case RenderPassType::kOpaque: + return "Opaque"; + case RenderPassType::kTransparent: + return "Transparent"; + case RenderPassType::kOffscreen: + return "Offscreen"; + default: + return "Unknown"; + } + } + + /** + * The `TrRenderPass` class represents a render pass that manages a collection of command buffers. + * It wraps the `GPURenderPassEncoder` and provides an interface for adding and executing command buffers + * based on the pass type (opaque, transparent, or offscreen). 
+ */ + class TrRenderPass final + { + public: + TrRenderPass(RenderPassType type, const std::string &name); + ~TrRenderPass(); + + RenderPassType type() const; + const std::string &name() const; + bool isActive() const; + + void receiveIncomingRequest(const commandbuffers::TrCommandBufferRequest &request); + void begin(); + void end(); + std::unique_ptr finish( + std::optional label = std::nullopt); + + private: + RenderPassType type_; + std::string name_; + bool active_; + Ref command_encoder_; + Ref renderpass_encoder_; + commandbuffers::GPURenderPassDescriptor renderpass_descriptor_; + }; +} diff --git a/src/renderer/render_resource.cpp b/src/renderer/render_resource.cpp new file mode 100644 index 000000000..1a09e0778 --- /dev/null +++ b/src/renderer/render_resource.cpp @@ -0,0 +1,62 @@ +#include + +namespace renderer +{ + using namespace std; + using namespace commandbuffers; + + TrRenderResource::TrRenderResource(Ref device) + : device_(device) + , pipelines_() + , buffers_() + , textures_() + { + } + + Ref TrRenderResource::createShaderModule( + const GPUShaderModuleDescriptor *descriptor) + { + auto shader_module = device_->createShaderModule(descriptor); + shader_modules_.emplace(shader_module->id, shader_module); + return shader_module; + } + + Ref TrRenderResource::createPipeline() + { + return nullptr; + } + + Ref TrRenderResource::createBuffer( + const GPUBufferDescriptor *descriptor) + { + auto buffer = device_->createBuffer(descriptor); + buffers_.emplace(buffer->id, buffer); + return buffer; + } + + Ref TrRenderResource::createTexture( + const GPUTextureDescriptor *descriptor) + { + return nullptr; + } + + Ref TrRenderResource::getShaderModule(int id) + { + return shader_modules_.at(id); + } + + Ref TrRenderResource::getPipeline(int id) + { + return pipelines_.at(id); + } + + Ref TrRenderResource::getBuffer(int id) + { + return buffers_.at(id); + } + + Ref TrRenderResource::getTexture(int id) + { + return textures_.at(id); + } +} diff --git 
a/src/renderer/render_resource.hpp b/src/renderer/render_resource.hpp new file mode 100644 index 000000000..f41f8809d --- /dev/null +++ b/src/renderer/render_resource.hpp @@ -0,0 +1,39 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace renderer +{ + class TrRenderResource final + { + public: + TrRenderResource(Ref device); + + Ref createShaderModule( + const commandbuffers::GPUShaderModuleDescriptor *descriptor); + Ref createPipeline(); + Ref createBuffer( + const commandbuffers::GPUBufferDescriptor *descriptor); + Ref createTexture( + const commandbuffers::GPUTextureDescriptor *descriptor); + + Ref getShaderModule(int id); + Ref getPipeline(int id); + Ref getBuffer(int id); + Ref getTexture(int id); + + private: + Ref device_; + std::unordered_map> shader_modules_; + std::unordered_map> pipelines_; + std::unordered_map> buffers_; + std::unordered_map> textures_; + }; +}