diff --git a/3rdparty/spirv-cross/CMakeLists.txt b/3rdparty/spirv-cross/CMakeLists.txt new file mode 100644 index 000000000..e00f8c779 --- /dev/null +++ b/3rdparty/spirv-cross/CMakeLists.txt @@ -0,0 +1,180 @@ +# Copyright 2016 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +cmake_minimum_required(VERSION 2.8) +project(SPIRV-Cross) +enable_testing() + +option(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS "Instead of throwing exceptions assert" OFF) + +if(${CMAKE_GENERATOR} MATCHES "Makefile") + if(${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR}) + message(FATAL_ERROR "Build out of tree to avoid overwriting Makefile") + endif() +endif() + +set(spirv-compiler-options "") +set(spirv-compiler-defines "") + +if(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS) + set(spirv-compiler-defines ${spirv-compiler-defines} SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS) +endif() + +# To specify special debug or optimization options, use +# -DCMAKE_CXX_COMPILE_FLAGS +# However, we require the C++11 dialect. +if (NOT "${MSVC}") + set(spirv-compiler-options ${spirv-compiler-options} -std=c++11 -Wall -Wextra -Werror -Wshadow) + set(spirv-compiler-defines ${spirv-compiler-defines} __STDC_LIMIT_MACROS) + + if(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS) + set(spirv-compiler-options ${spirv-compiler-options} -fno-exceptions) + endif() +endif() + +macro(extract_headers out_abs file_list) + set(${out_abs}) # absolute paths + foreach(_a ${file_list}) + # get_filename_component only returns the longest extension, so use a regex + string(REGEX REPLACE ".*\\.(h|hpp)" "\\1" ext ${_a}) + if(("${ext}" STREQUAL "h") OR ("${ext}" STREQUAL "hpp")) + list(APPEND ${out_abs} "${_a}") + endif() + endforeach() +endmacro() + +macro(spirv_cross_add_library name config_name) + add_library(${name} ${ARGN}) + extract_headers(hdrs "${ARGN}") + target_include_directories(${name} PUBLIC + $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}> + $<INSTALL_INTERFACE:include/spirv_cross>) + set_target_properties(${name} PROPERTIES + PUBLIC_HEADERS "${hdrs}") + target_compile_options(${name} PRIVATE ${spirv-compiler-options}) + target_compile_definitions(${name} PRIVATE ${spirv-compiler-defines}) + install(TARGETS ${name} + EXPORT ${config_name}Config + RUNTIME DESTINATION bin + LIBRARY DESTINATION lib + ARCHIVE DESTINATION lib + PUBLIC_HEADER DESTINATION include/spirv_cross) + install(FILES ${hdrs} DESTINATION include/spirv_cross) + install(EXPORT ${config_name}Config DESTINATION share/${config_name}/cmake) + export(TARGETS ${name} FILE ${config_name}Config.cmake) +endmacro() + + +spirv_cross_add_library(spirv-cross-core spirv_cross_core STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/GLSL.std.450.h + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_common.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_parser.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_parser.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_parsed_ir.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_parsed_ir.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cfg.hpp +
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cfg.cpp) + +spirv_cross_add_library(spirv-cross-glsl spirv_cross_glsl STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_glsl.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_glsl.hpp) + +spirv_cross_add_library(spirv-cross-cpp spirv_cross_cpp STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cpp.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cpp.cpp) + +spirv_cross_add_library(spirv-cross-reflect spirv_cross_reflect STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_reflect.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_reflect.cpp) + +spirv_cross_add_library(spirv-cross-msl spirv_cross_msl STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_msl.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_msl.cpp) + +spirv_cross_add_library(spirv-cross-hlsl spirv_cross_hlsl STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_hlsl.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_hlsl.cpp) + +spirv_cross_add_library(spirv-cross-util spirv_cross_util STATIC + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_util.hpp + ${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_util.cpp) + +add_executable(spirv-cross main.cpp) +target_compile_options(spirv-cross PRIVATE ${spirv-compiler-options}) +target_compile_definitions(spirv-cross PRIVATE ${spirv-compiler-defines}) + +install(TARGETS spirv-cross RUNTIME DESTINATION bin) +target_link_libraries(spirv-cross spirv-cross-glsl spirv-cross-hlsl spirv-cross-cpp spirv-cross-reflect spirv-cross-msl spirv-cross-util spirv-cross-core) +target_link_libraries(spirv-cross-util spirv-cross-core) +target_link_libraries(spirv-cross-glsl spirv-cross-core) +target_link_libraries(spirv-cross-msl spirv-cross-glsl) +target_link_libraries(spirv-cross-hlsl spirv-cross-glsl) +target_link_libraries(spirv-cross-cpp spirv-cross-glsl) + +# Set up tests, using only the simplest modes of the test_shaders +# script. 
You have to invoke the script manually to: +# - Update the reference files +# - Get cycle counts from malisc +# - Keep failing outputs +find_package(PythonInterp) +if (${PYTHONINTERP_FOUND}) + if (${PYTHON_VERSION_MAJOR} GREATER 2) + add_test(NAME spirv-cross-test + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-no-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-no-opt + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-metal + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --metal --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-msl + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-metal-no-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --metal --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-msl-no-opt + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-hlsl + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --hlsl --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-hlsl + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-hlsl-no-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --hlsl --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-hlsl-no-opt + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --opt --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-metal-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --metal --opt --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-msl + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-hlsl-opt + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --hlsl --opt --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-hlsl + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + add_test(NAME spirv-cross-test-reflection + COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --reflect --parallel + ${CMAKE_CURRENT_SOURCE_DIR}/shaders-reflection + WORKING_DIRECTORY $<TARGET_FILE_DIR:spirv-cross>) + endif() +else() + message(WARNING "Testing disabled. Could not find python3. If you have python3 installed try running " + "cmake with -DPYTHON_EXECUTABLE:FILEPATH=/path/to/python3 to help it find the executable") +endif() diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/CODE_OF_CONDUCT.md b/3rdparty/spirv-cross/CODE_OF_CONDUCT.md similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/CODE_OF_CONDUCT.md rename to 3rdparty/spirv-cross/CODE_OF_CONDUCT.md diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/GLSL.std.450.h b/3rdparty/spirv-cross/GLSL.std.450.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/GLSL.std.450.h rename to 3rdparty/spirv-cross/GLSL.std.450.h diff --git a/3rdparty/spirv-cross/LICENSE b/3rdparty/spirv-cross/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/3rdparty/spirv-cross/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document.
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/3rdparty/spirv-cross/Makefile b/3rdparty/spirv-cross/Makefile new file mode 100644 index 000000000..0564b650a --- /dev/null +++ b/3rdparty/spirv-cross/Makefile @@ -0,0 +1,41 @@ +TARGET := spirv-cross + +SOURCES := $(wildcard spirv_*.cpp) +CLI_SOURCES := main.cpp + +OBJECTS := $(SOURCES:.cpp=.o) +CLI_OBJECTS := $(CLI_SOURCES:.cpp=.o) + +STATIC_LIB := lib$(TARGET).a + +DEPS := $(OBJECTS:.o=.d) $(CLI_OBJECTS:.o=.d) + +CXXFLAGS += -std=c++11 -Wall -Wextra -Wshadow -D__STDC_LIMIT_MACROS + +ifeq ($(DEBUG), 1) + CXXFLAGS += -O0 -g +else + CXXFLAGS += -O2 -DNDEBUG +endif + +ifeq ($(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS), 1) + CXXFLAGS += -DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS -fno-exceptions +endif + +all: $(TARGET) + +-include $(DEPS) + +$(TARGET): $(CLI_OBJECTS) $(STATIC_LIB) + $(CXX) -o $@ $(CLI_OBJECTS) $(STATIC_LIB) $(LDFLAGS) + +$(STATIC_LIB): $(OBJECTS) + $(AR) rcs $@ $(OBJECTS) + +%.o: %.cpp + $(CXX) -c -o $@ $< $(CXXFLAGS) -MMD + +clean: + rm -f $(TARGET) $(OBJECTS) $(CLI_OBJECTS) $(STATIC_LIB) $(DEPS) + +.PHONY: clean diff --git a/3rdparty/spirv-cross/README.md b/3rdparty/spirv-cross/README.md new file mode 100644 index 000000000..6bf7d37d3 --- /dev/null +++ b/3rdparty/spirv-cross/README.md @@ -0,0 +1,355 @@ +# SPIRV-Cross + +SPIRV-Cross is a tool designed for parsing and converting SPIR-V to other shader languages. + +[![Build Status](https://travis-ci.org/KhronosGroup/SPIRV-Cross.svg?branch=master)](https://travis-ci.org/KhronosGroup/SPIRV-Cross) +[![Build Status](https://ci.appveyor.com/api/projects/status/github/KhronosGroup/SPIRV-Cross?svg=true&branch=master)](https://ci.appveyor.com/project/HansKristian-ARM/SPIRV-Cross) + +## Features + + - Convert SPIR-V to readable, usable and efficient GLSL + - Convert SPIR-V to readable, usable and efficient Metal Shading Language (MSL) + - Convert SPIR-V to readable, usable and efficient HLSL + - Convert SPIR-V to debuggable C++ [EXPERIMENTAL] + - Convert SPIR-V to a JSON reflection format [EXPERIMENTAL] + - Reflection API to simplify the creation of Vulkan pipeline layouts + - Reflection API to modify and tweak OpDecorations + - Supports "all" of vertex, fragment, tessellation, geometry and compute shaders. + +SPIRV-Cross tries hard to emit readable and clean output from the SPIR-V. +The goal is to emit GLSL or MSL that looks like it was written by a human and not awkward IR/assembly-like code. + +NOTE: Individual features are expected to be mostly complete, but it is possible that certain obscure GLSL features are not yet supported. +However, most missing features are expected to be "trivial" improvements at this stage. + +## Building + +SPIRV-Cross has been tested on Linux, OSX and Windows. + +The make and CMake build flavors offer the option to treat exceptions as assertions. To disable exceptions for make just append `SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS=1` to the command line. For CMake append `-DSPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS=ON`. By default exceptions are enabled. + +### Linux and macOS + +Just run `make` on the command line. A recent GCC (4.8+) or Clang (3.x+) compiler is required as SPIRV-Cross uses C++11 extensively. + +### Windows + +MinGW-w64 based compilation works with `make`, and an MSVC 2013 solution is also included. + +## Usage + +### Using the C++ API + +For more in-depth documentation than what's provided in this README, please have a look at the [Wiki](https://github.com/KhronosGroup/SPIRV-Cross/wiki). + +To perform reflection and convert to other shader languages you can use the SPIRV-Cross API. 
+For example: + +```c++ +#include "spirv_glsl.hpp" +#include <utility> +#include <vector> + +extern std::vector<uint32_t> load_spirv_file(); + +int main() +{ + // Read SPIR-V from disk or similar. + std::vector<uint32_t> spirv_binary = load_spirv_file(); + + spirv_cross::CompilerGLSL glsl(std::move(spirv_binary)); + + // The SPIR-V is now parsed, and we can perform reflection on it. + spirv_cross::ShaderResources resources = glsl.get_shader_resources(); + + // Get all sampled images in the shader. + for (auto &resource : resources.sampled_images) + { + unsigned set = glsl.get_decoration(resource.id, spv::DecorationDescriptorSet); + unsigned binding = glsl.get_decoration(resource.id, spv::DecorationBinding); + printf("Image %s at set = %u, binding = %u\n", resource.name.c_str(), set, binding); + + // Modify the decoration to prepare it for GLSL. + glsl.unset_decoration(resource.id, spv::DecorationDescriptorSet); + + // Some arbitrary remapping if we want. + glsl.set_decoration(resource.id, spv::DecorationBinding, set * 16 + binding); + } + + // Set some options. + spirv_cross::CompilerGLSL::Options options; + options.version = 310; + options.es = true; + glsl.set_options(options); + + // Compile to GLSL, ready to give to GL driver. + std::string source = glsl.compile(); +} +``` + +#### Integrating SPIRV-Cross in a custom build system + +To add SPIRV-Cross to your own codebase, just copy the source and header files from the root directory +and build the relevant .cpp files you need. Make sure to build with C++11 support, e.g. `-std=c++11` in GCC and Clang. +Alternatively, the Makefile generates a libspirv-cross.a static library during build that can be linked in. + +### Creating a SPIR-V file from GLSL with glslang + +``` +glslangValidator -H -V -o test.spv test.frag +``` + +### Converting a SPIR-V file to GLSL ES + +``` +glslangValidator -H -V -o test.spv shaders/comp/basic.comp +./spirv-cross --version 310 --es test.spv +``` + +#### Converting to desktop GLSL + +``` +glslangValidator -H -V -o test.spv shaders/comp/basic.comp +./spirv-cross --version 330 test.spv --output test.comp +``` + +#### Disable prettifying optimizations + +``` +glslangValidator -H -V -o test.spv shaders/comp/basic.comp +./spirv-cross --version 310 --es test.spv --output test.comp --force-temporary +``` + +### Using shaders generated from C++ backend + +Please see `samples/cpp` where some GLSL shaders are compiled to SPIR-V, decompiled to C++ and run with test data. +Reading through the samples should explain how to use the C++ interface. +A simple Makefile is included to build all shaders in the directory. + +### Implementation notes + +When using SPIR-V and SPIRV-Cross as an intermediate step for cross-compiling between high level languages there are some considerations to take into account, +as not all features used by one high-level language are necessarily supported natively by the target shader language. +SPIRV-Cross aims to provide the tools needed to handle these scenarios in a clean and robust way, but some manual action is required to maintain compatibility. + +#### HLSL source to GLSL + +##### HLSL entry points + +When using SPIR-V shaders compiled from HLSL, there are some extra things you need to take care of. +First make sure that the entry point is used correctly. +If you forget to set the entry point correctly in glslangValidator (-e MyFancyEntryPoint), +you will likely encounter this error message: + +``` +Cannot end a function before ending the current block.
+Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid. +``` + +##### Vertex/Fragment interface linking + +HLSL relies on semantics in order to effectively link together shader stages. In the SPIR-V generated by glslang, the transformation from HLSL to GLSL ends up looking like + +```c++ +struct VSOutput { + // SV_Position is rerouted to gl_Position + float4 position : SV_Position; + float4 coord : TEXCOORD0; +}; + +VSOutput main(...) {} +``` + +```c++ +struct VSOutput { + float4 coord; +} +layout(location = 0) out VSOutput _magicNameGeneratedByGlslang; +``` + +While this works, be aware of the struct type that is used in the vertex stage and the fragment stage. +There may be issues if the structure type name differs between the vertex stage and the fragment stage. + +You can make use of the reflection interface to force the name of the struct type. + +``` +// Something like this for both vertex outputs and fragment inputs. +compiler.set_name(varying_resource.base_type_id, "VertexFragmentLinkage"); +``` + +Some platforms (macOS, for example) may require identical variable names for both vertex outputs and fragment inputs. +To rename interface variables based on their location, add +``` +--rename-interface-variable +``` + +#### HLSL source to legacy GLSL/ESSL + +HLSL tends to emit varying struct types to pass data between vertex and fragment. +This is not supported in legacy GL/GLES targets, so to support this, varying structs are flattened. +This is done automatically, but the API user might need to be aware that this is happening in order to support all cases. + +Modern GLES code like this: +```c++ +struct Output { + vec4 a; + vec2 b; +}; +out Output vout; +``` + +Is transformed into: +```c++ +struct Output { + vec4 a; + vec2 b; +}; +varying vec4 Output_a; +varying vec2 Output_b; +``` + +Note that now, both the struct name and the member names will participate in the linking interface between vertex and fragment, so +API users might want to ensure that both the struct names and member names match so that vertex outputs and fragment inputs can link properly. + + +#### Separate image samplers (HLSL/Vulkan) for backends which do not support it (GLSL) + +Another thing to remember is that samplers and textures in HLSL are separable, and not directly compatible with GLSL. If you need to use this with desktop GL/GLES, you need to call `Compiler::build_combined_image_samplers` first before calling `Compiler::compile`, or you will get an exception. + +```c++ +// From main.cpp +// Builds a mapping for all combinations of images and samplers. +compiler->build_combined_image_samplers(); + +// Give the remapped combined samplers new names. +// Here you can also set up decorations if you want (binding = #N). +for (auto &remap : compiler->get_combined_image_samplers()) +{ + compiler->set_name(remap.combined_id, join("SPIRV_Cross_Combined", compiler->get_name(remap.image_id), + compiler->get_name(remap.sampler_id))); +} +``` + +If your target is Vulkan GLSL, `--vulkan-semantics` will emit separate image samplers as you'd expect. +The command line client calls `Compiler::build_combined_image_samplers` automatically, but if you're calling the library, you'll need to do this yourself. + +#### Descriptor sets (Vulkan GLSL) for backends which do not support them (HLSL/GLSL/Metal) + +Descriptor sets are unique to Vulkan, so make sure that descriptor set + binding is remapped to a flat binding scheme (set always 0), so that other APIs can make sense of the bindings.
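For illustration, a minimal sketch of such a remap through the reflection API; the `compiler` instance, the choice of resource categories, and the stride of 16 bindings per set are assumptions for the example, not something SPIRV-Cross mandates.

```c++
// Sketch only: flatten (set, binding) pairs into a single binding space, set forced to 0.
spirv_cross::ShaderResources res = compiler.get_shader_resources();
auto flatten = [&](const spirv_cross::Resource &resource) {
	unsigned set = compiler.get_decoration(resource.id, spv::DecorationDescriptorSet);
	unsigned binding = compiler.get_decoration(resource.id, spv::DecorationBinding);
	compiler.set_decoration(resource.id, spv::DecorationDescriptorSet, 0);
	compiler.set_decoration(resource.id, spv::DecorationBinding, set * 16 + binding);
};
for (auto &r : res.uniform_buffers) flatten(r);
for (auto &r : res.storage_buffers) flatten(r);
for (auto &r : res.sampled_images) flatten(r);
for (auto &r : res.storage_images) flatten(r);
```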
+This can be done with `Compiler::set_decoration(id, spv::DecorationDescriptorSet)`. + +#### Linking by name for targets which do not support explicit locations (legacy GLSL/ESSL) + +Modern GLSL and HLSL sources (and SPIR-V) relies on explicit layout(location) qualifiers to guide the linking process between shader stages, +but older GLSL relies on symbol names to perform the linking. When emitting shaders with older versions, these layout statements will be removed, +so it is important that the API user ensures that the names of I/O variables are sanitized so that linking will work properly. +The reflection API can rename variables, struct types and struct members to deal with these scenarios using `Compiler::set_name` and friends. + +#### Clip-space conventions + +SPIRV-Cross can perform some common clip space conversions on gl_Position/SV_Position by enabling `CompilerGLSL::Options.vertex.fixup_clipspace`. +While this can be convenient, it is recommended to modify the projection matrices instead as that can achieve the same result. + +For GLSL targets, enabling this will convert a shader which assumes `[0, w]` depth range (Vulkan / D3D / Metal) into `[-w, w]` range. +For MSL and HLSL targets, enabling this will convert a shader in `[-w, w]` depth range (OpenGL) to `[0, w]` depth range. + +By default, the CLI will not enable `fixup_clipspace`, but in the API you might want to set an explicit value using `CompilerGLSL::set_options()`. + +Y-flipping of gl_Position and similar is also supported. +The use of this is discouraged, because relying on vertex shader Y-flipping tends to get quite messy. +To enable this, set `CompilerGLSL::Options.vertex.flip_vert_y` or `--flip-vert-y` in CLI. + +## Contributing + +Contributions to SPIRV-Cross are welcome. See Testing and Licensing sections for details. + +### Testing + +SPIRV-Cross maintains a test suite of shaders with reference output of how the output looks after going through a roundtrip through +glslangValidator then back through SPIRV-Cross again. The reference files are stored inside the repository in order to be able to track regressions. + +All pull requests should ensure that test output does not change unexpectedly. This can be tested with: + +``` +./test_shaders.py shaders +./test_shaders.py shaders --opt +./test_shaders.py shaders-hlsl --hlsl +./test_shaders.py shaders-hlsl --hlsl --opt +./test_shaders.py shaders-msl --msl +./test_shaders.py shaders-msl --msl --opt +``` + +although there are a couple of convenience script for doing this: + +``` +./checkout_glslang_spirv_tools.sh # Checks out glslang and SPIRV-Tools at a fixed revision which matches the reference output. +./build_glslang_spirv_tools.sh # Builds glslang and SPIRV-Tools. +./test_shaders.sh # Runs over all changes and makes sure that there are no deltas compared to reference files. +``` + +However, when improving SPIRV-Cross there are of course legitimate cases where reference output should change. +In these cases, run: + +``` +./update_test_shaders.sh +``` + +to update the reference files and include these changes as part of the pull request. +Always make sure you are running the correct version of glslangValidator as well as SPIRV-Tools when updating reference files. +See `checkout_glslang_spirv_tools.sh`. + +In short, the master branch should always be able to run `./test_shaders.py shaders` and friends without failure. +SPIRV-Cross uses Travis CI to test all pull requests, so it is not strictly needed to perform testing yourself if you have problems running it locally. 
+A pull request which does not pass testing on Travis will not be accepted however. + +When adding support for new features to SPIRV-Cross, a new shader and reference file should be added which covers usage of the new shader features in question. + +### Licensing + +Contributors of new files should add a copyright header at the top of every new source code file with their copyright +along with the Apache 2.0 licensing stub. + +### Formatting + +SPIRV-Cross uses `clang-format` to automatically format code. +Please use `clang-format` with the style sheet found in `.clang-format` to automatically format code before submitting a pull request. + +To make things easy, the `format_all.sh` script can be used to format all +source files in the library. In this directory, run the following from the +command line: + + ./format_all.sh + +## ABI concerns + +### SPIR-V headers + +The current repository uses the latest SPIR-V and GLSL.std.450 headers. +SPIR-V files created from older headers could have ABI issues. + +## Regression testing + +In shaders/ a collection of shaders are maintained for purposes of regression testing. +The current reference output is contained in reference/. +`./test_shaders.py shaders` can be run to perform regression testing. + +See `./test_shaders.py --help` for more. + +### Metal backend + +To test the roundtrip path GLSL -> SPIR-V -> MSL, `--msl` can be added, e.g. `./test_shaders.py --msl shaders-msl`. + +### HLSL backend + +To test the roundtrip path GLSL -> SPIR-V -> HLSL, `--hlsl` can be added, e.g. `./test_shaders.py --hlsl shaders-hlsl`. + +### Updating regression tests + +When legitimate changes are found, use `--update` flag to update regression files. +Otherwise, `./test_shaders.py` will fail with error code. + +### Mali Offline Compiler cycle counts + +To obtain a CSV of static shader cycle counts before and after going through spirv-cross, add +`--malisc` flag to `./test_shaders`. This requires the Mali Offline Compiler to be installed in PATH. + diff --git a/3rdparty/spirv-cross/appveyor.yml b/3rdparty/spirv-cross/appveyor.yml new file mode 100644 index 000000000..2f427f180 --- /dev/null +++ b/3rdparty/spirv-cross/appveyor.yml @@ -0,0 +1,31 @@ + +environment: + matrix: + - GENERATOR: "Visual Studio 12 2013 Win64" + CONFIG: Debug + + - GENERATOR: "Visual Studio 12 2013 Win64" + CONFIG: Release + + - GENERATOR: "Visual Studio 14 2015 Win64" + CONFIG: Debug + + - GENERATOR: "Visual Studio 14 2015 Win64" + CONFIG: Release + + - GENERATOR: "Visual Studio 12 2013" + CONFIG: Debug + + - GENERATOR: "Visual Studio 12 2013" + CONFIG: Release + + - GENERATOR: "Visual Studio 14 2015" + CONFIG: Debug + + - GENERATOR: "Visual Studio 14 2015" + CONFIG: Release + +build_script: + - git submodule update --init + - cmake "-G%GENERATOR%" -H. -B_builds + - cmake --build _builds --config "%CONFIG%" diff --git a/3rdparty/spirv-cross/build_glslang_spirv_tools.sh b/3rdparty/spirv-cross/build_glslang_spirv_tools.sh new file mode 100755 index 000000000..7689c2678 --- /dev/null +++ b/3rdparty/spirv-cross/build_glslang_spirv_tools.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +PROFILE=Release +if [ ! -z $1 ]; then + PROFILE=$1 +fi + +if [ ! -z $2 ]; then + NPROC="--parallel $2" +fi + +echo "Building glslang." +mkdir -p external/glslang-build +cd external/glslang-build +cmake ../glslang -DCMAKE_BUILD_TYPE=$PROFILE -DCMAKE_INSTALL_PREFIX=output +cmake --build . --config $PROFILE --target install ${NPROC} +cd ../.. + +echo "Building SPIRV-Tools." 
+mkdir -p external/spirv-tools-build +cd external/spirv-tools-build +cmake ../spirv-tools -DCMAKE_BUILD_TYPE=$PROFILE -DSPIRV_WERROR=OFF -DCMAKE_INSTALL_PREFIX=output +cmake --build . --config $PROFILE --target install ${NPROC} +cd ../.. + diff --git a/3rdparty/spirv-cross/checkout_glslang_spirv_tools.sh b/3rdparty/spirv-cross/checkout_glslang_spirv_tools.sh new file mode 100755 index 000000000..c90d1daae --- /dev/null +++ b/3rdparty/spirv-cross/checkout_glslang_spirv_tools.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +GLSLANG_REV=91ac4290bcf2cb930b4fb0981f09c00c0b6797e1 +SPIRV_TOOLS_REV=9bfe0eb25e3dfdf4f3fd86ab6c0cda009c9bd661 +SPIRV_HEADERS_REV=d5b2e1255f706ce1f88812217e9a554f299848af + +if [ -d external/glslang ]; then + echo "Updating glslang to revision $GLSLANG_REV." + cd external/glslang + git fetch origin + git checkout $GLSLANG_REV +else + echo "Cloning glslang revision $GLSLANG_REV." + mkdir -p external + cd external + git clone git://github.com/KhronosGroup/glslang.git + cd glslang + git checkout $GLSLANG_REV +fi +cd ../.. + +if [ -d external/spirv-tools ]; then + echo "Updating SPIRV-Tools to revision $SPIRV_TOOLS_REV." + cd external/spirv-tools + git fetch origin + git checkout $SPIRV_TOOLS_REV +else + echo "Cloning SPIRV-Tools revision $SPIRV_TOOLS_REV." + mkdir -p external + cd external + git clone git://github.com/KhronosGroup/SPIRV-Tools.git spirv-tools + cd spirv-tools + git checkout $SPIRV_TOOLS_REV +fi + +if [ -d external/spirv-headers ]; then + cd external/spirv-headers + git pull origin master + git checkout $SPIRV_HEADERS_REV + cd ../.. +else + git clone git://github.com/KhronosGroup/SPIRV-Headers.git external/spirv-headers + cd external/spirv-headers + git checkout $SPIRV_HEADERS_REV + cd ../.. +fi + +cd ../.. + diff --git a/3rdparty/spirv-cross/format_all.sh b/3rdparty/spirv-cross/format_all.sh new file mode 100755 index 000000000..fcfffc57f --- /dev/null +++ b/3rdparty/spirv-cross/format_all.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +#for file in spirv_*.{cpp,hpp} include/spirv_cross/*.{hpp,h} samples/cpp/*.cpp main.cpp +for file in spirv_*.{cpp,hpp} main.cpp +do + echo "Formatting file: $file ..." + clang-format -style=file -i $file +done diff --git a/3rdparty/spirv-cross/include/spirv_cross/barrier.hpp b/3rdparty/spirv-cross/include/spirv_cross/barrier.hpp new file mode 100644 index 000000000..bfcd22843 --- /dev/null +++ b/3rdparty/spirv-cross/include/spirv_cross/barrier.hpp @@ -0,0 +1,79 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SPIRV_CROSS_BARRIER_HPP +#define SPIRV_CROSS_BARRIER_HPP + +#include <atomic> +#include <thread> + +namespace spirv_cross +{ +class Barrier +{ +public: + Barrier() + { + count.store(0); + iteration.store(0); + } + + void set_release_divisor(unsigned divisor) + { + this->divisor = divisor; + } + + static inline void memoryBarrier() + { + std::atomic_thread_fence(std::memory_order_seq_cst); + } + + void reset_counter() + { + count.store(0); + iteration.store(0); + } + + void wait() + { + unsigned target_iteration = iteration.load(std::memory_order_relaxed) + 1; + // Overflows cleanly. + unsigned target_count = divisor * target_iteration; + + // Barriers don't enforce memory ordering. + // Be as relaxed about the barrier as we possibly can! + unsigned c = count.fetch_add(1u, std::memory_order_relaxed); + + if (c + 1 == target_count) + { + iteration.store(target_iteration, std::memory_order_relaxed); + } + else + { + // If we have more threads than the CPU, don't hog the CPU for very long periods of time. + while (iteration.load(std::memory_order_relaxed) != target_iteration) + std::this_thread::yield(); + } + } + +private: + unsigned divisor = 1; + std::atomic<unsigned> count; + std::atomic<unsigned> iteration; +}; +} + +#endif diff --git a/3rdparty/spirv-cross/include/spirv_cross/external_interface.h b/3rdparty/spirv-cross/include/spirv_cross/external_interface.h new file mode 100644 index 000000000..1d26f1e1e --- /dev/null +++ b/3rdparty/spirv-cross/include/spirv_cross/external_interface.h @@ -0,0 +1,126 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef SPIRV_CROSS_EXTERNAL_INTERFACE_H +#define SPIRV_CROSS_EXTERNAL_INTERFACE_H + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +typedef struct spirv_cross_shader spirv_cross_shader_t; + +struct spirv_cross_interface +{ + spirv_cross_shader_t *(*construct)(void); + void (*destruct)(spirv_cross_shader_t *thiz); + void (*invoke)(spirv_cross_shader_t *thiz); +}; + +void spirv_cross_set_stage_input(spirv_cross_shader_t *thiz, unsigned location, void *data, size_t size); + +void spirv_cross_set_stage_output(spirv_cross_shader_t *thiz, unsigned location, void *data, size_t size); + +void spirv_cross_set_push_constant(spirv_cross_shader_t *thiz, void *data, size_t size); + +void spirv_cross_set_uniform_constant(spirv_cross_shader_t *thiz, unsigned location, void *data, size_t size); + +void spirv_cross_set_resource(spirv_cross_shader_t *thiz, unsigned set, unsigned binding, void **data, size_t size); + +const struct spirv_cross_interface *spirv_cross_get_interface(void); + +typedef enum spirv_cross_builtin { + SPIRV_CROSS_BUILTIN_POSITION = 0, + SPIRV_CROSS_BUILTIN_FRAG_COORD = 1, + SPIRV_CROSS_BUILTIN_WORK_GROUP_ID = 2, + SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS = 3, + SPIRV_CROSS_NUM_BUILTINS +} spirv_cross_builtin; + +void spirv_cross_set_builtin(spirv_cross_shader_t *thiz, spirv_cross_builtin builtin, void *data, size_t size); + +#define SPIRV_CROSS_NUM_DESCRIPTOR_SETS 4 +#define SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS 16 +#define SPIRV_CROSS_NUM_STAGE_INPUTS 16 +#define SPIRV_CROSS_NUM_STAGE_OUTPUTS 16 +#define SPIRV_CROSS_NUM_UNIFORM_CONSTANTS 32 + +enum spirv_cross_format +{ + SPIRV_CROSS_FORMAT_R8_UNORM = 0, + SPIRV_CROSS_FORMAT_R8G8_UNORM = 1, + SPIRV_CROSS_FORMAT_R8G8B8_UNORM = 2, + SPIRV_CROSS_FORMAT_R8G8B8A8_UNORM = 3, + + SPIRV_CROSS_NUM_FORMATS +}; + +enum spirv_cross_wrap +{ + SPIRV_CROSS_WRAP_CLAMP_TO_EDGE = 0, + SPIRV_CROSS_WRAP_REPEAT = 1, + + SPIRV_CROSS_NUM_WRAP +}; + +enum spirv_cross_filter +{ + SPIRV_CROSS_FILTER_NEAREST = 0, + SPIRV_CROSS_FILTER_LINEAR = 1, + + SPIRV_CROSS_NUM_FILTER +}; + +enum spirv_cross_mipfilter +{ + SPIRV_CROSS_MIPFILTER_BASE = 0, + SPIRV_CROSS_MIPFILTER_NEAREST = 1, + SPIRV_CROSS_MIPFILTER_LINEAR = 2, + + SPIRV_CROSS_NUM_MIPFILTER +}; + +struct spirv_cross_miplevel +{ + const void *data; + unsigned width, height; + size_t stride; +}; + +struct spirv_cross_sampler_info +{ + const struct spirv_cross_miplevel *mipmaps; + unsigned num_mipmaps; + + enum spirv_cross_format format; + enum spirv_cross_wrap wrap_s; + enum spirv_cross_wrap wrap_t; + enum spirv_cross_filter min_filter; + enum spirv_cross_filter mag_filter; + enum spirv_cross_mipfilter mip_filter; +}; + +typedef struct spirv_cross_sampler_2d spirv_cross_sampler_2d_t; +spirv_cross_sampler_2d_t *spirv_cross_create_sampler_2d(const struct spirv_cross_sampler_info *info); +void spirv_cross_destroy_sampler_2d(spirv_cross_sampler_2d_t *samp); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/3rdparty/spirv-cross/include/spirv_cross/image.hpp b/3rdparty/spirv-cross/include/spirv_cross/image.hpp new file mode 100644 index 000000000..73de894f8 --- /dev/null +++ b/3rdparty/spirv-cross/include/spirv_cross/image.hpp @@ -0,0 +1,62 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_IMAGE_HPP +#define SPIRV_CROSS_IMAGE_HPP + +#ifndef GLM_SWIZZLE +#define GLM_SWIZZLE +#endif + +#ifndef GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS +#endif + +#include + +namespace spirv_cross +{ +template +struct image2DBase +{ + virtual ~image2DBase() = default; + inline virtual T load(glm::ivec2 coord) const + { + return T(0, 0, 0, 1); + } + inline virtual void store(glm::ivec2 coord, const T &v) + { + } +}; + +typedef image2DBase image2D; +typedef image2DBase iimage2D; +typedef image2DBase uimage2D; + +template +inline T imageLoad(const image2DBase &image, glm::ivec2 coord) +{ + return image.load(coord); +} + +template +void imageStore(image2DBase &image, glm::ivec2 coord, const T &value) +{ + image.store(coord, value); +} +} + +#endif diff --git a/3rdparty/spirv-cross/include/spirv_cross/internal_interface.hpp b/3rdparty/spirv-cross/include/spirv_cross/internal_interface.hpp new file mode 100644 index 000000000..e56223dfd --- /dev/null +++ b/3rdparty/spirv-cross/include/spirv_cross/internal_interface.hpp @@ -0,0 +1,603 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_INTERNAL_INTERFACE_HPP +#define SPIRV_CROSS_INTERNAL_INTERFACE_HPP + +// This file must only be included by the shader generated by spirv-cross! + +#ifndef GLM_FORCE_SWIZZLE +#define GLM_FORCE_SWIZZLE +#endif + +#ifndef GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS +#endif + +#include + +#include "barrier.hpp" +#include "external_interface.h" +#include "image.hpp" +#include "sampler.hpp" +#include "thread_group.hpp" +#include +#include + +namespace internal +{ +// Adaptor helpers to adapt GLSL access chain syntax to C++. +// Don't bother with arrays of arrays on uniforms ... +// Would likely need horribly complex variadic template munging. + +template +struct Interface +{ + enum + { + ArraySize = 1, + Size = sizeof(T) + }; + + Interface() + : ptr(0) + { + } + T &get() + { + assert(ptr); + return *ptr; + } + + T *ptr; +}; + +// For array types, return a pointer instead. +template +struct Interface +{ + enum + { + ArraySize = U, + Size = U * sizeof(T) + }; + + Interface() + : ptr(0) + { + } + T *get() + { + assert(ptr); + return ptr; + } + + T *ptr; +}; + +// For case when array size is 1, avoid double dereference. 
+template +struct PointerInterface +{ + enum + { + ArraySize = 1, + Size = sizeof(T *) + }; + enum + { + PreDereference = true + }; + + PointerInterface() + : ptr(0) + { + } + + T &get() + { + assert(ptr); + return *ptr; + } + + T *ptr; +}; + +// Automatically converts a pointer down to reference to match GLSL syntax. +template +struct DereferenceAdaptor +{ + DereferenceAdaptor(T **ptr) + : ptr(ptr) + { + } + T &operator[](unsigned index) const + { + return *(ptr[index]); + } + T **ptr; +}; + +// We can't have a linear array of T* since T* can be an abstract type in case of samplers. +// We also need a list of pointers since we can have run-time length SSBOs. +template +struct PointerInterface +{ + enum + { + ArraySize = U, + Size = sizeof(T *) * U + }; + enum + { + PreDereference = false + }; + PointerInterface() + : ptr(0) + { + } + + DereferenceAdaptor get() + { + assert(ptr); + return DereferenceAdaptor(ptr); + } + + T **ptr; +}; + +// Resources can be more abstract and be unsized, +// so we need to have an array of pointers for those cases. +template +struct Resource : PointerInterface +{ +}; + +// POD with no unknown sizes, so we can express these as flat arrays. +template +struct UniformConstant : Interface +{ +}; +template +struct StageInput : Interface +{ +}; +template +struct StageOutput : Interface +{ +}; +template +struct PushConstant : Interface +{ +}; +} + +struct spirv_cross_shader +{ + struct PPSize + { + PPSize() + : ptr(0) + , size(0) + { + } + void **ptr; + size_t size; + }; + + struct PPSizeResource + { + PPSizeResource() + : ptr(0) + , size(0) + , pre_dereference(false) + { + } + void **ptr; + size_t size; + bool pre_dereference; + }; + + PPSizeResource resources[SPIRV_CROSS_NUM_DESCRIPTOR_SETS][SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS]; + PPSize stage_inputs[SPIRV_CROSS_NUM_STAGE_INPUTS]; + PPSize stage_outputs[SPIRV_CROSS_NUM_STAGE_OUTPUTS]; + PPSize uniform_constants[SPIRV_CROSS_NUM_UNIFORM_CONSTANTS]; + PPSize push_constant; + PPSize builtins[SPIRV_CROSS_NUM_BUILTINS]; + + template + void register_builtin(spirv_cross_builtin builtin, const U &value) + { + assert(!builtins[builtin].ptr); + + builtins[builtin].ptr = (void **)&value.ptr; + builtins[builtin].size = sizeof(*value.ptr) * U::ArraySize; + } + + void set_builtin(spirv_cross_builtin builtin, void *data, size_t size) + { + assert(builtins[builtin].ptr); + assert(size >= builtins[builtin].size); + + *builtins[builtin].ptr = data; + } + + template + void register_resource(const internal::Resource &value, unsigned set, unsigned binding) + { + assert(set < SPIRV_CROSS_NUM_DESCRIPTOR_SETS); + assert(binding < SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS); + assert(!resources[set][binding].ptr); + + resources[set][binding].ptr = (void **)&value.ptr; + resources[set][binding].size = internal::Resource::Size; + resources[set][binding].pre_dereference = internal::Resource::PreDereference; + } + + template + void register_stage_input(const internal::StageInput &value, unsigned location) + { + assert(location < SPIRV_CROSS_NUM_STAGE_INPUTS); + assert(!stage_inputs[location].ptr); + + stage_inputs[location].ptr = (void **)&value.ptr; + stage_inputs[location].size = internal::StageInput::Size; + } + + template + void register_stage_output(const internal::StageOutput &value, unsigned location) + { + assert(location < SPIRV_CROSS_NUM_STAGE_OUTPUTS); + assert(!stage_outputs[location].ptr); + + stage_outputs[location].ptr = (void **)&value.ptr; + stage_outputs[location].size = internal::StageOutput::Size; + } + + template + void 
register_uniform_constant(const internal::UniformConstant &value, unsigned location) + { + assert(location < SPIRV_CROSS_NUM_UNIFORM_CONSTANTS); + assert(!uniform_constants[location].ptr); + + uniform_constants[location].ptr = (void **)&value.ptr; + uniform_constants[location].size = internal::UniformConstant::Size; + } + + template + void register_push_constant(const internal::PushConstant &value) + { + assert(!push_constant.ptr); + + push_constant.ptr = (void **)&value.ptr; + push_constant.size = internal::PushConstant::Size; + } + + void set_stage_input(unsigned location, void *data, size_t size) + { + assert(location < SPIRV_CROSS_NUM_STAGE_INPUTS); + assert(stage_inputs[location].ptr); + assert(size >= stage_inputs[location].size); + + *stage_inputs[location].ptr = data; + } + + void set_stage_output(unsigned location, void *data, size_t size) + { + assert(location < SPIRV_CROSS_NUM_STAGE_OUTPUTS); + assert(stage_outputs[location].ptr); + assert(size >= stage_outputs[location].size); + + *stage_outputs[location].ptr = data; + } + + void set_uniform_constant(unsigned location, void *data, size_t size) + { + assert(location < SPIRV_CROSS_NUM_UNIFORM_CONSTANTS); + assert(uniform_constants[location].ptr); + assert(size >= uniform_constants[location].size); + + *uniform_constants[location].ptr = data; + } + + void set_push_constant(void *data, size_t size) + { + assert(push_constant.ptr); + assert(size >= push_constant.size); + + *push_constant.ptr = data; + } + + void set_resource(unsigned set, unsigned binding, void **data, size_t size) + { + assert(set < SPIRV_CROSS_NUM_DESCRIPTOR_SETS); + assert(binding < SPIRV_CROSS_NUM_DESCRIPTOR_BINDINGS); + assert(resources[set][binding].ptr); + assert(size >= resources[set][binding].size); + + // We're using the regular PointerInterface, dereference ahead of time. 
+ if (resources[set][binding].pre_dereference) + *resources[set][binding].ptr = *data; + else + *resources[set][binding].ptr = data; + } +}; + +namespace spirv_cross +{ +template +struct BaseShader : spirv_cross_shader +{ + void invoke() + { + static_cast(this)->main(); + } +}; + +struct FragmentResources +{ + internal::StageOutput gl_FragCoord; + void init(spirv_cross_shader &s) + { + s.register_builtin(SPIRV_CROSS_BUILTIN_FRAG_COORD, gl_FragCoord); + } +#define gl_FragCoord __res->gl_FragCoord.get() +}; + +template +struct FragmentShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + FragmentShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct VertexResources +{ + internal::StageOutput gl_Position; + void init(spirv_cross_shader &s) + { + s.register_builtin(SPIRV_CROSS_BUILTIN_POSITION, gl_Position); + } +#define gl_Position __res->gl_Position.get() +}; + +template +struct VertexShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + VertexShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct TessEvaluationResources +{ + inline void init(spirv_cross_shader &) + { + } +}; + +template +struct TessEvaluationShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + TessEvaluationShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct TessControlResources +{ + inline void init(spirv_cross_shader &) + { + } +}; + +template +struct TessControlShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + TessControlShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct GeometryResources +{ + inline void init(spirv_cross_shader &) + { + } +}; + +template +struct GeometryShader : BaseShader> +{ + inline void main() + { + impl.main(); + } + + GeometryShader() + { + resources.init(*this); + impl.__res = &resources; + } + + T impl; + Res resources; +}; + +struct ComputeResources +{ + internal::StageInput gl_WorkGroupID__; + internal::StageInput gl_NumWorkGroups__; + void init(spirv_cross_shader &s) + { + s.register_builtin(SPIRV_CROSS_BUILTIN_WORK_GROUP_ID, gl_WorkGroupID__); + s.register_builtin(SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS, gl_NumWorkGroups__); + } +#define gl_WorkGroupID __res->gl_WorkGroupID__.get() +#define gl_NumWorkGroups __res->gl_NumWorkGroups__.get() + + Barrier barrier__; +#define barrier() __res->barrier__.wait() +}; + +struct ComputePrivateResources +{ + uint32_t gl_LocalInvocationIndex__; +#define gl_LocalInvocationIndex __priv_res.gl_LocalInvocationIndex__ + glm::uvec3 gl_LocalInvocationID__; +#define gl_LocalInvocationID __priv_res.gl_LocalInvocationID__ + glm::uvec3 gl_GlobalInvocationID__; +#define gl_GlobalInvocationID __priv_res.gl_GlobalInvocationID__ +}; + +template +struct ComputeShader : BaseShader> +{ + inline void main() + { + resources.barrier__.reset_counter(); + + for (unsigned z = 0; z < WorkGroupZ; z++) + for (unsigned y = 0; y < WorkGroupY; y++) + for (unsigned x = 0; x < WorkGroupX; x++) + impl[z][y][x].__priv_res.gl_GlobalInvocationID__ = + glm::uvec3(WorkGroupX, WorkGroupY, WorkGroupZ) * resources.gl_WorkGroupID__.get() + + glm::uvec3(x, y, z); + + group.run(); + group.wait(); + } + + ComputeShader() + : group(&impl[0][0][0]) + { + resources.init(*this); + resources.barrier__.set_release_divisor(WorkGroupX * WorkGroupY * WorkGroupZ); + + unsigned i = 0; + for (unsigned z = 0; 
z < WorkGroupZ; z++) + { + for (unsigned y = 0; y < WorkGroupY; y++) + { + for (unsigned x = 0; x < WorkGroupX; x++) + { + impl[z][y][x].__priv_res.gl_LocalInvocationID__ = glm::uvec3(x, y, z); + impl[z][y][x].__priv_res.gl_LocalInvocationIndex__ = i++; + impl[z][y][x].__res = &resources; + } + } + } + } + + T impl[WorkGroupZ][WorkGroupY][WorkGroupX]; + ThreadGroup group; + Res resources; +}; + +inline void memoryBarrierShared() +{ + Barrier::memoryBarrier(); +} +inline void memoryBarrier() +{ + Barrier::memoryBarrier(); +} +// TODO: Rest of the barriers. + +// Atomics +template +inline T atomicAdd(T &v, T a) +{ + static_assert(sizeof(std::atomic) == sizeof(T), "Cannot cast properly to std::atomic."); + + // We need explicit memory barriers in GLSL to enfore any ordering. + // FIXME: Can we really cast this? There is no other way I think ... + return std::atomic_fetch_add_explicit(reinterpret_cast *>(&v), a, std::memory_order_relaxed); +} +} + +void spirv_cross_set_stage_input(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size) +{ + shader->set_stage_input(location, data, size); +} + +void spirv_cross_set_stage_output(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size) +{ + shader->set_stage_output(location, data, size); +} + +void spirv_cross_set_uniform_constant(spirv_cross_shader_t *shader, unsigned location, void *data, size_t size) +{ + shader->set_uniform_constant(location, data, size); +} + +void spirv_cross_set_resource(spirv_cross_shader_t *shader, unsigned set, unsigned binding, void **data, size_t size) +{ + shader->set_resource(set, binding, data, size); +} + +void spirv_cross_set_push_constant(spirv_cross_shader_t *shader, void *data, size_t size) +{ + shader->set_push_constant(data, size); +} + +void spirv_cross_set_builtin(spirv_cross_shader_t *shader, spirv_cross_builtin builtin, void *data, size_t size) +{ + shader->set_builtin(builtin, data, size); +} + +#endif diff --git a/3rdparty/spirv-cross/include/spirv_cross/sampler.hpp b/3rdparty/spirv-cross/include/spirv_cross/sampler.hpp new file mode 100644 index 000000000..a95d489e2 --- /dev/null +++ b/3rdparty/spirv-cross/include/spirv_cross/sampler.hpp @@ -0,0 +1,105 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+#ifndef SPIRV_CROSS_SAMPLER_HPP
+#define SPIRV_CROSS_SAMPLER_HPP
+
+#include <vector>
+
+namespace spirv_cross
+{
+struct spirv_cross_sampler_2d
+{
+	inline virtual ~spirv_cross_sampler_2d()
+	{
+	}
+};
+
+template <typename T>
+struct sampler2DBase : spirv_cross_sampler_2d
+{
+	sampler2DBase(const spirv_cross_sampler_info *info)
+	{
+		mips.insert(mips.end(), info->mipmaps, info->mipmaps + info->num_mipmaps);
+		format = info->format;
+		wrap_s = info->wrap_s;
+		wrap_t = info->wrap_t;
+		min_filter = info->min_filter;
+		mag_filter = info->mag_filter;
+		mip_filter = info->mip_filter;
+	}
+
+	inline virtual T sample(glm::vec2 uv, float bias)
+	{
+		return sampleLod(uv, bias);
+	}
+
+	inline virtual T sampleLod(glm::vec2 uv, float lod)
+	{
+		if (mag_filter == SPIRV_CROSS_FILTER_NEAREST)
+		{
+			uv.x = wrap(uv.x, wrap_s, mips[0].width);
+			uv.y = wrap(uv.y, wrap_t, mips[0].height);
+			glm::vec2 uv_full = uv * glm::vec2(mips[0].width, mips[0].height);
+
+			int x = int(uv_full.x);
+			int y = int(uv_full.y);
+			return sample(x, y, 0);
+		}
+		else
+		{
+			return T(0, 0, 0, 1);
+		}
+	}
+
+	inline float wrap(float v, spirv_cross_wrap wrap, unsigned size)
+	{
+		switch (wrap)
+		{
+		case SPIRV_CROSS_WRAP_REPEAT:
+			return v - glm::floor(v);
+		case SPIRV_CROSS_WRAP_CLAMP_TO_EDGE:
+		{
+			float half = 0.5f / size;
+			return glm::clamp(v, half, 1.0f - half);
+		}
+
+		default:
+			return 0.0f;
+		}
+	}
+
+	std::vector<spirv_cross_miplevel> mips;
+	spirv_cross_format format;
+	spirv_cross_wrap wrap_s;
+	spirv_cross_format wrap_t;
+	spirv_cross_filter min_filter;
+	spirv_cross_filter mag_filter;
+	spirv_cross_mipfilter mip_filter;
+};
+
+typedef sampler2DBase<glm::vec4> sampler2D;
+typedef sampler2DBase<glm::ivec4> isampler2D;
+typedef sampler2DBase<glm::uvec4> usampler2D;
+
+template <typename T>
+inline T texture(const sampler2DBase<T> &samp, const glm::vec2 &uv, float bias = 0.0f)
+{
+	return samp.sample(uv, bias);
+}
+}
+
+#endif
diff --git a/3rdparty/spirv-cross/include/spirv_cross/thread_group.hpp b/3rdparty/spirv-cross/include/spirv_cross/thread_group.hpp
new file mode 100644
index 000000000..377f098b4
--- /dev/null
+++ b/3rdparty/spirv-cross/include/spirv_cross/thread_group.hpp
@@ -0,0 +1,113 @@
+/*
+ * Copyright 2015-2017 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SPIRV_CROSS_THREAD_GROUP_HPP
+#define SPIRV_CROSS_THREAD_GROUP_HPP
+
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+namespace spirv_cross
+{
+template <typename T, unsigned Size>
+class ThreadGroup
+{
+public:
+	ThreadGroup(T *impl)
+	{
+		for (unsigned i = 0; i < Size; i++)
+			workers[i].start(&impl[i]);
+	}
+
+	void run()
+	{
+		for (auto &worker : workers)
+			worker.run();
+	}
+
+	void wait()
+	{
+		for (auto &worker : workers)
+			worker.wait();
+	}
+
+private:
+	struct Thread
+	{
+		enum State
+		{
+			Idle,
+			Running,
+			Dying
+		};
+		State state = Idle;
+
+		void start(T *impl)
+		{
+			worker = std::thread([impl, this] {
+				for (;;)
+				{
+					{
+						std::unique_lock<std::mutex> l{ lock };
+						cond.wait(l, [this] { return state != Idle; });
+						if (state == Dying)
+							break;
+					}
+
+					impl->main();
+
+					std::lock_guard<std::mutex> l{ lock };
+					state = Idle;
+					cond.notify_one();
+				}
+			});
+		}
+
+		void wait()
+		{
+			std::unique_lock<std::mutex> l{ lock };
+			cond.wait(l, [this] { return state == Idle; });
+		}
+
+		void run()
+		{
+			std::lock_guard<std::mutex> l{ lock };
+			state = Running;
+			cond.notify_one();
+		}
+
+		~Thread()
+		{
+			if (worker.joinable())
+			{
+				{
+					std::lock_guard<std::mutex> l{ lock };
+					state = Dying;
+					cond.notify_one();
+				}
+				worker.join();
+			}
+		}
+		std::thread worker;
+		std::condition_variable cond;
+		std::mutex lock;
+	};
+	Thread workers[Size];
+};
+}
+
+#endif
diff --git a/3rdparty/spirv-cross/jni/Android.mk b/3rdparty/spirv-cross/jni/Android.mk
new file mode 100644
index 000000000..d5e94b531
--- /dev/null
+++ b/3rdparty/spirv-cross/jni/Android.mk
@@ -0,0 +1,12 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_CFLAGS += -std=c++11 -Wall -Wextra
+LOCAL_MODULE := spirv-cross
+LOCAL_SRC_FILES := ../spirv_cfg.cpp ../spirv_cross.cpp ../spirv_cross_util.cpp ../spirv_glsl.cpp ../spirv_hlsl.cpp ../spirv_msl.cpp ../spirv_cpp.cpp
+LOCAL_CPP_FEATURES := exceptions
+LOCAL_ARM_MODE := arm
+LOCAL_CFLAGS := -D__STDC_LIMIT_MACROS
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/3rdparty/spirv-cross/jni/Application.mk b/3rdparty/spirv-cross/jni/Application.mk
new file mode 100644
index 000000000..9a2e77f2d
--- /dev/null
+++ b/3rdparty/spirv-cross/jni/Application.mk
@@ -0,0 +1,2 @@
+APP_STL := c++_static
+APP_ABI := armeabi-v7a
diff --git a/3rdparty/spirv-cross/main.cpp b/3rdparty/spirv-cross/main.cpp
new file mode 100644
index 000000000..660d77e3a
--- /dev/null
+++ b/3rdparty/spirv-cross/main.cpp
@@ -0,0 +1,1124 @@
+/*
+ * Copyright 2015-2019 Arm Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include "spirv_cpp.hpp" +#include "spirv_cross_util.hpp" +#include "spirv_glsl.hpp" +#include "spirv_hlsl.hpp" +#include "spirv_msl.hpp" +#include "spirv_parser.hpp" +#include "spirv_reflect.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef _MSC_VER +#pragma warning(disable : 4996) +#endif + +using namespace spv; +using namespace spirv_cross; +using namespace std; + +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +static inline void THROW(const char *str) +{ + fprintf(stderr, "SPIRV-Cross will abort: %s\n", str); + fflush(stderr); + abort(); +} +#else +#define THROW(x) throw runtime_error(x) +#endif + +struct CLIParser; +struct CLICallbacks +{ + void add(const char *cli, const function &func) + { + callbacks[cli] = func; + } + unordered_map> callbacks; + function error_handler; + function default_handler; +}; + +struct CLIParser +{ + CLIParser(CLICallbacks cbs_, int argc_, char *argv_[]) + : cbs(move(cbs_)) + , argc(argc_) + , argv(argv_) + { + } + + bool parse() + { +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS + try +#endif + { + while (argc && !ended_state) + { + const char *next = *argv++; + argc--; + + if (*next != '-' && cbs.default_handler) + { + cbs.default_handler(next); + } + else + { + auto itr = cbs.callbacks.find(next); + if (itr == ::end(cbs.callbacks)) + { + THROW("Invalid argument"); + } + + itr->second(*this); + } + } + + return true; + } +#ifndef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS + catch (...) + { + if (cbs.error_handler) + { + cbs.error_handler(); + } + return false; + } +#endif + } + + void end() + { + ended_state = true; + } + + uint32_t next_uint() + { + if (!argc) + { + THROW("Tried to parse uint, but nothing left in arguments"); + } + + uint64_t val = stoul(*argv); + if (val > numeric_limits::max()) + { + THROW("next_uint() out of range"); + } + + argc--; + argv++; + + return uint32_t(val); + } + + double next_double() + { + if (!argc) + { + THROW("Tried to parse double, but nothing left in arguments"); + } + + double val = stod(*argv); + + argc--; + argv++; + + return val; + } + + // Return a string only if it's not prefixed with `--`, otherwise return the default value + const char *next_value_string(const char *default_value) + { + if (!argc) + { + return default_value; + } + + if (0 == strncmp("--", *argv, 2)) + { + return default_value; + } + + return next_string(); + } + + const char *next_string() + { + if (!argc) + { + THROW("Tried to parse string, but nothing left in arguments"); + } + + const char *ret = *argv; + argc--; + argv++; + return ret; + } + + CLICallbacks cbs; + int argc; + char **argv; + bool ended_state = false; +}; + +static vector read_spirv_file(const char *path) +{ + FILE *file = fopen(path, "rb"); + if (!file) + { + fprintf(stderr, "Failed to open SPIR-V file: %s\n", path); + return {}; + } + + fseek(file, 0, SEEK_END); + long len = ftell(file) / sizeof(uint32_t); + rewind(file); + + vector spirv(len); + if (fread(spirv.data(), sizeof(uint32_t), len, file) != size_t(len)) + spirv.clear(); + + fclose(file); + return spirv; +} + +static bool write_string_to_file(const char *path, const char *string) +{ + FILE *file = fopen(path, "w"); + if (!file) + { + fprintf(stderr, "Failed to write file: %s\n", path); + return false; + } + + fprintf(file, "%s", string); + fclose(file); + return true; +} + +static void print_resources(const Compiler &compiler, const char *tag, const vector &resources) +{ + fprintf(stderr, "%s\n", tag); + fprintf(stderr, "=============\n\n"); + bool print_ssbo 
= !strcmp(tag, "ssbos"); + + for (auto &res : resources) + { + auto &type = compiler.get_type(res.type_id); + + if (print_ssbo && compiler.buffer_is_hlsl_counter_buffer(res.id)) + continue; + + // If we don't have a name, use the fallback for the type instead of the variable + // for SSBOs and UBOs since those are the only meaningful names to use externally. + // Push constant blocks are still accessed by name and not block name, even though they are technically Blocks. + bool is_push_constant = compiler.get_storage_class(res.id) == StorageClassPushConstant; + bool is_block = compiler.get_decoration_bitset(type.self).get(DecorationBlock) || + compiler.get_decoration_bitset(type.self).get(DecorationBufferBlock); + bool is_sized_block = is_block && (compiler.get_storage_class(res.id) == StorageClassUniform || + compiler.get_storage_class(res.id) == StorageClassUniformConstant); + uint32_t fallback_id = !is_push_constant && is_block ? res.base_type_id : res.id; + + uint32_t block_size = 0; + uint32_t runtime_array_stride = 0; + if (is_sized_block) + { + auto &base_type = compiler.get_type(res.base_type_id); + block_size = uint32_t(compiler.get_declared_struct_size(base_type)); + runtime_array_stride = uint32_t(compiler.get_declared_struct_size_runtime_array(base_type, 1) - + compiler.get_declared_struct_size_runtime_array(base_type, 0)); + } + + Bitset mask; + if (print_ssbo) + mask = compiler.get_buffer_block_flags(res.id); + else + mask = compiler.get_decoration_bitset(res.id); + + string array; + for (auto arr : type.array) + array = join("[", arr ? convert_to_string(arr) : "", "]") + array; + + fprintf(stderr, " ID %03u : %s%s", res.id, + !res.name.empty() ? res.name.c_str() : compiler.get_fallback_name(fallback_id).c_str(), array.c_str()); + + if (mask.get(DecorationLocation)) + fprintf(stderr, " (Location : %u)", compiler.get_decoration(res.id, DecorationLocation)); + if (mask.get(DecorationDescriptorSet)) + fprintf(stderr, " (Set : %u)", compiler.get_decoration(res.id, DecorationDescriptorSet)); + if (mask.get(DecorationBinding)) + fprintf(stderr, " (Binding : %u)", compiler.get_decoration(res.id, DecorationBinding)); + if (mask.get(DecorationInputAttachmentIndex)) + fprintf(stderr, " (Attachment : %u)", compiler.get_decoration(res.id, DecorationInputAttachmentIndex)); + if (mask.get(DecorationNonReadable)) + fprintf(stderr, " writeonly"); + if (mask.get(DecorationNonWritable)) + fprintf(stderr, " readonly"); + if (is_sized_block) + { + fprintf(stderr, " (BlockSize : %u bytes)", block_size); + if (runtime_array_stride) + fprintf(stderr, " (Unsized array stride: %u bytes)", runtime_array_stride); + } + + uint32_t counter_id = 0; + if (print_ssbo && compiler.buffer_get_hlsl_counter_buffer(res.id, counter_id)) + fprintf(stderr, " (HLSL counter buffer ID: %u)", counter_id); + fprintf(stderr, "\n"); + } + fprintf(stderr, "=============\n\n"); +} + +static const char *execution_model_to_str(spv::ExecutionModel model) +{ + switch (model) + { + case spv::ExecutionModelVertex: + return "vertex"; + case spv::ExecutionModelTessellationControl: + return "tessellation control"; + case ExecutionModelTessellationEvaluation: + return "tessellation evaluation"; + case ExecutionModelGeometry: + return "geometry"; + case ExecutionModelFragment: + return "fragment"; + case ExecutionModelGLCompute: + return "compute"; + default: + return "???"; + } +} + +static void print_resources(const Compiler &compiler, const ShaderResources &res) +{ + auto &modes = compiler.get_execution_mode_bitset(); + + 
fprintf(stderr, "Entry points:\n"); + auto entry_points = compiler.get_entry_points_and_stages(); + for (auto &e : entry_points) + fprintf(stderr, " %s (%s)\n", e.name.c_str(), execution_model_to_str(e.execution_model)); + fprintf(stderr, "\n"); + + fprintf(stderr, "Execution modes:\n"); + modes.for_each_bit([&](uint32_t i) { + auto mode = static_cast(i); + uint32_t arg0 = compiler.get_execution_mode_argument(mode, 0); + uint32_t arg1 = compiler.get_execution_mode_argument(mode, 1); + uint32_t arg2 = compiler.get_execution_mode_argument(mode, 2); + + switch (static_cast(i)) + { + case ExecutionModeInvocations: + fprintf(stderr, " Invocations: %u\n", arg0); + break; + + case ExecutionModeLocalSize: + fprintf(stderr, " LocalSize: (%u, %u, %u)\n", arg0, arg1, arg2); + break; + + case ExecutionModeOutputVertices: + fprintf(stderr, " OutputVertices: %u\n", arg0); + break; + +#define CHECK_MODE(m) \ + case ExecutionMode##m: \ + fprintf(stderr, " %s\n", #m); \ + break + CHECK_MODE(SpacingEqual); + CHECK_MODE(SpacingFractionalEven); + CHECK_MODE(SpacingFractionalOdd); + CHECK_MODE(VertexOrderCw); + CHECK_MODE(VertexOrderCcw); + CHECK_MODE(PixelCenterInteger); + CHECK_MODE(OriginUpperLeft); + CHECK_MODE(OriginLowerLeft); + CHECK_MODE(EarlyFragmentTests); + CHECK_MODE(PointMode); + CHECK_MODE(Xfb); + CHECK_MODE(DepthReplacing); + CHECK_MODE(DepthGreater); + CHECK_MODE(DepthLess); + CHECK_MODE(DepthUnchanged); + CHECK_MODE(LocalSizeHint); + CHECK_MODE(InputPoints); + CHECK_MODE(InputLines); + CHECK_MODE(InputLinesAdjacency); + CHECK_MODE(Triangles); + CHECK_MODE(InputTrianglesAdjacency); + CHECK_MODE(Quads); + CHECK_MODE(Isolines); + CHECK_MODE(OutputPoints); + CHECK_MODE(OutputLineStrip); + CHECK_MODE(OutputTriangleStrip); + CHECK_MODE(VecTypeHint); + CHECK_MODE(ContractionOff); + + default: + break; + } + }); + fprintf(stderr, "\n"); + + print_resources(compiler, "subpass inputs", res.subpass_inputs); + print_resources(compiler, "inputs", res.stage_inputs); + print_resources(compiler, "outputs", res.stage_outputs); + print_resources(compiler, "textures", res.sampled_images); + print_resources(compiler, "separate images", res.separate_images); + print_resources(compiler, "separate samplers", res.separate_samplers); + print_resources(compiler, "images", res.storage_images); + print_resources(compiler, "ssbos", res.storage_buffers); + print_resources(compiler, "ubos", res.uniform_buffers); + print_resources(compiler, "push", res.push_constant_buffers); + print_resources(compiler, "counters", res.atomic_counters); +} + +static void print_push_constant_resources(const Compiler &compiler, const vector &res) +{ + for (auto &block : res) + { + auto ranges = compiler.get_active_buffer_ranges(block.id); + fprintf(stderr, "Active members in buffer: %s\n", + !block.name.empty() ? block.name.c_str() : compiler.get_fallback_name(block.id).c_str()); + + fprintf(stderr, "==================\n\n"); + for (auto &range : ranges) + { + const auto &name = compiler.get_member_name(block.base_type_id, range.index); + + fprintf(stderr, "Member #%3u (%s): Offset: %4u, Range: %4u\n", range.index, + !name.empty() ? 
name.c_str() : compiler.get_fallback_member_name(range.index).c_str(), + unsigned(range.offset), unsigned(range.range)); + } + fprintf(stderr, "==================\n\n"); + } +} + +static void print_spec_constants(const Compiler &compiler) +{ + auto spec_constants = compiler.get_specialization_constants(); + fprintf(stderr, "Specialization constants\n"); + fprintf(stderr, "==================\n\n"); + for (auto &c : spec_constants) + fprintf(stderr, "ID: %u, Spec ID: %u\n", c.id, c.constant_id); + fprintf(stderr, "==================\n\n"); +} + +static void print_capabilities_and_extensions(const Compiler &compiler) +{ + fprintf(stderr, "Capabilities\n"); + fprintf(stderr, "============\n"); + for (auto &capability : compiler.get_declared_capabilities()) + fprintf(stderr, "Capability: %u\n", static_cast(capability)); + fprintf(stderr, "============\n\n"); + + fprintf(stderr, "Extensions\n"); + fprintf(stderr, "============\n"); + for (auto &ext : compiler.get_declared_extensions()) + fprintf(stderr, "Extension: %s\n", ext.c_str()); + fprintf(stderr, "============\n\n"); +} + +struct PLSArg +{ + PlsFormat format; + string name; +}; + +struct Remap +{ + string src_name; + string dst_name; + unsigned components; +}; + +struct VariableTypeRemap +{ + string variable_name; + string new_variable_type; +}; + +struct InterfaceVariableRename +{ + StorageClass storageClass; + uint32_t location; + string variable_name; +}; + +struct CLIArguments +{ + const char *input = nullptr; + const char *output = nullptr; + const char *cpp_interface_name = nullptr; + uint32_t version = 0; + uint32_t shader_model = 0; + uint32_t msl_version = 0; + bool es = false; + bool set_version = false; + bool set_shader_model = false; + bool set_msl_version = false; + bool set_es = false; + bool dump_resources = false; + bool force_temporary = false; + bool flatten_ubo = false; + bool fixup = false; + bool yflip = false; + bool sso = false; + bool support_nonzero_baseinstance = true; + bool msl_swizzle_texture_samples = false; + bool msl_ios = false; + vector pls_in; + vector pls_out; + vector remaps; + vector extensions; + vector variable_type_remaps; + vector interface_variable_renames; + vector hlsl_attr_remap; + string entry; + string entry_stage; + + struct Rename + { + string old_name; + string new_name; + ExecutionModel execution_model; + }; + vector entry_point_rename; + + uint32_t iterations = 1; + bool cpp = false; + string reflect; + bool msl = false; + bool hlsl = false; + bool hlsl_compat = false; + bool vulkan_semantics = false; + bool flatten_multidimensional_arrays = false; + bool use_420pack_extension = true; + bool remove_unused = false; + bool combined_samplers_inherit_bindings = false; +}; + +static void print_help() +{ + fprintf(stderr, "Usage: spirv-cross\n" + "\t[--output ]\n" + "\t[SPIR-V file]\n" + "\t[--es]\n" + "\t[--no-es]\n" + "\t[--version ]\n" + "\t[--dump-resources]\n" + "\t[--help]\n" + "\t[--force-temporary]\n" + "\t[--vulkan-semantics]\n" + "\t[--flatten-ubo]\n" + "\t[--fixup-clipspace]\n" + "\t[--flip-vert-y]\n" + "\t[--iterations iter]\n" + "\t[--cpp]\n" + "\t[--cpp-interface-name ]\n" + "\t[--msl]\n" + "\t[--msl-version ]\n" + "\t[--msl-swizzle-texture-samples]\n" + "\t[--msl-ios]\n" + "\t[--hlsl]\n" + "\t[--reflect]\n" + "\t[--shader-model]\n" + "\t[--hlsl-enable-compat]\n" + "\t[--separate-shader-objects]\n" + "\t[--pls-in format input-name]\n" + "\t[--pls-out format output-name]\n" + "\t[--remap source_name target_name components]\n" + "\t[--extension ext]\n" + "\t[--entry name]\n" + 
"\t[--stage ]\n" + "\t[--remove-unused-variables]\n" + "\t[--flatten-multidimensional-arrays]\n" + "\t[--no-420pack-extension]\n" + "\t[--remap-variable-type ]\n" + "\t[--rename-interface-variable ]\n" + "\t[--set-hlsl-vertex-input-semantic ]\n" + "\t[--rename-entry-point ]\n" + "\t[--combined-samplers-inherit-bindings]\n" + "\t[--no-support-nonzero-baseinstance]\n" + "\n"); +} + +static bool remap_generic(Compiler &compiler, const vector &resources, const Remap &remap) +{ + auto itr = + find_if(begin(resources), end(resources), [&remap](const Resource &res) { return res.name == remap.src_name; }); + + if (itr != end(resources)) + { + compiler.set_remapped_variable_state(itr->id, true); + compiler.set_name(itr->id, remap.dst_name); + compiler.set_subpass_input_remapped_components(itr->id, remap.components); + return true; + } + else + return false; +} + +static vector remap_pls(const vector &pls_variables, const vector &resources, + const vector *secondary_resources) +{ + vector ret; + + for (auto &pls : pls_variables) + { + bool found = false; + for (auto &res : resources) + { + if (res.name == pls.name) + { + ret.push_back({ res.id, pls.format }); + found = true; + break; + } + } + + if (!found && secondary_resources) + { + for (auto &res : *secondary_resources) + { + if (res.name == pls.name) + { + ret.push_back({ res.id, pls.format }); + found = true; + break; + } + } + } + + if (!found) + fprintf(stderr, "Did not find stage input/output/target with name \"%s\".\n", pls.name.c_str()); + } + + return ret; +} + +static PlsFormat pls_format(const char *str) +{ + if (!strcmp(str, "r11f_g11f_b10f")) + return PlsR11FG11FB10F; + else if (!strcmp(str, "r32f")) + return PlsR32F; + else if (!strcmp(str, "rg16f")) + return PlsRG16F; + else if (!strcmp(str, "rg16")) + return PlsRG16; + else if (!strcmp(str, "rgb10_a2")) + return PlsRGB10A2; + else if (!strcmp(str, "rgba8")) + return PlsRGBA8; + else if (!strcmp(str, "rgba8i")) + return PlsRGBA8I; + else if (!strcmp(str, "rgba8ui")) + return PlsRGBA8UI; + else if (!strcmp(str, "rg16i")) + return PlsRG16I; + else if (!strcmp(str, "rgb10_a2ui")) + return PlsRGB10A2UI; + else if (!strcmp(str, "rg16ui")) + return PlsRG16UI; + else if (!strcmp(str, "r32ui")) + return PlsR32UI; + else + return PlsNone; +} + +static ExecutionModel stage_to_execution_model(const std::string &stage) +{ + if (stage == "vert") + return ExecutionModelVertex; + else if (stage == "frag") + return ExecutionModelFragment; + else if (stage == "comp") + return ExecutionModelGLCompute; + else if (stage == "tesc") + return ExecutionModelTessellationControl; + else if (stage == "tese") + return ExecutionModelTessellationEvaluation; + else if (stage == "geom") + return ExecutionModelGeometry; + else + SPIRV_CROSS_THROW("Invalid stage."); +} + +static int main_inner(int argc, char *argv[]) +{ + CLIArguments args; + CLICallbacks cbs; + + cbs.add("--help", [](CLIParser &parser) { + print_help(); + parser.end(); + }); + cbs.add("--output", [&args](CLIParser &parser) { args.output = parser.next_string(); }); + cbs.add("--es", [&args](CLIParser &) { + args.es = true; + args.set_es = true; + }); + cbs.add("--no-es", [&args](CLIParser &) { + args.es = false; + args.set_es = true; + }); + cbs.add("--version", [&args](CLIParser &parser) { + args.version = parser.next_uint(); + args.set_version = true; + }); + cbs.add("--dump-resources", [&args](CLIParser &) { args.dump_resources = true; }); + cbs.add("--force-temporary", [&args](CLIParser &) { args.force_temporary = true; }); + 
cbs.add("--flatten-ubo", [&args](CLIParser &) { args.flatten_ubo = true; }); + cbs.add("--fixup-clipspace", [&args](CLIParser &) { args.fixup = true; }); + cbs.add("--flip-vert-y", [&args](CLIParser &) { args.yflip = true; }); + cbs.add("--iterations", [&args](CLIParser &parser) { args.iterations = parser.next_uint(); }); + cbs.add("--cpp", [&args](CLIParser &) { args.cpp = true; }); + cbs.add("--reflect", [&args](CLIParser &parser) { args.reflect = parser.next_value_string("json"); }); + cbs.add("--cpp-interface-name", [&args](CLIParser &parser) { args.cpp_interface_name = parser.next_string(); }); + cbs.add("--metal", [&args](CLIParser &) { args.msl = true; }); // Legacy compatibility + cbs.add("--msl", [&args](CLIParser &) { args.msl = true; }); + cbs.add("--hlsl", [&args](CLIParser &) { args.hlsl = true; }); + cbs.add("--hlsl-enable-compat", [&args](CLIParser &) { args.hlsl_compat = true; }); + cbs.add("--vulkan-semantics", [&args](CLIParser &) { args.vulkan_semantics = true; }); + cbs.add("--flatten-multidimensional-arrays", [&args](CLIParser &) { args.flatten_multidimensional_arrays = true; }); + cbs.add("--no-420pack-extension", [&args](CLIParser &) { args.use_420pack_extension = false; }); + cbs.add("--msl-swizzle-texture-samples", [&args](CLIParser &) { args.msl_swizzle_texture_samples = true; }); + cbs.add("--msl-ios", [&args](CLIParser &) { args.msl_ios = true; }); + cbs.add("--extension", [&args](CLIParser &parser) { args.extensions.push_back(parser.next_string()); }); + cbs.add("--rename-entry-point", [&args](CLIParser &parser) { + auto old_name = parser.next_string(); + auto new_name = parser.next_string(); + auto model = stage_to_execution_model(parser.next_string()); + args.entry_point_rename.push_back({ old_name, new_name, move(model) }); + }); + cbs.add("--entry", [&args](CLIParser &parser) { args.entry = parser.next_string(); }); + cbs.add("--stage", [&args](CLIParser &parser) { args.entry_stage = parser.next_string(); }); + cbs.add("--separate-shader-objects", [&args](CLIParser &) { args.sso = true; }); + cbs.add("--set-hlsl-vertex-input-semantic", [&args](CLIParser &parser) { + HLSLVertexAttributeRemap remap; + remap.location = parser.next_uint(); + remap.semantic = parser.next_string(); + args.hlsl_attr_remap.push_back(move(remap)); + }); + + cbs.add("--remap", [&args](CLIParser &parser) { + string src = parser.next_string(); + string dst = parser.next_string(); + uint32_t components = parser.next_uint(); + args.remaps.push_back({ move(src), move(dst), components }); + }); + + cbs.add("--remap-variable-type", [&args](CLIParser &parser) { + string var_name = parser.next_string(); + string new_type = parser.next_string(); + args.variable_type_remaps.push_back({ move(var_name), move(new_type) }); + }); + + cbs.add("--rename-interface-variable", [&args](CLIParser &parser) { + StorageClass cls = StorageClassMax; + string clsStr = parser.next_string(); + if (clsStr == "in") + cls = StorageClassInput; + else if (clsStr == "out") + cls = StorageClassOutput; + + uint32_t loc = parser.next_uint(); + string var_name = parser.next_string(); + args.interface_variable_renames.push_back({ cls, loc, move(var_name) }); + }); + + cbs.add("--pls-in", [&args](CLIParser &parser) { + auto fmt = pls_format(parser.next_string()); + auto name = parser.next_string(); + args.pls_in.push_back({ move(fmt), move(name) }); + }); + cbs.add("--pls-out", [&args](CLIParser &parser) { + auto fmt = pls_format(parser.next_string()); + auto name = parser.next_string(); + args.pls_out.push_back({ move(fmt), 
move(name) }); + }); + cbs.add("--shader-model", [&args](CLIParser &parser) { + args.shader_model = parser.next_uint(); + args.set_shader_model = true; + }); + cbs.add("--msl-version", [&args](CLIParser &parser) { + args.msl_version = parser.next_uint(); + args.set_msl_version = true; + }); + + cbs.add("--remove-unused-variables", [&args](CLIParser &) { args.remove_unused = true; }); + cbs.add("--combined-samplers-inherit-bindings", + [&args](CLIParser &) { args.combined_samplers_inherit_bindings = true; }); + + cbs.add("--no-support-nonzero-baseinstance", [&](CLIParser &) { args.support_nonzero_baseinstance = false; }); + + cbs.default_handler = [&args](const char *value) { args.input = value; }; + cbs.error_handler = [] { print_help(); }; + + CLIParser parser{ move(cbs), argc - 1, argv + 1 }; + if (!parser.parse()) + { + return EXIT_FAILURE; + } + else if (parser.ended_state) + { + return EXIT_SUCCESS; + } + + if (!args.input) + { + fprintf(stderr, "Didn't specify input file.\n"); + print_help(); + return EXIT_FAILURE; + } + + auto spirv_file = read_spirv_file(args.input); + if (spirv_file.empty()) + return EXIT_FAILURE; + Parser spirv_parser(move(spirv_file)); + + spirv_parser.parse(); + + // Special case reflection because it has little to do with the path followed by code-outputting compilers + if (!args.reflect.empty()) + { + CompilerReflection compiler(move(spirv_parser.get_parsed_ir())); + compiler.set_format(args.reflect); + auto json = compiler.compile(); + if (args.output) + write_string_to_file(args.output, json.c_str()); + else + printf("%s", json.c_str()); + return EXIT_SUCCESS; + } + + unique_ptr compiler; + bool combined_image_samplers = false; + bool build_dummy_sampler = false; + + if (args.cpp) + { + compiler.reset(new CompilerCPP(move(spirv_parser.get_parsed_ir()))); + if (args.cpp_interface_name) + static_cast(compiler.get())->set_interface_name(args.cpp_interface_name); + } + else if (args.msl) + { + compiler.reset(new CompilerMSL(move(spirv_parser.get_parsed_ir()))); + + auto *msl_comp = static_cast(compiler.get()); + auto msl_opts = msl_comp->get_msl_options(); + if (args.set_msl_version) + msl_opts.msl_version = args.msl_version; + msl_opts.swizzle_texture_samples = args.msl_swizzle_texture_samples; + if (args.msl_ios) + msl_opts.platform = CompilerMSL::Options::iOS; + msl_comp->set_msl_options(msl_opts); + } + else if (args.hlsl) + compiler.reset(new CompilerHLSL(move(spirv_parser.get_parsed_ir()))); + else + { + combined_image_samplers = !args.vulkan_semantics; + if (!args.vulkan_semantics) + build_dummy_sampler = true; + compiler.reset(new CompilerGLSL(move(spirv_parser.get_parsed_ir()))); + } + + if (!args.variable_type_remaps.empty()) + { + auto remap_cb = [&](const SPIRType &, const string &name, string &out) -> void { + for (const VariableTypeRemap &remap : args.variable_type_remaps) + if (name == remap.variable_name) + out = remap.new_variable_type; + }; + + compiler->set_variable_type_remap_callback(move(remap_cb)); + } + + for (auto &rename : args.entry_point_rename) + compiler->rename_entry_point(rename.old_name, rename.new_name, rename.execution_model); + + auto entry_points = compiler->get_entry_points_and_stages(); + auto entry_point = args.entry; + ExecutionModel model = ExecutionModelMax; + + if (!args.entry_stage.empty()) + { + model = stage_to_execution_model(args.entry_stage); + if (entry_point.empty()) + { + // Just use the first entry point with this stage. 
+ for (auto &e : entry_points) + { + if (e.execution_model == model) + { + entry_point = e.name; + break; + } + } + + if (entry_point.empty()) + { + fprintf(stderr, "Could not find an entry point with stage: %s\n", args.entry_stage.c_str()); + return EXIT_FAILURE; + } + } + else + { + // Make sure both stage and name exists. + bool exists = false; + for (auto &e : entry_points) + { + if (e.execution_model == model && e.name == entry_point) + { + exists = true; + break; + } + } + + if (!exists) + { + fprintf(stderr, "Could not find an entry point %s with stage: %s\n", entry_point.c_str(), + args.entry_stage.c_str()); + return EXIT_FAILURE; + } + } + } + else if (!entry_point.empty()) + { + // Make sure there is just one entry point with this name, or the stage + // is ambiguous. + uint32_t stage_count = 0; + for (auto &e : entry_points) + { + if (e.name == entry_point) + { + stage_count++; + model = e.execution_model; + } + } + + if (stage_count == 0) + { + fprintf(stderr, "There is no entry point with name: %s\n", entry_point.c_str()); + return EXIT_FAILURE; + } + else if (stage_count > 1) + { + fprintf(stderr, "There is more than one entry point with name: %s. Use --stage.\n", entry_point.c_str()); + return EXIT_FAILURE; + } + } + + if (!entry_point.empty()) + compiler->set_entry_point(entry_point, model); + + if (!args.set_version && !compiler->get_common_options().version) + { + fprintf(stderr, "Didn't specify GLSL version and SPIR-V did not specify language.\n"); + print_help(); + return EXIT_FAILURE; + } + + CompilerGLSL::Options opts = compiler->get_common_options(); + if (args.set_version) + opts.version = args.version; + if (args.set_es) + opts.es = args.es; + opts.force_temporary = args.force_temporary; + opts.separate_shader_objects = args.sso; + opts.flatten_multidimensional_arrays = args.flatten_multidimensional_arrays; + opts.enable_420pack_extension = args.use_420pack_extension; + opts.vulkan_semantics = args.vulkan_semantics; + opts.vertex.fixup_clipspace = args.fixup; + opts.vertex.flip_vert_y = args.yflip; + opts.vertex.support_nonzero_base_instance = args.support_nonzero_baseinstance; + compiler->set_common_options(opts); + + // Set HLSL specific options. + if (args.hlsl) + { + auto *hlsl = static_cast(compiler.get()); + auto hlsl_opts = hlsl->get_hlsl_options(); + if (args.set_shader_model) + { + if (args.shader_model < 30) + { + fprintf(stderr, "Shader model earlier than 30 (3.0) not supported.\n"); + return EXIT_FAILURE; + } + + hlsl_opts.shader_model = args.shader_model; + } + + if (args.hlsl_compat) + { + // Enable all compat options. + hlsl_opts.point_size_compat = true; + hlsl_opts.point_coord_compat = true; + } + + if (hlsl_opts.shader_model <= 30) + { + combined_image_samplers = true; + build_dummy_sampler = true; + } + + hlsl->set_hlsl_options(hlsl_opts); + } + + if (build_dummy_sampler) + { + uint32_t sampler = compiler->build_dummy_sampler_for_combined_images(); + if (sampler != 0) + { + // Set some defaults to make validation happy. 
+ compiler->set_decoration(sampler, DecorationDescriptorSet, 0); + compiler->set_decoration(sampler, DecorationBinding, 0); + } + } + + ShaderResources res; + if (args.remove_unused) + { + auto active = compiler->get_active_interface_variables(); + res = compiler->get_shader_resources(active); + compiler->set_enabled_interface_variables(move(active)); + } + else + res = compiler->get_shader_resources(); + + if (args.flatten_ubo) + { + for (auto &ubo : res.uniform_buffers) + compiler->flatten_buffer_block(ubo.id); + for (auto &ubo : res.push_constant_buffers) + compiler->flatten_buffer_block(ubo.id); + } + + auto pls_inputs = remap_pls(args.pls_in, res.stage_inputs, &res.subpass_inputs); + auto pls_outputs = remap_pls(args.pls_out, res.stage_outputs, nullptr); + compiler->remap_pixel_local_storage(move(pls_inputs), move(pls_outputs)); + + for (auto &ext : args.extensions) + compiler->require_extension(ext); + + for (auto &remap : args.remaps) + { + if (remap_generic(*compiler, res.stage_inputs, remap)) + continue; + if (remap_generic(*compiler, res.stage_outputs, remap)) + continue; + if (remap_generic(*compiler, res.subpass_inputs, remap)) + continue; + } + + for (auto &rename : args.interface_variable_renames) + { + if (rename.storageClass == StorageClassInput) + spirv_cross_util::rename_interface_variable(*compiler, res.stage_inputs, rename.location, + rename.variable_name); + else if (rename.storageClass == StorageClassOutput) + spirv_cross_util::rename_interface_variable(*compiler, res.stage_outputs, rename.location, + rename.variable_name); + else + { + fprintf(stderr, "error at --rename-interface-variable ...\n"); + return EXIT_FAILURE; + } + } + + if (args.dump_resources) + { + print_resources(*compiler, res); + print_push_constant_resources(*compiler, res.push_constant_buffers); + print_spec_constants(*compiler); + print_capabilities_and_extensions(*compiler); + } + + if (combined_image_samplers) + { + compiler->build_combined_image_samplers(); + if (args.combined_samplers_inherit_bindings) + spirv_cross_util::inherit_combined_sampler_bindings(*compiler); + + // Give the remapped combined samplers new names. + for (auto &remap : compiler->get_combined_image_samplers()) + { + compiler->set_name(remap.combined_id, join("SPIRV_Cross_Combined", compiler->get_name(remap.image_id), + compiler->get_name(remap.sampler_id))); + } + } + + if (args.hlsl) + { + auto *hlsl_compiler = static_cast(compiler.get()); + uint32_t new_builtin = hlsl_compiler->remap_num_workgroups_builtin(); + if (new_builtin) + { + hlsl_compiler->set_decoration(new_builtin, DecorationDescriptorSet, 0); + hlsl_compiler->set_decoration(new_builtin, DecorationBinding, 0); + } + } + + string glsl; + for (uint32_t i = 0; i < args.iterations; i++) + { + if (args.hlsl) + glsl = static_cast(compiler.get())->compile(move(args.hlsl_attr_remap)); + else + glsl = compiler->compile(); + } + + if (args.output) + write_string_to_file(args.output, glsl.c_str()); + else + printf("%s", glsl.c_str()); + + return EXIT_SUCCESS; +} + +int main(int argc, char *argv[]) +{ +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS + return main_inner(argc, argv); +#else + // Make sure we catch the exception or it just disappears into the aether on Windows. 
+ try + { + return main_inner(argc, argv); + } + catch (const std::exception &e) + { + fprintf(stderr, "SPIRV-Cross threw an exception: %s\n", e.what()); + return EXIT_FAILURE; + } +#endif +} diff --git a/3rdparty/spirv-cross/msvc/SPIRV-Cross.sln b/3rdparty/spirv-cross/msvc/SPIRV-Cross.sln new file mode 100644 index 000000000..c265ec334 --- /dev/null +++ b/3rdparty/spirv-cross/msvc/SPIRV-Cross.sln @@ -0,0 +1,28 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Express 2013 for Windows Desktop +VisualStudioVersion = 12.0.31101.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "SPIRV-Cross", "SPIRV-Cross.vcxproj", "{977E3701-1A21-4425-B7E5-6BDF5EA062CD}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Win32 = Debug|Win32 + Debug|x64 = Debug|x64 + Release|Win32 = Release|Win32 + Release|x64 = Release|x64 + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Debug|Win32.ActiveCfg = Debug|Win32 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Debug|Win32.Build.0 = Debug|Win32 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Debug|x64.ActiveCfg = Debug|x64 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Debug|x64.Build.0 = Debug|x64 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Release|Win32.ActiveCfg = Release|Win32 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Release|Win32.Build.0 = Release|Win32 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Release|x64.ActiveCfg = Release|x64 + {977E3701-1A21-4425-B7E5-6BDF5EA062CD}.Release|x64.Build.0 = Release|x64 + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection +EndGlobal diff --git a/3rdparty/spirv-cross/msvc/SPIRV-Cross.vcxproj b/3rdparty/spirv-cross/msvc/SPIRV-Cross.vcxproj new file mode 100644 index 000000000..bb2841ea2 --- /dev/null +++ b/3rdparty/spirv-cross/msvc/SPIRV-Cross.vcxproj @@ -0,0 +1,156 @@ + + + + + Debug + Win32 + + + Debug + x64 + + + Release + Win32 + + + Release + x64 + + + + {977E3701-1A21-4425-B7E5-6BDF5EA062CD} + SPIRV-Cross + + + + Application + true + v120 + MultiByte + + + Application + true + v120 + MultiByte + + + Application + false + v120 + true + MultiByte + + + Application + false + v120 + true + MultiByte + + + + + + + + + + + + + + + + + + + + + Level3 + Disabled + true + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreadedDebugDLL + + + true + + + + + Level3 + Disabled + true + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreadedDebugDLL + + + true + + + + + Level3 + MaxSpeed + true + true + true + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreadedDLL + + + true + true + true + + + + + Level3 + MaxSpeed + true + true + true + _CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions) + MultiThreadedDLL + + + true + true + true + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/3rdparty/spirv-cross/msvc/SPIRV-Cross.vcxproj.filters b/3rdparty/spirv-cross/msvc/SPIRV-Cross.vcxproj.filters new file mode 100644 index 000000000..0e80b2012 --- /dev/null +++ b/3rdparty/spirv-cross/msvc/SPIRV-Cross.vcxproj.filters @@ -0,0 +1,93 @@ + + + + + {4FC737F1-C7A5-4376-A066-2A32D752A2FF} + cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx + + + {93995380-89BD-4b04-88EB-625FBE52EBFB} + h;hh;hpp;hxx;hm;inl;inc;xsd + + + {67DA6AB6-F800-4c08-8B7A-83BB121AAD01} + rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms + + + + + Source Files 
+ + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + Source Files + + + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + Header Files + + + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp new file mode 100644 index 000000000..b8265fc99 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp @@ -0,0 +1,19 @@ +RWByteAddressBuffer _4 : register(u0); + +void comp_main() +{ + uint _21 = _4.Load(_4.Load(0) * 4 + 4); + for (uint _23 = 0u; _23 < 64u; ) + { + _4.Store(_23 * 4 + 4, 0u); + _23++; + continue; + } + _4.Store(_4.Load(0) * 4 + 4, _21); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..c534ceaba --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,22 @@ +RWByteAddressBuffer u0_counter : register(u1); +RWBuffer u0 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + uint _29; + u0_counter.InterlockedAdd(0, -1, _29); + u0[asint(asfloat(_29))] = uint(int(gl_GlobalInvocationID.x)).x; +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..5e7d282d6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,22 @@ +RWByteAddressBuffer u0_counter : register(u1); +RWBuffer u0 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + uint _29; + u0_counter.InterlockedAdd(0, 1, _29); + u0[asint(asfloat(_29))] = uint(int(gl_GlobalInvocationID.x)).x; +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..a12274c01 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,45 @@ +struct A +{ + int a; + int b; +}; + +struct A_1 +{ + int a; + int b; +}; + +RWByteAddressBuffer C1 : register(u1); +cbuffer C2 : register(b2) +{ + A_1 C2_1_Data[1024] : packoffset(c0); +}; + +RWByteAddressBuffer C3 : register(u0); +cbuffer B : register(b3) +{ + A_1 C4_Data[1024] : packoffset(c0); +}; + + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 
gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + C1.Store(gl_GlobalInvocationID.x * 8 + 0, uint(C2_1_Data[gl_GlobalInvocationID.x].a)); + C1.Store(gl_GlobalInvocationID.x * 8 + 4, uint(C2_1_Data[gl_GlobalInvocationID.x].b)); + C3.Store(gl_GlobalInvocationID.x * 8 + 0, uint(C4_Data[gl_GlobalInvocationID.x].a)); + C3.Store(gl_GlobalInvocationID.x * 8 + 4, uint(C4_Data[gl_GlobalInvocationID.x].b)); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/control-flow-hints.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/control-flow-hints.asm.comp new file mode 100644 index 000000000..142ef5efa --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/control-flow-hints.asm.comp @@ -0,0 +1,32 @@ +RWByteAddressBuffer bar : register(u0); +RWByteAddressBuffer foo : register(u1); + +void comp_main() +{ + [unroll] + for (int _135 = 0; _135 < 16; ) + { + bar.Store4(_135 * 16 + 0, asuint(asfloat(foo.Load4(_135 * 16 + 0)))); + _135++; + continue; + } + [loop] + for (int _136 = 0; _136 < 16; ) + { + bar.Store4((15 - _136) * 16 + 0, asuint(asfloat(foo.Load4(_136 * 16 + 0)))); + _136++; + continue; + } + [branch] + if (asfloat(bar.Load(160)) > 10.0f) + { + foo.Store4(320, asuint(5.0f.xxxx)); + } + foo.Store4(320, asuint(20.0f.xxxx)); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..d8bce8d54 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,9 @@ +void comp_main() +{ +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp new file mode 100644 index 000000000..c567fbaf1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp @@ -0,0 +1,32 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1u +#endif +static const uint _3 = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 3u +#endif +static const uint _4 = SPIRV_CROSS_CONSTANT_ID_2; +static const uint3 gl_WorkGroupSize = uint3(_3, 2u, _4); + +RWByteAddressBuffer _8 : register(u0); +RWByteAddressBuffer _9 : register(u1); + +static uint3 gl_WorkGroupID; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; +}; + +static uint3 _22 = gl_WorkGroupSize; + +void comp_main() +{ + _8.Store(gl_WorkGroupID.x * 4 + 0, asuint(asfloat(_9.Load(gl_WorkGroupID.x * 4 + 0)) + asfloat(_8.Load(gl_WorkGroupID.x * 4 + 0)))); +} + +[numthreads(SPIRV_CROSS_CONSTANT_ID_0, 2, SPIRV_CROSS_CONSTANT_ID_2)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag new file mode 100644 index 000000000..b410010d2 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag @@ -0,0 +1,26 @@ +cbuffer _4_5 : register(b0) +{ + column_major float2x4 _5_m0 : packoffset(c0); + float4 _5_m1 : packoffset(c4); +}; + + +static float2 _3; + +struct SPIRV_Cross_Output +{ + float2 _3 : SV_Target0; +}; + +void frag_main() +{ + _3 = mul(_5_m0, _5_m1); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output._3 = _3; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag new file mode 100644 index 000000000..3951fd511 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag @@ -0,0 +1,30 @@ +Texture2D uTex : register(t1); +SamplerState uSampler : register(s0); + +static float4 FragColor; +static float2 vUV; + +struct SPIRV_Cross_Input +{ + float2 vUV : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uTex.Sample(uSampler, vUV); + FragColor += uTex.Sample(uSampler, vUV, int2(1, 1)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUV = stage_input.vUV; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..3b50282fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/empty-struct.asm.frag @@ -0,0 +1,8 @@ +void frag_main() +{ +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/frem.asm.frag new file mode 100644 index 000000000..67998c56a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/frem.asm.frag @@ -0,0 +1,29 @@ +static float4 FragColor; +static float4 vA; +static float4 vB; + +struct SPIRV_Cross_Input +{ + float4 vA : TEXCOORD0; + float4 vB : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = fmod(vA, vB); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vA = stage_input.vA; + vB = stage_input.vB; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..93f8414e9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,19 @@ +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = 10.0f.xxxx; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..ed53720d9 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,31 @@ +Texture2D uTexture : register(t0); +SamplerState _uTexture_sampler : register(s0); + +static int2 Size; + +struct SPIRV_Cross_Output +{ + int2 Size : SV_Target0; +}; + +uint2 SPIRV_Cross_textureSize(Texture2D Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +void frag_main() +{ + uint _19_dummy_parameter; + uint _20_dummy_parameter; + Size = int2(SPIRV_Cross_textureSize(uTexture, uint(0), _19_dummy_parameter)) + int2(SPIRV_Cross_textureSize(uTexture, uint(1), _20_dummy_parameter)); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.Size = Size; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..67f14fcca --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,57 @@ +Texture2D uImage : register(t0); +SamplerState _uImage_sampler : register(s0); + +static float4 v0; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 v0 : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + float phi; + float4 _36; + int _51; + _51 = 0; + phi = 1.0f; + _36 = float4(1.0f, 2.0f, 1.0f, 2.0f); + for (;;) + { + FragColor = _36; + if (_51 < 4) + { + if (v0[_51] > 0.0f) + { + float2 _48 = phi.xx; + _51++; + phi += 2.0f; + _36 = uImage.SampleLevel(_uImage_sampler, _48, 0.0f); + continue; + } + else + { + break; + } + } + else + { + break; + } + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + v0 = stage_input.v0; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..d20cf995a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,19 @@ +static float3 FragColor; + +struct SPIRV_Cross_Output +{ + float3 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float3(asfloat(0x7f800000u), asfloat(0xff800000u), asfloat(0x7fc00000u)); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..5deae3a56 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,57 @@ +static const float _46[16] = { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }; +static const float4 _76[4] = { 0.0f.xxxx, 1.0f.xxxx, 8.0f.xxxx, 5.0f.xxxx }; +static const float4 _90[4] = { 20.0f.xxxx, 30.0f.xxxx, 50.0f.xxxx, 60.0f.xxxx }; + +static float FragColor; +static int index; + +struct SPIRV_Cross_Input +{ + nointerpolation int index : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : 
SV_Target0; +}; + +void frag_main() +{ + float4 foobar[4] = _76; + float4 baz[4] = _76; + FragColor = _46[index]; + if (index < 10) + { + FragColor += _46[index ^ 1]; + } + else + { + FragColor += _46[index & 1]; + } + bool _99 = index > 30; + if (_99) + { + FragColor += _76[index & 3].y; + } + else + { + FragColor += _76[index & 1].x; + } + if (_99) + { + foobar[1].z = 20.0f; + } + int _37 = index & 3; + FragColor += foobar[_37].z; + baz = _90; + FragColor += baz[_37].z; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + index = stage_input.index; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..56ac1f22a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,25 @@ +cbuffer Registers +{ + float registers_foo : packoffset(c0); +}; + + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = 10.0f + registers_foo; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/srem.asm.frag new file mode 100644 index 000000000..db5e71745 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/srem.asm.frag @@ -0,0 +1,29 @@ +static float4 FragColor; +static int4 vA; +static int4 vB; + +struct SPIRV_Cross_Input +{ + nointerpolation int4 vA : TEXCOORD0; + nointerpolation int4 vB : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float4(vA - vB * (vA / vB)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vA = stage_input.vA; + vB = stage_input.vB; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..695d5fe9d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,29 @@ +Texture2D uTexture : register(t0); +SamplerState _uTexture_sampler : register(s0); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uTexture.Load(int3(int2(gl_FragCoord.xy), 0)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag new file mode 100644 index 000000000..5b894de83 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag @@ -0,0 +1,31 @@ +Texture2D 
uShadow : register(t0); +SamplerComparisonState _uShadow_sampler : register(s0); +Texture2D uTexture : register(t1); +SamplerComparisonState uSampler : register(s2); + +static float3 vUV; +static float FragColor; + +struct SPIRV_Cross_Input +{ + float3 vUV : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uShadow.SampleCmp(_uShadow_sampler, vUV.xy, vUV.z) + uTexture.SampleCmp(uSampler, vUV.xy, vUV.z); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUV = stage_input.vUV; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..ee3e46724 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/frag/unreachable.asm.frag @@ -0,0 +1,40 @@ +static int counter; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + nointerpolation int counter : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + bool _29; + for (;;) + { + _29 = counter == 10; + if (_29) + { + break; + } + else + { + break; + } + } + bool4 _35 = _29.xxxx; + FragColor = float4(_35.x ? 10.0f.xxxx.x : 30.0f.xxxx.x, _35.y ? 10.0f.xxxx.y : 30.0f.xxxx.y, _35.z ? 10.0f.xxxx.z : 30.0f.xxxx.z, _35.w ? 10.0f.xxxx.w : 30.0f.xxxx.w); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + counter = stage_input.counter; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert new file mode 100644 index 000000000..a72a098fd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert @@ -0,0 +1,46 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_201 +#define SPIRV_CROSS_CONSTANT_ID_201 -10 +#endif +static const int _7 = SPIRV_CROSS_CONSTANT_ID_201; +#ifndef SPIRV_CROSS_CONSTANT_ID_202 +#define SPIRV_CROSS_CONSTANT_ID_202 100u +#endif +static const uint _8 = SPIRV_CROSS_CONSTANT_ID_202; +static const int _20 = (_7 + 2); +static const uint _25 = (_8 % 5u); +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 int4(20, 30, _20, _20) +#endif +static const int4 _30 = SPIRV_CROSS_CONSTANT_ID_0; +static const int2 _32 = int2(_30.y, _30.x); +static const int _33 = _30.y; + +static float4 gl_Position; +static int _4; + +struct SPIRV_Cross_Output +{ + nointerpolation int _4 : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + float4 _63 = 0.0f.xxxx; + _63.y = float(_20); + float4 _66 = _63; + _66.z = float(_25); + float4 _52 = _66 + float4(_30); + float2 _56 = _52.xy + float2(_32); + gl_Position = float4(_56.x, _56.y, _52.z, _52.w); + _4 = _33; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output._4 = _4; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..0d1e8cc53 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static int gl_VertexIndex; +static int gl_InstanceIndex; +struct SPIRV_Cross_Input +{ + uint gl_VertexIndex : SV_VertexID; + uint gl_InstanceIndex : SV_InstanceID; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = float(uint(gl_VertexIndex) + uint(gl_InstanceIndex)).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_VertexIndex = int(stage_input.gl_VertexIndex); + gl_InstanceIndex = int(stage_input.gl_InstanceIndex); + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..8d5e771fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static int gl_VertexID; +static int gl_InstanceID; +struct SPIRV_Cross_Input +{ + uint gl_VertexID : SV_VertexID; + uint gl_InstanceID : SV_InstanceID; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = float(gl_VertexID + gl_InstanceID).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_VertexID = int(stage_input.gl_VertexID); + gl_InstanceID = int(stage_input.gl_InstanceID); + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/access-chains.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/access-chains.comp new file mode 100644 index 000000000..924e91912 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/access-chains.comp @@ -0,0 +1,21 @@ +RWByteAddressBuffer wo : register(u1); +ByteAddressBuffer ro : register(t0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + wo.Store4(gl_GlobalInvocationID.x * 64 + 272, asuint(asfloat(ro.Load4(gl_GlobalInvocationID.x * 64 + 160)))); + wo.Store4(gl_GlobalInvocationID.x * 16 + 480, asuint(asfloat(ro.Load4(gl_GlobalInvocationID.x * 16 + 480)))); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/address-buffers.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/address-buffers.comp new file mode 100644 index 000000000..a252fc8ae --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/address-buffers.comp @@ -0,0 +1,15 @@ +RWByteAddressBuffer WriteOnly : register(u2); +ByteAddressBuffer ReadOnly : register(t0); +RWByteAddressBuffer ReadWrite : register(u1); + +void comp_main() +{ + WriteOnly.Store4(0, asuint(asfloat(ReadOnly.Load4(0)))); + ReadWrite.Store4(0, asuint(asfloat(ReadWrite.Load4(0)) + 10.0f.xxxx)); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic-decrement.asm.comp new file mode 100644 index 
000000000..b86b5327e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic-decrement.asm.comp @@ -0,0 +1,22 @@ +RWByteAddressBuffer u0_counter : register(u1); +RWBuffer<uint> u0 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + uint _29; + u0_counter.InterlockedAdd(0, -1, _29); + u0[uint(asint(asfloat(_29))) + 0u] = uint(int(gl_GlobalInvocationID.x)).x; +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..f2338f225 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic-increment.asm.comp @@ -0,0 +1,22 @@ +RWByteAddressBuffer u0_counter : register(u1); +RWBuffer<uint> u0 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + uint _29; + u0_counter.InterlockedAdd(0, 1, _29); + u0[uint(asint(asfloat(_29))) + 0u] = uint(int(gl_GlobalInvocationID.x)).x; +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic.comp new file mode 100644 index 000000000..72e15bf77 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/atomic.comp @@ -0,0 +1,89 @@ +RWByteAddressBuffer ssbo : register(u2); +RWTexture2D<uint> uImage : register(u0); +RWTexture2D<int> iImage : register(u1); + +groupshared int int_atomic; +groupshared uint uint_atomic; +groupshared int int_atomic_array[1]; +groupshared uint uint_atomic_array[1]; + +void comp_main() +{ + uint _19; + InterlockedAdd(uImage[int2(1, 5)], 1u, _19); + uint _27; + InterlockedAdd(uImage[int2(1, 5)], 1u, _27); + iImage[int2(1, 6)] = int(_27).x; + uint _32; + InterlockedOr(uImage[int2(1, 5)], 1u, _32); + uint _34; + InterlockedXor(uImage[int2(1, 5)], 1u, _34); + uint _36; + InterlockedAnd(uImage[int2(1, 5)], 1u, _36); + uint _38; + InterlockedMin(uImage[int2(1, 5)], 1u, _38); + uint _40; + InterlockedMax(uImage[int2(1, 5)], 1u, _40); + uint _44; + InterlockedCompareExchange(uImage[int2(1, 5)], 10u, 2u, _44); + int _47; + InterlockedAdd(iImage[int2(1, 6)], 1, _47); + int _49; + InterlockedOr(iImage[int2(1, 6)], 1, _49); + int _51; + InterlockedXor(iImage[int2(1, 6)], 1, _51); + int _53; + InterlockedAnd(iImage[int2(1, 6)], 1, _53); + int _55; + InterlockedMin(iImage[int2(1, 6)], 1, _55); + int _57; + InterlockedMax(iImage[int2(1, 6)], 1, _57); + int _61; + InterlockedCompareExchange(iImage[int2(1, 5)], 10, 2, _61); + uint _68; + ssbo.InterlockedAdd(0, 1u, _68); + uint _70; + ssbo.InterlockedOr(0, 1u, _70); + uint _72; + ssbo.InterlockedXor(0, 1u, _72); + uint _74; + ssbo.InterlockedAnd(0, 1u, _74); + uint _76; + ssbo.InterlockedMin(0, 1u, _76); + uint _78; + ssbo.InterlockedMax(0, 1u, _78); + uint _80; + ssbo.InterlockedExchange(0, 1u, _80); + uint _82; + ssbo.InterlockedCompareExchange(0, 10u, 2u, _82); + int _85; + ssbo.InterlockedAdd(4, 1, _85); + int _87; + ssbo.InterlockedOr(4, 1, _87); + int _89; + ssbo.InterlockedXor(4, 1, _89); + int _91; + 
ssbo.InterlockedAnd(4, 1, _91); + int _93; + ssbo.InterlockedMin(4, 1, _93); + int _95; + ssbo.InterlockedMax(4, 1, _95); + int _97; + ssbo.InterlockedExchange(4, 1, _97); + int _99; + ssbo.InterlockedCompareExchange(4, 10, 2, _99); + int _102; + InterlockedAdd(int_atomic, 10, _102); + uint _105; + InterlockedAdd(uint_atomic, 10u, _105); + int _110; + InterlockedAdd(int_atomic_array[0], 10, _110); + uint _115; + InterlockedAdd(uint_atomic_array[0], 10u, _115); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/barriers.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/barriers.comp new file mode 100644 index 000000000..7ac2a656f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/barriers.comp @@ -0,0 +1,26 @@ +static const uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +void comp_main() +{ + GroupMemoryBarrier(); + AllMemoryBarrier(); + DeviceMemoryBarrier(); + DeviceMemoryBarrier(); + AllMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); + AllMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); + DeviceMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); + DeviceMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); + AllMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); + GroupMemoryBarrierWithGroupSync(); +} + +[numthreads(4, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/builtins.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/builtins.comp new file mode 100644 index 000000000..7f88aa798 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/builtins.comp @@ -0,0 +1,11 @@ +static const uint3 gl_WorkGroupSize = uint3(8u, 4u, 2u); + +void comp_main() +{ +} + +[numthreads(8, 4, 2)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/composite-array-initialization.comp new file mode 100644 index 000000000..617e0987d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/composite-array-initialization.comp @@ -0,0 +1,50 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 4.0f +#endif +static const float X = SPIRV_CROSS_CONSTANT_ID_0; +static const uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u); + +struct Data +{ + float a; + float b; +}; + +static const Data _21 = { 1.0f, 2.0f }; +static const Data _24 = { 3.0f, 4.0f }; +static const Data _25[2] = { { 1.0f, 2.0f }, { 3.0f, 4.0f } }; +static const Data _30 = { 3.0f, 5.0f }; + +RWByteAddressBuffer _61 : register(u0); + +static uint3 gl_WorkGroupID; +static uint3 gl_LocalInvocationID; +static uint gl_LocalInvocationIndex; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; + uint3 gl_LocalInvocationID : SV_GroupThreadID; + uint gl_LocalInvocationIndex : SV_GroupIndex; +}; + +void comp_main() +{ + Data data[2] = _25; + Data _28 = { X, 2.0f }; + Data _31[2] = { _28, _30 }; + Data data2[2] = _31; + if (gl_LocalInvocationIndex == 0u) + { + _61.Store(gl_WorkGroupID.x * 8 + 0, asuint(data[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a)); + _61.Store(gl_WorkGroupID.x * 8 + 4, asuint(data[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b)); + } +} + +[numthreads(2, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + gl_LocalInvocationID = stage_input.gl_LocalInvocationID; + 
gl_LocalInvocationIndex = stage_input.gl_LocalInvocationIndex; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/globallycoherent.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/globallycoherent.comp new file mode 100644 index 000000000..1637727de --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/globallycoherent.comp @@ -0,0 +1,16 @@ +globallycoherent RWByteAddressBuffer _29 : register(u3); +ByteAddressBuffer _33 : register(t2); +RWTexture2D<float> uImageIn : register(u0); +globallycoherent RWTexture2D<float> uImageOut : register(u1); + +void comp_main() +{ + uImageOut[int2(9, 7)] = uImageIn[int2(9, 7)].x; + _29.Store(0, asuint(asfloat(_33.Load(0)))); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/image.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/image.comp new file mode 100644 index 000000000..6c2b58cd2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/image.comp @@ -0,0 +1,64 @@ +RWTexture2D<float> uImageInF : register(u0); +RWTexture2D<float> uImageOutF : register(u1); +RWTexture2D<int> uImageInI : register(u2); +RWTexture2D<int> uImageOutI : register(u3); +RWTexture2D<uint> uImageInU : register(u4); +RWTexture2D<uint> uImageOutU : register(u5); +RWBuffer<float> uImageInBuffer : register(u6); +RWBuffer<float> uImageOutBuffer : register(u7); +RWTexture2D<float2> uImageInF2 : register(u8); +RWTexture2D<float2> uImageOutF2 : register(u9); +RWTexture2D<int2> uImageInI2 : register(u10); +RWTexture2D<int2> uImageOutI2 : register(u11); +RWTexture2D<uint2> uImageInU2 : register(u12); +RWTexture2D<uint2> uImageOutU2 : register(u13); +RWBuffer<float2> uImageInBuffer2 : register(u14); +RWBuffer<float2> uImageOutBuffer2 : register(u15); +RWTexture2D<float4> uImageInF4 : register(u16); +RWTexture2D<float4> uImageOutF4 : register(u17); +RWTexture2D<int4> uImageInI4 : register(u18); +RWTexture2D<int4> uImageOutI4 : register(u19); +RWTexture2D<uint4> uImageInU4 : register(u20); +RWTexture2D<uint4> uImageOutU4 : register(u21); +RWBuffer<float4> uImageInBuffer4 : register(u22); +RWBuffer<float4> uImageOutBuffer4 : register(u23); +RWTexture2D<float4> uImageNoFmtF : register(u24); +RWTexture2D<uint4> uImageNoFmtU : register(u25); +RWTexture2D<int4> uImageNoFmtI : register(u26); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + int2 _23 = int2(gl_GlobalInvocationID.xy); + uImageOutF[_23] = uImageInF[_23].x; + uImageOutI[_23] = uImageInI[_23].x; + uImageOutU[_23] = uImageInU[_23].x; + int _74 = int(gl_GlobalInvocationID.x); + uImageOutBuffer[_74] = uImageInBuffer[_74].x; + uImageOutF2[_23] = uImageInF2[_23].xy; + uImageOutI2[_23] = uImageInI2[_23].xy; + uImageOutU2[_23] = uImageInU2[_23].xy; + float4 _135 = uImageInBuffer2[_74].xyyy; + uImageOutBuffer2[_74] = _135.xy; + uImageOutF4[_23] = uImageInF4[_23]; + int4 _165 = uImageInI4[_23]; + uImageOutI4[_23] = _165; + uint4 _180 = uImageInU4[_23]; + uImageOutU4[_23] = _180; + uImageOutBuffer4[_74] = uImageInBuffer4[_74]; + uImageNoFmtF[_23] = _135; + uImageNoFmtU[_23] = _180; + uImageNoFmtI[_23] = _165; +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/inverse.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/inverse.comp new file mode 100644 index 000000000..3be954a6f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/inverse.comp @@ -0,0 +1,122 @@ +RWByteAddressBuffer _15 : 
register(u0); +ByteAddressBuffer _20 : register(t1); + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float2x2 SPIRV_Cross_Inverse(float2x2 m) +{ + float2x2 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = m[1][1]; + adj[0][1] = -m[0][1]; + + adj[1][0] = -m[1][0]; + adj[1][1] = m[0][0]; + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the determinant of a 2x2 matrix. +float SPIRV_Cross_Det2x2(float a1, float a2, float b1, float b2) +{ + return a1 * b2 - b1 * a2; +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float3x3 SPIRV_Cross_Inverse(float3x3 m) +{ + float3x3 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = SPIRV_Cross_Det2x2(m[1][1], m[1][2], m[2][1], m[2][2]); + adj[0][1] = -SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[2][1], m[2][2]); + adj[0][2] = SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[1][1], m[1][2]); + + adj[1][0] = -SPIRV_Cross_Det2x2(m[1][0], m[1][2], m[2][0], m[2][2]); + adj[1][1] = SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[2][0], m[2][2]); + adj[1][2] = -SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[1][0], m[1][2]); + + adj[2][0] = SPIRV_Cross_Det2x2(m[1][0], m[1][1], m[2][0], m[2][1]); + adj[2][1] = -SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[2][0], m[2][1]); + adj[2][2] = SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[1][0], m[1][1]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the determinant of a 3x3 matrix. +float SPIRV_Cross_Det3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3) +{ + return a1 * SPIRV_Cross_Det2x2(b2, b3, c2, c3) - b1 * SPIRV_Cross_Det2x2(a2, a3, c2, c3) + c1 * SPIRV_Cross_Det2x2(a2, a3, b2, b3); +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float4x4 SPIRV_Cross_Inverse(float4x4 m) +{ + float4x4 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. 
+ adj[0][0] = SPIRV_Cross_Det3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][1] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][2] = SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]); + adj[0][3] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]); + + adj[1][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]); + adj[1][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]); + + adj[2][0] = SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][1] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][2] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]); + adj[2][3] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]); + + adj[3][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]); + adj[3][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] * m[3][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? 
(adj * (1.0f / det)) : m; +} + +void comp_main() +{ + float2x2 _23 = asfloat(uint2x2(_20.Load2(0), _20.Load2(8))); + float2x2 _24 = SPIRV_Cross_Inverse(_23); + _15.Store2(0, asuint(_24[0])); + _15.Store2(8, asuint(_24[1])); + float3x3 _29 = asfloat(uint3x3(_20.Load3(16), _20.Load3(32), _20.Load3(48))); + float3x3 _30 = SPIRV_Cross_Inverse(_29); + _15.Store3(16, asuint(_30[0])); + _15.Store3(32, asuint(_30[1])); + _15.Store3(48, asuint(_30[2])); + float4x4 _35 = asfloat(uint4x4(_20.Load4(64), _20.Load4(80), _20.Load4(96), _20.Load4(112))); + float4x4 _36 = SPIRV_Cross_Inverse(_35); + _15.Store4(64, asuint(_36[0])); + _15.Store4(80, asuint(_36[1])); + _15.Store4(96, asuint(_36[2])); + _15.Store4(112, asuint(_36[3])); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/num-workgroups-alone.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/num-workgroups-alone.comp new file mode 100644 index 000000000..dee39e3d5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/num-workgroups-alone.comp @@ -0,0 +1,17 @@ +RWByteAddressBuffer _10 : register(u0); +cbuffer SPIRV_Cross_NumWorkgroups : register(b0) +{ + uint3 SPIRV_Cross_NumWorkgroups_1_count : packoffset(c0); +}; + + +void comp_main() +{ + _10.Store3(0, SPIRV_Cross_NumWorkgroups_1_count); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/num-workgroups-with-builtins.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/num-workgroups-with-builtins.comp new file mode 100644 index 000000000..1c98e5e56 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/num-workgroups-with-builtins.comp @@ -0,0 +1,24 @@ +RWByteAddressBuffer _10 : register(u0); +cbuffer SPIRV_Cross_NumWorkgroups : register(b0) +{ + uint3 SPIRV_Cross_NumWorkgroups_1_count : packoffset(c0); +}; + + +static uint3 gl_WorkGroupID; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; +}; + +void comp_main() +{ + _10.Store3(0, SPIRV_Cross_NumWorkgroups_1_count + gl_WorkGroupID); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/rmw-matrix.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/rmw-matrix.comp new file mode 100644 index 000000000..ed6666935 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ +RWByteAddressBuffer _11 : register(u0); + +void comp_main() +{ + _11.Store(0, asuint(asfloat(_11.Load(0)) * asfloat(_11.Load(96)))); + _11.Store4(16, asuint(asfloat(_11.Load4(16)) * asfloat(_11.Load4(112)))); + float4x4 _35 = asfloat(uint4x4(_11.Load4(128), _11.Load4(144), _11.Load4(160), _11.Load4(176))); + float4x4 _37 = asfloat(uint4x4(_11.Load4(32), _11.Load4(48), _11.Load4(64), _11.Load4(80))); + float4x4 _38 = mul(_35, _37); + _11.Store4(32, asuint(_38[0])); + _11.Store4(48, asuint(_38[1])); + _11.Store4(64, asuint(_38[2])); + _11.Store4(80, asuint(_38[3])); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/rwbuffer-matrix.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/rwbuffer-matrix.comp new file mode 100644 index 000000000..42103c2bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/rwbuffer-matrix.comp @@ -0,0 +1,91 @@ 
+RWByteAddressBuffer _28 : register(u0); +cbuffer UBO : register(b1) +{ + int _68_index0 : packoffset(c0); + int _68_index1 : packoffset(c0.y); +}; + + +void comp_main() +{ + float4x4 _253 = asfloat(uint4x4(_28.Load(64), _28.Load(80), _28.Load(96), _28.Load(112), _28.Load(68), _28.Load(84), _28.Load(100), _28.Load(116), _28.Load(72), _28.Load(88), _28.Load(104), _28.Load(120), _28.Load(76), _28.Load(92), _28.Load(108), _28.Load(124))); + _28.Store4(0, asuint(_253[0])); + _28.Store4(16, asuint(_253[1])); + _28.Store4(32, asuint(_253[2])); + _28.Store4(48, asuint(_253[3])); + float2x2 _256 = asfloat(uint2x2(_28.Load(144), _28.Load(152), _28.Load(148), _28.Load(156))); + _28.Store2(128, asuint(_256[0])); + _28.Store2(136, asuint(_256[1])); + float2x3 _259 = asfloat(uint2x3(_28.Load(192), _28.Load(200), _28.Load(208), _28.Load(196), _28.Load(204), _28.Load(212))); + _28.Store3(160, asuint(_259[0])); + _28.Store3(176, asuint(_259[1])); + float3x2 _262 = asfloat(uint3x2(_28.Load(240), _28.Load(256), _28.Load(244), _28.Load(260), _28.Load(248), _28.Load(264))); + _28.Store2(216, asuint(_262[0])); + _28.Store2(224, asuint(_262[1])); + _28.Store2(232, asuint(_262[2])); + float4x4 _265 = asfloat(uint4x4(_28.Load4(0), _28.Load4(16), _28.Load4(32), _28.Load4(48))); + _28.Store(64, asuint(_265[0].x)); + _28.Store(68, asuint(_265[1].x)); + _28.Store(72, asuint(_265[2].x)); + _28.Store(76, asuint(_265[3].x)); + _28.Store(80, asuint(_265[0].y)); + _28.Store(84, asuint(_265[1].y)); + _28.Store(88, asuint(_265[2].y)); + _28.Store(92, asuint(_265[3].y)); + _28.Store(96, asuint(_265[0].z)); + _28.Store(100, asuint(_265[1].z)); + _28.Store(104, asuint(_265[2].z)); + _28.Store(108, asuint(_265[3].z)); + _28.Store(112, asuint(_265[0].w)); + _28.Store(116, asuint(_265[1].w)); + _28.Store(120, asuint(_265[2].w)); + _28.Store(124, asuint(_265[3].w)); + float2x2 _268 = asfloat(uint2x2(_28.Load2(128), _28.Load2(136))); + _28.Store(144, asuint(_268[0].x)); + _28.Store(148, asuint(_268[1].x)); + _28.Store(152, asuint(_268[0].y)); + _28.Store(156, asuint(_268[1].y)); + float2x3 _271 = asfloat(uint2x3(_28.Load3(160), _28.Load3(176))); + _28.Store(192, asuint(_271[0].x)); + _28.Store(196, asuint(_271[1].x)); + _28.Store(200, asuint(_271[0].y)); + _28.Store(204, asuint(_271[1].y)); + _28.Store(208, asuint(_271[0].z)); + _28.Store(212, asuint(_271[1].z)); + float3x2 _274 = asfloat(uint3x2(_28.Load2(216), _28.Load2(224), _28.Load2(232))); + _28.Store(240, asuint(_274[0].x)); + _28.Store(244, asuint(_274[1].x)); + _28.Store(248, asuint(_274[2].x)); + _28.Store(256, asuint(_274[0].y)); + _28.Store(260, asuint(_274[1].y)); + _28.Store(264, asuint(_274[2].y)); + _28.Store(_68_index0 * 4 + _68_index1 * 16 + 64, asuint(1.0f)); + _28.Store(_68_index0 * 4 + _68_index1 * 8 + 144, asuint(2.0f)); + _28.Store(_68_index0 * 4 + _68_index1 * 8 + 192, asuint(3.0f)); + _28.Store(_68_index0 * 4 + _68_index1 * 16 + 240, asuint(4.0f)); + _28.Store(_68_index0 * 4 + 64, asuint(1.0f.x)); + _28.Store(_68_index0 * 4 + 80, asuint(1.0f.xxxx.y)); + _28.Store(_68_index0 * 4 + 96, asuint(1.0f.xxxx.z)); + _28.Store(_68_index0 * 4 + 112, asuint(1.0f.xxxx.w)); + _28.Store(_68_index0 * 4 + 144, asuint(2.0f.x)); + _28.Store(_68_index0 * 4 + 152, asuint(2.0f.xx.y)); + _28.Store(_68_index0 * 4 + 192, asuint(3.0f.x)); + _28.Store(_68_index0 * 4 + 200, asuint(3.0f.xxx.y)); + _28.Store(_68_index0 * 4 + 208, asuint(3.0f.xxx.z)); + _28.Store(_68_index0 * 4 + 240, asuint(4.0f.x)); + _28.Store(_68_index0 * 4 + 256, asuint(4.0f.xx.y)); + _28.Store(_68_index0 * 16 + 
_68_index1 * 4 + 0, asuint(1.0f)); + _28.Store(_68_index0 * 8 + _68_index1 * 4 + 128, asuint(2.0f)); + _28.Store(_68_index0 * 16 + _68_index1 * 4 + 160, asuint(3.0f)); + _28.Store(_68_index0 * 8 + _68_index1 * 4 + 216, asuint(4.0f)); + _28.Store4(_68_index0 * 16 + 0, asuint(1.0f.xxxx)); + _28.Store2(_68_index0 * 8 + 128, asuint(2.0f.xx)); + _28.Store3(_68_index0 * 16 + 160, asuint(3.0f.xxx)); + _28.Store2(_68_index0 * 8 + 216, asuint(4.0f.xx)); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/shared.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/shared.comp new file mode 100644 index 000000000..9831302af --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/shared.comp @@ -0,0 +1,29 @@ +static const uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +ByteAddressBuffer _22 : register(t0); +RWByteAddressBuffer _44 : register(u1); + +static uint3 gl_GlobalInvocationID; +static uint gl_LocalInvocationIndex; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; + uint gl_LocalInvocationIndex : SV_GroupIndex; +}; + +groupshared float sShared[4]; + +void comp_main() +{ + sShared[gl_LocalInvocationIndex] = asfloat(_22.Load(gl_GlobalInvocationID.x * 4 + 0)); + GroupMemoryBarrierWithGroupSync(); + _44.Store(gl_GlobalInvocationID.x * 4 + 0, asuint(sShared[3u - gl_LocalInvocationIndex])); +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + gl_LocalInvocationIndex = stage_input.gl_LocalInvocationIndex; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/spec-constant-op-member-array.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/spec-constant-op-member-array.comp new file mode 100644 index 000000000..ad815b282 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/spec-constant-op-member-array.comp @@ -0,0 +1,49 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 100 +#endif +static const int a = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 200 +#endif +static const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 300 +#endif +static const int c = SPIRV_CROSS_CONSTANT_ID_2; +static const int _18 = (c + 50); +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 400 +#endif +static const int e = SPIRV_CROSS_CONSTANT_ID_3; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +RWByteAddressBuffer _22 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + _22.Store(gl_GlobalInvocationID.x * 4 + 2800, uint(int(_22.Load(gl_GlobalInvocationID.x * 4 + 2800)) + (int(_22.Load(gl_GlobalInvocationID.x * 4 + 2400)) + e))); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/spec-constant-work-group-size.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/spec-constant-work-group-size.comp new file mode 100644 index 000000000..55ebf32bb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/spec-constant-work-group-size.comp @@ -0,0 +1,43 @@ 
+#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 2 +#endif +static const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1 +#endif +static const int a = SPIRV_CROSS_CONSTANT_ID_0; +static const uint _26 = (uint(a) + 0u); +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 1u +#endif +static const uint _27 = SPIRV_CROSS_CONSTANT_ID_10; +static const uint3 gl_WorkGroupSize = uint3(_27, 20u, 1u); +static const uint _32 = gl_WorkGroupSize.x; +static const uint _33 = (_26 + _32); +static const uint _34 = gl_WorkGroupSize.y; +static const uint _35 = (_33 + _34); +static const int _42 = (1 - a); + +RWByteAddressBuffer _23 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + int spec_const_array_size[b]; + spec_const_array_size[0] = 10; + spec_const_array_size[1] = 40; + spec_const_array_size[a] = a; + _23.Store((_35 + gl_GlobalInvocationID.x) * 4 + 0, uint(b + spec_const_array_size[_42])); +} + +[numthreads(SPIRV_CROSS_CONSTANT_ID_10, 20, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/ssbo-array.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/ssbo-array.comp new file mode 100644 index 000000000..d8bce8d54 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/ssbo-array.comp @@ -0,0 +1,9 @@ +void comp_main() +{ +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp new file mode 100644 index 000000000..dabc7df9e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp @@ -0,0 +1,67 @@ +RWByteAddressBuffer _9 : register(u0, space0); + +static uint4 gl_SubgroupEqMask; +static uint4 gl_SubgroupGeMask; +static uint4 gl_SubgroupGtMask; +static uint4 gl_SubgroupLeMask; +static uint4 gl_SubgroupLtMask; +void comp_main() +{ + _9.Store(0, asuint(float(WaveGetLaneCount()))); + _9.Store(0, asuint(float(WaveGetLaneIndex()))); + _9.Store(0, asuint(float4(gl_SubgroupEqMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupGeMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupGtMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupLeMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupLtMask).x)); + uint4 _75 = WaveActiveBallot(true); + float4 _88 = WaveActiveSum(20.0f.xxxx); + int4 _94 = WaveActiveSum(int4(20, 20, 20, 20)); + float4 _96 = WaveActiveProduct(20.0f.xxxx); + int4 _98 = WaveActiveProduct(int4(20, 20, 20, 20)); + float4 _127 = WavePrefixProduct(_96) * _96; + int4 _129 = WavePrefixProduct(_98) * _98; +} + +[numthreads(1, 1, 1)] +void main() +{ + gl_SubgroupEqMask = 1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96)); + if (WaveGetLaneIndex() >= 32) gl_SubgroupEqMask.x = 0; + if (WaveGetLaneIndex() >= 64 || WaveGetLaneIndex() < 32) gl_SubgroupEqMask.y = 0; + if (WaveGetLaneIndex() >= 96 || WaveGetLaneIndex() < 64) gl_SubgroupEqMask.z = 0; + if (WaveGetLaneIndex() < 96) gl_SubgroupEqMask.w = 0; + gl_SubgroupGeMask = ~((1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u); + if (WaveGetLaneIndex() >= 32) gl_SubgroupGeMask.x = 0u; + if (WaveGetLaneIndex() >= 64) gl_SubgroupGeMask.y = 0u; + if 
(WaveGetLaneIndex() >= 96) gl_SubgroupGeMask.z = 0u; + if (WaveGetLaneIndex() < 32) gl_SubgroupGeMask.y = ~0u; + if (WaveGetLaneIndex() < 64) gl_SubgroupGeMask.z = ~0u; + if (WaveGetLaneIndex() < 96) gl_SubgroupGeMask.w = ~0u; + uint gt_lane_index = WaveGetLaneIndex() + 1; + gl_SubgroupGtMask = ~((1u << (gt_lane_index - uint4(0, 32, 64, 96))) - 1u); + if (gt_lane_index >= 32) gl_SubgroupGtMask.x = 0u; + if (gt_lane_index >= 64) gl_SubgroupGtMask.y = 0u; + if (gt_lane_index >= 96) gl_SubgroupGtMask.z = 0u; + if (gt_lane_index >= 128) gl_SubgroupGtMask.w = 0u; + if (gt_lane_index < 32) gl_SubgroupGtMask.y = ~0u; + if (gt_lane_index < 64) gl_SubgroupGtMask.z = ~0u; + if (gt_lane_index < 96) gl_SubgroupGtMask.w = ~0u; + uint le_lane_index = WaveGetLaneIndex() + 1; + gl_SubgroupLeMask = (1u << (le_lane_index - uint4(0, 32, 64, 96))) - 1u; + if (le_lane_index >= 32) gl_SubgroupLeMask.x = ~0u; + if (le_lane_index >= 64) gl_SubgroupLeMask.y = ~0u; + if (le_lane_index >= 96) gl_SubgroupLeMask.z = ~0u; + if (le_lane_index >= 128) gl_SubgroupLeMask.w = ~0u; + if (le_lane_index < 32) gl_SubgroupLeMask.y = 0u; + if (le_lane_index < 64) gl_SubgroupLeMask.z = 0u; + if (le_lane_index < 96) gl_SubgroupLeMask.w = 0u; + gl_SubgroupLtMask = (1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u; + if (WaveGetLaneIndex() >= 32) gl_SubgroupLtMask.x = ~0u; + if (WaveGetLaneIndex() >= 64) gl_SubgroupLtMask.y = ~0u; + if (WaveGetLaneIndex() >= 96) gl_SubgroupLtMask.z = ~0u; + if (WaveGetLaneIndex() < 32) gl_SubgroupLtMask.y = 0u; + if (WaveGetLaneIndex() < 64) gl_SubgroupLtMask.z = 0u; + if (WaveGetLaneIndex() < 96) gl_SubgroupLtMask.w = 0u; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..3adf7d985 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,34 @@ +static const float _17[5] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f }; + +static float4 FragColor; +static float4 v0; + +struct SPIRV_Cross_Input +{ + float4 v0 : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + for (int _46 = 0; _46 < 4; ) + { + int _33 = _46 + 1; + FragColor += _17[_33].xxxx; + _46 = _33; + continue; + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + v0 = stage_input.v0; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic-color-3comp.sm30.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic-color-3comp.sm30.frag new file mode 100644 index 000000000..d3697d650 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic-color-3comp.sm30.frag @@ -0,0 +1,26 @@ +static float3 FragColor; +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : COLOR0; +}; + +void frag_main() +{ + FragColor = vColor.xyz; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = float4(FragColor, 0.0); + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic-color-3comp.sm50.frag 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic-color-3comp.sm50.frag new file mode 100644 index 000000000..52f6fed6c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic-color-3comp.sm50.frag @@ -0,0 +1,26 @@ +static float3 FragColor; +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float3 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vColor.xyz; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic.frag new file mode 100644 index 000000000..6d067041c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/basic.frag @@ -0,0 +1,32 @@ +Texture2D uTex : register(t0); +SamplerState _uTex_sampler : register(s0); + +static float4 FragColor; +static float4 vColor; +static float2 vTex; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; + float2 vTex : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vColor * uTex.Sample(_uTex_sampler, vTex); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + vTex = stage_input.vTex; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/bit-conversions.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/bit-conversions.frag new file mode 100644 index 000000000..b60b2ebb4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/bit-conversions.frag @@ -0,0 +1,26 @@ +static float2 value; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 value : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float4(1.0f, 0.0f, asfloat(asint(value.x)), 1.0f); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + value = stage_input.value; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/boolean-mix.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/boolean-mix.frag new file mode 100644 index 000000000..f3e84898d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/boolean-mix.frag @@ -0,0 +1,27 @@ +static float2 FragColor; +static float2 x0; + +struct SPIRV_Cross_Input +{ + float2 x0 : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float2 FragColor : SV_Target0; +}; + +void frag_main() +{ + bool2 _27 = (x0.x > x0.y).xx; + FragColor = float2(_27.x ? float2(1.0f, 0.0f).x : float2(0.0f, 1.0f).x, _27.y ? 
float2(1.0f, 0.0f).y : float2(0.0f, 1.0f).y); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + x0 = stage_input.x0; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/builtins.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/builtins.frag new file mode 100644 index 000000000..922eca7c2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/builtins.frag @@ -0,0 +1,33 @@ +static float4 gl_FragCoord; +static float gl_FragDepth; +static float4 FragColor; +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; + float gl_FragDepth : SV_Depth; +}; + +void frag_main() +{ + FragColor = gl_FragCoord + vColor; + gl_FragDepth = 0.5f; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + vColor = stage_input.vColor; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_FragDepth = gl_FragDepth; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/bvec-operations.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/bvec-operations.frag new file mode 100644 index 000000000..6a22df1ed --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/bvec-operations.frag @@ -0,0 +1,29 @@ +static float2 value; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 value : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +bool _47; + +void frag_main() +{ + bool2 _25 = bool2(value.x == 0.0f, _47); + FragColor = float4(1.0f, 0.0f, float(bool2(!_25.x, !_25.y).x), float(bool2(value.x <= float2(1.5f, 0.5f).x, value.y <= float2(1.5f, 0.5f).y).x)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + value = stage_input.value; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/clip-cull-distance.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/clip-cull-distance.frag new file mode 100644 index 000000000..52f1ac30b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/clip-cull-distance.frag @@ -0,0 +1,30 @@ +static float gl_ClipDistance[2]; +static float gl_CullDistance[1]; +static float FragColor; + +struct SPIRV_Cross_Input +{ + float2 gl_ClipDistance0 : SV_ClipDistance0; + float gl_CullDistance0 : SV_CullDistance0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = (gl_ClipDistance[0] + gl_CullDistance[0]) + gl_ClipDistance[1]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_ClipDistance[0] = stage_input.gl_ClipDistance0.x; + gl_ClipDistance[1] = stage_input.gl_ClipDistance0.y; + gl_CullDistance[0] = stage_input.gl_CullDistance0.x; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/combined-texture-sampler-parameter.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/combined-texture-sampler-parameter.frag new file mode 100644 index 000000000..18968cb19 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/combined-texture-sampler-parameter.frag @@ -0,0 +1,24 @@ +Texture2D uSampler : register(t0); +SamplerState _uSampler_sampler : register(s0); +Texture2D uSamplerShadow : register(t1); +SamplerComparisonState _uSamplerShadow_sampler : register(s1); + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = (uSampler.Sample(_uSampler_sampler, 1.0f.xx) + uSampler.Load(int3(int2(10, 10), 0))).x + uSamplerShadow.SampleCmp(_uSamplerShadow_sampler, 1.0f.xxx.xy, 1.0f); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/combined-texture-sampler-shadow.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/combined-texture-sampler-shadow.frag new file mode 100644 index 000000000..6e8f833b3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/combined-texture-sampler-shadow.frag @@ -0,0 +1,23 @@ +Texture2D uDepth : register(t2); +SamplerComparisonState uSampler : register(s0); +SamplerState uSampler1 : register(s1); + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uDepth.SampleCmp(uSampler, 1.0f.xxx.xy, 1.0f) + uDepth.Sample(uSampler1, 1.0f.xx).x; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..d9336c09f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,37 @@ +RWByteAddressBuffer _34 : register(u0); +Texture2D Buf : register(t1); +SamplerState _Buf_sampler : register(s1); + +static float4 gl_FragCoord; +static int vIn; +static int vIn2; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + nointerpolation int vIn : TEXCOORD0; + nointerpolation int vIn2 : TEXCOORD1; + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + int _40 = Buf.Load(int3(int2(gl_FragCoord.xy), 0)).x % 16; + FragColor = (asfloat(_34.Load4(_40 * 16 + 0)) + asfloat(_34.Load4(_40 * 16 + 0))) + asfloat(_34.Load4(((vIn * vIn) + (vIn2 * vIn2)) * 16 + 0)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + vIn = stage_input.vIn; + vIn2 = stage_input.vIn2; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag new file mode 100644 index 000000000..d330706c7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag @@ -0,0 +1,44 @@ +struct CBO_1 +{ + float4 a; + float4 b; + float4 c; + float4 d; +}; + +ConstantBuffer<CBO_1> cbo[2][4] : register(b4, space0); +cbuffer PushMe +{ + float4 push_a : packoffset(c0); + float4 push_b : packoffset(c1); + float4 push_c : packoffset(c2); + float4 push_d : 
packoffset(c3); +}; + + +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = cbo[1][2].a; + FragColor += cbo[1][2].b; + FragColor += cbo[1][2].c; + FragColor += cbo[1][2].d; + FragColor += push_a; + FragColor += push_b; + FragColor += push_c; + FragColor += push_d; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/constant-composites.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/constant-composites.frag new file mode 100644 index 000000000..ab14fab4c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/constant-composites.frag @@ -0,0 +1,39 @@ +struct Foo +{ + float a; + float b; +}; + +static const float _16[4] = { 1.0f, 4.0f, 3.0f, 2.0f }; +static const Foo _24 = { 10.0f, 20.0f }; +static const Foo _27 = { 30.0f, 40.0f }; +static const Foo _28[2] = { { 10.0f, 20.0f }, { 30.0f, 40.0f } }; + +static float4 FragColor; +static int _line; + +struct SPIRV_Cross_Input +{ + nointerpolation int _line : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + Foo foos[2] = _28; + FragColor = _16[_line].xxxx; + FragColor += (foos[_line].a * foos[1 - _line].a).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + _line = stage_input._line; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..b2899ea02 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,54 @@ +Texture2D uSampler : register(t0); +SamplerState _uSampler_sampler : register(s0); + +static float4 FragColor; +static float4 vInput; + +struct SPIRV_Cross_Input +{ + float4 vInput : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vInput; + float4 _23 = uSampler.Sample(_uSampler_sampler, vInput.xy); + float4 _26 = ddx(vInput); + float4 _29 = ddy(vInput); + float4 _32 = fwidth(vInput); + float4 _35 = ddx_coarse(vInput); + float4 _38 = ddy_coarse(vInput); + float4 _41 = fwidth(vInput); + float4 _44 = ddx_fine(vInput); + float4 _47 = ddy_fine(vInput); + float4 _50 = fwidth(vInput); + float _56_tmp = uSampler.CalculateLevelOfDetail(_uSampler_sampler, vInput.zw); + if (vInput.y > 10.0f) + { + FragColor += _23; + FragColor += _26; + FragColor += _29; + FragColor += _32; + FragColor += _35; + FragColor += _38; + FragColor += _41; + FragColor += _44; + FragColor += _47; + FragColor += _50; + FragColor += float2(_56_tmp, _56_tmp).xyxy; + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vInput = stage_input.vInput; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/depth-greater-than.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/depth-greater-than.frag new file mode 100644 index 000000000..b9f50db00 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/depth-greater-than.frag @@ -0,0 +1,19 @@ 
+static float gl_FragDepth; +struct SPIRV_Cross_Output +{ + float gl_FragDepth : SV_DepthGreaterEqual; +}; + +void frag_main() +{ + gl_FragDepth = 0.5f; +} + +[earlydepthstencil] +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_FragDepth = gl_FragDepth; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/depth-less-than.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/depth-less-than.frag new file mode 100644 index 000000000..a702fd9f8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/depth-less-than.frag @@ -0,0 +1,19 @@ +static float gl_FragDepth; +struct SPIRV_Cross_Output +{ + float gl_FragDepth : SV_DepthLessEqual; +}; + +void frag_main() +{ + gl_FragDepth = 0.5f; +} + +[earlydepthstencil] +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_FragDepth = gl_FragDepth; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/early-fragment-test.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/early-fragment-test.frag new file mode 100644 index 000000000..ae2569d5c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/early-fragment-test.frag @@ -0,0 +1,9 @@ +void frag_main() +{ +} + +[earlydepthstencil] +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/fp16-packing.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/fp16-packing.frag new file mode 100644 index 000000000..d87828225 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/fp16-packing.frag @@ -0,0 +1,44 @@ +static float2 FP32Out; +static uint FP16; +static uint FP16Out; +static float2 FP32; + +struct SPIRV_Cross_Input +{ + nointerpolation uint FP16 : TEXCOORD0; + nointerpolation float2 FP32 : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float2 FP32Out : SV_Target0; + uint FP16Out : SV_Target1; +}; + +uint SPIRV_Cross_packHalf2x16(float2 value) +{ + uint2 Packed = f32tof16(value); + return Packed.x | (Packed.y << 16); +} + +float2 SPIRV_Cross_unpackHalf2x16(uint value) +{ + return f16tof32(uint2(value & 0xffff, value >> 16)); +} + +void frag_main() +{ + FP32Out = SPIRV_Cross_unpackHalf2x16(FP16); + FP16Out = SPIRV_Cross_packHalf2x16(FP32); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + FP16 = stage_input.FP16; + FP32 = stage_input.FP32; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FP32Out = FP32Out; + stage_output.FP16Out = FP16Out; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/fp16.invalid.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/fp16.invalid.desktop.frag new file mode 100644 index 000000000..8ec30af16 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/fp16.invalid.desktop.frag @@ -0,0 +1,45 @@ +static min16float4 v4; +static min16float3 v3; +static min16float v1; +static min16float2 v2; +static float o1; +static float2 o2; +static float3 o3; +static float4 o4; + +struct SPIRV_Cross_Input +{ + min16float v1 : TEXCOORD0; + min16float2 v2 : TEXCOORD1; + min16float3 v3 : TEXCOORD2; + min16float4 v4 : TEXCOORD3; +}; + +struct SPIRV_Cross_Output +{ + float o1 : SV_Target0; + float2 o2 : SV_Target1; + float3 o3 : SV_Target2; + float4 o4 : SV_Target3; +}; + +void frag_main() +{ + min16float4 _324; + min16float4 _387 = modf(v4, _324); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input 
stage_input) +{ + v4 = stage_input.v4; + v3 = stage_input.v3; + v1 = stage_input.v1; + v2 = stage_input.v2; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.o1 = o1; + stage_output.o2 = o2; + stage_output.o3 = o3; + stage_output.o4 = o4; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/front-facing.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/front-facing.frag new file mode 100644 index 000000000..4ed09a2bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/front-facing.frag @@ -0,0 +1,39 @@ +static bool gl_FrontFacing; +static float4 FragColor; +static float4 vA; +static float4 vB; + +struct SPIRV_Cross_Input +{ + float4 vA : TEXCOORD0; + float4 vB : TEXCOORD1; + bool gl_FrontFacing : SV_IsFrontFace; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + if (gl_FrontFacing) + { + FragColor = vA; + } + else + { + FragColor = vB; + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FrontFacing = stage_input.gl_FrontFacing; + vA = stage_input.vA; + vB = stage_input.vB; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/image-query-selective.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/image-query-selective.frag new file mode 100644 index 000000000..3b50282fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/image-query-selective.frag @@ -0,0 +1,8 @@ +void frag_main() +{ +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/image-query.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/image-query.frag new file mode 100644 index 000000000..3b50282fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/image-query.frag @@ -0,0 +1,8 @@ +void frag_main() +{ +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/input-attachment-ms.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/input-attachment-ms.frag new file mode 100644 index 000000000..e206b8379 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/input-attachment-ms.frag @@ -0,0 +1,32 @@ +Texture2DMS uSubpass0 : register(t0); +Texture2DMS uSubpass1 : register(t1); + +static float4 gl_FragCoord; +static int gl_SampleID; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; + uint gl_SampleID : SV_SampleIndex; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = (uSubpass0.Load(int2(gl_FragCoord.xy), 1) + uSubpass1.Load(int2(gl_FragCoord.xy), 2)) + uSubpass0.Load(int2(gl_FragCoord.xy), gl_SampleID); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + gl_SampleID = stage_input.gl_SampleID; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/input-attachment.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/input-attachment.frag new file mode 100644 index 000000000..d87661e5f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/input-attachment.frag @@ -0,0 +1,29 @@ +Texture2D uSubpass0 : register(t0); +Texture2D uSubpass1 : register(t1); + +static float4 
gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uSubpass0.Load(int3(int2(gl_FragCoord.xy), 0)) + uSubpass1.Load(int3(int2(gl_FragCoord.xy), 0)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/io-block.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/io-block.frag new file mode 100644 index 000000000..52c1f518b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/io-block.frag @@ -0,0 +1,28 @@ +static float4 FragColor; + +struct VertexOut +{ + float4 a : TEXCOORD1; + float4 b : TEXCOORD2; +}; + +static VertexOut _12; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = _12.a + _12.b; +} + +SPIRV_Cross_Output main(in VertexOut stage_input_12) +{ + _12 = stage_input_12; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/lut-promotion.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/lut-promotion.frag new file mode 100644 index 000000000..4d89d36eb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/lut-promotion.frag @@ -0,0 +1,55 @@ +static const float _16[16] = { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }; +static const float4 _60[4] = { 0.0f.xxxx, 1.0f.xxxx, 8.0f.xxxx, 5.0f.xxxx }; +static const float4 _104[4] = { 20.0f.xxxx, 30.0f.xxxx, 50.0f.xxxx, 60.0f.xxxx }; + +static float FragColor; +static int index; + +struct SPIRV_Cross_Input +{ + nointerpolation int index : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = _16[index]; + if (index < 10) + { + FragColor += _16[index ^ 1]; + } + else + { + FragColor += _16[index & 1]; + } + bool _63 = index > 30; + if (_63) + { + FragColor += _60[index & 3].y; + } + else + { + FragColor += _60[index & 1].x; + } + float4 foobar[4] = _60; + if (_63) + { + foobar[1].z = 20.0f; + } + int _91 = index & 3; + FragColor += foobar[_91].z; + FragColor += _104[_91].z; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + index = stage_input.index; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/matrix-input.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/matrix-input.frag new file mode 100644 index 000000000..92d87d396 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/matrix-input.frag @@ -0,0 +1,26 @@ +static float4 FragColor; +static float4x4 m; + +struct SPIRV_Cross_Input +{ + float4x4 m : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = ((m[0] + m[1]) + m[2]) + m[3]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + m = stage_input.m; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/mod.frag 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/mod.frag new file mode 100644 index 000000000..41ac93049 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/mod.frag @@ -0,0 +1,67 @@ +static float4 a4; +static float4 b4; +static float3 a3; +static float3 b3; +static float2 a2; +static float2 b2; +static float a1; +static float b1; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 a4 : TEXCOORD0; + float3 a3 : TEXCOORD1; + float2 a2 : TEXCOORD2; + float a1 : TEXCOORD3; + float4 b4 : TEXCOORD4; + float3 b3 : TEXCOORD5; + float2 b2 : TEXCOORD6; + float b1 : TEXCOORD7; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float mod(float x, float y) +{ + return x - y * floor(x / y); +} + +float2 mod(float2 x, float2 y) +{ + return x - y * floor(x / y); +} + +float3 mod(float3 x, float3 y) +{ + return x - y * floor(x / y); +} + +float4 mod(float4 x, float4 y) +{ + return x - y * floor(x / y); +} + +void frag_main() +{ + FragColor = ((mod(a4, b4) + mod(a3, b3).xyzx) + mod(a2, b2).xyxy) + mod(a1, b1).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a4 = stage_input.a4; + b4 = stage_input.b4; + a3 = stage_input.a3; + b3 = stage_input.b3; + a2 = stage_input.a2; + b2 = stage_input.b2; + a1 = stage_input.a1; + b1 = stage_input.b1; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/mrt.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/mrt.frag new file mode 100644 index 000000000..e69e91196 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/mrt.frag @@ -0,0 +1,31 @@ +static float4 RT0; +static float4 RT1; +static float4 RT2; +static float4 RT3; + +struct SPIRV_Cross_Output +{ + float4 RT0 : SV_Target0; + float4 RT1 : SV_Target1; + float4 RT2 : SV_Target2; + float4 RT3 : SV_Target3; +}; + +void frag_main() +{ + RT0 = 1.0f.xxxx; + RT1 = 2.0f.xxxx; + RT2 = 3.0f.xxxx; + RT3 = 4.0f.xxxx; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.RT0 = RT0; + stage_output.RT1 = RT1; + stage_output.RT2 = RT2; + stage_output.RT3 = RT3; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/no-return.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/no-return.frag new file mode 100644 index 000000000..3b50282fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/no-return.frag @@ -0,0 +1,8 @@ +void frag_main() +{ +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/no-return2.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/no-return2.frag new file mode 100644 index 000000000..e9d7bbc8f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/no-return2.frag @@ -0,0 +1,16 @@ +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; +}; + +void frag_main() +{ +} + +void main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/partial-write-preserve.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/partial-write-preserve.frag new file mode 100644 index 000000000..3b50282fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/partial-write-preserve.frag @@ -0,0 +1,8 @@ +void frag_main() +{ +} + +void main() +{ + frag_main(); +} diff 
--git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/point-coord-compat.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/point-coord-compat.frag new file mode 100644 index 000000000..629153982 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/point-coord-compat.frag @@ -0,0 +1,19 @@ +static float2 FragColor; + +struct SPIRV_Cross_Output +{ + float2 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float2(0.5f, 0.5f); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/query-lod.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/query-lod.desktop.frag new file mode 100644 index 000000000..fd95798bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/query-lod.desktop.frag @@ -0,0 +1,30 @@ +Texture2D uSampler : register(t0); +SamplerState _uSampler_sampler : register(s0); + +static float4 FragColor; +static float2 vTexCoord; + +struct SPIRV_Cross_Input +{ + float2 vTexCoord : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + float _19_tmp = uSampler.CalculateLevelOfDetail(_uSampler_sampler, vTexCoord); + FragColor = float2(_19_tmp, _19_tmp).xyxy; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vTexCoord = stage_input.vTexCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/resources.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/resources.frag new file mode 100644 index 000000000..aac0d53a1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/resources.frag @@ -0,0 +1,41 @@ +cbuffer CBuffer : register(b3) +{ + float4 cbuf_a : packoffset(c0); +}; + +cbuffer PushMe +{ + float4 registers_d : packoffset(c0); +}; + +Texture2D uSampledImage : register(t4); +SamplerState _uSampledImage_sampler : register(s4); +Texture2D uTexture : register(t5); +SamplerState uSampler : register(s6); + +static float2 vTex; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 vTex : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = (uSampledImage.Sample(_uSampledImage_sampler, vTex) + uTexture.Sample(uSampler, vTex)) + (cbuf_a + registers_d); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vTex = stage_input.vTex; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/row-major-layout-in-struct.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/row-major-layout-in-struct.frag new file mode 100644 index 000000000..7df0fd91d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/row-major-layout-in-struct.frag @@ -0,0 +1,38 @@ +struct Foo +{ + row_major float4x4 v; + row_major float4x4 w; +}; + +cbuffer UBO : register(b0) +{ + Foo _17_foo : packoffset(c0); +}; + + +static float4 FragColor; +static float4 vUV; + +struct SPIRV_Cross_Input +{ + float4 vUV : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = mul(mul(vUV, _17_foo.w), _17_foo.v); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUV 
= stage_input.vUV; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sample-cmp-level-zero.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sample-cmp-level-zero.frag new file mode 100644 index 000000000..b6e91ce7b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sample-cmp-level-zero.frag @@ -0,0 +1,55 @@ +Texture2D uSampler2D : register(t0); +SamplerComparisonState _uSampler2D_sampler : register(s0); +Texture2DArray uSampler2DArray : register(t1); +SamplerComparisonState _uSampler2DArray_sampler : register(s1); +TextureCube uSamplerCube : register(t2); +SamplerComparisonState _uSamplerCube_sampler : register(s2); +TextureCubeArray uSamplerCubeArray : register(t3); +SamplerComparisonState _uSamplerCubeArray_sampler : register(s3); + +static float3 vUVRef; +static float4 vDirRef; +static float FragColor; + +struct SPIRV_Cross_Input +{ + float3 vUVRef : TEXCOORD0; + float4 vDirRef : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float SPIRV_Cross_projectTextureCoordinate(float2 coord) +{ + return coord.x / coord.y; +} + +float2 SPIRV_Cross_projectTextureCoordinate(float3 coord) +{ + return float2(coord.x, coord.y) / coord.z; +} + +float3 SPIRV_Cross_projectTextureCoordinate(float4 coord) +{ + return float3(coord.x, coord.y, coord.z) / coord.w; +} + +void frag_main() +{ + float4 _80 = vDirRef; + _80.z = vDirRef.w; + FragColor = (((((((uSampler2D.SampleCmp(_uSampler2D_sampler, vUVRef.xy, vUVRef.z, int2(-1, -1)) + uSampler2DArray.SampleCmp(_uSampler2DArray_sampler, vDirRef.xyz, vDirRef.w, int2(-1, -1))) + uSamplerCube.SampleCmp(_uSamplerCube_sampler, vDirRef.xyz, vDirRef.w)) + uSamplerCubeArray.SampleCmp(_uSamplerCubeArray_sampler, vDirRef, 0.5f)) + uSampler2D.SampleCmpLevelZero(_uSampler2D_sampler, vUVRef.xy, vUVRef.z, int2(-1, -1))) + uSampler2DArray.SampleCmpLevelZero(_uSampler2DArray_sampler, vDirRef.xyz, vDirRef.w, int2(-1, -1))) + uSamplerCube.SampleCmpLevelZero(_uSamplerCube_sampler, vDirRef.xyz, vDirRef.w)) + uSampler2D.SampleCmp(_uSampler2D_sampler, SPIRV_Cross_projectTextureCoordinate(_80.xyz), vDirRef.z, int2(1, 1))) + uSampler2D.SampleCmpLevelZero(_uSampler2D_sampler, SPIRV_Cross_projectTextureCoordinate(_80.xyz), vDirRef.z, int2(1, 1)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUVRef = stage_input.vUVRef; + vDirRef = stage_input.vDirRef; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sampler-array.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sampler-array.frag new file mode 100644 index 000000000..1eced29be --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sampler-array.frag @@ -0,0 +1,30 @@ +Texture2D uCombined[4] : register(t0); +SamplerState _uCombined_sampler[4] : register(s0); +Texture2D uTex[4] : register(t4); +SamplerState uSampler[4] : register(s8); +RWTexture2D uImage[8] : register(u12); + +static float4 gl_FragCoord; +static float2 vTex; +static int vIndex; + +struct SPIRV_Cross_Input +{ + float2 vTex : TEXCOORD0; + nointerpolation int vIndex : TEXCOORD1; + float4 gl_FragCoord : SV_Position; +}; + +void frag_main() +{ + int _72 = vIndex + 1; + uImage[vIndex][int2(gl_FragCoord.xy)] = ((uCombined[vIndex].Sample(_uCombined_sampler[vIndex], vTex) + 
uTex[vIndex].Sample(uSampler[vIndex], vTex)) + uCombined[_72].Sample(_uCombined_sampler[_72], vTex)) + uTex[_72].Sample(uSampler[_72], vTex); +} + +void main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + vTex = stage_input.vTex; + vIndex = stage_input.vIndex; + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sampler-image-arrays.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sampler-image-arrays.frag new file mode 100644 index 000000000..b6d0e9421 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/sampler-image-arrays.frag @@ -0,0 +1,39 @@ +Texture2D uSampler[4] : register(t0); +SamplerState _uSampler_sampler[4] : register(s0); +Texture2D uTextures[4] : register(t8); +SamplerState uSamplers[4] : register(s4); + +static int vIndex; +static float2 vTex; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + nointerpolation float2 vTex : TEXCOORD0; + nointerpolation int vIndex : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = 0.0f.xxxx; + FragColor += uTextures[2].Sample(uSamplers[1], vTex); + FragColor += uSampler[vIndex].Sample(_uSampler_sampler[vIndex], vTex); + FragColor += uSampler[vIndex].Sample(_uSampler_sampler[vIndex], vTex + 0.100000001490116119384765625f.xx); + FragColor += uSampler[vIndex].Sample(_uSampler_sampler[vIndex], vTex + 0.20000000298023223876953125f.xx); + FragColor += uSampler[3].Sample(_uSampler_sampler[3], vTex + 0.300000011920928955078125f.xx); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vIndex = stage_input.vIndex; + vTex = stage_input.vTex; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag new file mode 100644 index 000000000..faed40678 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag @@ -0,0 +1,22 @@ +uniform sampler2D uSamp; +uniform sampler2D SPIRV_Cross_CombineduTuS; + +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : COLOR0; +}; + +void frag_main() +{ + FragColor = tex2D(uSamp, 0.5f.xx) + tex2D(SPIRV_Cross_CombineduTuS, 0.5f.xx); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = float4(FragColor); + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/spec-constant-block-size.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/spec-constant-block-size.frag new file mode 100644 index 000000000..415886dd3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/spec-constant-block-size.frag @@ -0,0 +1,37 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 2 +#endif +static const int Value = SPIRV_CROSS_CONSTANT_ID_10; + +cbuffer SpecConstArray : register(b0) +{ + float4 _15_samples[Value] : packoffset(c0); +}; + + +static float4 FragColor; +static int Index; + +struct SPIRV_Cross_Input +{ + nointerpolation int Index : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = _15_samples[Index]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + Index = stage_input.Index; + frag_main(); + 
SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/spec-constant-ternary.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/spec-constant-ternary.frag new file mode 100644 index 000000000..58735aad0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/spec-constant-ternary.frag @@ -0,0 +1,26 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 10u +#endif +static const uint s = SPIRV_CROSS_CONSTANT_ID_0; +static const bool _13 = (s > 20u); +static const uint _16 = _13 ? 30u : 50u; + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float(_16); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..d7ec92f0a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/switch-unsigned-case.frag @@ -0,0 +1,38 @@ +cbuffer Buff : register(b0) +{ + uint _15_TestVal : packoffset(c0); +}; + + +static float4 fsout_Color; + +struct SPIRV_Cross_Output +{ + float4 fsout_Color : SV_Target0; +}; + +void frag_main() +{ + fsout_Color = 1.0f.xxxx; + switch (_15_TestVal) + { + case 0u: + { + fsout_Color = 0.100000001490116119384765625f.xxxx; + break; + } + case 1u: + { + fsout_Color = 0.20000000298023223876953125f.xxxx; + break; + } + } +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.fsout_Color = fsout_Color; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/swizzle-scalar.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/swizzle-scalar.frag new file mode 100644 index 000000000..ab310b82f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/swizzle-scalar.frag @@ -0,0 +1,41 @@ +static float4 Float; +static float vFloat; +static int4 Int; +static int vInt; +static float4 Float2; +static int4 Int2; + +struct SPIRV_Cross_Input +{ + nointerpolation float vFloat : TEXCOORD0; + nointerpolation int vInt : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 Float : SV_Target0; + int4 Int : SV_Target1; + float4 Float2 : SV_Target2; + int4 Int2 : SV_Target3; +}; + +void frag_main() +{ + Float = vFloat.xxxx * 2.0f; + Int = vInt.xxxx * int4(2, 2, 2, 2); + Float2 = 10.0f.xxxx; + Int2 = int4(10, 10, 10, 10); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vFloat = stage_input.vFloat; + vInt = stage_input.vInt; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.Float = Float; + stage_output.Int = Int; + stage_output.Float2 = Float2; + stage_output.Int2 = Int2; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/tex-sampling-ms.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/tex-sampling-ms.frag new file mode 100644 index 000000000..ca88cfaeb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/tex-sampling-ms.frag @@ -0,0 +1,33 @@ +Texture2DMS uTex : register(t0); +SamplerState _uTex_sampler : register(s0); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct 
SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + int2 _22 = int2(gl_FragCoord.xy); + FragColor = uTex.Load(_22, 0); + FragColor += uTex.Load(_22, 1); + FragColor += uTex.Load(_22, 2); + FragColor += uTex.Load(_22, 3); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/tex-sampling.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/tex-sampling.frag new file mode 100644 index 000000000..2112059a5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/tex-sampling.frag @@ -0,0 +1,81 @@ +Texture1D tex1d : register(t0); +SamplerState _tex1d_sampler : register(s0); +Texture2D tex2d : register(t1); +SamplerState _tex2d_sampler : register(s1); +Texture3D tex3d : register(t2); +SamplerState _tex3d_sampler : register(s2); +TextureCube texCube : register(t3); +SamplerState _texCube_sampler : register(s3); +Texture1D tex1dShadow : register(t4); +SamplerComparisonState _tex1dShadow_sampler : register(s4); +Texture2D tex2dShadow : register(t5); +SamplerComparisonState _tex2dShadow_sampler : register(s5); +TextureCube texCubeShadow : register(t6); +SamplerComparisonState _texCubeShadow_sampler : register(s6); +Texture1DArray tex1dArray : register(t7); +SamplerState _tex1dArray_sampler : register(s7); +Texture2DArray tex2dArray : register(t8); +SamplerState _tex2dArray_sampler : register(s8); +TextureCubeArray texCubeArray : register(t9); +SamplerState _texCubeArray_sampler : register(s9); +Texture2D separateTex2d : register(t12); +SamplerState samplerNonDepth : register(s11); +Texture2D separateTex2dDepth : register(t13); +SamplerComparisonState samplerDepth : register(s10); + +static float texCoord1d; +static float2 texCoord2d; +static float3 texCoord3d; +static float4 texCoord4d; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float texCoord1d : TEXCOORD0; + float2 texCoord2d : TEXCOORD1; + float3 texCoord3d : TEXCOORD2; + float4 texCoord4d : TEXCOORD3; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float SPIRV_Cross_projectTextureCoordinate(float2 coord) +{ + return coord.x / coord.y; +} + +float2 SPIRV_Cross_projectTextureCoordinate(float3 coord) +{ + return float2(coord.x, coord.y) / coord.z; +} + +float3 SPIRV_Cross_projectTextureCoordinate(float4 coord) +{ + return float3(coord.x, coord.y, coord.z) / coord.w; +} + +void frag_main() +{ + float4 _162 = (((((((((((((((((((tex1d.Sample(_tex1d_sampler, texCoord1d) + tex1d.Sample(_tex1d_sampler, texCoord1d, 1)) + tex1d.SampleLevel(_tex1d_sampler, texCoord1d, 2.0f)) + tex1d.SampleGrad(_tex1d_sampler, texCoord1d, 1.0f, 2.0f)) + tex1d.Sample(_tex1d_sampler, SPIRV_Cross_projectTextureCoordinate(float2(texCoord1d, 2.0f)))) + tex1d.SampleBias(_tex1d_sampler, texCoord1d, 1.0f)) + tex2d.Sample(_tex2d_sampler, texCoord2d)) + tex2d.Sample(_tex2d_sampler, texCoord2d, int2(1, 2))) + tex2d.SampleLevel(_tex2d_sampler, texCoord2d, 2.0f)) + tex2d.SampleGrad(_tex2d_sampler, texCoord2d, float2(1.0f, 2.0f), float2(3.0f, 4.0f))) + tex2d.Sample(_tex2d_sampler, SPIRV_Cross_projectTextureCoordinate(float3(texCoord2d, 2.0f)))) + tex2d.SampleBias(_tex2d_sampler, texCoord2d, 1.0f)) + tex3d.Sample(_tex3d_sampler, texCoord3d)) + tex3d.Sample(_tex3d_sampler, texCoord3d, int3(1, 2, 3))) + tex3d.SampleLevel(_tex3d_sampler, texCoord3d, 2.0f)) + 
tex3d.SampleGrad(_tex3d_sampler, texCoord3d, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f))) + tex3d.Sample(_tex3d_sampler, SPIRV_Cross_projectTextureCoordinate(float4(texCoord3d, 2.0f)))) + tex3d.SampleBias(_tex3d_sampler, texCoord3d, 1.0f)) + texCube.Sample(_texCube_sampler, texCoord3d)) + texCube.SampleLevel(_texCube_sampler, texCoord3d, 2.0f)) + texCube.SampleBias(_texCube_sampler, texCoord3d, 1.0f); + float4 _333 = _162; + _333.w = ((_162.w + tex1dShadow.SampleCmp(_tex1dShadow_sampler, float3(texCoord1d, 0.0f, 0.0f).x, 0.0f)) + tex2dShadow.SampleCmp(_tex2dShadow_sampler, float3(texCoord2d, 0.0f).xy, 0.0f)) + texCubeShadow.SampleCmp(_texCubeShadow_sampler, float4(texCoord3d, 0.0f).xyz, 0.0f); + float4 _308 = ((((((((((((((_333 + tex1dArray.Sample(_tex1dArray_sampler, texCoord2d)) + tex2dArray.Sample(_tex2dArray_sampler, texCoord3d)) + texCubeArray.Sample(_texCubeArray_sampler, texCoord4d)) + tex2d.GatherRed(_tex2d_sampler, texCoord2d)) + tex2d.GatherRed(_tex2d_sampler, texCoord2d)) + tex2d.GatherGreen(_tex2d_sampler, texCoord2d)) + tex2d.GatherBlue(_tex2d_sampler, texCoord2d)) + tex2d.GatherAlpha(_tex2d_sampler, texCoord2d)) + tex2d.GatherRed(_tex2d_sampler, texCoord2d, int2(1, 1))) + tex2d.GatherRed(_tex2d_sampler, texCoord2d, int2(1, 1))) + tex2d.GatherGreen(_tex2d_sampler, texCoord2d, int2(1, 1))) + tex2d.GatherBlue(_tex2d_sampler, texCoord2d, int2(1, 1))) + tex2d.GatherAlpha(_tex2d_sampler, texCoord2d, int2(1, 1))) + tex2d.Load(int3(int2(1, 2), 0))) + separateTex2d.Sample(samplerNonDepth, texCoord2d); + float4 _336 = _308; + _336.w = _308.w + separateTex2dDepth.SampleCmp(samplerDepth, texCoord3d.xy, texCoord3d.z); + FragColor = _336; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + texCoord1d = stage_input.texCoord1d; + texCoord2d = stage_input.texCoord2d; + texCoord3d = stage_input.texCoord3d; + texCoord4d = stage_input.texCoord4d; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..d7aa73d52 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texel-fetch-offset.frag @@ -0,0 +1,31 @@ +Texture2D uTexture : register(t0); +SamplerState _uTexture_sampler : register(s0); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + int2 _22 = int2(gl_FragCoord.xy); + FragColor = uTexture.Load(int3(_22, 0), int2(1, 1)); + FragColor += uTexture.Load(int3(_22, 0), int2(-1, 1)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texture-proj-shadow.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texture-proj-shadow.frag new file mode 100644 index 000000000..4beaa1176 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texture-proj-shadow.frag @@ -0,0 +1,66 @@ +Texture1D uShadow1D : register(t0); +SamplerComparisonState _uShadow1D_sampler : register(s0); +Texture2D uShadow2D : register(t1); +SamplerComparisonState _uShadow2D_sampler : register(s1); +Texture1D 
uSampler1D : register(t2); +SamplerState _uSampler1D_sampler : register(s2); +Texture2D uSampler2D : register(t3); +SamplerState _uSampler2D_sampler : register(s3); +Texture3D uSampler3D : register(t4); +SamplerState _uSampler3D_sampler : register(s4); + +static float FragColor; +static float4 vClip4; +static float2 vClip2; +static float3 vClip3; + +struct SPIRV_Cross_Input +{ + float3 vClip3 : TEXCOORD0; + float4 vClip4 : TEXCOORD1; + float2 vClip2 : TEXCOORD2; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float SPIRV_Cross_projectTextureCoordinate(float2 coord) +{ + return coord.x / coord.y; +} + +float2 SPIRV_Cross_projectTextureCoordinate(float3 coord) +{ + return float2(coord.x, coord.y) / coord.z; +} + +float3 SPIRV_Cross_projectTextureCoordinate(float4 coord) +{ + return float3(coord.x, coord.y, coord.z) / coord.w; +} + +void frag_main() +{ + float4 _20 = vClip4; + _20.y = vClip4.w; + FragColor = uShadow1D.SampleCmp(_uShadow1D_sampler, SPIRV_Cross_projectTextureCoordinate(_20.xy), vClip4.z); + float4 _30 = vClip4; + _30.z = vClip4.w; + FragColor = uShadow2D.SampleCmp(_uShadow2D_sampler, SPIRV_Cross_projectTextureCoordinate(_30.xyz), vClip4.z); + FragColor = uSampler1D.Sample(_uSampler1D_sampler, SPIRV_Cross_projectTextureCoordinate(vClip2)).x; + FragColor = uSampler2D.Sample(_uSampler2D_sampler, SPIRV_Cross_projectTextureCoordinate(vClip3)).x; + FragColor = uSampler3D.Sample(_uSampler3D_sampler, SPIRV_Cross_projectTextureCoordinate(vClip4)).x; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vClip4 = stage_input.vClip4; + vClip2 = stage_input.vClip2; + vClip3 = stage_input.vClip3; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texture-size-combined-image-sampler.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texture-size-combined-image-sampler.frag new file mode 100644 index 000000000..d5c373746 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/texture-size-combined-image-sampler.frag @@ -0,0 +1,30 @@ +Texture2D uTex : register(t0); +SamplerState uSampler : register(s1); + +static int2 FooOut; + +struct SPIRV_Cross_Output +{ + int2 FooOut : SV_Target0; +}; + +uint2 SPIRV_Cross_textureSize(Texture2D Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +void frag_main() +{ + uint _23_dummy_parameter; + FooOut = int2(SPIRV_Cross_textureSize(uTex, uint(0), _23_dummy_parameter)); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FooOut = FooOut; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/unary-enclose.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/unary-enclose.frag new file mode 100644 index 000000000..348b91c17 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/unary-enclose.frag @@ -0,0 +1,29 @@ +static float4 FragColor; +static float4 vIn; +static int4 vIn1; + +struct SPIRV_Cross_Input +{ + float4 vIn : TEXCOORD0; + nointerpolation int4 vIn1 : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vIn; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vIn = stage_input.vIn; + vIn1 = stage_input.vIn1; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return 
stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/unorm-snorm-packing.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/unorm-snorm-packing.frag new file mode 100644 index 000000000..57b595063 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/unorm-snorm-packing.frag @@ -0,0 +1,109 @@ +static float4 FP32Out; +static uint UNORM8; +static uint SNORM8; +static uint UNORM16; +static uint SNORM16; +static uint UNORM8Out; +static float4 FP32; +static uint SNORM8Out; +static uint UNORM16Out; +static uint SNORM16Out; + +struct SPIRV_Cross_Input +{ + nointerpolation uint SNORM8 : TEXCOORD0; + nointerpolation uint UNORM8 : TEXCOORD1; + nointerpolation uint SNORM16 : TEXCOORD2; + nointerpolation uint UNORM16 : TEXCOORD3; + nointerpolation float4 FP32 : TEXCOORD4; +}; + +struct SPIRV_Cross_Output +{ + float4 FP32Out : SV_Target0; + uint UNORM8Out : SV_Target1; + uint SNORM8Out : SV_Target2; + uint UNORM16Out : SV_Target3; + uint SNORM16Out : SV_Target4; +}; + +uint SPIRV_Cross_packUnorm4x8(float4 value) +{ + uint4 Packed = uint4(round(saturate(value) * 255.0)); + return Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24); +} + +float4 SPIRV_Cross_unpackUnorm4x8(uint value) +{ + uint4 Packed = uint4(value & 0xff, (value >> 8) & 0xff, (value >> 16) & 0xff, value >> 24); + return float4(Packed) / 255.0; +} + +uint SPIRV_Cross_packSnorm4x8(float4 value) +{ + int4 Packed = int4(round(clamp(value, -1.0, 1.0) * 127.0)) & 0xff; + return uint(Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24)); +} + +float4 SPIRV_Cross_unpackSnorm4x8(uint value) +{ + int SignedValue = int(value); + int4 Packed = int4(SignedValue << 24, SignedValue << 16, SignedValue << 8, SignedValue) >> 24; + return clamp(float4(Packed) / 127.0, -1.0, 1.0); +} + +uint SPIRV_Cross_packUnorm2x16(float2 value) +{ + uint2 Packed = uint2(round(saturate(value) * 65535.0)); + return Packed.x | (Packed.y << 16); +} + +float2 SPIRV_Cross_unpackUnorm2x16(uint value) +{ + uint2 Packed = uint2(value & 0xffff, value >> 16); + return float2(Packed) / 65535.0; +} + +uint SPIRV_Cross_packSnorm2x16(float2 value) +{ + int2 Packed = int2(round(clamp(value, -1.0, 1.0) * 32767.0)) & 0xffff; + return uint(Packed.x | (Packed.y << 16)); +} + +float2 SPIRV_Cross_unpackSnorm2x16(uint value) +{ + int SignedValue = int(value); + int2 Packed = int2(SignedValue << 16, SignedValue) >> 16; + return clamp(float2(Packed) / 32767.0, -1.0, 1.0); +} + +void frag_main() +{ + FP32Out = SPIRV_Cross_unpackUnorm4x8(UNORM8); + FP32Out = SPIRV_Cross_unpackSnorm4x8(SNORM8); + float2 _21 = SPIRV_Cross_unpackUnorm2x16(UNORM16); + FP32Out = float4(_21.x, _21.y, FP32Out.z, FP32Out.w); + float2 _26 = SPIRV_Cross_unpackSnorm2x16(SNORM16); + FP32Out = float4(_26.x, _26.y, FP32Out.z, FP32Out.w); + UNORM8Out = SPIRV_Cross_packUnorm4x8(FP32); + SNORM8Out = SPIRV_Cross_packSnorm4x8(FP32); + UNORM16Out = SPIRV_Cross_packUnorm2x16(FP32.xy); + SNORM16Out = SPIRV_Cross_packSnorm2x16(FP32.zw); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + UNORM8 = stage_input.UNORM8; + SNORM8 = stage_input.SNORM8; + UNORM16 = stage_input.UNORM16; + SNORM16 = stage_input.SNORM16; + FP32 = stage_input.FP32; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FP32Out = FP32Out; + stage_output.UNORM8Out = UNORM8Out; + stage_output.SNORM8Out = SNORM8Out; + stage_output.UNORM16Out = UNORM16Out; + stage_output.SNORM16Out = SNORM16Out; + return stage_output; +} diff --git 
a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/various-glsl-ops.frag b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/various-glsl-ops.frag new file mode 100644 index 000000000..0bc2fc1a9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/frag/various-glsl-ops.frag @@ -0,0 +1,26 @@ +static float2 interpolant; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 interpolant : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float4(0.0f, 0.0f, 0.0f, EvaluateAttributeSnapped(interpolant, 0.100000001490116119384765625f.xx).x) + float4(0.0f, 0.0f, 0.0f, ddx_coarse(interpolant.x)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + interpolant = stage_input.interpolant; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/basic.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/basic.vert new file mode 100644 index 000000000..f357d907c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/basic.vert @@ -0,0 +1,39 @@ +cbuffer UBO +{ + row_major float4x4 _16_uMVP : packoffset(c0); +}; + + +static float4 gl_Position; +static float4 aVertex; +static float3 vNormal; +static float3 aNormal; + +struct SPIRV_Cross_Input +{ + float4 aVertex : TEXCOORD0; + float3 aNormal : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float3 vNormal : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = mul(aVertex, _16_uMVP); + vNormal = aNormal; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + aVertex = stage_input.aVertex; + aNormal = stage_input.aNormal; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.vNormal = vNormal; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/clip-cull-distance.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/clip-cull-distance.vert new file mode 100644 index 000000000..7e0d104ac --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/clip-cull-distance.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static float gl_ClipDistance[2]; +static float gl_CullDistance[1]; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; + float2 gl_ClipDistance0 : SV_ClipDistance0; + float gl_CullDistance0 : SV_CullDistance0; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + gl_ClipDistance[0] = 0.0f; + gl_ClipDistance[1] = 0.0f; + gl_CullDistance[0] = 4.0f; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.gl_ClipDistance0.x = gl_ClipDistance[0]; + stage_output.gl_ClipDistance0.y = gl_ClipDistance[1]; + stage_output.gl_CullDistance0.x = gl_CullDistance[0]; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/instancing.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/instancing.vert new file mode 100644 index 000000000..48b2df20d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/instancing.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static int gl_VertexIndex; +static int gl_InstanceIndex; +struct SPIRV_Cross_Input +{ + uint gl_VertexIndex : SV_VertexID; + uint gl_InstanceIndex : SV_InstanceID; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : 
SV_Position; +}; + +void vert_main() +{ + gl_Position = float(gl_VertexIndex + gl_InstanceIndex).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_VertexIndex = int(stage_input.gl_VertexIndex); + gl_InstanceIndex = int(stage_input.gl_InstanceIndex); + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/locations.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/locations.vert new file mode 100644 index 000000000..b007582c2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/locations.vert @@ -0,0 +1,73 @@ +struct Foo +{ + float3 a; + float3 b; + float3 c; +}; + +static const Foo _71 = { 1.0f.xxx, 1.0f.xxx, 1.0f.xxx }; + +static float4 gl_Position; +static float4 Input2; +static float4 Input4; +static float4 Input0; +static float vLocation0; +static float vLocation1; +static float vLocation2[2]; +static Foo vLocation4; +static float vLocation9; + +struct VertexOut +{ + float3 color : TEXCOORD7; + float3 foo : TEXCOORD8; +}; + +static VertexOut vout; + +struct SPIRV_Cross_Input +{ + float4 Input0 : TEXCOORD0; + float4 Input2 : TEXCOORD2; + float4 Input4 : TEXCOORD4; +}; + +struct SPIRV_Cross_Output +{ + float vLocation0 : TEXCOORD0; + float vLocation1 : TEXCOORD1; + float vLocation2[2] : TEXCOORD2; + Foo vLocation4 : TEXCOORD4; + float vLocation9 : TEXCOORD9; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = ((1.0f.xxxx + Input2) + Input4) + Input0; + vLocation0 = 0.0f; + vLocation1 = 1.0f; + vLocation2[0] = 2.0f; + vLocation2[1] = 2.0f; + vLocation4 = _71; + vLocation9 = 9.0f; + vout.color = 2.0f.xxx; + vout.foo = 4.0f.xxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input, out VertexOut stage_outputvout) +{ + Input2 = stage_input.Input2; + Input4 = stage_input.Input4; + Input0 = stage_input.Input0; + vert_main(); + stage_outputvout = vout; + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.vLocation0 = vLocation0; + stage_output.vLocation1 = vLocation1; + stage_output.vLocation2 = vLocation2; + stage_output.vLocation4 = vLocation4; + stage_output.vLocation9 = vLocation9; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/matrix-attribute.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/matrix-attribute.vert new file mode 100644 index 000000000..a3d0eef56 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/matrix-attribute.vert @@ -0,0 +1,35 @@ +static float4 gl_Position; +static float4x4 m; +static float3 pos; + +struct SPIRV_Cross_Input +{ + float3 pos : TEXCOORD0; + float4 m_0 : TEXCOORD1_0; + float4 m_1 : TEXCOORD1_1; + float4 m_2 : TEXCOORD1_2; + float4 m_3 : TEXCOORD1_3; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = mul(float4(pos, 1.0f), m); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + m[0] = stage_input.m_0; + m[1] = stage_input.m_1; + m[2] = stage_input.m_2; + m[3] = stage_input.m_3; + pos = stage_input.pos; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/matrix-output.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/matrix-output.vert new file mode 100644 index 000000000..dc776cb5e --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/matrix-output.vert @@ -0,0 +1,23 @@ +static float4 gl_Position; +static float4x4 m; + +struct SPIRV_Cross_Output +{ + float4x4 m : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + m = float4x4(float4(1.0f, 0.0f, 0.0f, 0.0f), float4(0.0f, 1.0f, 0.0f, 0.0f), float4(0.0f, 0.0f, 1.0f, 0.0f), float4(0.0f, 0.0f, 0.0f, 1.0f)); +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.m = m; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/no-input.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/no-input.vert new file mode 100644 index 000000000..c98544dbe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/no-input.vert @@ -0,0 +1,18 @@ +static float4 gl_Position; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/point-size-compat.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/point-size-compat.vert new file mode 100644 index 000000000..95f45d02f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/point-size-compat.vert @@ -0,0 +1,20 @@ +static float4 gl_Position; +static float gl_PointSize; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + gl_PointSize = 1.0f; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/qualifiers.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/qualifiers.vert new file mode 100644 index 000000000..13ee2a8c1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/qualifiers.vert @@ -0,0 +1,50 @@ +static float4 gl_Position; +static float vFlat; +static float vCentroid; +static float vSample; +static float vNoperspective; + +struct Block +{ + nointerpolation float vFlat : TEXCOORD4; + centroid float vCentroid : TEXCOORD5; + sample float vSample : TEXCOORD6; + noperspective float vNoperspective : TEXCOORD7; +}; + +static Block vout; + +struct SPIRV_Cross_Output +{ + nointerpolation float vFlat : TEXCOORD0; + centroid float vCentroid : TEXCOORD1; + sample float vSample : TEXCOORD2; + noperspective float vNoperspective : TEXCOORD3; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + vFlat = 0.0f; + vCentroid = 1.0f; + vSample = 2.0f; + vNoperspective = 3.0f; + vout.vFlat = 0.0f; + vout.vCentroid = 1.0f; + vout.vSample = 2.0f; + vout.vNoperspective = 3.0f; +} + +SPIRV_Cross_Output main(out Block stage_outputvout) +{ + vert_main(); + stage_outputvout = vout; + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.vFlat = vFlat; + stage_output.vCentroid = vCentroid; + stage_output.vSample = vSample; + stage_output.vNoperspective = vNoperspective; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/read-from-row-major-array.vert new file 
mode 100644 index 000000000..03fa4f335 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/read-from-row-major-array.vert @@ -0,0 +1,36 @@ +cbuffer Block : register(b0) +{ + column_major float2x3 _104_var[3][4] : packoffset(c0); +}; + + +static float4 gl_Position; +static float4 a_position; +static float v_vtxResult; + +struct SPIRV_Cross_Input +{ + float4 a_position : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float v_vtxResult : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = a_position; + v_vtxResult = ((float(abs(_104_var[0][0][0].x - 2.0f) < 0.0500000007450580596923828125f) * float(abs(_104_var[0][0][0].y - 6.0f) < 0.0500000007450580596923828125f)) * float(abs(_104_var[0][0][0].z - (-6.0f)) < 0.0500000007450580596923828125f)) * ((float(abs(_104_var[0][0][1].x) < 0.0500000007450580596923828125f) * float(abs(_104_var[0][0][1].y - 5.0f) < 0.0500000007450580596923828125f)) * float(abs(_104_var[0][0][1].z - 5.0f) < 0.0500000007450580596923828125f)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a_position = stage_input.a_position; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.v_vtxResult = v_vtxResult; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/return-array.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/return-array.vert new file mode 100644 index 000000000..bd1575563 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/return-array.vert @@ -0,0 +1,29 @@ +static float4 gl_Position; +static float4 vInput0; +static float4 vInput1; + +struct SPIRV_Cross_Input +{ + float4 vInput0 : TEXCOORD0; + float4 vInput1 : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 10.0f.xxxx + vInput1; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vInput0 = stage_input.vInput0; + vInput1 = stage_input.vInput1; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/sampler-buffers.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/sampler-buffers.vert new file mode 100644 index 000000000..365218544 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/sampler-buffers.vert @@ -0,0 +1,22 @@ +Buffer uFloatSampler : register(t1); +Buffer uIntSampler : register(t2); +Buffer uUintSampler : register(t3); + +static float4 gl_Position; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = (uFloatSampler.Load(20) + asfloat(uIntSampler.Load(40))) + asfloat(uUintSampler.Load(60)); +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/struct-composite-decl.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/struct-composite-decl.vert new file mode 100644 index 000000000..76bd34977 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/struct-composite-decl.vert @@ -0,0 +1,44 @@ +struct VOut +{ + float4 a; + float4 b; + float4 c; + float4 d; +}; + +static VOut vout; +static float4 a; +static float4 b; +static float4 c; +static float4 d; + +struct SPIRV_Cross_Input +{ + float4 a : TEXCOORD0; + float4 b : 
TEXCOORD1; + float4 c : TEXCOORD2; + float4 d : TEXCOORD3; +}; + +struct SPIRV_Cross_Output +{ + VOut vout : TEXCOORD0; +}; + +void vert_main() +{ + VOut _26 = { a, b, c, d }; + vout = _26; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a = stage_input.a; + b = stage_input.b; + c = stage_input.c; + d = stage_input.d; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.vout = vout; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/texture_buffer.vert b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/texture_buffer.vert new file mode 100644 index 000000000..1c92f6fe6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-hlsl/vert/texture_buffer.vert @@ -0,0 +1,21 @@ +Buffer uSamp : register(t4); +RWBuffer uSampo : register(u5); + +static float4 gl_Position; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = uSamp.Load(10) + uSampo[100]; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..feb7dbbe5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct u0_counters +{ + uint c; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(device u0_counters& u0_counter [[buffer(0)]], texture2d u0 [[texture(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint _29 = atomic_fetch_sub_explicit((volatile device atomic_uint*)&u0_counter.c, 1, memory_order_relaxed); + u0.write(uint4(uint(int(gl_GlobalInvocationID.x))), spvTexelBufferCoord(as_type(as_type(_29)))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..22409301c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct u0_counters +{ + uint c; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(device u0_counters& u0_counter [[buffer(0)]], texture2d u0 [[texture(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint _29 = atomic_fetch_add_explicit((volatile device atomic_uint*)&u0_counter.c, 1, memory_order_relaxed); + u0.write(uint4(uint(int(gl_GlobalInvocationID.x))), spvTexelBufferCoord(as_type(as_type(_29)))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_iadd.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_iadd.asm.comp new file mode 100644 index 000000000..47ce85f8f --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_iadd.asm.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + _6._m0 = _5._m1 + uint4(_5._m0); + _6._m0 = uint4(_5._m0) + _5._m1; + _6._m0 = _5._m1 + _5._m1; + _6._m0 = uint4(_5._m0 + _5._m0); + _6._m1 = int4(_5._m1 + _5._m1); + _6._m1 = _5._m0 + _5._m0; + _6._m1 = int4(_5._m1) + _5._m0; + _6._m1 = _5._m0 + int4(_5._m1); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_sar.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_sar.asm.comp new file mode 100644 index 000000000..417683058 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_sar.asm.comp @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + int4 _22 = _5._m0; + uint4 _23 = _5._m1; + _6._m0 = uint4(int4(_23) >> _22); + _6._m0 = uint4(_22 >> int4(_23)); + _6._m0 = uint4(int4(_23) >> int4(_23)); + _6._m0 = uint4(_22 >> _22); + _6._m1 = int4(_23) >> int4(_23); + _6._m1 = _22 >> _22; + _6._m1 = int4(_23) >> _22; + _6._m1 = _22 >> int4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_sdiv.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_sdiv.asm.comp new file mode 100644 index 000000000..6b80dff31 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_sdiv.asm.comp @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + int4 _22 = _5._m0; + uint4 _23 = _5._m1; + _6._m0 = uint4(int4(_23) / _22); + _6._m0 = uint4(_22 / int4(_23)); + _6._m0 = uint4(int4(_23) / int4(_23)); + _6._m0 = uint4(_22 / _22); + _6._m1 = int4(_23) / int4(_23); + _6._m1 = _22 / _22; + _6._m1 = int4(_23) / _22; + _6._m1 = _22 / int4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_slr.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_slr.asm.comp new file mode 100644 index 000000000..1dfca3918 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/bitcast_slr.asm.comp @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + int4 _22 = _5._m0; + uint4 _23 = _5._m1; + _6._m0 = _23 >> uint4(_22); + _6._m0 = uint4(_22) >> _23; + _6._m0 = _23 >> _23; + _6._m0 = uint4(_22) >> uint4(_22); + _6._m1 = int4(_23 >> _23); + _6._m1 = int4(uint4(_22) >> uint4(_22)); + _6._m1 = int4(_23 >> uint4(_22)); + _6._m1 = int4(uint4(_22) >> _23); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..95f2717b4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,45 @@ +#include +#include + +using 
namespace metal; + +struct A +{ + int a; + int b; +}; + +struct A_1 +{ + A Data[1]; +}; + +struct A_2 +{ + int a; + int b; +}; + +struct A_3 +{ + A_2 Data[1024]; +}; + +struct B +{ + A Data[1]; +}; + +struct B_1 +{ + A_2 Data[1024]; +}; + +kernel void main0(device B& C3 [[buffer(0)]], device A_1& C1 [[buffer(1)]], constant A_3& C2 [[buffer(2)]], constant B_1& C4 [[buffer(3)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + C1.Data[gl_GlobalInvocationID.x].a = C2.Data[gl_GlobalInvocationID.x].a; + C1.Data[gl_GlobalInvocationID.x].b = C2.Data[gl_GlobalInvocationID.x].b; + C3.Data[gl_GlobalInvocationID.x].a = C4.Data[gl_GlobalInvocationID.x].a; + C3.Data[gl_GlobalInvocationID.x].b = C4.Data[gl_GlobalInvocationID.x].b; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp new file mode 100644 index 000000000..ce336e97b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp @@ -0,0 +1,28 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct cb5_struct +{ + float4 _m0[5]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(constant cb5_struct& cb0_5 [[buffer(1)]], texture2d u0 [[texture(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + uint _44 = as_type(as_type(int(gl_LocalInvocationID.x) << 4)) >> 2u; + uint4 _51 = as_type(cb0_5._m0[uint(as_type(as_type(int(gl_LocalInvocationID.x)))) + 1u]); + u0.write(_51.xxxx, spvTexelBufferCoord(_44)); + u0.write(_51.yyyy, spvTexelBufferCoord((_44 + 1u))); + u0.write(_51.zzzz, spvTexelBufferCoord((_44 + 2u))); + u0.write(_51.wwww, spvTexelBufferCoord((_44 + 3u))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/buffer-write.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/buffer-write.asm.comp new file mode 100644 index 000000000..ddf958289 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/buffer-write.asm.comp @@ -0,0 +1,23 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct cb +{ + float value; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(constant cb& _6 [[buffer(7)]], texture2d _buffer [[texture(0)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + _buffer.write(_6.value, spvTexelBufferCoord(((32u * gl_WorkGroupID.x) + gl_LocalInvocationIndex))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..b0ff373ac --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +kernel void main0(uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/multiple-entry.asm.comp 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/multiple-entry.asm.comp new file mode 100644 index 000000000..765273326 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/multiple-entry.asm.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct _6 +{ + int4 _m0; + uint4 _m1; +}; + +struct _7 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _6& _8 [[buffer(0)]], device _7& _9 [[buffer(1)]]) +{ + _9._m0 = _8._m1 + uint4(_8._m0); + _9._m0 = uint4(_8._m0) + _8._m1; + _9._m0 = _8._m1 + _8._m1; + _9._m0 = uint4(_8._m0 + _8._m0); + _9._m1 = int4(_8._m1 + _8._m1); + _9._m1 = _8._m0 + _8._m0; + _9._m1 = int4(_8._m1) + _8._m0; + _9._m1 = _8._m0 + int4(_8._m1); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/quantize.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/quantize.asm.comp new file mode 100644 index 000000000..1839ec7a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/quantize.asm.comp @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +struct SSBO0 +{ + float scalar; + float2 vec2_val; + float3 vec3_val; + float4 vec4_val; +}; + +kernel void main0(device SSBO0& _4 [[buffer(0)]]) +{ + _4.scalar = float(half(_4.scalar)); + _4.vec2_val = float2(half2(_4.vec2_val)); + _4.vec3_val = float3(half3(_4.vec3_val)); + _4.vec4_val = float4(half4(_4.vec4_val)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/relaxed-block-layout.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/relaxed-block-layout.asm.comp new file mode 100644 index 000000000..6728a4e2d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/relaxed-block-layout.asm.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct foo +{ + uint bar; + packed_float3 baz; + uchar quux; + packed_uchar4 blah; + packed_half2 wibble; +}; + +kernel void main0(device foo& _8 [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint3 gl_NumWorkGroups [[threadgroups_per_grid]]) +{ + _8.bar = gl_LocalInvocationID.x; + _8.baz = float3(gl_GlobalInvocationID); + _8.blah = uchar4(uint4(uint4(uchar4(_8.blah)).xyz + gl_WorkGroupID, 0u)); + _8.wibble = half2(float2(half2(_8.wibble)) * float2(gl_NumWorkGroups.xy)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp new file mode 100644 index 000000000..5802ddac9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +constant uint _5_tmp [[function_constant(10)]]; +constant uint _5 = is_function_constant_defined(_5_tmp) ? _5_tmp : 9u; +constant uint _6_tmp [[function_constant(12)]]; +constant uint _6 = is_function_constant_defined(_6_tmp) ? 
_6_tmp : 4u; +constant uint3 gl_WorkGroupSize = uint3(_5, 20u, _6); + +struct SSBO +{ + float a; +}; + +kernel void main0(device SSBO& _4 [[buffer(0)]]) +{ + _4.a += 1.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp new file mode 100644 index 000000000..2c9b038b2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +constant uint _3_tmp [[function_constant(0)]]; +constant uint _3 = is_function_constant_defined(_3_tmp) ? _3_tmp : 1u; +constant uint _4_tmp [[function_constant(2)]]; +constant uint _4 = is_function_constant_defined(_4_tmp) ? _4_tmp : 3u; +constant uint3 gl_WorkGroupSize = uint3(_3, 2u, _4); + +struct _6 +{ + float _m0[1]; +}; + +kernel void main0(device _6& _8 [[buffer(0)]], device _6& _9 [[buffer(1)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]]) +{ + _8._m0[gl_WorkGroupID.x] = _9._m0[gl_WorkGroupID.x] + _8._m0[gl_WorkGroupID.x]; + uint3 _23 = gl_WorkGroupSize; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/variable-pointers-2.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/variable-pointers-2.asm.comp new file mode 100644 index 000000000..77caa900e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/variable-pointers-2.asm.comp @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct foo +{ + int a[128]; + uint b; + float2 c; +}; + +struct bar +{ + int d; +}; + +kernel void main0(device foo& buf [[buffer(0)]], constant bar& cb [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + bool _70 = cb.d != 0; + for (device int* _52 = &(_70 ? &buf : nullptr)->a[0u], * _55 = &buf.a[0u]; (*_52) != (*_55); ) + { + int _66 = ((*_52) + (*_55)) + int((*(_70 ? 
&gl_GlobalInvocationID : &gl_LocalInvocationID)).x); + *_52 = _66; + *_55 = _66; + _52 = &_52[1u]; + _55 = &_55[1u]; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp new file mode 100644 index 000000000..9b29d85bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +struct foo +{ + int a; +}; + +struct bar +{ + int b; +}; + +kernel void main0(device foo& x [[buffer(0)]], device bar& y [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + y.b = x.a + x.a; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp new file mode 100644 index 000000000..9627124d2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u); + +struct cb1_struct +{ + float4 _m0[1]; +}; + +kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + int2 _46 = int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u)); + int _97; + _97 = 0; + for (; _97 < _46.y; _97++) + { + for (int _98 = 0; _98 < _46.x; ) + { + u0.write(cb0_1._m0[0].xxxx, uint2(((_46 * int3(gl_LocalInvocationID).xy) + int2(_97, _98)))); + _98++; + continue; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp new file mode 100644 index 000000000..9ab3a8729 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u); + +struct cb1_struct +{ + float4 _m0[1]; +}; + +kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + int2 _40 = int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u)); + int _80; + _80 = 0; + for (; _80 < _40.y; _80++) + { + for (int _81 = 0; _81 < _40.x; ) + { + u0.write(cb0_1._m0[0].xxxx, uint2(((_40 * int3(gl_LocalInvocationID).xy) + int2(_80, _81)))); + _81++; + continue; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag new file mode 100644 index 000000000..e420153bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uTex [[texture(1)]], sampler uSampler [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uTex.sample(uSampler, in.vUV); + out.FragColor += uTex.sample(uSampler, in.vUV, int2(1)); + return 
out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/default-member-names.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/default-member-names.asm.frag new file mode 100644 index 000000000..82b41d1a8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/default-member-names.asm.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +constant float _49 = {}; + +struct main0_out +{ + float4 m_3 [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.m_3 = float4(_49); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..92ac1d9f8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/empty-struct.asm.frag @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +fragment void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag new file mode 100644 index 000000000..649f9f6f1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct Foo +{ + packed_float3 a; + float b; +}; + +struct buf +{ + Foo results[16]; + float4 bar; +}; + +struct main0_out +{ + float4 _entryPointOutput [[color(0)]]; +}; + +fragment main0_out main0(constant buf& _11 [[buffer(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + int _67 = int(gl_FragCoord.x) % 16; + out._entryPointOutput = float4(dot(float3(_11.results[_67].a), _11.bar.xyz), _11.results[_67].b, 0.0, 0.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/frem.asm.frag new file mode 100644 index 000000000..ebc73d52d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/frem.asm.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vA [[user(locn0)]]; + float4 vB [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = fmod(in.vA, in.vB); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..64edee872 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(10.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..0d691b306 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + int2 Size 
[[color(0)]]; +}; + +fragment main0_out main0(texture2d uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.Size = int2(uTexture.get_width(), uTexture.get_height()) + int2(uTexture.get_width(1), uTexture.get_height(1)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..dd977a99d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,50 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 v0 [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uImage [[texture(0)]], sampler uImageSmplr [[sampler(0)]]) +{ + main0_out out = {}; + float phi; + float4 _36; + int _51; + _51 = 0; + phi = 1.0; + _36 = float4(1.0, 2.0, 1.0, 2.0); + for (;;) + { + out.FragColor = _36; + if (_51 < 4) + { + if (in.v0[_51] > 0.0) + { + float2 _48 = float2(phi); + _51++; + phi += 2.0; + _36 = uImage.sample(uImageSmplr, _48, level(0.0)); + continue; + } + else + { + break; + } + } + else + { + break; + } + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..8537dac19 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float3 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float3(as_type(0x7f800000u), as_type(0xff800000u), as_type(0x7fc00000u)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag new file mode 100644 index 000000000..41472adac --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag @@ -0,0 +1,47 @@ +#include +#include + +using namespace metal; + +struct Input +{ + float2 v0; + float2 v1; + float3 v2; + float4 v3; + float v4; + float v5; + float v6; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 Input_v0 [[user(locn0)]]; + float2 Input_v1 [[user(locn1), center_no_perspective]]; + float3 Input_v2 [[user(locn2), centroid_perspective]]; + float4 Input_v3 [[user(locn3), centroid_no_perspective]]; + float Input_v4 [[user(locn4), sample_perspective]]; + float Input_v5 [[user(locn5), sample_no_perspective]]; + float Input_v6 [[user(locn6), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Input inp = {}; + inp.v0 = in.Input_v0; + inp.v1 = in.Input_v1; + inp.v2 = in.Input_v2; + inp.v3 = in.Input_v3; + inp.v4 = in.Input_v4; + inp.v5 = in.Input_v5; + inp.v6 = in.Input_v6; + out.FragColor = float4(inp.v0.x + inp.v1.y, inp.v2.xy, ((inp.v3.w * inp.v4) + inp.v5) - inp.v6); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/locations-components.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/locations-components.asm.frag new file mode 100644 index 
000000000..07ab82ac1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/locations-components.asm.frag @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 o0 [[color(0)]]; +}; + +struct main0_in +{ + float2 m_2 [[user(locn1)]]; + float m_3 [[user(locn1_2)]]; + float m_4 [[user(locn2), flat]]; + uint m_5 [[user(locn2_1)]]; + uint m_6 [[user(locn2_2)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float4 v1; + v1 = float4(in.m_2.x, in.m_2.y, v1.z, v1.w); + v1.z = in.m_3; + float4 v2; + v2.x = in.m_4; + v2.y = as_type(in.m_5); + v2.z = as_type(in.m_6); + out.o0.y = float(as_type(as_type(as_type(v2.y) + as_type(v2.z)))); + out.o0.x = v1.y + v2.x; + out.o0 = float4(out.o0.x, out.o0.y, v1.z, v1.x); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..610d447a8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,68 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float _46[16] = { 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0 }; +constant float4 _76[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; +constant float4 _90[4] = { float4(20.0), float4(30.0), float4(50.0), float4(60.0) }; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
+template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + float4 foobar[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + float4 baz[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + main0_out out = {}; + out.FragColor = _46[in.index]; + if (in.index < 10) + { + out.FragColor += _46[in.index ^ 1]; + } + else + { + out.FragColor += _46[in.index & 1]; + } + bool _99 = in.index > 30; + if (_99) + { + out.FragColor += _76[in.index & 3].y; + } + else + { + out.FragColor += _76[in.index & 1].x; + } + if (_99) + { + foobar[1].z = 20.0; + } + int _37 = in.index & 3; + out.FragColor += foobar[_37].z; + spvArrayCopyFromConstant1(baz, _90); + out.FragColor += baz[_37].z; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag new file mode 100644 index 000000000..92ac1d9f8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag @@ -0,0 +1,9 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +fragment void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/op-constant-null.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/op-constant-null.asm.frag new file mode 100644 index 000000000..e1badb517 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/op-constant-null.asm.frag @@ -0,0 +1,17 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = 0.0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/op-image-sampled-image.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/op-image-sampled-image.asm.frag new file mode 100644 index 000000000..590e503c5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/op-image-sampled-image.asm.frag @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct push_cb +{ + float4 cb0[1]; +}; + +struct main0_out +{ + float4 o0 [[color(0)]]; +}; + +fragment main0_out main0(constant push_cb& _19 [[buffer(0)]], texture2d<float> t0 [[texture(2)]], sampler dummy_sampler [[sampler(4)]]) +{ + main0_out out = {}; + out.o0 = t0.read(uint2(as_type(_19.cb0[0u].zw)) + uint2(int2(-1, -2)), as_type(0.0)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..3858f6dac --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct Registers +{ + float foo; +}; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant Registers& registers [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = 10.0 + registers.foo; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/phi-loop-variable.asm.frag 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/phi-loop-variable.asm.frag new file mode 100644 index 000000000..92ac1d9f8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/phi-loop-variable.asm.frag @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +fragment void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/srem.asm.frag new file mode 100644 index 000000000..f0cdd574d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/srem.asm.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int4 vA [[user(locn0)]]; + int4 vB [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = float4(in.vA - in.vB * (in.vA / in.vB)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..dd308c32a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = uTexture.read(uint2(int2(gl_FragCoord.xy)), 0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/undef-variable-store.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/undef-variable-store.asm.frag new file mode 100644 index 000000000..a5380c51d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/undef-variable-store.asm.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 _entryPointOutput [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out._entryPointOutput = float4(1.0, 1.0, 0.0, 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unknown-depth-state.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unknown-depth-state.asm.frag new file mode 100644 index 000000000..e8a88623a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unknown-depth-state.asm.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d uShadow [[texture(0)]], depth2d uTexture [[texture(1)]], sampler uShadowSmplr [[sampler(0)]], sampler uSampler [[sampler(2)]]) +{ + main0_out out = {}; + out.FragColor = uShadow.sample_compare(uShadowSmplr, in.vUV.xy, in.vUV.z) + uTexture.sample_compare(uSampler, in.vUV.xy, in.vUV.z); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unord-relational-op.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unord-relational-op.asm.frag new file mode 100644 index 000000000..aee290f5a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unord-relational-op.asm.frag @@ -0,0 +1,22 @@ +#include +#include + +using 
namespace metal; + +constant float a_tmp [[function_constant(1)]]; +constant float a = is_function_constant_defined(a_tmp) ? a_tmp : 1.0; +constant float b_tmp [[function_constant(2)]]; +constant float b = is_function_constant_defined(b_tmp) ? b_tmp : 2.0; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(a + b); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..9a9baef57 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/unreachable.asm.frag @@ -0,0 +1,36 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int counter [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + bool _29; + for (;;) + { + _29 = in.counter == 10; + if (_29) + { + break; + } + else + { + break; + } + } + bool4 _35 = bool4(_29); + out.FragColor = float4(_35.x ? float4(10.0).x : float4(30.0).x, _35.y ? float4(10.0).y : float4(30.0).y, _35.z ? float4(10.0).z : float4(30.0).z, _35.w ? float4(10.0).w : float4(30.0).w); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag new file mode 100644 index 000000000..ee1926a9a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag @@ -0,0 +1,294 @@ +#include +#include + +using namespace metal; + +struct _6 +{ + float4 _m0; + float _m1; + float4 _m2; +}; + +struct _10 +{ + float3 _m0; + packed_float3 _m1; + float _m2; + packed_float3 _m3; + float _m4; + packed_float3 _m5; + float _m6; + packed_float3 _m7; + float _m8; + packed_float3 _m9; + float _m10; + packed_float3 _m11; + float _m12; + float2 _m13; + float2 _m14; + packed_float3 _m15; + float _m16; + float _m17; + float _m18; + float _m19; + float _m20; + float4 _m21; + float4 _m22; + float4x4 _m23; + float4 _m24; +}; + +struct _18 +{ + float4x4 _m0; + float4x4 _m1; + float4x4 _m2; + float4x4 _m3; + float4 _m4; + float4 _m5; + float _m6; + float _m7; + float _m8; + float _m9; + packed_float3 _m10; + float _m11; + packed_float3 _m12; + float _m13; + packed_float3 _m14; + float _m15; + packed_float3 _m16; + float _m17; + float _m18; + float _m19; + float2 _m20; + float2 _m21; + float2 _m22; + float4 _m23; + float2 _m24; + float2 _m25; + float2 _m26; + char pad27[8]; + packed_float3 _m27; + float _m28; + float _m29; + float _m30; + float _m31; + float _m32; + float2 _m33; + float _m34; + float _m35; + float3 _m36; + float4x4 _m37[2]; + float4 _m38[2]; +}; + +struct _28 +{ + float4 _m0; +}; + +constant _28 _74 = {}; + +struct main0_out +{ + float4 m_5 [[color(0)]]; +}; + +fragment main0_out main0(constant _6& _7 [[buffer(0)]], constant _18& _19 [[buffer(1)]], constant _10& _11 [[buffer(2)]], texture2d _14 [[texture(4)]], texture2d _12 [[texture(13)]], texture2d _8 [[texture(14)]], sampler _15 [[sampler(3)]], sampler _13 [[sampler(5)]], sampler _9 [[sampler(6)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + float2 _82 = gl_FragCoord.xy * _19._m23.xy; + float4 _88 = _7._m2 * _7._m0.xyxy; + float2 _95 = _88.xy; + float2 _96 = _88.zw; + float2 _97 = fast::clamp(_82 + (float2(0.0, -2.0) * _7._m0.xy), _95, _96); + 
float3 _109 = float3(_11._m5) * fast::clamp(_8.sample(_9, _97, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _113 = _12.sample(_13, _97, level(0.0)); + float _114 = _113.y; + float3 _129; + if (_114 > 0.0) + { + _129 = _109 + (_14.sample(_15, _97, level(0.0)).xyz * fast::clamp(_114 * _113.z, 0.0, 1.0)); + } + else + { + _129 = _109; + } + float3 _130 = _129 * 0.5; + float2 _144 = fast::clamp(_82 + (float2(-1.0) * _7._m0.xy), _95, _96); + float3 _156 = float3(_11._m5) * fast::clamp(_8.sample(_9, _144, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _160 = _12.sample(_13, _144, level(0.0)); + float _161 = _160.y; + float3 _176; + if (_161 > 0.0) + { + _176 = _156 + (_14.sample(_15, _144, level(0.0)).xyz * fast::clamp(_161 * _160.z, 0.0, 1.0)); + } + else + { + _176 = _156; + } + float3 _177 = _176 * 0.5; + float2 _191 = fast::clamp(_82 + (float2(0.0, -1.0) * _7._m0.xy), _95, _96); + float3 _203 = float3(_11._m5) * fast::clamp(_8.sample(_9, _191, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _207 = _12.sample(_13, _191, level(0.0)); + float _208 = _207.y; + float3 _223; + if (_208 > 0.0) + { + _223 = _203 + (_14.sample(_15, _191, level(0.0)).xyz * fast::clamp(_208 * _207.z, 0.0, 1.0)); + } + else + { + _223 = _203; + } + float3 _224 = _223 * 0.75; + float2 _238 = fast::clamp(_82 + (float2(1.0, -1.0) * _7._m0.xy), _95, _96); + float3 _250 = float3(_11._m5) * fast::clamp(_8.sample(_9, _238, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _254 = _12.sample(_13, _238, level(0.0)); + float _255 = _254.y; + float3 _270; + if (_255 > 0.0) + { + _270 = _250 + (_14.sample(_15, _238, level(0.0)).xyz * fast::clamp(_255 * _254.z, 0.0, 1.0)); + } + else + { + _270 = _250; + } + float3 _271 = _270 * 0.5; + float2 _285 = fast::clamp(_82 + (float2(-2.0, 0.0) * _7._m0.xy), _95, _96); + float3 _297 = float3(_11._m5) * fast::clamp(_8.sample(_9, _285, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _301 = _12.sample(_13, _285, level(0.0)); + float _302 = _301.y; + float3 _317; + if (_302 > 0.0) + { + _317 = _297 + (_14.sample(_15, _285, level(0.0)).xyz * fast::clamp(_302 * _301.z, 0.0, 1.0)); + } + else + { + _317 = _297; + } + float3 _318 = _317 * 0.5; + float2 _332 = fast::clamp(_82 + (float2(-1.0, 0.0) * _7._m0.xy), _95, _96); + float3 _344 = float3(_11._m5) * fast::clamp(_8.sample(_9, _332, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _348 = _12.sample(_13, _332, level(0.0)); + float _349 = _348.y; + float3 _364; + if (_349 > 0.0) + { + _364 = _344 + (_14.sample(_15, _332, level(0.0)).xyz * fast::clamp(_349 * _348.z, 0.0, 1.0)); + } + else + { + _364 = _344; + } + float3 _365 = _364 * 0.75; + float2 _379 = fast::clamp(_82, _95, _96); + float3 _391 = float3(_11._m5) * fast::clamp(_8.sample(_9, _379, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _395 = _12.sample(_13, _379, level(0.0)); + float _396 = _395.y; + float3 _411; + if (_396 > 0.0) + { + _411 = _391 + (_14.sample(_15, _379, level(0.0)).xyz * fast::clamp(_396 * _395.z, 0.0, 1.0)); + } + else + { + _411 = _391; + } + float3 _412 = _411 * 1.0; + float2 _426 = fast::clamp(_82 + (float2(1.0, 0.0) * _7._m0.xy), _95, _96); + float3 _438 = float3(_11._m5) * fast::clamp(_8.sample(_9, _426, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _442 = _12.sample(_13, _426, level(0.0)); + float _443 = _442.y; + float3 _458; + if (_443 > 0.0) + { + _458 = _438 + (_14.sample(_15, _426, level(0.0)).xyz * fast::clamp(_443 * _442.z, 0.0, 1.0)); + } + else + { + _458 = _438; + } + float3 _459 = _458 * 0.75; + float2 _473 = fast::clamp(_82 + (float2(2.0, 0.0) * _7._m0.xy), _95, _96); + float3 _485 
= float3(_11._m5) * fast::clamp(_8.sample(_9, _473, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _489 = _12.sample(_13, _473, level(0.0)); + float _490 = _489.y; + float3 _505; + if (_490 > 0.0) + { + _505 = _485 + (_14.sample(_15, _473, level(0.0)).xyz * fast::clamp(_490 * _489.z, 0.0, 1.0)); + } + else + { + _505 = _485; + } + float3 _506 = _505 * 0.5; + float2 _520 = fast::clamp(_82 + (float2(-1.0, 1.0) * _7._m0.xy), _95, _96); + float3 _532 = float3(_11._m5) * fast::clamp(_8.sample(_9, _520, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _536 = _12.sample(_13, _520, level(0.0)); + float _537 = _536.y; + float3 _552; + if (_537 > 0.0) + { + _552 = _532 + (_14.sample(_15, _520, level(0.0)).xyz * fast::clamp(_537 * _536.z, 0.0, 1.0)); + } + else + { + _552 = _532; + } + float3 _553 = _552 * 0.5; + float2 _567 = fast::clamp(_82 + (float2(0.0, 1.0) * _7._m0.xy), _95, _96); + float3 _579 = float3(_11._m5) * fast::clamp(_8.sample(_9, _567, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _583 = _12.sample(_13, _567, level(0.0)); + float _584 = _583.y; + float3 _599; + if (_584 > 0.0) + { + _599 = _579 + (_14.sample(_15, _567, level(0.0)).xyz * fast::clamp(_584 * _583.z, 0.0, 1.0)); + } + else + { + _599 = _579; + } + float3 _600 = _599 * 0.75; + float2 _614 = fast::clamp(_82 + _7._m0.xy, _95, _96); + float3 _626 = float3(_11._m5) * fast::clamp(_8.sample(_9, _614, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _630 = _12.sample(_13, _614, level(0.0)); + float _631 = _630.y; + float3 _646; + if (_631 > 0.0) + { + _646 = _626 + (_14.sample(_15, _614, level(0.0)).xyz * fast::clamp(_631 * _630.z, 0.0, 1.0)); + } + else + { + _646 = _626; + } + float3 _647 = _646 * 0.5; + float2 _661 = fast::clamp(_82 + (float2(0.0, 2.0) * _7._m0.xy), _95, _96); + float3 _673 = float3(_11._m5) * fast::clamp(_8.sample(_9, _661, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _677 = _12.sample(_13, _661, level(0.0)); + float _678 = _677.y; + float3 _693; + if (_678 > 0.0) + { + _693 = _673 + (_14.sample(_15, _661, level(0.0)).xyz * fast::clamp(_678 * _677.z, 0.0, 1.0)); + } + else + { + _693 = _673; + } + float3 _702 = ((((((((((((_130.xyz + _177).xyz + _224).xyz + _271).xyz + _318).xyz + _365).xyz + _412).xyz + _459).xyz + _506).xyz + _553).xyz + _600).xyz + _647).xyz + (_693 * 0.5)).xyz * float3(0.125); + _28 _704 = _74; + _704._m0 = float4(_702.x, _702.y, _702.z, float4(0.0).w); + _28 _705 = _704; + _705._m0.w = 1.0; + out.m_5 = _705._m0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/packing-test.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/packing-test.asm.vert new file mode 100644 index 000000000..9e024c209 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/packing-test.asm.vert @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +vertex void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert new file mode 100644 index 000000000..b0852f0be --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +constant int _7_tmp [[function_constant(201)]]; +constant int _7 = is_function_constant_defined(_7_tmp) ? _7_tmp : -10; +constant uint _8_tmp [[function_constant(202)]]; +constant uint _8 = is_function_constant_defined(_8_tmp) ? 
_8_tmp : 100u; +constant int _20 = (_7 + 2); +constant uint _25 = (_8 % 5u); +constant int4 _30 = int4(20, 30, _20, _20); +constant int2 _32 = int2(_30.y, _30.x); +constant int _33 = _30.y; + +struct main0_out +{ + int m_4 [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +vertex main0_out main0() +{ + main0_out out = {}; + float4 _63 = float4(0.0); + _63.y = float(_20); + float4 _66 = _63; + _66.z = float(_25); + float4 _52 = _66 + float4(_30); + float2 _56 = _52.xy + float2(_32); + out.gl_Position = float4(_56.x, _56.y, _52.z, _52.w); + out.m_4 = _33; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..d453aadef --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(uint gl_VertexIndex [[vertex_id]], uint gl_InstanceIndex [[instance_id]]) +{ + main0_out out = {}; + out.gl_Position = float4(float(gl_VertexIndex + gl_InstanceIndex)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/access-private-workgroup-in-function.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/access-private-workgroup-in-function.comp new file mode 100644 index 000000000..a4ba60c3b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/access-private-workgroup-in-function.comp @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +kernel void main0(uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/atomic.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/atomic.comp new file mode 100644 index 000000000..17d0a6528 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/atomic.comp @@ -0,0 +1,68 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct SSBO +{ + uint u32; + int i32; +}; + +kernel void main0(device SSBO& ssbo [[buffer(2)]]) +{ + threadgroup uint shared_u32; + threadgroup int shared_i32; + uint _16 = atomic_fetch_add_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _18 = atomic_fetch_or_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _20 = atomic_fetch_xor_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _22 = atomic_fetch_and_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _24 = atomic_fetch_min_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _26 = atomic_fetch_max_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _28 = atomic_exchange_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _32; + do + { + _32 = 10u; + } while (!atomic_compare_exchange_weak_explicit((volatile device atomic_uint*)&ssbo.u32, &_32, 2u, memory_order_relaxed, memory_order_relaxed)); + int _36 = atomic_fetch_add_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _38 = atomic_fetch_or_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _40 = atomic_fetch_xor_explicit((volatile 
device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _42 = atomic_fetch_and_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _44 = atomic_fetch_min_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _46 = atomic_fetch_max_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _48 = atomic_exchange_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _52; + do + { + _52 = 10; + } while (!atomic_compare_exchange_weak_explicit((volatile device atomic_int*)&ssbo.i32, &_52, 2, memory_order_relaxed, memory_order_relaxed)); + uint _57 = atomic_fetch_add_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _58 = atomic_fetch_or_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _59 = atomic_fetch_xor_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _60 = atomic_fetch_and_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _61 = atomic_fetch_min_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _62 = atomic_fetch_max_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _63 = atomic_exchange_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _64; + do + { + _64 = 10u; + } while (!atomic_compare_exchange_weak_explicit((volatile threadgroup atomic_uint*)&shared_u32, &_64, 2u, memory_order_relaxed, memory_order_relaxed)); + int _65 = atomic_fetch_add_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _66 = atomic_fetch_or_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _67 = atomic_fetch_xor_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _68 = atomic_fetch_and_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _69 = atomic_fetch_min_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _70 = atomic_fetch_max_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _71 = atomic_exchange_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _72; + do + { + _72 = 10; + } while (!atomic_compare_exchange_weak_explicit((volatile threadgroup atomic_int*)&shared_i32, &_72, 2, memory_order_relaxed, memory_order_relaxed)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/barriers.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/barriers.comp new file mode 100644 index 000000000..23a19914c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/barriers.comp @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +kernel void main0() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_none); + threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_none); + threadgroup_barrier(mem_flags::mem_threadgroup); + threadgroup_barrier(mem_flags::mem_threadgroup); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/basic.comp 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/basic.comp new file mode 100644 index 000000000..22ec74196 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/basic.comp @@ -0,0 +1,33 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +struct SSBO3 +{ + uint counter; +}; + +kernel void main0(const device SSBO& _23 [[buffer(0)]], device SSBO2& _45 [[buffer(1)]], device SSBO3& _48 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 _29 = _23.in_data[gl_GlobalInvocationID.x]; + if (dot(_29, float4(1.0, 5.0, 6.0, 2.0)) > 8.19999980926513671875) + { + uint _52 = atomic_fetch_add_explicit((volatile device atomic_uint*)&_48.counter, 1u, memory_order_relaxed); + _45.out_data[_52] = _29; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/bitcast-16bit-1.invalid.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/bitcast-16bit-1.invalid.comp new file mode 100644 index 000000000..5671cd7cd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/bitcast-16bit-1.invalid.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct SSBO0 +{ + short4 inputs[1]; +}; + +struct SSBO1 +{ + int4 outputs[1]; +}; + +kernel void main0(device SSBO0& _25 [[buffer(0)]], device SSBO1& _39 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _39.outputs[gl_GlobalInvocationID.x].x = int(as_type(as_type(_25.inputs[gl_GlobalInvocationID.x].xy) + half2(half(1)))); + _39.outputs[gl_GlobalInvocationID.x].y = as_type(_25.inputs[gl_GlobalInvocationID.x].zw); + _39.outputs[gl_GlobalInvocationID.x].z = int(as_type(ushort2(_25.inputs[gl_GlobalInvocationID.x].xy))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/bitcast-16bit-2.invalid.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/bitcast-16bit-2.invalid.comp new file mode 100644 index 000000000..e7c20892a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/bitcast-16bit-2.invalid.comp @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct SSBO1 +{ + short4 outputs[1]; +}; + +struct SSBO0 +{ + int4 inputs[1]; +}; + +struct UBO +{ + half4 const0; +}; + +kernel void main0(device SSBO0& _29 [[buffer(0)]], device SSBO1& _21 [[buffer(1)]], constant UBO& _40 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + short2 _47 = as_type(_29.inputs[gl_GlobalInvocationID.x].x) + as_type(_40.const0.xy); + _21.outputs[gl_GlobalInvocationID.x] = short4(_47.x, _47.y, _21.outputs[gl_GlobalInvocationID.x].z, _21.outputs[gl_GlobalInvocationID.x].w); + short2 _66 = short2(as_type(uint(_29.inputs[gl_GlobalInvocationID.x].y)) - as_type(_40.const0.zw)); + _21.outputs[gl_GlobalInvocationID.x] = short4(_21.outputs[gl_GlobalInvocationID.x].x, _21.outputs[gl_GlobalInvocationID.x].y, _66.x, _66.y); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/builtins.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/builtins.comp new file mode 100644 index 000000000..189bb1b75 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/builtins.comp @@ -0,0 +1,11 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(8u, 4u, 2u); + +kernel void main0(uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint3 gl_GlobalInvocationID 
[[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]], uint3 gl_NumWorkGroups [[threadgroups_per_grid]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]]) +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/cfg-preserve-parameter.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/cfg-preserve-parameter.comp new file mode 100644 index 000000000..59fc03a75 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/cfg-preserve-parameter.comp @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +kernel void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/coherent-block.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/coherent-block.comp new file mode 100644 index 000000000..963574acd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/coherent-block.comp @@ -0,0 +1,15 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 value; +}; + +kernel void main0(device SSBO& _10 [[buffer(1)]]) +{ + _10.value = float4(20.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/coherent-image.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/coherent-image.comp new file mode 100644 index 000000000..827a24712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/coherent-image.comp @@ -0,0 +1,15 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + int4 value; +}; + +kernel void main0(device SSBO& _10 [[buffer(1)]], texture2d uImage [[texture(3)]]) +{ + _10.value = uImage.read(uint2(int2(10))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/composite-array-initialization.comp new file mode 100644 index 000000000..9e47e8971 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/composite-array-initialization.comp @@ -0,0 +1,53 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float X_tmp [[function_constant(0)]]; +constant float X = is_function_constant_defined(X_tmp) ? X_tmp : 4.0; +constant uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u); + +struct Data +{ + float a; + float b; +}; + +struct Data_1 +{ + float a; + float b; +}; + +struct SSBO +{ + Data_1 outdata[1]; +}; + +constant Data _25[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } }; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
+template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +kernel void main0(device SSBO& _53 [[buffer(0)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + Data data[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } }; + Data _31[2] = { Data{ X, 2.0 }, Data{ 3.0, 5.0 } }; + Data data2[2]; + spvArrayCopyFromStack1(data2, _31); + _53.outdata[gl_WorkGroupID.x].a = data[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a; + _53.outdata[gl_WorkGroupID.x].b = data[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/composite-construct.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/composite-construct.comp new file mode 100644 index 000000000..6d44fc57b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/composite-construct.comp @@ -0,0 +1,39 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO0 +{ + float4 as[1]; +}; + +struct SSBO1 +{ + float4 bs[1]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +kernel void main0(device SSBO0& _16 [[buffer(0)]], device SSBO1& _32 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + float4 _37[2] = { _16.as[gl_GlobalInvocationID.x], _32.bs[gl_GlobalInvocationID.x] }; + float4 values[2]; + spvArrayCopyFromStack1(values, _37); + _16.as[0] = values[gl_LocalInvocationIndex]; + _32.bs[1] = float4(40.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/copy-array-of-arrays.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/copy-array-of-arrays.comp new file mode 100644 index 000000000..ea9693ce4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/copy-array-of-arrays.comp @@ -0,0 +1,18 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct BUF +{ + int a; + float b; + float c; +}; + +kernel void main0(device BUF& o [[buffer(0)]]) +{ + o.a = 4; + o.b = o.c; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/culling.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/culling.comp new file mode 100644 index 000000000..9eac8c9ff --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/culling.comp @@ -0,0 +1,35 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include <metal_stdlib> +#include <simd/simd.h> +#include <metal_atomic> + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +struct SSBO +{ + float in_data[1]; +}; + +struct SSBO2 +{ + float out_data[1]; +}; + +struct SSBO3 +{ + uint count; +}; + +kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _38 [[buffer(1)]], device SSBO3& _41 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float _28 = _22.in_data[gl_GlobalInvocationID.x]; + if (_28 > 12.0) + { + uint _45 = 
atomic_fetch_add_explicit((volatile device atomic_uint*)&_41.count, 1u, memory_order_relaxed); + _38.out_data[_45] = _28; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/defer-parens.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/defer-parens.comp new file mode 100644 index 000000000..69a8aab92 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/defer-parens.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 data; + int index; +}; + +kernel void main0(device SSBO& _13 [[buffer(0)]]) +{ + float4 _17 = _13.data; + float2 _28 = _17.yz + float2(10.0); + _13.data = float4(_17.x, _28, _17.w); + _13.data = (_17 + _17) + _17; + _13.data = _28.xxyy; + _13.data = float4(_28.y); + _13.data = float4((_17.zw + float2(10.0))[_13.index]); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/dowhile.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/dowhile.comp new file mode 100644 index 000000000..3ebafe0fd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/dowhile.comp @@ -0,0 +1,40 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4x4 mvp; + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _28 [[buffer(0)]], device SSBO2& _52 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 _57; + int _58; + _58 = 0; + _57 = _28.in_data[gl_GlobalInvocationID.x]; + float4 _42; + for (;;) + { + _42 = _28.mvp * _57; + int _44 = _58 + 1; + if (_44 < 16) + { + _58 = _44; + _57 = _42; + } + else + { + break; + } + } + _52.out_data[gl_GlobalInvocationID.x] = _42; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/functions.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/functions.comp new file mode 100644 index 000000000..59fc03a75 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/functions.comp @@ -0,0 +1,9 @@ +#include +#include + +using namespace metal; + +kernel void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp new file mode 100644 index 000000000..fe0212ec3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_GlobalInvocationID.x] = mod(myStorage.b[gl_GlobalInvocationID.x] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/global-invocation-id.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/global-invocation-id.comp new file mode 100644 index 000000000..fe0212ec3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/global-invocation-id.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace 
metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_GlobalInvocationID.x] = mod(myStorage.b[gl_GlobalInvocationID.x] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/image-cube-array-load-store.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/image-cube-array-load-store.comp new file mode 100644 index 000000000..1eeaf87cf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/image-cube-array-load-store.comp @@ -0,0 +1,10 @@ +#include +#include + +using namespace metal; + +kernel void main0(texturecube_array uImageIn [[texture(0)]], texturecube_array uImageOut [[texture(1)]]) +{ + uImageOut.write(uImageIn.read(uint2(int3(9, 7, 11).xy), uint(int3(9, 7, 11).z) % 6u, uint(int3(9, 7, 11).z) / 6u), uint2(int3(9, 7, 11).xy), uint(int3(9, 7, 11).z) % 6u, uint(int3(9, 7, 11).z) / 6u); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/image.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/image.comp new file mode 100644 index 000000000..447732dd2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/image.comp @@ -0,0 +1,11 @@ +#include +#include + +using namespace metal; + +kernel void main0(texture2d uImageIn [[texture(0)]], texture2d uImageOut [[texture(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + int2 _23 = int2(gl_GlobalInvocationID.xy); + uImageOut.write(uImageIn.read(uint2((_23 + int2(uImageIn.get_width(), uImageIn.get_height())))), uint2(_23)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/insert.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/insert.comp new file mode 100644 index 000000000..1418ce35b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/insert.comp @@ -0,0 +1,26 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 out_data[1]; +}; + +constant float4 _52 = {}; + +kernel void main0(device SSBO& _27 [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 _45 = _52; + _45.x = 10.0; + float4 _47 = _45; + _47.y = 30.0; + float4 _49 = _47; + _49.z = 70.0; + float4 _51 = _49; + _51.w = 90.0; + _27.out_data[gl_GlobalInvocationID.x] = _51; + _27.out_data[gl_GlobalInvocationID.x].y = 20.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/inverse.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/inverse.comp new file mode 100644 index 000000000..f2f499b91 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/inverse.comp @@ -0,0 +1,123 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct MatrixOut +{ + float2x2 m2out; + float3x3 m3out; + float4x4 m4out; +}; + +struct MatrixIn +{ + float2x2 m2in; + float3x3 m3in; + float4x4 m4in; +}; + +// Returns the determinant of a 2x2 matrix. +inline float spvDet2x2(float a1, float a2, float b1, float b2) +{ + return a1 * b2 - b1 * a2; +} + +// Returns the determinant of a 3x3 matrix. 
+inline float spvDet3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3) +{ + return a1 * spvDet2x2(b2, b3, c2, c3) - b1 * spvDet2x2(a2, a3, c2, c3) + c1 * spvDet2x2(a2, a3, b2, b3); +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float4x4 spvInverse4x4(float4x4 m) +{ + float4x4 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = spvDet3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][1] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][2] = spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]); + adj[0][3] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]); + + adj[1][0] = -spvDet3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][1] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][2] = -spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]); + adj[1][3] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]); + + adj[2][0] = spvDet3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][1] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][2] = spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]); + adj[2][3] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]); + + adj[3][0] = -spvDet3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][1] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][2] = -spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]); + adj[3][3] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] * m[3][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float3x3 spvInverse3x3(float3x3 m) +{ + float3x3 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. 
+ adj[0][0] = spvDet2x2(m[1][1], m[1][2], m[2][1], m[2][2]); + adj[0][1] = -spvDet2x2(m[0][1], m[0][2], m[2][1], m[2][2]); + adj[0][2] = spvDet2x2(m[0][1], m[0][2], m[1][1], m[1][2]); + + adj[1][0] = -spvDet2x2(m[1][0], m[1][2], m[2][0], m[2][2]); + adj[1][1] = spvDet2x2(m[0][0], m[0][2], m[2][0], m[2][2]); + adj[1][2] = -spvDet2x2(m[0][0], m[0][2], m[1][0], m[1][2]); + + adj[2][0] = spvDet2x2(m[1][0], m[1][1], m[2][0], m[2][1]); + adj[2][1] = -spvDet2x2(m[0][0], m[0][1], m[2][0], m[2][1]); + adj[2][2] = spvDet2x2(m[0][0], m[0][1], m[1][0], m[1][1]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float2x2 spvInverse2x2(float2x2 m) +{ + float2x2 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = m[1][1]; + adj[0][1] = -m[0][1]; + + adj[1][0] = -m[1][0]; + adj[1][1] = m[0][0]; + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +kernel void main0(device MatrixOut& _15 [[buffer(0)]], const device MatrixIn& _20 [[buffer(1)]]) +{ + _15.m2out = spvInverse2x2(_20.m2in); + _15.m3out = spvInverse3x3(_20.m3in); + _15.m4out = spvInverse4x4(_20.m4in); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/local-invocation-id.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/local-invocation-id.comp new file mode 100644 index 000000000..772e5e0d8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/local-invocation-id.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_LocalInvocationID.x] = mod(myStorage.b[gl_LocalInvocationID.x] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/local-invocation-index.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/local-invocation-index.comp new file mode 100644 index 000000000..41adbdca5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/local-invocation-index.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); 
+} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_LocalInvocationIndex] = mod(myStorage.b[gl_LocalInvocationIndex] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/mat3.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/mat3.comp new file mode 100644 index 000000000..adf7e9496 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/mat3.comp @@ -0,0 +1,15 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO2 +{ + float3x3 out_data[1]; +}; + +kernel void main0(device SSBO2& _22 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _22.out_data[gl_GlobalInvocationID.x] = float3x3(float3(10.0), float3(20.0), float3(40.0)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/mod.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/mod.comp new file mode 100644 index 000000000..8574f87b7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/mod.comp @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO +{ + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template<typename Tx, typename Ty> +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(const device SSBO& _23 [[buffer(0)]], device SSBO2& _33 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _33.out_data[gl_GlobalInvocationID.x] = mod(_23.in_data[gl_GlobalInvocationID.x], _33.out_data[gl_GlobalInvocationID.x]); + _33.out_data[gl_GlobalInvocationID.x] = as_type<float4>(as_type<uint4>(_23.in_data[gl_GlobalInvocationID.x]) % as_type<uint4>(_33.out_data[gl_GlobalInvocationID.x])); + _33.out_data[gl_GlobalInvocationID.x] = as_type<float4>(as_type<int4>(_23.in_data[gl_GlobalInvocationID.x]) % as_type<int4>(_33.out_data[gl_GlobalInvocationID.x])); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/modf.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/modf.comp new file mode 100644 index 000000000..39e402337 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/modf.comp @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO +{ + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _23 [[buffer(0)]], device SSBO2& _35 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 i; + float4 _31 = modf(_23.in_data[gl_GlobalInvocationID.x], i); + _35.out_data[gl_GlobalInvocationID.x] = _31; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/packing-test-1.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/packing-test-1.comp new file mode 100644 index 000000000..44d2b2d74 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/packing-test-1.comp @@ -0,0 +1,28 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u); + +struct T1 +{ + packed_float3 a; + float b; +}; + +struct Buffer0 +{ + T1 buf0[1]; +}; + +struct Buffer1 +{ + float buf1[1]; +}; + +kernel void main0(device Buffer0& _15 [[buffer(1)]], device Buffer1& _34 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _34.buf1[gl_GlobalInvocationID.x] = _15.buf0[0].b;
+} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/packing-test-2.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/packing-test-2.comp new file mode 100644 index 000000000..4cc9c673a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/packing-test-2.comp @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u); + +struct T1 +{ + packed_float3 a; + float b; +}; + +struct Buffer0 +{ + T1 buf0[1]; +}; + +struct Buffer1 +{ + float buf1[1]; +}; + +kernel void main0(device Buffer0& _14 [[buffer(1)]], device Buffer1& _24 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _24.buf1[gl_GlobalInvocationID.x] = _14.buf0[0].b; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/read-write-only.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/read-write-only.comp new file mode 100644 index 000000000..42c625092 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/read-write-only.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct SSBO2 +{ + float4 data4; + float4 data5; +}; + +struct SSBO0 +{ + float4 data0; + float4 data1; +}; + +struct SSBO1 +{ + float4 data2; + float4 data3; +}; + +kernel void main0(const device SSBO0& _15 [[buffer(0)]], device SSBO1& _21 [[buffer(1)]], device SSBO2& _10 [[buffer(2)]]) +{ + _10.data4 = _15.data0 + _21.data2; + _10.data5 = _15.data1 + _21.data3; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/rmw-matrix.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/rmw-matrix.comp new file mode 100644 index 000000000..150db7ede --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/rmw-matrix.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float a; + float4 b; + float4x4 c; + float a1; + float4 b1; + float4x4 c1; +}; + +kernel void main0(device SSBO& _11 [[buffer(0)]]) +{ + _11.a *= _11.a1; + _11.b *= _11.b1; + _11.c = _11.c * _11.c1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/rmw-opt.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/rmw-opt.comp new file mode 100644 index 000000000..05e1f6f28 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/rmw-opt.comp @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + int a; +}; + +kernel void main0(device SSBO& _9 [[buffer(0)]]) +{ + _9.a += 10; + _9.a -= 10; + _9.a *= 10; + _9.a /= 10; + _9.a = _9.a << 2; + _9.a = _9.a >> 3; + _9.a &= 40; + _9.a ^= 10; + _9.a %= 40; + _9.a |= 1; + _9.a = 0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/shared-array-of-arrays.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/shared-array-of-arrays.comp new file mode 100644 index 000000000..3884c22f5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/shared-array-of-arrays.comp @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 4u, 1u); + +struct SSBO +{ + float out_data[1]; +}; + +kernel void main0(device SSBO& _67 [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + threadgroup float foo[4][4]; + foo[gl_LocalInvocationID.x][gl_LocalInvocationID.y] = float(gl_LocalInvocationIndex); + 
threadgroup_barrier(mem_flags::mem_threadgroup); + _67.out_data[gl_GlobalInvocationID.x] = ((foo[gl_LocalInvocationID.x][0] + foo[gl_LocalInvocationID.x][1]) + foo[gl_LocalInvocationID.x][2]) + foo[gl_LocalInvocationID.x][3]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/shared.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/shared.comp new file mode 100644 index 000000000..e58638b5c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/shared.comp @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +struct SSBO +{ + float in_data[1]; +}; + +struct SSBO2 +{ + float out_data[1]; +}; + +kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _44 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + threadgroup float sShared[4]; + sShared[gl_LocalInvocationIndex] = _22.in_data[gl_GlobalInvocationID.x]; + threadgroup_barrier(mem_flags::mem_threadgroup); + _44.out_data[gl_GlobalInvocationID.x] = sShared[3u - gl_LocalInvocationIndex]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/spec-constant-op-member-array.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/spec-constant-op-member-array.comp new file mode 100644 index 000000000..35ebe1592 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/spec-constant-op-member-array.comp @@ -0,0 +1,46 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 100 +#endif +constant int a = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 200 +#endif +constant int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 300 +#endif +constant int c = SPIRV_CROSS_CONSTANT_ID_2; +constant int _18 = (c + 50); +constant int e_tmp [[function_constant(3)]]; +constant int e = is_function_constant_defined(e_tmp) ? e_tmp : 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +struct SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[_18]; +}; + +kernel void main0(device SSBO& _22 [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/spec-constant-work-group-size.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/spec-constant-work-group-size.comp new file mode 100644 index 000000000..e46ab8939 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/spec-constant-work-group-size.comp @@ -0,0 +1,33 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 2 +#endif +constant int b = SPIRV_CROSS_CONSTANT_ID_1; +constant int a_tmp [[function_constant(0)]]; +constant int a = is_function_constant_defined(a_tmp) ? a_tmp : 1; +constant uint _21 = (uint(a) + 0u); +constant uint _22_tmp [[function_constant(10)]]; +constant uint _22 = is_function_constant_defined(_22_tmp) ? 
_22_tmp : 1u; +constant uint3 gl_WorkGroupSize = uint3(_22, 20u, 1u); +constant uint _27 = gl_WorkGroupSize.x; +constant uint _28 = (_21 + _27); +constant uint _29 = gl_WorkGroupSize.y; +constant uint _30 = (_28 + _29); +constant int _32 = (1 - a); + +struct SSBO +{ + int v[1]; +}; + +kernel void main0(device SSBO& _17 [[buffer(0)]]) +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + _17.v[_30] = b + spec_const_array_size[_32]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-layout.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-layout.comp new file mode 100644 index 000000000..e7bb53ece --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-layout.comp @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct Foo +{ + float4x4 m; +}; + +struct SSBO2 +{ + Foo out_data[1]; +}; + +struct SSBO +{ + Foo in_data[1]; +}; + +kernel void main0(const device SSBO& _30 [[buffer(0)]], device SSBO2& _23 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _23.out_data[gl_GlobalInvocationID.x].m = _30.in_data[gl_GlobalInvocationID.x].m * _30.in_data[gl_GlobalInvocationID.x].m; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-nested.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-nested.comp new file mode 100644 index 000000000..9abc498f4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-nested.comp @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct s1 +{ + int a; +}; + +struct s2 +{ + s1 b; +}; + +struct dstbuffer +{ + s2 test[1]; +}; + +kernel void main0(device dstbuffer& _19 [[buffer(1)]]) +{ + _19.test[0].b.a = 0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-packing.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-packing.comp new file mode 100644 index 000000000..2b37844fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/struct-packing.comp @@ -0,0 +1,149 @@ +#include +#include + +using namespace metal; + +typedef float3x2 packed_float2x3; + +struct S0 +{ + float2 a[1]; + float b; +}; + +struct S1 +{ + packed_float3 a; + float b; +}; + +struct S2 +{ + float3 a[1]; + float b; +}; + +struct S3 +{ + float2 a; + float b; +}; + +struct S4 +{ + float2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + char pad7[4]; + float m4; + S4 m3s[8]; +}; + +struct SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + char pad3[8]; + float2x2 m0; + float2x2 m1; + float2x3 m2[4]; + float3x2 m3; + float2x2 m4; + float2x2 m5[9]; + packed_float2x3 m6[4][2]; + char pad10[8]; + float3x2 m7; + char pad11[8]; + float array[1]; +}; + +struct S0_1 +{ + float2 a[1]; + float b; +}; + +struct S1_1 +{ + packed_float3 a; + float b; +}; + +struct S2_1 +{ + float3 a[1]; + float b; +}; + +struct S3_1 +{ + float2 a; + float b; +}; + +struct S4_1 +{ + float2 c; +}; + +struct Content_1 +{ + S0_1 m0s[1]; + S1_1 m1s[1]; + S2_1 m2s[1]; + S0_1 m0; + S1_1 m1; + S2_1 m2; + S3_1 m3; + char pad7[4]; + float m4; + S4_1 m3s[8]; +}; + +struct SSBO0 +{ + Content_1 content; + Content_1 content1[2]; + Content_1 content2; + float array[1]; +}; + +kernel void main0(device SSBO0& ssbo_140 [[buffer(0)]], device SSBO1& ssbo_430 [[buffer(1)]]) +{ + Content_1 _60 = ssbo_140.content; + ssbo_430.content.m0s[0].a[0] = _60.m0s[0].a[0]; + ssbo_430.content.m0s[0].b = _60.m0s[0].b; + 
ssbo_430.content.m1s[0].a = _60.m1s[0].a; + ssbo_430.content.m1s[0].b = _60.m1s[0].b; + ssbo_430.content.m2s[0].a[0] = _60.m2s[0].a[0]; + ssbo_430.content.m2s[0].b = _60.m2s[0].b; + ssbo_430.content.m0.a[0] = _60.m0.a[0]; + ssbo_430.content.m0.b = _60.m0.b; + ssbo_430.content.m1.a = _60.m1.a; + ssbo_430.content.m1.b = _60.m1.b; + ssbo_430.content.m2.a[0] = _60.m2.a[0]; + ssbo_430.content.m2.b = _60.m2.b; + ssbo_430.content.m3.a = _60.m3.a; + ssbo_430.content.m3.b = _60.m3.b; + ssbo_430.content.m4 = _60.m4; + ssbo_430.content.m3s[0].c = _60.m3s[0].c; + ssbo_430.content.m3s[1].c = _60.m3s[1].c; + ssbo_430.content.m3s[2].c = _60.m3s[2].c; + ssbo_430.content.m3s[3].c = _60.m3s[3].c; + ssbo_430.content.m3s[4].c = _60.m3s[4].c; + ssbo_430.content.m3s[5].c = _60.m3s[5].c; + ssbo_430.content.m3s[6].c = _60.m3s[6].c; + ssbo_430.content.m3s[7].c = _60.m3s[7].c; + ssbo_430.content.m1.a = ssbo_430.content.m3.a * ssbo_430.m6[1][1]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/torture-loop.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/torture-loop.comp new file mode 100644 index 000000000..4c367d3e6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/torture-loop.comp @@ -0,0 +1,42 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4x4 mvp; + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _24 [[buffer(0)]], device SSBO2& _89 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 _99; + _99 = _24.in_data[gl_GlobalInvocationID.x]; + for (int _93 = 0; (_93 + 1) < 10; ) + { + _99 *= 2.0; + _93 += 2; + continue; + } + float4 _98; + _98 = _99; + float4 _103; + for (uint _94 = 0u; _94 < 16u; _98 = _103, _94++) + { + _103 = _98; + for (uint _100 = 0u; _100 < 30u; ) + { + _103 = _24.mvp * _103; + _100++; + continue; + } + } + _89.out_data[gl_GlobalInvocationID.x] = _98; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/type-alias.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/type-alias.comp new file mode 100644 index 000000000..8a68933d8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/type-alias.comp @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct S0 +{ + float4 a; +}; + +struct SSBO0 +{ + S0 s0s[1]; +}; + +struct S1 +{ + float4 a; +}; + +struct SSBO1 +{ + S1 s1s[1]; +}; + +struct SSBO2 +{ + float4 outputs[1]; +}; + +kernel void main0(device SSBO0& _36 [[buffer(0)]], device SSBO1& _55 [[buffer(1)]], device SSBO2& _66 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _66.outputs[gl_GlobalInvocationID.x] = _36.s0s[gl_GlobalInvocationID.x].a + _55.s1s[gl_GlobalInvocationID.x].a; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/udiv.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/udiv.comp new file mode 100644 index 000000000..a298ecdb7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/udiv.comp @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +struct SSBO2 +{ + uint outputs[1]; +}; + +struct SSBO +{ + uint inputs[1]; +}; + +kernel void main0(device SSBO& _23 [[buffer(0)]], device SSBO2& _10 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _10.outputs[gl_GlobalInvocationID.x] = _23.inputs[gl_GlobalInvocationID.x] / 29u; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/writable-ssbo.comp 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/writable-ssbo.comp new file mode 100644 index 000000000..9dc53b6dd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/comp/writable-ssbo.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b = mod(myStorage.b + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp new file mode 100644 index 000000000..a37fe519a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp @@ -0,0 +1,177 @@ +#include +#include + +using namespace metal; + +struct SSBOUint +{ + uint a; + uint b; + uint c; + uint d; + uint2 a2; + uint2 b2; + uint2 c2; + uint2 d2; + uint3 a3; + uint3 b3; + uint3 c3; + uint3 d3; + uint4 a4; + uint4 b4; + uint4 c4; + uint4 d4; +}; + +struct ResType +{ + uint _m0; + uint _m1; +}; + +struct ResType_1 +{ + uint2 _m0; + uint2 _m1; +}; + +struct ResType_2 +{ + uint3 _m0; + uint3 _m1; +}; + +struct ResType_3 +{ + uint4 _m0; + uint4 _m1; +}; + +struct SSBOInt +{ + int a; + int b; + int c; + int d; + int2 a2; + int2 b2; + int2 c2; + int2 d2; + int3 a3; + int3 b3; + int3 c3; + int3 d3; + int4 a4; + int4 b4; + int4 c4; + int4 d4; +}; + +struct ResType_4 +{ + int _m0; + int _m1; +}; + +struct ResType_5 +{ + int2 _m0; + int2 _m1; +}; + +struct ResType_6 +{ + int3 _m0; + int3 _m1; +}; + +struct ResType_7 +{ + int4 _m0; + int4 _m1; +}; + +kernel void main0(device SSBOUint& u [[buffer(0)]], device SSBOInt& i [[buffer(1)]]) +{ + ResType _25; + _25._m0 = u.a + u.b; + _25._m1 = select(uint(1), uint(0), _25._m0 >= max(u.a, u.b)); + u.d = _25._m1; + u.c = _25._m0; + ResType_1 _40; + _40._m0 = u.a2 + u.b2; + _40._m1 = select(uint2(1), uint2(0), _40._m0 >= max(u.a2, u.b2)); + u.d2 = _40._m1; + u.c2 = _40._m0; + ResType_2 _55; + _55._m0 = u.a3 + u.b3; + _55._m1 = select(uint3(1), uint3(0), _55._m0 >= max(u.a3, u.b3)); + u.d3 = _55._m1; + u.c3 = _55._m0; + ResType_3 _70; + _70._m0 = u.a4 + u.b4; + _70._m1 = select(uint4(1), uint4(0), _70._m0 >= max(u.a4, u.b4)); + u.d4 = _70._m1; + u.c4 = _70._m0; + ResType _79; + _79._m0 = u.a - u.b; + _79._m1 = select(uint(1), uint(0), u.a >= u.b); + u.d = _79._m1; + u.c = _79._m0; + ResType_1 _88; + _88._m0 = u.a2 - u.b2; + _88._m1 = select(uint2(1), uint2(0), u.a2 >= u.b2); + u.d2 = _88._m1; + u.c2 = _88._m0; + ResType_2 _97; + _97._m0 = u.a3 - u.b3; + _97._m1 = select(uint3(1), uint3(0), u.a3 >= u.b3); + u.d3 = _97._m1; + u.c3 = _97._m0; + ResType_3 _106; + _106._m0 = u.a4 - u.b4; + _106._m1 = select(uint4(1), uint4(0), u.a4 >= u.b4); + u.d4 = _106._m1; + u.c4 = _106._m0; + ResType _116; + _116._m0 = u.a * u.b; + _116._m1 = mulhi(u.a, u.b); + u.d = _116._m0; + u.c = _116._m1; + ResType_1 _125; + _125._m0 = u.a2 * u.b2; + _125._m1 = mulhi(u.a2, u.b2); + u.d2 = _125._m0; + u.c2 = _125._m1; + ResType_2 _134; + _134._m0 = u.a3 * u.b3; + _134._m1 = mulhi(u.a3, u.b3); + u.d3 = _134._m0; + u.c3 = _134._m1; + ResType_3 _143; + _143._m0 = u.a4 * u.b4; + _143._m1 
= mulhi(u.a4, u.b4); + u.d4 = _143._m0; + u.c4 = _143._m1; + ResType_4 _160; + _160._m0 = i.a * i.b; + _160._m1 = mulhi(i.a, i.b); + i.d = _160._m0; + i.c = _160._m1; + ResType_5 _171; + _171._m0 = i.a2 * i.b2; + _171._m1 = mulhi(i.a2, i.b2); + i.d2 = _171._m0; + i.c2 = _171._m1; + ResType_6 _182; + _182._m0 = i.a3 * i.b3; + _182._m1 = mulhi(i.a3, i.b3); + i.d3 = _182._m0; + i.c3 = _182._m1; + ResType_7 _193; + _193._m0 = i.a4 * i.b4; + _193._m1 = mulhi(i.a4, i.b4); + i.d4 = _193._m0; + i.c4 = _193._m1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/image-ms.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/image-ms.desktop.frag new file mode 100644 index 000000000..3b461f0fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/image-ms.desktop.frag @@ -0,0 +1,11 @@ +#include +#include + +using namespace metal; + +fragment void main0(texture2d uImage [[texture(0)]], texture2d_array uImageArray [[texture(1)]], texture2d_ms uImageMS [[texture(2)]]) +{ + uImage.write(uImageMS.read(uint2(int2(1, 2)), 2), uint2(int2(2, 3))); + uImageArray.write(uImageArray.read(uint2(int3(1, 2, 4).xy), uint(int3(1, 2, 4).z)), uint2(int3(2, 3, 7).xy), uint(int3(2, 3, 7).z)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/query-levels.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/query-levels.desktop.frag new file mode 100644 index 000000000..922796b74 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/query-levels.desktop.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = float4(float(int(uSampler.get_num_mip_levels()))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag new file mode 100644 index 000000000..a6f06064a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d_ms uSampler [[texture(0)]], texture2d_ms uImage [[texture(2)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = float4(float(int(uSampler.get_num_samples()) + int(uImage.get_num_samples()))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/basic.desktop.sso.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/basic.desktop.sso.vert new file mode 100644 index 000000000..ffb435712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/basic.desktop.sso.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.uMVP * in.aVertex; + out.vNormal = 
in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert new file mode 100644 index 000000000..32f0d9aa0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + float gl_ClipDistance [[clip_distance]] [2]; + float gl_CullDistance[2]; +}; + +vertex main0_out main0() +{ + main0_out out = {}; + out.gl_Position = float4(10.0); + out.gl_ClipDistance[0] = 1.0; + out.gl_ClipDistance[1] = 4.0; + out.gl_CullDistance[0] = 4.0; + out.gl_CullDistance[1] = 9.0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert new file mode 100644 index 000000000..1d203ba98 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(uint gl_BaseVertex [[base_vertex]], uint gl_BaseInstance [[base_instance]]) +{ + main0_out out = {}; + out.gl_Position = float4(float(gl_BaseVertex), float(gl_BaseInstance), 0.0, 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/basic.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/basic.flatten.vert new file mode 100644 index 000000000..ffb435712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/basic.flatten.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.uMVP * in.aVertex; + out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/multiindex.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/multiindex.flatten.vert new file mode 100644 index 000000000..f4549abab --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/multiindex.flatten.vert @@ -0,0 +1,27 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4 Data[3][5]; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + int2 aIndex [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _20 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _20.Data[in.aIndex.x][in.aIndex.y]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/push-constant.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/push-constant.flatten.vert new file mode 100644 index 000000000..8f2e8c173 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/push-constant.flatten.vert @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +struct PushMe +{ + float4x4 MVP; + float2x2 Rot; + 
float Arr[4]; +}; + +struct main0_out +{ + float2 vRot [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float2 Rot [[attribute(0)]]; + float4 Pos [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant PushMe& registers [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = registers.MVP * in.Pos; + out.vRot = (registers.Rot * in.Rot) + float2(registers.Arr[2]); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/rowmajor.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/rowmajor.flatten.vert new file mode 100644 index 000000000..387fe0a83 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/rowmajor.flatten.vert @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVPR; + float4x4 uMVPC; + float2x4 uMVP; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = (_18.uMVPR * in.aVertex) + (in.aVertex * _18.uMVPC); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/struct.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/struct.flatten.vert new file mode 100644 index 000000000..d97a34a85 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/struct.flatten.vert @@ -0,0 +1,40 @@ +#include +#include + +using namespace metal; + +struct Light +{ + packed_float3 Position; + float Radius; + float4 Color; +}; + +struct UBO +{ + float4x4 uMVP; + Light light; +}; + +struct main0_out +{ + float4 vColor [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _18.uMVP * in.aVertex; + out.vColor = float4(0.0); + float3 _39 = in.aVertex.xyz - float3(_18.light.Position); + out.vColor += ((_18.light.Color * fast::clamp(1.0 - (length(_39) / _18.light.Radius), 0.0, 1.0)) * dot(in.aNormal, normalize(_39))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/swizzle.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/swizzle.flatten.vert new file mode 100644 index 000000000..52940411b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/swizzle.flatten.vert @@ -0,0 +1,47 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4 A; + float2 B0; + float2 B1; + float C0; + float3 C1; + packed_float3 D0; + float D1; + float E0; + float E1; + float E2; + float E3; + float F0; + float2 F1; + float F2; +}; + +struct main0_out +{ + float4 oA [[user(locn0)]]; + float4 oB [[user(locn1)]]; + float4 oC [[user(locn2)]]; + float4 oD [[user(locn3)]]; + float4 oE [[user(locn4)]]; + float4 oF [[user(locn5)]]; + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(constant UBO& _22 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = float4(0.0); + out.oA = _22.A; + out.oB = float4(_22.B0, _22.B1); + out.oC = float4(_22.C0, _22.C1) + float4(_22.C1.xy, _22.C1.z, _22.C0); + out.oD = float4(_22.D0[0], _22.D0[1], _22.D0[2], _22.D1) + float4(_22.D0[0], _22.D0[1], _22.D0[2u], _22.D1); + out.oE = float4(_22.E0, _22.E1, _22.E2, _22.E3); + out.oF = 
float4(_22.F0, _22.F1, _22.F2); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/types.flatten.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/types.flatten.frag new file mode 100644 index 000000000..540c5baeb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/flatten/types.flatten.frag @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct UBO1 +{ + int4 c; + int4 d; +}; + +struct UBO2 +{ + uint4 e; + uint4 f; +}; + +struct UBO0 +{ + float4 a; + float4 b; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant UBO0& _41 [[buffer(0)]], constant UBO1& _14 [[buffer(1)]], constant UBO2& _29 [[buffer(2)]]) +{ + main0_out out = {}; + out.FragColor = ((((float4(_14.c) + float4(_14.d)) + float4(_29.e)) + float4(_29.f)) + _41.a) + _41.b; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/16bit-constants.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/16bit-constants.frag new file mode 100644 index 000000000..2aae1e8aa --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/16bit-constants.frag @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + half foo [[color(0)]]; + short bar [[color(1)]]; + ushort baz [[color(2)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.foo = 1.0h; + out.bar = 2; + out.baz = 3u; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..9b757b6a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +constant float _17[5] = { 1.0, 2.0, 3.0, 4.0, 5.0 }; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + for (int _46 = 0; _46 < 4; ) + { + int _33 = _46 + 1; + out.FragColor += float4(_17[_33]); + _46 = _33; + continue; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/basic.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/basic.frag new file mode 100644 index 000000000..f33db61eb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/basic.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; + float2 vTex [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uTex [[texture(0)]], sampler uTexSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = in.vColor * uTex.sample(uTexSmplr, in.vTex); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/binary-func-unpack-pack-arguments.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/binary-func-unpack-pack-arguments.frag new file mode 100644 index 000000000..134cfe184 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/binary-func-unpack-pack-arguments.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + packed_float3 color; + float v; +}; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vIn [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], 
constant UBO& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = dot(in.vIn, float3(_15.color)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/binary-unpack-pack-arguments.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/binary-unpack-pack-arguments.frag new file mode 100644 index 000000000..8bd538bec --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/binary-unpack-pack-arguments.frag @@ -0,0 +1,28 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct UBO +{ + packed_float3 color; + float v; +}; + +struct main0_out +{ + float3 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vIn [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant UBO& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = cross(in.vIn, float3(_15.color) - in.vIn); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/bitcasting.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/bitcasting.frag new file mode 100644 index 000000000..ae6d45e01 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/bitcasting.frag @@ -0,0 +1,26 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor0 [[color(0)]]; + float4 FragColor1 [[color(1)]]; +}; + +struct main0_in +{ + float4 VertGeom [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> TextureBase [[texture(0)]], texture2d<float> TextureDetail [[texture(1)]], sampler TextureBaseSmplr [[sampler(0)]], sampler TextureDetailSmplr [[sampler(1)]]) +{ + main0_out out = {}; + float4 _20 = TextureBase.sample(TextureBaseSmplr, in.VertGeom.xy); + float4 _31 = TextureDetail.sample(TextureDetailSmplr, in.VertGeom.xy, int2(3, 2)); + out.FragColor0 = as_type<float4>(as_type<int4>(_20)) * as_type<float4>(as_type<int4>(_31)); + out.FragColor1 = as_type<float4>(as_type<uint4>(_20)) * as_type<float4>(as_type<uint4>(_31)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/buffer-read.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/buffer-read.frag new file mode 100644 index 000000000..fdd88b568 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/buffer-read.frag @@ -0,0 +1,25 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +fragment main0_out main0(texture2d<float> buf [[texture(0)]]) +{ + main0_out out = {}; + out.FragColor = buf.read(spvTexelBufferCoord(0)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/builtins.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/builtins.frag new file mode 100644 index 000000000..f9085252b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/builtins.frag @@ -0,0 +1,24 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; + float gl_FragDepth [[depth(any)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = gl_FragCoord + in.vColor; + out.gl_FragDepth = 0.5; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/complex-expression-in-access-chain.frag
b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..be10da12f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4 results[1024]; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int vIn [[user(locn0)]]; + int vIn2 [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], device UBO& _34 [[buffer(0)]], texture2d Buf [[texture(1)]], sampler BufSmplr [[sampler(1)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + int _40 = Buf.read(uint2(int2(gl_FragCoord.xy)), 0).x % 16; + out.FragColor = (_34.results[_40] + _34.results[_40]) + _34.results[(in.vIn * in.vIn) + (in.vIn2 * in.vIn2)]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/composite-extract-forced-temporary.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/composite-extract-forced-temporary.frag new file mode 100644 index 000000000..6948e4d09 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/composite-extract-forced-temporary.frag @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vTexCoord [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d Texture [[texture(0)]], sampler TextureSmplr [[sampler(0)]]) +{ + main0_out out = {}; + float4 _19 = Texture.sample(TextureSmplr, in.vTexCoord); + float _22 = _19.x; + out.FragColor = float4(_22 * _22); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/constant-array.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/constant-array.frag new file mode 100644 index 000000000..0ba2781b0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/constant-array.frag @@ -0,0 +1,50 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct Foobar +{ + float a; + float b; +}; + +constant float4 _37[3] = { float4(1.0), float4(2.0), float4(3.0) }; +constant float4 _49[2] = { float4(1.0), float4(2.0) }; +constant float4 _54[2] = { float4(8.0), float4(10.0) }; +constant float4 _55[2][2] = { { float4(1.0), float4(2.0) }, { float4(8.0), float4(10.0) } }; +constant Foobar _75[2] = { Foobar{ 10.0, 40.0 }, Foobar{ 90.0, 70.0 } }; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
+template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Foobar indexable[2] = { Foobar{ 10.0, 40.0 }, Foobar{ 90.0, 70.0 } }; + out.FragColor = ((_37[in.index] + _55[in.index][in.index + 1]) + float4(30.0)) + float4(indexable[in.index].a + indexable[in.index].b); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/constant-composites.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/constant-composites.frag new file mode 100644 index 000000000..deb7eaaf8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/constant-composites.frag @@ -0,0 +1,48 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct Foo +{ + float a; + float b; +}; + +constant float _16[4] = { 1.0, 4.0, 3.0, 2.0 }; +constant Foo _28[2] = { Foo{ 10.0, 20.0 }, Foo{ 30.0, 40.0 } }; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int line [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Foo foos[2] = { Foo{ 10.0, 20.0 }, Foo{ 30.0, 40.0 } }; + out.FragColor = float4(_16[in.line]); + out.FragColor += float4(foos[in.line].a * foos[1 - in.line].a); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..b75f86d23 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,45 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vInput [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = in.vInput; + float4 _23 = uSampler.sample(uSamplerSmplr, in.vInput.xy); + float4 _26 = dfdx(in.vInput); + float4 _29 = dfdy(in.vInput); + float4 _32 = fwidth(in.vInput); + float4 _35 = dfdx(in.vInput); + float4 _38 = dfdy(in.vInput); + float4 _41 = fwidth(in.vInput); + float4 _44 = dfdx(in.vInput); + float4 _47 = dfdy(in.vInput); + float4 _50 = fwidth(in.vInput); + if (in.vInput.y > 10.0) + { + out.FragColor += _23; + out.FragColor += _26; + out.FragColor += _29; + out.FragColor += _32; + out.FragColor += _35; + out.FragColor += _38; + out.FragColor += _41; + out.FragColor += _44; + out.FragColor += _47; + out.FragColor += _50; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/depth-greater-than.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/depth-greater-than.frag new file mode 100644 index 000000000..5861509fc --- /dev/null +++
b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/depth-greater-than.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float gl_FragDepth [[depth(greater)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.gl_FragDepth = 0.5; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/depth-less-than.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/depth-less-than.frag new file mode 100644 index 000000000..f1177fa64 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/depth-less-than.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float gl_FragDepth [[depth(less)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.gl_FragDepth = 0.5; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/dual-source-blending.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/dual-source-blending.frag new file mode 100644 index 000000000..37938bf8c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/dual-source-blending.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor0 [[color(0), index(0)]]; + float4 FragColor1 [[color(0), index(1)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor0 = float4(1.0); + out.FragColor1 = float4(2.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/early-fragment-tests.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/early-fragment-tests.frag new file mode 100644 index 000000000..850fdc920 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/early-fragment-tests.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +[[ early_fragment_tests ]] fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/false-loop-init.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/false-loop-init.frag new file mode 100644 index 000000000..9233caa05 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/false-loop-init.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 result [[color(0)]]; +}; + +struct main0_in +{ + float4 accum [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.result = float4(0.0); + for (int _48 = 0; _48 < 4; ) + { + out.result += in.accum; + _48 += int((in.accum.y > 10.0) ? 
40u : 30u); + continue; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/flush_params.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/flush_params.frag new file mode 100644 index 000000000..64edee872 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/flush_params.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(10.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/for-loop-init.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/for-loop-init.frag new file mode 100644 index 000000000..cef6e11d3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/for-loop-init.frag @@ -0,0 +1,73 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + int FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + int _145; + for (;;) + { + out.FragColor = 16; + _145 = 0; + for (; _145 < 25; ) + { + out.FragColor += 10; + _145++; + continue; + } + for (int _146 = 1; _146 < 30; ) + { + out.FragColor += 11; + _146++; + continue; + } + int _147; + _147 = 0; + for (; _147 < 20; ) + { + out.FragColor += 12; + _147++; + continue; + } + int _62 = _147 + 3; + out.FragColor += _62; + if (_62 == 40) + { + for (int _151 = 0; _151 < 40; ) + { + out.FragColor += 13; + _151++; + continue; + } + break; + } + out.FragColor += _62; + int2 _148; + _148 = int2(0); + for (; _148.x < 10; ) + { + out.FragColor += _148.y; + int2 _144 = _148; + _144.x = _148.x + 4; + _148 = _144; + continue; + } + for (int _150 = _62; _150 < 40; ) + { + out.FragColor += _150; + _150++; + continue; + } + out.FragColor += _62; + break; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/fp16-packing.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/fp16-packing.frag new file mode 100644 index 000000000..e21feb43e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/fp16-packing.frag @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float2 FP32Out [[color(0)]]; + uint FP16Out [[color(1)]]; +}; + +struct main0_in +{ + uint FP16 [[user(locn0)]]; + float2 FP32 [[user(locn1), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FP32Out = float2(as_type(in.FP16)); + out.FP16Out = as_type(half2(in.FP32)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/fp16.desktop.invalid.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/fp16.desktop.invalid.frag new file mode 100644 index 000000000..d9a0390e1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/fp16.desktop.invalid.frag @@ -0,0 +1,16 @@ +#include +#include + +using namespace metal; + +struct main0_in +{ + half4 v4 [[user(locn3)]]; +}; + +fragment void main0(main0_in in [[stage_in]]) +{ + half4 _491; + half4 _563 = modf(in.v4, _491); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/front-facing.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/front-facing.frag new file mode 100644 index 000000000..2f8364249 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/front-facing.frag @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + 
float4 vA [[user(locn0)]]; + float4 vB [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], bool gl_FrontFacing [[front_facing]]) +{ + main0_out out = {}; + if (gl_FrontFacing) + { + out.FragColor = in.vA; + } + else + { + out.FragColor = in.vB; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/gather-dref.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/gather-dref.frag new file mode 100644 index 000000000..c5c5ccf0b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/gather-dref.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d uT [[texture(0)]], sampler uTSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uT.gather_compare(uTSmplr, in.vUV.xy, in.vUV.z); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/gather-offset.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/gather-offset.frag new file mode 100644 index 000000000..02b80194b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/gather-offset.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uT [[texture(0)]], sampler uTSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uT.gather(uTSmplr, float2(0.5), int2(0), component::w); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/helper-invocation.msl21.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/helper-invocation.msl21.frag new file mode 100644 index 000000000..bacf6fa12 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/helper-invocation.msl21.frag @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + bool gl_HelperInvocation = simd_is_helper_thread(); + float4 _51; + if (!gl_HelperInvocation) + { + _51 = uSampler.sample(uSamplerSmplr, in.vUV, level(0.0)); + } + else + { + _51 = float4(1.0); + } + out.FragColor = _51; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/illegal-name-test-0.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/illegal-name-test-0.frag new file mode 100644 index 000000000..81cd3b562 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/illegal-name-test-0.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(40.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/in_block.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/in_block.frag new file mode 100644 index 000000000..8178c9a4e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/in_block.frag @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +struct VertexOut +{ + float4 color; + float4 color2; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 
VertexOut_color [[user(locn2)]]; + float4 VertexOut_color2 [[user(locn3)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + VertexOut inputs = {}; + inputs.color = in.VertexOut_color; + inputs.color2 = in.VertexOut_color2; + out.FragColor = inputs.color + inputs.color2; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/in_mat.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/in_mat.frag new file mode 100644 index 000000000..b0a1b87d1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/in_mat.frag @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 outFragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 inPos [[user(locn0)]]; + float3 inNormal [[user(locn1)]]; + float4 inInvModelView_0 [[user(locn2)]]; + float4 inInvModelView_1 [[user(locn3)]]; + float4 inInvModelView_2 [[user(locn4)]]; + float4 inInvModelView_3 [[user(locn5)]]; + float inLodBias [[user(locn6)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texturecube samplerColor [[texture(1)]], sampler samplerColorSmplr [[sampler(1)]]) +{ + main0_out out = {}; + float4x4 inInvModelView = {}; + inInvModelView[0] = in.inInvModelView_0; + inInvModelView[1] = in.inInvModelView_1; + inInvModelView[2] = in.inInvModelView_2; + inInvModelView[3] = in.inInvModelView_3; + float4 _31 = inInvModelView * float4(reflect(normalize(in.inPos), normalize(in.inNormal)), 0.0); + float _33 = _31.x; + float3 _59 = float3(_33, _31.yz); + _59.x = _33 * (-1.0); + out.outFragColor = samplerColor.sample(samplerColorSmplr, _59, bias(in.inLodBias)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/input-attachment-ms.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/input-attachment-ms.frag new file mode 100644 index 000000000..906cabbf4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/input-attachment-ms.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d_ms uSubpass0 [[texture(0)]], texture2d_ms uSubpass1 [[texture(1)]], uint gl_SampleID [[sample_id]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = (uSubpass0.read(uint2(gl_FragCoord.xy), 1) + uSubpass1.read(uint2(gl_FragCoord.xy), 2)) + uSubpass0.read(uint2(gl_FragCoord.xy), gl_SampleID); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/input-attachment.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/input-attachment.frag new file mode 100644 index 000000000..122190648 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/input-attachment.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uSubpass0 [[texture(0)]], texture2d uSubpass1 [[texture(1)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = uSubpass0.read(uint2(gl_FragCoord.xy), 0) + uSubpass1.read(uint2(gl_FragCoord.xy), 0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/interpolation-qualifiers-block.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/interpolation-qualifiers-block.frag new file mode 100644 index 000000000..2b420195f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/interpolation-qualifiers-block.frag @@ 
-0,0 +1,47 @@ +#include +#include + +using namespace metal; + +struct Input +{ + float2 v0; + float2 v1; + float3 v2; + float4 v3; + float v4; + float v5; + float v6; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 Input_v0 [[user(locn0), centroid_no_perspective]]; + float2 Input_v1 [[user(locn1), centroid_no_perspective]]; + float3 Input_v2 [[user(locn2), centroid_no_perspective]]; + float4 Input_v3 [[user(locn3), centroid_no_perspective]]; + float Input_v4 [[user(locn4), centroid_no_perspective]]; + float Input_v5 [[user(locn5), centroid_no_perspective]]; + float Input_v6 [[user(locn6), centroid_no_perspective]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Input inp = {}; + inp.v0 = in.Input_v0; + inp.v1 = in.Input_v1; + inp.v2 = in.Input_v2; + inp.v3 = in.Input_v3; + inp.v4 = in.Input_v4; + inp.v5 = in.Input_v5; + inp.v6 = in.Input_v6; + out.FragColor = float4(inp.v0.x + inp.v1.y, inp.v2.xy, ((inp.v3.w * inp.v4) + inp.v5) - inp.v6); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/interpolation-qualifiers.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/interpolation-qualifiers.frag new file mode 100644 index 000000000..aff6e1b0f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/interpolation-qualifiers.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 v0 [[user(locn0)]]; + float2 v1 [[user(locn1), center_no_perspective]]; + float3 v2 [[user(locn2), centroid_perspective]]; + float4 v3 [[user(locn3), centroid_no_perspective]]; + float v4 [[user(locn4), sample_perspective]]; + float v5 [[user(locn5), sample_no_perspective]]; + float v6 [[user(locn6), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = float4(in.v0.x + in.v1.y, in.v2.xy, ((in.v3.w * in.v4) + in.v5) - in.v6); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/lut-promotion.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/lut-promotion.frag new file mode 100644 index 000000000..c9169b790 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/lut-promotion.frag @@ -0,0 +1,66 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float _16[16] = { 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0 }; +constant float4 _60[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; +constant float4 _104[4] = { float4(20.0), float4(30.0), float4(50.0), float4(60.0) }; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
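// The helper below is one of the spvArrayCopy* functions SPIRV-Cross emits because MSL, like C,
// does not allow whole C-style arrays to be assigned directly. Its template parameter list is
// rendered here as a bare "template"; judging from the `thread T (&dst)[N]` parameters, the
// intended header is `template<typename T, uint N>`. A minimal sketch of the same element-wise
// copy under that assumption (spvArrayCopyExample is an illustrative name, not part of the
// generated file):
template<typename T, uint N>
inline void spvArrayCopyExample(thread T (&dst)[N], thread const T (&src)[N]) // illustrative name
{
    // Copy one element per iteration; the generated helpers fold the assignment into the
    // for-header with the comma operator, which is equivalent.
    for (uint i = 0; i < N; i++)
        dst[i] = src[i];
}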
+template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = _16[in.index]; + if (in.index < 10) + { + out.FragColor += _16[in.index ^ 1]; + } + else + { + out.FragColor += _16[in.index & 1]; + } + bool _63 = in.index > 30; + if (_63) + { + out.FragColor += _60[in.index & 3].y; + } + else + { + out.FragColor += _60[in.index & 1].x; + } + float4 foobar[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + if (_63) + { + foobar[1].z = 20.0; + } + int _91 = in.index & 3; + out.FragColor += foobar[_91].z; + out.FragColor += _104[_91].z; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/mix.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/mix.frag new file mode 100644 index 000000000..5e385087b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/mix.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vIn0 [[user(locn0)]]; + float4 vIn1 [[user(locn1)]]; + float vIn2 [[user(locn2)]]; + float vIn3 [[user(locn3)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = float4(in.vIn0.x, in.vIn1.y, in.vIn0.z, in.vIn0.w); + out.FragColor = float4(in.vIn3); + out.FragColor = in.vIn0.xyzw; + out.FragColor = float4(in.vIn2); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/mrt-array.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/mrt-array.frag new file mode 100644 index 000000000..d7cea6baf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/mrt-array.frag @@ -0,0 +1,43 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor_0 [[color(0)]]; + float4 FragColor_1 [[color(1)]]; + float4 FragColor_2 [[color(2)]]; + float4 FragColor_3 [[color(3)]]; +}; + +struct main0_in +{ + float4 vA [[user(locn0)]]; + float4 vB [[user(locn1)]]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float4 FragColor[4] = {}; + FragColor[0] = mod(in.vA, in.vB); + FragColor[1] = in.vA + in.vB; + FragColor[2] = in.vA - in.vB; + FragColor[3] = in.vA * in.vB; + out.FragColor_0 = FragColor[0]; + out.FragColor_1 = FragColor[1]; + out.FragColor_2 = FragColor[2]; + out.FragColor_3 = FragColor[3]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/packed-expression-vector-shuffle.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/packed-expression-vector-shuffle.frag new file mode 100644 index 000000000..dd319af55 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/packed-expression-vector-shuffle.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + packed_float3 color; + float v; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant UBO& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = 
float4(_15.color[0], _15.color[1], _15.color[2], float4(1.0).w); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/packing-test-3.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/packing-test-3.frag new file mode 100644 index 000000000..436829e85 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/packing-test-3.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct TestStruct +{ + packed_float3 position; + float radius; +}; + +struct CB0 +{ + TestStruct CB0[16]; +}; + +struct main0_out +{ + float4 _entryPointOutput [[color(0)]]; +}; + +fragment main0_out main0(constant CB0& _26 [[buffer(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out._entryPointOutput = float4(_26.CB0[1].position[0], _26.CB0[1].position[1], _26.CB0[1].position[2], _26.CB0[1].radius); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/pls.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/pls.frag new file mode 100644 index 000000000..ee774a04a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/pls.frag @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 PLSOut0 [[color(0)]]; + float4 PLSOut1 [[color(1)]]; + float4 PLSOut2 [[color(2)]]; + float4 PLSOut3 [[color(3)]]; +}; + +struct main0_in +{ + float4 PLSIn0 [[user(locn0)]]; + float4 PLSIn1 [[user(locn1)]]; + float4 PLSIn2 [[user(locn2)]]; + float4 PLSIn3 [[user(locn3)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.PLSOut0 = in.PLSIn0 * 2.0; + out.PLSOut1 = in.PLSIn1 * 6.0; + out.PLSOut2 = in.PLSIn2 * 7.0; + out.PLSOut3 = in.PLSIn3 * 4.0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/readonly-ssbo.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/readonly-ssbo.frag new file mode 100644 index 000000000..777fd65c8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/readonly-ssbo.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 v; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(const device SSBO& _13 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = _13.v + _13.v; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-depth-separate-image-sampler.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-depth-separate-image-sampler.frag new file mode 100644 index 000000000..17e1421a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-depth-separate-image-sampler.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +fragment main0_out main0(depth2d uDepth [[texture(0)]], texture2d uColor [[texture(1)]], sampler uSampler [[sampler(2)]], sampler uSamplerShadow [[sampler(3)]]) +{ + main0_out out = {}; + out.FragColor = uDepth.sample_compare(uSamplerShadow, float3(0.5).xy, 0.5) + uColor.sample(uSampler, float2(0.5)).x; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-mask.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-mask.frag new file mode 100644 index 000000000..6a282395d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-mask.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 
FragColor [[color(0)]]; + uint gl_SampleMask [[sample_mask]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(1.0); + out.gl_SampleMask = 0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-position-func.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-position-func.frag new file mode 100644 index 000000000..87875bbc3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-position-func.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], uint gl_SampleID [[sample_id]]) +{ + main0_out out = {}; + float2 gl_SamplePosition = get_sample_position(gl_SampleID); + out.FragColor = float4(gl_SamplePosition, float(in.index), 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-position.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-position.frag new file mode 100644 index 000000000..8d26acb9f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sample-position.frag @@ -0,0 +1,18 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(uint gl_SampleID [[sample_id]]) +{ + main0_out out = {}; + float2 gl_SamplePosition = get_sample_position(gl_SampleID); + out.FragColor = float4(gl_SamplePosition, float(gl_SampleID), 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-1d-lod.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-1d-lod.frag new file mode 100644 index 000000000..96914f805 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-1d-lod.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float vTex [[user(locn0), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture1d uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor += ((uSampler.sample(uSamplerSmplr, in.vTex) + uSampler.sample(uSamplerSmplr, in.vTex)) + uSampler.sample(uSamplerSmplr, in.vTex)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-compare-cascade-gradient.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-compare-cascade-gradient.frag new file mode 100644 index 000000000..18c2d4237 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-compare-cascade-gradient.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d_array uTex [[texture(0)]], sampler uShadow [[sampler(1)]]) +{ + main0_out out = {}; + out.FragColor = uTex.sample_compare(uShadow, in.vUV.xy, uint(round(in.vUV.z)), in.vUV.w, level(0)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag new file mode 100644 index 000000000..f01965c9e --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d_array uTex [[texture(0)]], sampler uShadow [[sampler(1)]]) +{ + main0_out out = {}; + out.FragColor = uTex.sample_compare(uShadow, in.vUV.xy, uint(round(in.vUV.z)), in.vUV.w, gradient2d(float2(0.0), float2(0.0))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-image-arrays.msl2.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-image-arrays.msl2.frag new file mode 100644 index 000000000..ae37cf9a4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-image-arrays.msl2.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vTex [[user(locn0), flat]]; + int vIndex [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], array, 4> uSampler [[texture(0)]], array, 4> uTextures [[texture(8)]], array uSamplerSmplr [[sampler(0)]], array uSamplers [[sampler(4)]]) +{ + main0_out out = {}; + out.FragColor = float4(0.0); + out.FragColor += uTextures[2].sample(uSamplers[1], in.vTex); + out.FragColor += uSampler[in.vIndex].sample(uSamplerSmplr[in.vIndex], in.vTex); + out.FragColor += uSampler[in.vIndex].sample(uSamplerSmplr[in.vIndex], (in.vTex + float2(0.100000001490116119384765625))); + out.FragColor += uSampler[in.vIndex].sample(uSamplerSmplr[in.vIndex], (in.vTex + float2(0.20000000298023223876953125))); + out.FragColor += uSampler[3].sample(uSamplerSmplr[3], (in.vTex + float2(0.300000011920928955078125))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-ms.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-ms.frag new file mode 100644 index 000000000..8245ed826 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler-ms.frag @@ -0,0 +1,18 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d_ms uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + int2 _17 = int2(gl_FragCoord.xy); + out.FragColor = ((uSampler.read(uint2(_17), 0) + uSampler.read(uint2(_17), 1)) + uSampler.read(uint2(_17), 2)) + uSampler.read(uint2(_17), 3); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler.frag new file mode 100644 index 000000000..f33db61eb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/sampler.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; + float2 vTex [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uTex [[texture(0)]], sampler uTexSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = in.vColor * uTex.sample(uTexSmplr, in.vTex); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/separate-image-sampler-argument.frag 
b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/separate-image-sampler-argument.frag new file mode 100644 index 000000000..b1261f372 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/separate-image-sampler-argument.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uDepth [[texture(1)]], sampler uSampler [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uDepth.sample(uSampler, float2(0.5)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/shadow-compare-global-alias.invalid.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/shadow-compare-global-alias.invalid.frag new file mode 100644 index 000000000..926172577 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/shadow-compare-global-alias.invalid.frag @@ -0,0 +1,27 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d uSampler [[texture(0)]], depth2d uTex [[texture(1)]], sampler uSamplerSmplr [[sampler(0)]], sampler uSamp [[sampler(2)]]) +{ + main0_out out = {}; + out.FragColor = uSampler.sample_compare(uSamplerSmplr, in.vUV.xy, in.vUV.z); + out.FragColor += uTex.sample_compare(uSamp, in.vUV.xy, in.vUV.z); + out.FragColor += uTex.sample_compare(uSamp, in.vUV.xy, in.vUV.z); + out.FragColor += uSampler.sample_compare(uSamplerSmplr, in.vUV.xy, in.vUV.z); + out.FragColor += uTex.sample_compare(uSamp, in.vUV.xy, in.vUV.z); + out.FragColor += uSampler.sample_compare(uSamplerSmplr, in.vUV.xy, in.vUV.z); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/spec-constant-block-size.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/spec-constant-block-size.frag new file mode 100644 index 000000000..36456b814 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/spec-constant-block-size.frag @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 2 +#endif +constant int Value = SPIRV_CROSS_CONSTANT_ID_10; + +struct SpecConstArray +{ + float4 samples[Value]; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int Index [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant SpecConstArray& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = _15.samples[in.Index]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/spec-constant-ternary.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/spec-constant-ternary.frag new file mode 100644 index 000000000..5ab6b4fcb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/spec-constant-ternary.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +constant uint s_tmp [[function_constant(0)]]; +constant uint s = is_function_constant_defined(s_tmp) ? s_tmp : 10u; +constant bool _13 = (s > 20u); +constant uint _16 = _13 ? 
30u : 50u; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float(_16); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..4cd2b6852 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/switch-unsigned-case.frag @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct Buff +{ + uint TestVal; +}; + +struct main0_out +{ + float4 fsout_Color [[color(0)]]; +}; + +fragment main0_out main0(constant Buff& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.fsout_Color = float4(1.0); + switch (_15.TestVal) + { + case 0u: + { + out.fsout_Color = float4(0.100000001490116119384765625); + break; + } + case 1u: + { + out.fsout_Color = float4(0.20000000298023223876953125); + break; + } + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/swizzle.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/swizzle.frag new file mode 100644 index 000000000..7a0494e06 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/swizzle.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vNormal [[user(locn1)]]; + float2 vUV [[user(locn2)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d samp [[texture(0)]], sampler sampSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = float4(samp.sample(sampSmplr, in.vUV).xyz, 1.0); + out.FragColor = float4(samp.sample(sampSmplr, in.vUV).xz, 1.0, 4.0); + out.FragColor = float4(samp.sample(sampSmplr, in.vUV).xx, samp.sample(sampSmplr, (in.vUV + float2(0.100000001490116119384765625))).yy); + out.FragColor = float4(in.vNormal, 1.0); + out.FragColor = float4(in.vNormal + float3(1.7999999523162841796875), 1.0); + out.FragColor = float4(in.vUV, in.vUV + float2(1.7999999523162841796875)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..d4db1ae1c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texel-fetch-offset.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + int2 _22 = int2(gl_FragCoord.xy); + out.FragColor = uTexture.read(uint2(_22) + uint2(int2(1)), 0); + out.FragColor += uTexture.read(uint2(_22) + uint2(int2(-1, 1)), 0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texture-multisample-array.msl21.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texture-multisample-array.msl21.frag new file mode 100644 index 000000000..ed1e81f3e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texture-multisample-array.msl21.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int3 vCoord [[user(locn0)]]; + int vSample [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d_ms_array uTexture 
[[texture(0)]], sampler uTextureSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uTexture.read(uint2(in.vCoord.xy), uint(in.vCoord.z), in.vSample); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texture-proj-shadow.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texture-proj-shadow.frag new file mode 100644 index 000000000..c5ab0ee00 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/texture-proj-shadow.frag @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vClip3 [[user(locn0)]]; + float4 vClip4 [[user(locn1)]]; + float2 vClip2 [[user(locn2)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d uShadow2D [[texture(1)]], texture1d uSampler1D [[texture(2)]], texture2d uSampler2D [[texture(3)]], texture3d uSampler3D [[texture(4)]], sampler uShadow2DSmplr [[sampler(1)]], sampler uSampler1DSmplr [[sampler(2)]], sampler uSampler2DSmplr [[sampler(3)]], sampler uSampler3DSmplr [[sampler(4)]]) +{ + main0_out out = {}; + float4 _20 = in.vClip4; + _20.z = in.vClip4.w; + out.FragColor = uShadow2D.sample_compare(uShadow2DSmplr, _20.xy / _20.z, in.vClip4.z); + out.FragColor = uSampler1D.sample(uSampler1DSmplr, in.vClip2.x / in.vClip2.y).x; + out.FragColor = uSampler2D.sample(uSampler2DSmplr, in.vClip3.xy / in.vClip3.z).x; + out.FragColor = uSampler3D.sample(uSampler3DSmplr, in.vClip4.xyz / in.vClip4.w).x; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/ubo_layout.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/ubo_layout.frag new file mode 100644 index 000000000..0bc27462b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/ubo_layout.frag @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct Str +{ + float4x4 foo; +}; + +struct UBO1 +{ + Str foo; +}; + +struct Str_1 +{ + float4x4 foo; +}; + +struct UBO2 +{ + Str_1 foo; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant UBO1& ubo1 [[buffer(0)]], constant UBO2& ubo0 [[buffer(1)]]) +{ + main0_out out = {}; + out.FragColor = transpose(ubo1.foo.foo)[0] + ubo0.foo.foo[0]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/unary-enclose.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/unary-enclose.frag new file mode 100644 index 000000000..c8648f1e0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/unary-enclose.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vIn [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = in.vIn; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/write-depth-in-function.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/write-depth-in-function.frag new file mode 100644 index 000000000..4ab74f19e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/frag/write-depth-in-function.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; + float gl_FragDepth [[depth(any)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = 1.0; + out.gl_FragDepth = 0.20000000298023223876953125; + return out; +} + diff --git 
a/3rdparty/spirv-cross/reference/opt/shaders-msl/legacy/vert/transpose.legacy.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/legacy/vert/transpose.legacy.vert new file mode 100644 index 000000000..abd884ca8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/legacy/vert/transpose.legacy.vert @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct Buffer +{ + float4x4 MVPRowMajor; + float4x4 MVPColMajor; + float4x4 M; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 Position [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant Buffer& _13 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = (((_13.M * (in.Position * _13.MVPRowMajor)) + (_13.M * (_13.MVPColMajor * in.Position))) + (_13.M * (_13.MVPRowMajor * in.Position))) + (_13.M * (in.Position * _13.MVPColMajor)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/basic.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/basic.vert new file mode 100644 index 000000000..ffb435712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/basic.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.uMVP * in.aVertex; + out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/copy.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/copy.flatten.vert new file mode 100644 index 000000000..d73ee3282 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/copy.flatten.vert @@ -0,0 +1,45 @@ +#include +#include + +using namespace metal; + +struct Light +{ + packed_float3 Position; + float Radius; + float4 Color; +}; + +struct UBO +{ + float4x4 uMVP; + Light lights[4]; +}; + +struct main0_out +{ + float4 vColor [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _21 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _21.uMVP * in.aVertex; + out.vColor = float4(0.0); + for (int _96 = 0; _96 < 4; ) + { + float3 _68 = in.aVertex.xyz - float3(_21.lights[_96].Position); + out.vColor += ((_21.lights[_96].Color * fast::clamp(1.0 - (length(_68) / _21.lights[_96].Radius), 0.0, 1.0)) * dot(in.aNormal, normalize(_68))); + _96++; + continue; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/dynamic.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/dynamic.flatten.vert new file mode 100644 index 000000000..92911a4ee --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/dynamic.flatten.vert @@ -0,0 +1,45 @@ +#include +#include + +using namespace metal; + +struct Light +{ + packed_float3 Position; + float Radius; + float4 Color; +}; + +struct UBO +{ + float4x4 uMVP; + Light lights[4]; +}; + +struct main0_out +{ + float4 vColor [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex 
main0_out main0(main0_in in [[stage_in]], constant UBO& _21 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _21.uMVP * in.aVertex; + out.vColor = float4(0.0); + for (int _82 = 0; _82 < 4; ) + { + float3 _54 = in.aVertex.xyz - float3(_21.lights[_82].Position); + out.vColor += ((_21.lights[_82].Color * fast::clamp(1.0 - (length(_54) / _21.lights[_82].Radius), 0.0, 1.0)) * dot(in.aNormal, normalize(_54))); + _82++; + continue; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/functions.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/functions.vert new file mode 100644 index 000000000..f71022526 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/functions.vert @@ -0,0 +1,119 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; + float3 rotDeg; + float3 rotRad; + int2 bits; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float3 vRotDeg [[user(locn1)]]; + float3 vRotRad [[user(locn2)]]; + int2 vLSB [[user(locn3)]]; + int2 vMSB [[user(locn4)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +// Implementation of the GLSL radians() function +template +T radians(T d) +{ + return d * T(0.01745329251); +} + +// Implementation of the GLSL degrees() function +template +T degrees(T r) +{ + return r * T(57.2957795131); +} + +// Implementation of the GLSL findLSB() function +template +T findLSB(T x) +{ + return select(ctz(x), T(-1), x == T(0)); +} + +// Implementation of the signed GLSL findMSB() function +template +T findSMSB(T x) +{ + T v = select(x, T(-1) - x, x < T(0)); + return select(clz(T(0)) - (clz(v) + T(1)), T(-1), v == T(0)); +} + +// Returns the determinant of a 2x2 matrix. +inline float spvDet2x2(float a1, float a2, float b1, float b2) +{ + return a1 * b2 - b1 * a2; +} + +// Returns the determinant of a 3x3 matrix. +inline float spvDet3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3) +{ + return a1 * spvDet2x2(b2, b3, c2, c3) - b1 * spvDet2x2(a2, a3, c2, c3) + c1 * spvDet2x2(a2, a3, b2, b3); +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float4x4 spvInverse4x4(float4x4 m) +{ + float4x4 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. 
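// Each adj[c][r] below is a cofactor of m placed in transposed position: the determinant of the
// 3x3 minor left after deleting one row and one column of m, with a checkerboard sign pattern.
// Dividing this adjugate by the determinant, computed further down by expanding along the first
// row, gives the inverse: inverse(m) = adj(m) / det(m).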
+ adj[0][0] = spvDet3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][1] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][2] = spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]); + adj[0][3] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]); + + adj[1][0] = -spvDet3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][1] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][2] = -spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]); + adj[1][3] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]); + + adj[2][0] = spvDet3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][1] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][2] = spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]); + adj[2][3] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]); + + adj[3][0] = -spvDet3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][1] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][2] = -spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]); + adj[3][3] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] * m[3][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? 
(adj * (1.0f / det)) : m; +} + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = spvInverse4x4(_18.uMVP) * in.aVertex; + out.vNormal = in.aNormal; + out.vRotDeg = degrees(_18.rotRad); + out.vRotRad = radians(_18.rotDeg); + out.vLSB = findLSB(_18.bits); + out.vMSB = findSMSB(_18.bits); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/in_out_array_mat.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/in_out_array_mat.vert new file mode 100644 index 000000000..0d6976e26 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/in_out_array_mat.vert @@ -0,0 +1,67 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 projection; + float4x4 model; + float lodBias; +}; + +struct main0_out +{ + float3 outPos [[user(locn0)]]; + float3 outNormal [[user(locn1)]]; + float4 outTransModel_0 [[user(locn2)]]; + float4 outTransModel_1 [[user(locn3)]]; + float4 outTransModel_2 [[user(locn4)]]; + float4 outTransModel_3 [[user(locn5)]]; + float outLodBias [[user(locn6)]]; + float4 color [[user(locn7)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float3 inPos [[attribute(0)]]; + float4 colors_0 [[attribute(1)]]; + float4 colors_1 [[attribute(2)]]; + float4 colors_2 [[attribute(3)]]; + float3 inNormal [[attribute(4)]]; + float4 inViewMat_0 [[attribute(5)]]; + float4 inViewMat_1 [[attribute(6)]]; + float4 inViewMat_2 [[attribute(7)]]; + float4 inViewMat_3 [[attribute(8)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& ubo [[buffer(0)]]) +{ + main0_out out = {}; + float4x4 outTransModel = {}; + float4 colors[3] = {}; + float4x4 inViewMat = {}; + colors[0] = in.colors_0; + colors[1] = in.colors_1; + colors[2] = in.colors_2; + inViewMat[0] = in.inViewMat_0; + inViewMat[1] = in.inViewMat_1; + inViewMat[2] = in.inViewMat_2; + inViewMat[3] = in.inViewMat_3; + float4 _64 = float4(in.inPos, 1.0); + out.gl_Position = (ubo.projection * ubo.model) * _64; + out.outPos = float3((ubo.model * _64).xyz); + out.outNormal = float3x3(float3(ubo.model[0].x, ubo.model[0].y, ubo.model[0].z), float3(ubo.model[1].x, ubo.model[1].y, ubo.model[1].z), float3(ubo.model[2].x, ubo.model[2].y, ubo.model[2].z)) * in.inNormal; + out.outLodBias = ubo.lodBias; + outTransModel = transpose(ubo.model) * inViewMat; + outTransModel[2] = float4(in.inNormal, 1.0); + outTransModel[1].y = ubo.lodBias; + out.color = colors[2]; + out.outTransModel_0 = outTransModel[0]; + out.outTransModel_1 = outTransModel[1]; + out.outTransModel_2 = outTransModel[2]; + out.outTransModel_3 = outTransModel[3]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interface-block-block-composites.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interface-block-block-composites.frag new file mode 100644 index 000000000..90d732cc5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interface-block-block-composites.frag @@ -0,0 +1,58 @@ +#include +#include + +using namespace metal; + +struct Vert +{ + float3x3 wMatrix; + float4 wTmp; + float arr[4]; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vMatrix_0 [[user(locn0)]]; + float3 vMatrix_1 [[user(locn1)]]; + float3 vMatrix_2 [[user(locn2)]]; + float3 Vert_wMatrix_0 [[user(locn4)]]; + float3 Vert_wMatrix_1 [[user(locn5)]]; + float3 Vert_wMatrix_2 [[user(locn6)]]; + float4 Vert_wTmp [[user(locn7)]]; + float Vert_arr_0 
[[user(locn8)]]; + float Vert_arr_1 [[user(locn9)]]; + float Vert_arr_2 [[user(locn10)]]; + float Vert_arr_3 [[user(locn11)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Vert _17 = {}; + float3x3 vMatrix = {}; + _17.wMatrix[0] = in.Vert_wMatrix_0; + _17.wMatrix[1] = in.Vert_wMatrix_1; + _17.wMatrix[2] = in.Vert_wMatrix_2; + _17.wTmp = in.Vert_wTmp; + _17.arr[0] = in.Vert_arr_0; + _17.arr[1] = in.Vert_arr_1; + _17.arr[2] = in.Vert_arr_2; + _17.arr[3] = in.Vert_arr_3; + vMatrix[0] = in.vMatrix_0; + vMatrix[1] = in.vMatrix_1; + vMatrix[2] = in.vMatrix_2; + out.FragColor = (_17.wMatrix[0].xxyy + _17.wTmp) + vMatrix[1].yyzz; + for (int _56 = 0; _56 < 4; ) + { + out.FragColor += float4(_17.arr[_56]); + _56++; + continue; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interface-block-block-composites.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interface-block-block-composites.vert new file mode 100644 index 000000000..3d97ae6dc --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interface-block-block-composites.vert @@ -0,0 +1,64 @@ +#include +#include + +using namespace metal; + +struct Vert +{ + float arr[3]; + float3x3 wMatrix; + float4 wTmp; +}; + +struct main0_out +{ + float3 vMatrix_0 [[user(locn0)]]; + float3 vMatrix_1 [[user(locn1)]]; + float3 vMatrix_2 [[user(locn2)]]; + float Vert_arr_0 [[user(locn4)]]; + float Vert_arr_1 [[user(locn5)]]; + float Vert_arr_2 [[user(locn6)]]; + float3 Vert_wMatrix_0 [[user(locn7)]]; + float3 Vert_wMatrix_1 [[user(locn8)]]; + float3 Vert_wMatrix_2 [[user(locn9)]]; + float4 Vert_wTmp [[user(locn10)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float3 Matrix_0 [[attribute(0)]]; + float3 Matrix_1 [[attribute(1)]]; + float3 Matrix_2 [[attribute(2)]]; + float4 Pos [[attribute(4)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float3x3 vMatrix = {}; + Vert _20 = {}; + float3x3 Matrix = {}; + Matrix[0] = in.Matrix_0; + Matrix[1] = in.Matrix_1; + Matrix[2] = in.Matrix_2; + vMatrix = Matrix; + _20.wMatrix = Matrix; + _20.arr[0] = 1.0; + _20.arr[1] = 2.0; + _20.arr[2] = 3.0; + _20.wTmp = in.Pos; + out.gl_Position = in.Pos; + out.vMatrix_0 = vMatrix[0]; + out.vMatrix_1 = vMatrix[1]; + out.vMatrix_2 = vMatrix[2]; + out.Vert_arr_0 = _20.arr[0]; + out.Vert_arr_1 = _20.arr[1]; + out.Vert_arr_2 = _20.arr[2]; + out.Vert_wMatrix_0 = _20.wMatrix[0]; + out.Vert_wMatrix_1 = _20.wMatrix[1]; + out.Vert_wMatrix_2 = _20.wMatrix[2]; + out.Vert_wTmp = _20.wTmp; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interpolation-qualifiers-block.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interpolation-qualifiers-block.vert new file mode 100644 index 000000000..4206623b4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interpolation-qualifiers-block.vert @@ -0,0 +1,55 @@ +#include +#include + +using namespace metal; + +struct Output +{ + float2 v0; + float2 v1; + float3 v2; + float4 v3; + float v4; + float v5; + float v6; +}; + +struct main0_out +{ + float2 Output_v0 [[user(locn0)]]; + float2 Output_v1 [[user(locn1)]]; + float3 Output_v2 [[user(locn2)]]; + float4 Output_v3 [[user(locn3)]]; + float Output_v4 [[user(locn4)]]; + float Output_v5 [[user(locn5)]]; + float Output_v6 [[user(locn6)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 Position [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) 
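// The GLSL output block `Output` cannot be carried through MSL stage I/O as a single struct, so
// SPIRV-Cross flattens its members into the individual [[user(locnN)]] fields of main0_out. The
// body below fills a local `Output outp` and then forwards each member to the matching
// flattened output.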
+{ + main0_out out = {}; + Output outp = {}; + outp.v0 = in.Position.xy; + outp.v1 = in.Position.zw; + outp.v2 = float3(in.Position.x, in.Position.z * in.Position.y, in.Position.x); + outp.v3 = in.Position.xxyy; + outp.v4 = in.Position.w; + outp.v5 = in.Position.y; + outp.v6 = in.Position.x * in.Position.w; + out.gl_Position = in.Position; + out.Output_v0 = outp.v0; + out.Output_v1 = outp.v1; + out.Output_v2 = outp.v2; + out.Output_v3 = outp.v3; + out.Output_v4 = outp.v4; + out.Output_v5 = outp.v5; + out.Output_v6 = outp.v6; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interpolation-qualifiers.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interpolation-qualifiers.vert new file mode 100644 index 000000000..ba2c4fbd2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/interpolation-qualifiers.vert @@ -0,0 +1,36 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float2 v0 [[user(locn0)]]; + float2 v1 [[user(locn1)]]; + float3 v2 [[user(locn2)]]; + float4 v3 [[user(locn3)]]; + float v4 [[user(locn4)]]; + float v5 [[user(locn5)]]; + float v6 [[user(locn6)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 Position [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.v0 = in.Position.xy; + out.v1 = in.Position.zw; + out.v2 = float3(in.Position.x, in.Position.z * in.Position.y, in.Position.x); + out.v3 = in.Position.xxyy; + out.v4 = in.Position.w; + out.v5 = in.Position.y; + out.v6 = in.Position.x * in.Position.w; + out.gl_Position = in.Position; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/layer.msl11.invalid.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/layer.msl11.invalid.vert new file mode 100644 index 000000000..b6f39dca3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/layer.msl11.invalid.vert @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + uint gl_Layer [[render_target_array_index]]; +}; + +struct main0_in +{ + float4 coord [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.gl_Position = in.coord; + out.gl_Layer = uint(int(in.coord.z)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.vert new file mode 100644 index 000000000..28098ee88 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.vert @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +struct _10 +{ + uint4 _m0[1024]; +}; + +struct main0_in +{ + uint4 m_19 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], device _10& _12 [[buffer(0)]], uint gl_VertexIndex [[vertex_id]]) +{ + _12._m0[gl_VertexIndex] = in.m_19; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_buff.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_buff.vert new file mode 100644 index 000000000..458e1f1b7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_buff.vert @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct _35 +{ + uint4 _m0[1024]; +}; + +struct _40 +{ + uint4 _m0[1024]; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_17 
[[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], constant _40& _42 [[buffer(0)]], device _35& _37 [[buffer(1)]]) +{ + main0_out out = {}; + out.gl_Position = in.m_17; + for (int _51 = 0; _51 < 1024; ) + { + _37._m0[_51] = _42._m0[_51]; + _51++; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_buff_atomic.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_buff_atomic.vert new file mode 100644 index 000000000..ca4d6a5b9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_buff_atomic.vert @@ -0,0 +1,30 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct _23 +{ + uint _m0; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_17 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], device _23& _25 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = in.m_17; + uint _29 = atomic_fetch_add_explicit((volatile device atomic_uint*)&_25._m0, 1u, memory_order_relaxed); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_tex.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_tex.vert new file mode 100644 index 000000000..c2af1e2e6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/no_stage_out.write_tex.vert @@ -0,0 +1,27 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_17 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], texture1d _37 [[texture(0)]], texture1d _34 [[texture(1)]]) +{ + main0_out out = {}; + out.gl_Position = in.m_17; + for (int _45 = 0; _45 < 128; ) + { + _34.write(_37.read(uint(_45)), uint(_45)); + _45++; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/out_block.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/out_block.vert new file mode 100644 index 000000000..45b897013 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/out_block.vert @@ -0,0 +1,41 @@ +#include +#include + +using namespace metal; + +struct Transform +{ + float4x4 transform; +}; + +struct VertexOut +{ + float4 color; + float4 color2; +}; + +struct main0_out +{ + float4 VertexOut_color [[user(locn2)]]; + float4 VertexOut_color2 [[user(locn3)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float3 position [[attribute(0)]]; + float4 color [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant Transform& block [[buffer(0)]]) +{ + main0_out out = {}; + VertexOut outputs = {}; + out.gl_Position = block.transform * float4(in.position, 1.0); + outputs.color = in.color; + outputs.color2 = in.color + float4(1.0); + out.VertexOut_color = outputs.color; + out.VertexOut_color2 = outputs.color2; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/packed_matrix.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/packed_matrix.vert new file mode 100644 index 000000000..7f058061c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/packed_matrix.vert @@ -0,0 +1,50 @@ +#include +#include + +using namespace metal; + +typedef float3x4 packed_float4x3; + +struct _15 +{ + packed_float4x3 _m0; + packed_float4x3 _m1; +}; + +struct _42 +{ + float4x4 _m0; + float4x4 _m1; + float _m2; + char pad3[12]; 
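// Explicit padding: _m2 ends at byte 132, while the next member is expected at byte 144,
// presumably matching the std140-style offset declared in the SPIR-V (a vec3 aligns to 16 under
// those rules). packed_float3 only has 4-byte alignment in MSL, so the 12-byte gap has to be
// spelled out with a char array.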
+ packed_float3 _m3; + float _m4; + packed_float3 _m5; + float _m6; + float _m7; + float _m8; + float2 _m9; +}; + +struct main0_out +{ + float3 m_72 [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_25 [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant _42& _44 [[buffer(12)]], constant _15& _17 [[buffer(13)]]) +{ + main0_out out = {}; + float4 _70 = _44._m0 * float4(float3(_44._m3) + (in.m_25.xyz * (_44._m6 + _44._m7)), 1.0); + out.m_72 = normalize(float4(in.m_25.xyz, 0.0) * _17._m1); + float4 _94 = _70; + _94.y = -_70.y; + out.gl_Position = _94; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/pointsize.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/pointsize.vert new file mode 100644 index 000000000..8e5782bde --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/pointsize.vert @@ -0,0 +1,33 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct params +{ + float4x4 mvp; + float psize; +}; + +struct main0_out +{ + float4 color [[user(locn0)]]; + float4 gl_Position [[position]]; + float gl_PointSize [[point_size]]; +}; + +struct main0_in +{ + float4 position [[attribute(0)]]; + float4 color0 [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant params& _19 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _19.mvp * in.position; + out.gl_PointSize = _19.psize; + out.color = in.color0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..9b85a2595 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/read-from-row-major-array.vert @@ -0,0 +1,37 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct Block +{ + float2x3 var[3][4]; +}; + +struct main0_out +{ + float v_vtxResult [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 a_position [[attribute(0)]]; +}; + +// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization.
+float2x3 spvConvertFromRowMajor2x3(float2x3 m) +{ + return float2x3(float3(m[0][0], m[0][2], m[1][1]), float3(m[0][1], m[1][0], m[1][2])); +} + +vertex main0_out main0(main0_in in [[stage_in]], constant Block& _104 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = in.a_position; + out.v_vtxResult = ((float(abs(spvConvertFromRowMajor2x3(_104.var[0][0])[0].x - 2.0) < 0.0500000007450580596923828125) * float(abs(spvConvertFromRowMajor2x3(_104.var[0][0])[0].y - 6.0) < 0.0500000007450580596923828125)) * float(abs(spvConvertFromRowMajor2x3(_104.var[0][0])[0].z - (-6.0)) < 0.0500000007450580596923828125)) * ((float(abs(spvConvertFromRowMajor2x3(_104.var[0][0])[1].x) < 0.0500000007450580596923828125) * float(abs(spvConvertFromRowMajor2x3(_104.var[0][0])[1].y - 5.0) < 0.0500000007450580596923828125)) * float(abs(spvConvertFromRowMajor2x3(_104.var[0][0])[1].z - 5.0) < 0.0500000007450580596923828125)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/resource-arrays-leaf.ios.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/resource-arrays-leaf.ios.vert new file mode 100644 index 000000000..22dc480b2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/resource-arrays-leaf.ios.vert @@ -0,0 +1,42 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 3 +#endif +constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0; + +struct storage_block +{ + uint4 baz; + int2 quux; +}; + +struct constant_block +{ + float4 foo; + int bar; +}; + +vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 [[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array, 3> images [[texture(0)]]) +{ + device storage_block* storage[] = + { + storage_0, + storage_1, + }; + + constant constant_block* constants[] = + { + constants_0, + constants_1, + constants_2, + constants_3, + }; + + storage[0]->baz = uint4(constants[3]->foo); + storage[1]->quux = images[2].read(uint2(int2(constants[1]->bar))).xy; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/resource-arrays.ios.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/resource-arrays.ios.vert new file mode 100644 index 000000000..22dc480b2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/resource-arrays.ios.vert @@ -0,0 +1,42 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 3 +#endif +constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0; + +struct storage_block +{ + uint4 baz; + int2 quux; +}; + +struct constant_block +{ + float4 foo; + int bar; +}; + +vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 [[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array, 3> images [[texture(0)]]) +{ + device storage_block* storage[] = + { + storage_0, + storage_1, + }; + + constant constant_block* constants[] = + { + constants_0, + constants_1, + constants_2, + constants_3, + }; + + storage[0]->baz = uint4(constants[3]->foo); + storage[1]->quux = images[2].read(uint2(int2(constants[1]->bar))).xy; +} + diff --git
a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/return-array.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/return-array.vert new file mode 100644 index 000000000..ce13349a0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/return-array.vert @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 vInput1 [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.gl_Position = float4(10.0) + in.vInput1; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/set_builtin_in_func.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/set_builtin_in_func.vert new file mode 100644 index 000000000..51a858af1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/set_builtin_in_func.vert @@ -0,0 +1,19 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + float gl_PointSize [[point_size]]; +}; + +vertex main0_out main0() +{ + main0_out out = {}; + out.gl_PointSize = 1.0; + out.gl_Position = float4(out.gl_PointSize); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/sign-int-types.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/sign-int-types.vert new file mode 100644 index 000000000..2f518b129 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/sign-int-types.vert @@ -0,0 +1,60 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; + float4 uFloatVec4; + float3 uFloatVec3; + float2 uFloatVec2; + float uFloat; + int4 uIntVec4; + int3 uIntVec3; + int2 uIntVec2; + int uInt; +}; + +struct main0_out +{ + float4 vFloatVec4 [[user(locn0)]]; + float3 vFloatVec3 [[user(locn1)]]; + float2 vFloatVec2 [[user(locn2)]]; + float vFloat [[user(locn3)]]; + int4 vIntVec4 [[user(locn4)]]; + int3 vIntVec3 [[user(locn5)]]; + int2 vIntVec2 [[user(locn6)]]; + int vInt [[user(locn7)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; +}; + +// Implementation of the GLSL sign() function for integer types +template::value>::type> +T sign(T x) +{ + return select(select(select(x, T(0), x == T(0)), T(1), x > T(0)), T(-1), x < T(0)); +} + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _21 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _21.uMVP * in.aVertex; + out.vFloatVec4 = sign(_21.uFloatVec4); + out.vFloatVec3 = sign(_21.uFloatVec3); + out.vFloatVec2 = sign(_21.uFloatVec2); + out.vFloat = sign(_21.uFloat); + out.vIntVec4 = sign(_21.uIntVec4); + out.vIntVec3 = sign(_21.uIntVec3); + out.vIntVec2 = sign(_21.uIntVec2); + out.vInt = sign(_21.uInt); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/texture_buffer.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/texture_buffer.vert new file mode 100644 index 000000000..c45d29813 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/texture_buffer.vert @@ -0,0 +1,25 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +vertex main0_out
main0(texture2d uSamp [[texture(4)]], texture2d uSampo [[texture(5)]]) +{ + main0_out out = {}; + out.gl_Position = uSamp.read(spvTexelBufferCoord(10)) + uSampo.read(spvTexelBufferCoord(100)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/ubo.alignment.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/ubo.alignment.vert new file mode 100644 index 000000000..9a7ea56c6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/ubo.alignment.vert @@ -0,0 +1,38 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct UBO +{ + float4x4 mvp; + float2 targSize; + char pad2[8]; + packed_float3 color; + float opacity; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float3 vColor [[user(locn1)]]; + float2 vSize [[user(locn2)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _18.mvp * in.aVertex; + out.vNormal = in.aNormal; + out.vColor = float3(_18.color) * _18.opacity; + out.vSize = _18.targSize * _18.opacity; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/ubo.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/ubo.vert new file mode 100644 index 000000000..86ba1e968 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/ubo.vert @@ -0,0 +1,30 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct UBO +{ + float4x4 mvp; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.mvp * in.aVertex; + out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/viewport-index.msl2.invalid.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/viewport-index.msl2.invalid.vert new file mode 100644 index 000000000..e5316c072 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vert/viewport-index.msl2.invalid.vert @@ -0,0 +1,24 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + uint gl_ViewportIndex [[viewport_array_index]]; +}; + +struct main0_in +{ + float4 coord [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.gl_Position = in.coord; + out.gl_ViewportIndex = uint(int(in.coord.z)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/push-constant.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/push-constant.vk.frag new file mode 100644 index 000000000..7b8c502b3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/push-constant.vk.frag @@ -0,0 +1,28 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct PushConstants +{ + float4 value0; + float4 value1; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant PushConstants& push [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = (in.vColor + push.value0) + push.value1; + return out; +} + diff --git
a/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag new file mode 100644 index 000000000..6baf93f10 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag @@ -0,0 +1,26 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 1.0 +#endif +constant float a = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 2.0 +#endif +constant float b = SPIRV_CROSS_CONSTANT_ID_2; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(a + b); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/spec-constant.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/spec-constant.vk.frag new file mode 100644 index 000000000..aee290f5a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/frag/spec-constant.vk.frag @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant float a_tmp [[function_constant(1)]]; +constant float a = is_function_constant_defined(a_tmp) ? a_tmp : 1.0; +constant float b_tmp [[function_constant(2)]]; +constant float b = is_function_constant_defined(b_tmp) ? b_tmp : 2.0; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(a + b); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/vert/small-storage.vk.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/vert/small-storage.vk.vert new file mode 100644 index 000000000..c9ef91b24 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/vert/small-storage.vk.vert @@ -0,0 +1,48 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct block +{ + short2 a; + ushort2 b; + char2 c; + uchar2 d; + half2 e; +}; + +struct storage +{ + short3 f; + ushort3 g; + char3 h; + uchar3 i; + half3 j; +}; + +struct main0_out +{ + short4 p [[user(locn0)]]; + ushort4 q [[user(locn1)]]; + half4 r [[user(locn2)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + short foo [[attribute(0)]]; + ushort bar [[attribute(1)]]; + half baz [[attribute(2)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant block& _26 [[buffer(0)]], const device storage& _53 [[buffer(1)]]) +{ + main0_out out = {}; + out.p = short4((int4(int(in.foo)) + int4(int2(_26.a), int2(_26.c))) - int4(int3(_53.f) / int3(_53.h), 1)); + out.q = ushort4((uint4(uint(in.bar)) + uint4(uint2(_26.b), uint2(_26.d))) - uint4(uint3(_53.g) / uint3(_53.i), 1u)); + out.r = half4((float4(float(in.baz)) + float4(float2(_26.e), 0.0, 1.0)) - float4(float3(_53.j), 1.0)); + out.gl_Position = float4(0.0, 0.0, 0.0, 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert new file mode 100644 index 000000000..53e26e4a8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert @@ -0,0 +1,17 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(uint gl_VertexIndex [[vertex_id]], uint gl_InstanceIndex [[instance_id]]) +{ + main0_out
out = {}; + out.gl_Position = float4(1.0, 2.0, 3.0, 4.0) * float(gl_VertexIndex + gl_InstanceIndex); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag.vk new file mode 100644 index 000000000..4aaf397a0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag.vk @@ -0,0 +1,11 @@ +#version 450 +#extension GL_AMD_shader_fragment_mask : require + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS t; + +void main() +{ + vec4 test2 = fragmentFetchAMD(t, 4u); + uint testi2 = fragmentMaskFetchAMD(t); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/fs.invalid.frag b/3rdparty/spirv-cross/reference/opt/shaders/amd/fs.invalid.frag new file mode 100644 index 000000000..aecf69eba --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/fs.invalid.frag @@ -0,0 +1,15 @@ +#version 450 +#extension GL_AMD_shader_fragment_mask : require +#extension GL_AMD_shader_explicit_vertex_parameter : require + +layout(binding = 0) uniform sampler2DMS texture1; + +layout(location = 0) __explicitInterpAMD in vec4 vary; + +void main() +{ + uint testi1 = fragmentMaskFetchAMD(texture1, ivec2(0)); + vec4 test1 = fragmentFetchAMD(texture1, ivec2(1), 2u); + vec4 pos = interpolateAtVertexAMD(vary, 0u); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/gcn_shader.comp b/3rdparty/spirv-cross/reference/opt/shaders/amd/gcn_shader.comp new file mode 100644 index 000000000..e4bb67e9b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/gcn_shader.comp @@ -0,0 +1,9 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +#extension GL_AMD_gcn_shader : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_ballot.comp b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_ballot.comp new file mode 100644 index 000000000..cc54a244d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_ballot.comp @@ -0,0 +1,28 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +#extension GL_ARB_shader_ballot : require +#extension GL_AMD_shader_ballot : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer inputData +{ + float inputDataArray[]; +} _12; + +layout(binding = 1, std430) buffer outputData +{ + float outputDataArray[]; +} _74; + +void main() +{ + float _25 = _12.inputDataArray[gl_LocalInvocationID.x]; + bool _31 = _25 > 0.0; + uvec4 _37 = uvec4(unpackUint2x32(ballotARB(_31)), 0u, 0u); + uint _44 = mbcntAMD(packUint2x32(uvec2(_37.xy))); + if (_31) + { + _74.outputDataArray[_44] = _25; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp new file mode 100644 index 000000000..a14343ae1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp @@ -0,0 +1,11 @@ +#version 450 +#extension GL_AMD_shader_ballot : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + float addInvocations = addInvocationsNonUniformAMD(0.0); + int minInvocations = 
minInvocationsNonUniformAMD(1); + uint maxInvocations = uint(maxInvocationsNonUniformAMD(4)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_group_vote.comp b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_group_vote.comp new file mode 100644 index 000000000..266998177 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_group_vote.comp @@ -0,0 +1,14 @@ +#version 450 +#extension GL_ARB_shader_group_vote : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer inputData +{ + float inputDataArray[]; +} _12; + +void main() +{ + bool _31 = _12.inputDataArray[gl_LocalInvocationID.x] > 0.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_trinary_minmax.comp b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_trinary_minmax.comp new file mode 100644 index 000000000..2644551e4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/amd/shader_trinary_minmax.comp @@ -0,0 +1,8 @@ +#version 450 +#extension GL_AMD_shader_trinary_minmax : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..673ca715b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,16 @@ +#version 450 +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer u0_counters +{ + uint c; +} u0_counter; + +layout(binding = 0, r32ui) uniform writeonly uimageBuffer u0; + +void main() +{ + uint _29 = atomicAdd(u0_counter.c, uint(-1)); + imageStore(u0, floatBitsToInt(uintBitsToFloat(_29)), uvec4(uint(int(gl_GlobalInvocationID.x)))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..e45ae5926 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,16 @@ +#version 450 +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer u0_counters +{ + uint c; +} u0_counter; + +layout(binding = 0, r32ui) uniform writeonly uimageBuffer u0; + +void main() +{ + uint _29 = atomicAdd(u0_counter.c, 1u); + imageStore(u0, floatBitsToInt(uintBitsToFloat(_29)), uvec4(uint(int(gl_GlobalInvocationID.x)))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_iadd.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_iadd.asm.comp new file mode 100644 index 000000000..bed2dffcc --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_iadd.asm.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) restrict buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) restrict buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + _6._m0 = _5._m1 + uvec4(_5._m0); + _6._m0 = uvec4(_5._m0) + _5._m1; + _6._m0 = _5._m1 + _5._m1; + _6._m0 = uvec4(_5._m0 + _5._m0); + _6._m1 = ivec4(_5._m1 + _5._m1); + _6._m1 = _5._m0 + _5._m0; + _6._m1 = ivec4(_5._m1) + _5._m0; + _6._m1 = _5._m0 + ivec4(_5._m1); +} + diff --git 
a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_iequal.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_iequal.asm.comp new file mode 100644 index 000000000..bdb3eeb9a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_iequal.asm.comp @@ -0,0 +1,33 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _30 = _5._m0; + uvec4 _31 = _5._m1; + bvec4 _34 = equal(ivec4(_31), _30); + bvec4 _35 = equal(_30, ivec4(_31)); + bvec4 _36 = equal(_31, _31); + bvec4 _37 = equal(_30, _30); + _6._m0 = mix(uvec4(0u), uvec4(1u), _34); + _6._m0 = mix(uvec4(0u), uvec4(1u), _35); + _6._m0 = mix(uvec4(0u), uvec4(1u), _36); + _6._m0 = mix(uvec4(0u), uvec4(1u), _37); + _6._m1 = mix(ivec4(0), ivec4(1), _34); + _6._m1 = mix(ivec4(0), ivec4(1), _35); + _6._m1 = mix(ivec4(0), ivec4(1), _36); + _6._m1 = mix(ivec4(0), ivec4(1), _37); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_sar.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_sar.asm.comp new file mode 100644 index 000000000..283b444cc --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_sar.asm.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _22 = _5._m0; + uvec4 _23 = _5._m1; + _6._m0 = uvec4(ivec4(_23) >> _22); + _6._m0 = uvec4(_22 >> ivec4(_23)); + _6._m0 = uvec4(ivec4(_23) >> ivec4(_23)); + _6._m0 = uvec4(_22 >> _22); + _6._m1 = ivec4(_23) >> ivec4(_23); + _6._m1 = _22 >> _22; + _6._m1 = ivec4(_23) >> _22; + _6._m1 = _22 >> ivec4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_sdiv.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_sdiv.asm.comp new file mode 100644 index 000000000..e28c481d2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_sdiv.asm.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _22 = _5._m0; + uvec4 _23 = _5._m1; + _6._m0 = uvec4(ivec4(_23) / _22); + _6._m0 = uvec4(_22 / ivec4(_23)); + _6._m0 = uvec4(ivec4(_23) / ivec4(_23)); + _6._m0 = uvec4(_22 / _22); + _6._m1 = ivec4(_23) / ivec4(_23); + _6._m1 = _22 / _22; + _6._m1 = ivec4(_23) / _22; + _6._m1 = _22 / ivec4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_slr.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_slr.asm.comp new file mode 100644 index 000000000..78efaf385 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/bitcast_slr.asm.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _22 = _5._m0; + uvec4 _23 = _5._m1; + _6._m0 = _23 >> uvec4(_22); + _6._m0 = 
uvec4(_22) >> _23; + _6._m0 = _23 >> _23; + _6._m0 = uvec4(_22) >> uvec4(_22); + _6._m1 = ivec4(_23 >> _23); + _6._m1 = ivec4(uvec4(_22) >> uvec4(_22)); + _6._m1 = ivec4(_23 >> uvec4(_22)); + _6._m1 = ivec4(uvec4(_22) >> _23); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..08fccbcde --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,43 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct A +{ + int a; + int b; +}; + +struct A_1 +{ + int a; + int b; +}; + +layout(binding = 1, std430) buffer C1 +{ + A Data[]; +} C1_1; + +layout(binding = 2, std140) uniform C2 +{ + A_1 Data[1024]; +} C2_1; + +layout(binding = 0, std430) buffer B +{ + A Data[]; +} C3; + +layout(binding = 3, std140) uniform B +{ + A_1 Data[1024]; +} C4; + +void main() +{ + C1_1.Data[gl_GlobalInvocationID.x].a = C2_1.Data[gl_GlobalInvocationID.x].a; + C1_1.Data[gl_GlobalInvocationID.x].b = C2_1.Data[gl_GlobalInvocationID.x].b; + C3.Data[gl_GlobalInvocationID.x].a = C4.Data[gl_GlobalInvocationID.x].a; + C3.Data[gl_GlobalInvocationID.x].b = C4.Data[gl_GlobalInvocationID.x].b; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/builtin-compute-bitcast.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/builtin-compute-bitcast.asm.comp new file mode 100644 index 000000000..abb8a7976 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/builtin-compute-bitcast.asm.comp @@ -0,0 +1,13 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer BUF +{ + int values[]; +} _6; + +void main() +{ + _6.values[int(gl_WorkGroupID.y)] = int(gl_GlobalInvocationID.z); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/decoration-group.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/decoration-group.asm.comp new file mode 100644 index 000000000..28ad4d41f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/decoration-group.asm.comp @@ -0,0 +1,38 @@ +#version 430 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 5, std430) buffer _6_15 +{ + float _m0[]; +} _15; + +layout(binding = 0, std430) buffer _7_16 +{ + float _m0[]; +} _16; + +layout(binding = 1, std430) buffer _8_17 +{ + float _m0[]; +} _17; + +layout(binding = 2, std430) restrict readonly buffer _9_18 +{ + float _m0[]; +} _18; + +layout(binding = 3, std430) restrict readonly buffer _10_19 +{ + float _m0[]; +} _19; + +layout(binding = 4, std430) restrict readonly buffer _11_20 +{ + float _m0[]; +} _20; + +void main() +{ + _15._m0[gl_GlobalInvocationID.x] = (((_16._m0[gl_GlobalInvocationID.x] + _17._m0[gl_GlobalInvocationID.x]) + _18._m0[gl_GlobalInvocationID.x]) + _19._m0[gl_GlobalInvocationID.x]) + _20._m0[gl_GlobalInvocationID.x]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..37b286355 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,7 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ +} + diff --git 
a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/hlsl-functionality.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/hlsl-functionality.asm.comp new file mode 100644 index 000000000..29bc02c69 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/hlsl-functionality.asm.comp @@ -0,0 +1,19 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer Buf +{ + vec4 _data[]; +} Buf_1; + +layout(std430) buffer Buf_count +{ + int _count; +} Buf_count_1; + +void main() +{ + int _32 = atomicAdd(Buf_count_1._count, 1); + Buf_1._data[_32] = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/logical.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/logical.asm.comp new file mode 100644 index 000000000..124652b32 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/logical.asm.comp @@ -0,0 +1,7 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/multiple-entry.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/multiple-entry.asm.comp new file mode 100644 index 000000000..6418464f1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/multiple-entry.asm.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) restrict buffer _6_8 +{ + ivec4 _m0; + uvec4 _m1; +} _8; + +layout(binding = 1, std430) restrict buffer _7_9 +{ + uvec4 _m0; + ivec4 _m1; +} _9; + +void main() +{ + _9._m0 = _8._m1 + uvec4(_8._m0); + _9._m0 = uvec4(_8._m0) + _8._m1; + _9._m0 = _8._m1 + _8._m1; + _9._m0 = uvec4(_8._m0 + _8._m0); + _9._m1 = ivec4(_8._m1 + _8._m1); + _9._m1 = _8._m0 + _8._m0; + _9._m1 = ivec4(_8._m1) + _8._m0; + _9._m1 = _8._m0 + ivec4(_8._m1); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/op-phi-swap.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/op-phi-swap.asm.comp new file mode 100644 index 000000000..a1a57e16b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/op-phi-swap.asm.comp @@ -0,0 +1,39 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_4 +{ + float _m0[]; +} _4; + +layout(binding = 1, std430) buffer _3_5 +{ + float _m0[]; +} _5; + +void main() +{ + bool _34; + float _35; + float _35_copy; + float _36; + _34 = true; + _35 = _4._m0[gl_GlobalInvocationID.x]; + _36 = 8.5; + for (;;) + { + if (_34) + { + _34 = false; + _35_copy = _35; + _35 = _36; + _36 = _35_copy; + } + else + { + break; + } + } + _5._m0[gl_GlobalInvocationID.x] = _35 - _36; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/quantize.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/quantize.asm.comp new file mode 100644 index 000000000..c08921380 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/quantize.asm.comp @@ -0,0 +1,19 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + float scalar; + vec2 vec2_val; + vec3 vec3_val; + vec4 vec4_val; +} _4; + +void main() +{ + _4.scalar = unpackHalf2x16(packHalf2x16(vec2(_4.scalar))).x; + _4.vec2_val = unpackHalf2x16(packHalf2x16(_4.vec2_val)); + _4.vec3_val = vec3(unpackHalf2x16(packHalf2x16(_4.vec3_val.xy)), 
unpackHalf2x16(packHalf2x16(_4.vec3_val.zz)).x); + _4.vec4_val = vec4(unpackHalf2x16(packHalf2x16(_4.vec4_val.xy)), unpackHalf2x16(packHalf2x16(_4.vec4_val.zw))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/specialization-constant-workgroup.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/specialization-constant-workgroup.asm.comp new file mode 100644 index 000000000..8016ebaf9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/specialization-constant-workgroup.asm.comp @@ -0,0 +1,21 @@ +#version 310 es + +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 9u +#endif +#ifndef SPIRV_CROSS_CONSTANT_ID_12 +#define SPIRV_CROSS_CONSTANT_ID_12 4u +#endif + +layout(local_size_x = SPIRV_CROSS_CONSTANT_ID_10, local_size_y = 20, local_size_z = SPIRV_CROSS_CONSTANT_ID_12) in; + +layout(binding = 0, std430) buffer SSBO +{ + float a; +} _4; + +void main() +{ + _4.a += 1.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp new file mode 100644 index 000000000..482cfd8a0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp @@ -0,0 +1,28 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1u +#endif +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 3u +#endif + +layout(local_size_x = SPIRV_CROSS_CONSTANT_ID_0, local_size_y = 2, local_size_z = SPIRV_CROSS_CONSTANT_ID_2) in; + +layout(binding = 0, std430) buffer _6_8 +{ + float _m0[]; +} _8; + +layout(binding = 1, std430) buffer _6_9 +{ + float _m0[]; +} _9; + +uvec3 _22 = gl_WorkGroupSize; + +void main() +{ + _8._m0[gl_WorkGroupID.x] = _9._m0[gl_WorkGroupID.x] + _8._m0[gl_WorkGroupID.x]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/switch-break-ladder.asm.comp b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/switch-break-ladder.asm.comp new file mode 100644 index 000000000..4cf3f126d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/comp/switch-break-ladder.asm.comp @@ -0,0 +1,69 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer BUF +{ + int a; + int b; + int d; +} o; + +void main() +{ + int _44; + _44 = o.a; + int _48; + for (;;) + { + bool _22_ladder_break = false; + switch (_44) + { + case 5: + { + for (;;) + { + bool _30_ladder_break = false; + switch (o.d) + { + case 10: + case 20: + { + _30_ladder_break = true; + break; + } + default: + { + continue; + } + } + if (_30_ladder_break) + { + break; + } + } + _48 = _44 + _44; + break; + } + case 1: + case 2: + case 3: + { + _22_ladder_break = true; + break; + } + default: + { + _48 = _44; + break; + } + } + if (_22_ladder_break) + { + break; + } + _44 = _48 + 1; + continue; + } + o.b = _44; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag new file mode 100644 index 000000000..b5e59f88b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag @@ -0,0 +1,13 @@ +#version 450 + +uniform sampler2D SPIRV_Cross_CombineduTexuSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vUV; + +void main() +{ + FragColor = 
texture(SPIRV_Cross_CombineduTexuSampler, vUV); + FragColor += textureOffset(SPIRV_Cross_CombineduTexuSampler, vUV, ivec2(1)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag.vk new file mode 100644 index 000000000..bce980895 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag.vk @@ -0,0 +1,14 @@ +#version 450 + +layout(set = 0, binding = 1) uniform texture2D uTex; +layout(set = 0, binding = 0) uniform sampler uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vUV; + +void main() +{ + FragColor = texture(sampler2D(uTex, uSampler), vUV); + FragColor += textureOffset(sampler2D(uTex, uSampler), vUV, ivec2(1)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/complex-name-workarounds.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/complex-name-workarounds.asm.frag new file mode 100644 index 000000000..c07f1657f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/complex-name-workarounds.asm.frag @@ -0,0 +1,16 @@ +#version 450 + +layout(location = 0) in vec4 _; +layout(location = 1) in vec4 a; +layout(location = 0) out vec4 b; + +void main() +{ + vec4 _28 = (_ + a) + _; + vec4 _34 = (_ - a) + a; + b = _28; + b = _34; + b = _28; + b = _34; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag new file mode 100644 index 000000000..45e83dbc1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag @@ -0,0 +1,12 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) in vec2 foo; +layout(location = 0) out float FooOut; + +void main() +{ + FooOut = foo.x + foo.y; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/default-member-names.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/default-member-names.asm.frag new file mode 100644 index 000000000..13f81b11a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/default-member-names.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) out vec4 _3; + +float _49; + +void main() +{ + _3 = vec4(_49); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..05ce10adf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/empty-struct.asm.frag @@ -0,0 +1,6 @@ +#version 450 + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/for-loop-phi-only-continue.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/for-loop-phi-only-continue.asm.frag new file mode 100644 index 000000000..7a78d00b1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/for-loop-phi-only-continue.asm.frag @@ -0,0 +1,17 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + float _19; + _19 = 0.0; + for (int _22 = 0; _22 < 16; ) + { + _19 += 1.0; + _22++; + continue; + } + FragColor = vec4(_19); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/frem.asm.frag 
b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/frem.asm.frag new file mode 100644 index 000000000..1095ab04f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/frem.asm.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + FragColor = vA - vB * trunc(vA / vB); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..16b499414 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(10.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag new file mode 100644 index 000000000..924b6f656 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +uniform samplerCubeShadow SPIRV_Cross_CombinedpointLightShadowMapshadowSamplerPCF; + +layout(location = 0) out float _entryPointOutput; + +void main() +{ + _entryPointOutput = textureGrad(SPIRV_Cross_CombinedpointLightShadowMapshadowSamplerPCF, vec4(vec4(0.100000001490116119384765625, 0.100000001490116119384765625, 0.100000001490116119384765625, 0.5).xyz, 0.5), vec3(0.0), vec3(0.0)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag new file mode 100644 index 000000000..c4e3704e5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag @@ -0,0 +1,14 @@ +#version 450 + +uniform sampler2DArrayShadow SPIRV_Cross_CombinedShadowMapShadowSamplerPCF; + +layout(location = 0) in vec2 texCoords; +layout(location = 1) in float cascadeIndex; +layout(location = 2) in float fragDepth; +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + _entryPointOutput = vec4(textureGrad(SPIRV_Cross_CombinedShadowMapShadowSamplerPCF, vec4(vec4(texCoords, cascadeIndex, fragDepth).xyz, fragDepth), vec2(0.0), vec2(0.0))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..ab2749b4d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uTexture; + +layout(location = 0) out ivec2 Size; + +void main() +{ + Size = textureSize(uTexture, 0) + textureSize(uTexture, 1); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag new file mode 100644 index 000000000..452fd6fb9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag @@ -0,0 +1,13 @@ +#version 450 + +uniform sampler2D 
SPIRV_Cross_CombinedSampledImageSPIRV_Cross_DummySampler; +uniform sampler2D SPIRV_Cross_CombinedSampledImageSampler; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + ivec2 _152 = ivec3(int(gl_FragCoord.x * 1280.0), int(gl_FragCoord.y * 720.0), 0).xy; + _entryPointOutput = ((texelFetch(SPIRV_Cross_CombinedSampledImageSPIRV_Cross_DummySampler, _152, 0) + texelFetch(SPIRV_Cross_CombinedSampledImageSPIRV_Cross_DummySampler, _152, 0)) + texture(SPIRV_Cross_CombinedSampledImageSampler, gl_FragCoord.xy)) + texture(SPIRV_Cross_CombinedSampledImageSampler, gl_FragCoord.xy); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag.vk new file mode 100644 index 000000000..55e2c2da6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag.vk @@ -0,0 +1,14 @@ +#version 450 +#extension GL_EXT_samplerless_texture_functions : require + +layout(set = 0, binding = 0) uniform sampler Sampler; +layout(set = 0, binding = 0) uniform texture2D SampledImage; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + ivec2 _152 = ivec3(int(gl_FragCoord.x * 1280.0), int(gl_FragCoord.y * 720.0), 0).xy; + _entryPointOutput = ((texelFetch(SampledImage, _152, 0) + texelFetch(SampledImage, _152, 0)) + texture(sampler2D(SampledImage, Sampler), gl_FragCoord.xy)) + texture(sampler2D(SampledImage, Sampler), gl_FragCoord.xy); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-query-no-sampler.vk.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-query-no-sampler.vk.asm.frag new file mode 100644 index 000000000..05ce10adf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-query-no-sampler.vk.asm.frag @@ -0,0 +1,6 @@ +#version 450 + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-query-no-sampler.vk.asm.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-query-no-sampler.vk.asm.frag.vk new file mode 100644 index 000000000..05ce10adf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/image-query-no-sampler.vk.asm.frag.vk @@ -0,0 +1,6 @@ +#version 450 + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..8a7f64d7f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,40 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uImage; + +layout(location = 0) in vec4 v0; +layout(location = 0) out vec4 FragColor; + +void main() +{ + float phi; + vec4 _36; + int _51; + _51 = 0; + phi = 1.0; + _36 = vec4(1.0, 2.0, 1.0, 2.0); + for (;;) + { + FragColor = _36; + if (_51 < 4) + { + if (v0[_51] > 0.0) + { + vec2 _48 = vec2(phi); + _51++; + phi += 2.0; + _36 = textureLod(uImage, _48, 0.0); + continue; + } + else + { + break; + } + } + else + { + break; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/inf-nan-constant-double.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/inf-nan-constant-double.asm.frag new file mode 100644 index 000000000..d8e29aa40 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/inf-nan-constant-double.asm.frag @@ -0,0 +1,11 @@ +#version 
450 +#extension GL_ARB_gpu_shader_int64 : require + +layout(location = 0) out vec3 FragColor; +layout(location = 0) flat in double vTmp; + +void main() +{ + FragColor = vec3(dvec3(uint64BitsToDouble(0x7ff0000000000000ul), uint64BitsToDouble(0xfff0000000000000ul), uint64BitsToDouble(0x7ff8000000000000ul)) + dvec3(vTmp)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..dd4284c9b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out highp vec3 FragColor; + +void main() +{ + FragColor = vec3(uintBitsToFloat(0x7f800000u), uintBitsToFloat(0xff800000u), uintBitsToFloat(0x7fc00000u)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/invalidation.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/invalidation.asm.frag new file mode 100644 index 000000000..c0dc7b682 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/invalidation.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) in float v0; +layout(location = 1) in float v1; +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = (v0 + v1) * v1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/locations-components.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/locations-components.asm.frag new file mode 100644 index 000000000..b1e378430 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/locations-components.asm.frag @@ -0,0 +1,23 @@ +#version 450 + +layout(location = 1) in vec2 _2; +layout(location = 1, component = 2) in float _3; +layout(location = 2) flat in float _4; +layout(location = 2, component = 1) flat in uint _5; +layout(location = 2, component = 2) flat in uint _6; +layout(location = 0) out vec4 o0; +vec4 v1; +vec4 v2; + +void main() +{ + v1 = vec4(_2.x, _2.y, v1.z, v1.w); + v1.z = _3; + v2.x = _4; + v2.y = uintBitsToFloat(_5); + v2.z = uintBitsToFloat(_6); + o0.y = float(floatBitsToUint(intBitsToFloat(floatBitsToInt(v2.y) + floatBitsToInt(v2.z)))); + o0.x = v1.y + v2.x; + o0 = vec4(o0.x, o0.y, v1.z, v1.x); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag new file mode 100644 index 000000000..af44e6547 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag @@ -0,0 +1,72 @@ +#version 450 + +layout(binding = 0, std140) uniform Foo +{ + layout(row_major) mat4 lightVP[64]; + uint shadowCascadesNum; + int test; +} _11; + +layout(location = 0) in vec3 fragWorld; +layout(location = 0) out int _entryPointOutput; + +int _240; + +void main() +{ + bool _246; + uint _227; + int _237; + for (;;) + { + _227 = 0u; + bool _164; + for (;;) + { + _164 = _227 < _11.shadowCascadesNum; + if (_164) + { + mat4 _228; + for (;;) + { + if (_11.test == 0) + { + _228 = mat4(vec4(0.5, 0.0, 0.0, 0.0), vec4(0.0, 0.5, 0.0, 0.0), vec4(0.0, 0.0, 0.5, 0.0), vec4(0.0, 0.0, 0.0, 1.0)); + break; + } + _228 = mat4(vec4(1.0, 0.0, 0.0, 0.0), vec4(0.0, 1.0, 0.0, 0.0), vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0)); + break; + } + vec4 _177 = (_228 * _11.lightVP[_227]) * vec4(fragWorld, 1.0); + 
float _179 = _177.z; + float _186 = _177.x; + float _188 = _177.y; + if ((((_179 >= 0.0) && (_179 <= 1.0)) && (max(_186, _188) <= 1.0)) && (min(_186, _188) >= 0.0)) + { + _237 = int(_227); + break; + } + else + { + _227++; + continue; + } + _227++; + continue; + } + else + { + _237 = _240; + break; + } + } + _246 = _164 ? true : false; + if (_246) + { + break; + } + break; + } + _entryPointOutput = _246 ? _237 : (-1); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-header-to-continue.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-header-to-continue.asm.frag new file mode 100644 index 000000000..c2dba928d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-header-to-continue.asm.frag @@ -0,0 +1,43 @@ +#version 450 + +struct Params +{ + vec4 TextureSize; + vec4 Params1; + vec4 Params2; + vec4 Params3; + vec4 Params4; + vec4 Bloom; +}; + +layout(binding = 1, std140) uniform CB1 +{ + Params CB1; +} _8; + +uniform sampler2D SPIRV_Cross_CombinedmapTexturemapSampler; + +layout(location = 0) in vec2 IN_uv; +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + vec4 _49 = texture(SPIRV_Cross_CombinedmapTexturemapSampler, IN_uv); + float _50 = _49.y; + float _55; + float _58; + _55 = 0.0; + _58 = 0.0; + for (int _60 = -3; _60 <= 3; ) + { + float _64 = float(_60); + vec4 _72 = texture(SPIRV_Cross_CombinedmapTexturemapSampler, IN_uv + (vec2(0.0, _8.CB1.TextureSize.w) * _64)); + float _78 = exp(((-_64) * _64) * 0.2222220003604888916015625) * float(abs(_72.y - _50) < clamp(_50 * 0.06399999558925628662109375, 7.999999797903001308441162109375e-05, 0.008000000379979610443115234375)); + _55 += (_72.x * _78); + _58 += _78; + _60++; + continue; + } + _entryPointOutput = vec4(_55 / _58, _50, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-merge-to-continue.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-merge-to-continue.asm.frag new file mode 100644 index 000000000..faf32edcf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/loop-merge-to-continue.asm.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + FragColor = vec4(1.0); + int _50; + _50 = 0; + for (; _50 < 4; _50++) + { + for (int _51 = 0; _51 < 4; ) + { + FragColor += vec4(v0[(_50 + _51) & 3]); + _51++; + continue; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..d88c0e36d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,42 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _46[16] = float[](1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0); +const vec4 _76[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + +layout(location = 0) out float FragColor; +layout(location = 0) flat in mediump int index; + +void main() +{ + vec4 foobar[4] = _76; + vec4 baz[4] = _76; + FragColor = _46[index]; + if (index < 10) + { + FragColor += _46[index ^ 1]; + } + else + { + FragColor += _46[index & 1]; + } + bool _99 = index > 30; + if (_99) + { + FragColor += _76[index & 3].y; + } + else + { + FragColor += _76[index & 1].x; + } + if (_99) + { + foobar[1].z = 20.0; + } + mediump int _37 = index & 3; + 
FragColor += foobar[_37].z; + baz = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + FragColor += baz[_37].z; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/multi-for-loop-init.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/multi-for-loop-init.asm.frag new file mode 100644 index 000000000..679d9d557 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/multi-for-loop-init.asm.frag @@ -0,0 +1,22 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int counter; + +void main() +{ + FragColor = vec4(0.0); + mediump int _53 = 0; + mediump uint _54 = 1u; + for (; (_53 < 10) && (int(_54) < int(20u)); ) + { + FragColor += vec4(float(_53)); + FragColor += vec4(float(_54)); + _54 += uint(counter); + _53 += counter; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/op-constant-null.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/op-constant-null.asm.frag new file mode 100644 index 000000000..873a64cb4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/op-constant-null.asm.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = 0.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/op-phi-swap-continue-block.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/op-phi-swap-continue-block.asm.frag new file mode 100644 index 000000000..ee45619a8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/op-phi-swap-continue-block.asm.frag @@ -0,0 +1,29 @@ +#version 450 + +layout(binding = 0, std140) uniform UBO +{ + int uCount; + int uJ; + int uK; +} _5; + +layout(location = 0) out float FragColor; + +void main() +{ + int _23; + int _23_copy; + int _24; + _23 = _5.uK; + _24 = _5.uJ; + for (int _26 = 0; _26 < _5.uCount; ) + { + _23_copy = _23; + _23 = _24; + _24 = _23_copy; + _26++; + continue; + } + FragColor = float(_24 - _23) * float(_5.uJ * _5.uK); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..fbbfe18bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,16 @@ +#version 450 + +struct Registers +{ + float foo; +}; + +uniform Registers registers; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = 10.0 + registers.foo; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/phi-loop-variable.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/phi-loop-variable.asm.frag new file mode 100644 index 000000000..05ce10adf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/phi-loop-variable.asm.frag @@ -0,0 +1,6 @@ +#version 450 + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag new file mode 100644 index 000000000..bdda0d629 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag @@ -0,0 +1,18 @@ +#version 450 + +struct Registers +{ + int index; +}; + +uniform Registers registers; + +uniform sampler2D 
SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[4]; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = (texelFetch(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[registers.index], ivec2(10), 0) + texelFetch(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[registers.index], ivec2(4), 0)) + texelFetch(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[registers.index], ivec2(4), 0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag new file mode 100644 index 000000000..89058f143 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag @@ -0,0 +1,13 @@ +#version 450 + +layout(binding = 0, rgba32f) uniform writeonly imageBuffer RWTex; +layout(binding = 1) uniform samplerBuffer Tex; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + imageStore(RWTex, 20, vec4(1.0, 2.0, 3.0, 4.0)); + _entryPointOutput = texelFetch(Tex, 10); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/selection-merge-to-continue.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/selection-merge-to-continue.asm.frag new file mode 100644 index 000000000..05c17c7a6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/selection-merge-to-continue.asm.frag @@ -0,0 +1,24 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + FragColor = vec4(1.0); + for (int _54 = 0; _54 < 4; _54++) + { + if (v0.x == 20.0) + { + FragColor += vec4(v0[_54 & 3]); + continue; + } + else + { + FragColor += vec4(v0[_54 & 1]); + continue; + } + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/srem.asm.frag new file mode 100644 index 000000000..05a3d7554 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/srem.asm.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in ivec4 vA; +layout(location = 1) flat in ivec4 vB; + +void main() +{ + FragColor = vec4(vA - vB * (vA / vB)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag new file mode 100644 index 000000000..b2473f4d0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct Foo +{ + float var1; + float var2; +}; + +layout(binding = 0) uniform mediump sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; + +Foo _22; + +void main() +{ + FragColor = texture(uSampler, vec2(_22.var1, _22.var2)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/switch-label-shared-block.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/switch-label-shared-block.asm.frag new file mode 100644 index 000000000..ade9044e3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/switch-label-shared-block.asm.frag @@ -0,0 +1,33 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) flat in mediump int vIndex; +layout(location = 0) out 
float FragColor; + +void main() +{ + highp float _19; + switch (vIndex) + { + case 0: + case 2: + { + _19 = 1.0; + break; + } + case 1: + default: + { + _19 = 3.0; + break; + } + case 8: + { + _19 = 8.0; + break; + } + } + FragColor = _19; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/switch-merge-to-continue.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/switch-merge-to-continue.asm.frag new file mode 100644 index 000000000..ea4a25995 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/switch-merge-to-continue.asm.frag @@ -0,0 +1,31 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(1.0); + for (int _52 = 0; _52 < 4; _52++) + { + switch (_52) + { + case 0: + { + FragColor.x += 1.0; + break; + } + case 1: + { + FragColor.y += 3.0; + break; + } + default: + { + FragColor.z += 3.0; + break; + } + } + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/temporary-name-alias.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/temporary-name-alias.asm.frag new file mode 100644 index 000000000..05ce10adf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/temporary-name-alias.asm.frag @@ -0,0 +1,6 @@ +#version 450 + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/temporary-phi-hoisting.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/temporary-phi-hoisting.asm.frag new file mode 100644 index 000000000..1ecd61d74 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/temporary-phi-hoisting.asm.frag @@ -0,0 +1,27 @@ +#version 450 + +struct MyStruct +{ + vec4 color; +}; + +layout(std140) uniform MyStruct_CB +{ + MyStruct g_MyStruct[4]; +} _6; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + vec3 _28; + _28 = vec3(0.0); + for (int _31 = 0; _31 < 4; ) + { + _28 += _6.g_MyStruct[_31].color.xyz; + _31++; + continue; + } + _entryPointOutput = vec4(_28, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..6193de0da --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uTexture; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texelFetch(uTexture, ivec2(gl_FragCoord.xy), 0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/undef-variable-store.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/undef-variable-store.asm.frag new file mode 100644 index 000000000..84eb23a24 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/undef-variable-store.asm.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + _entryPointOutput = vec4(1.0, 1.0, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unknown-depth-state.asm.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unknown-depth-state.asm.vk.frag new file mode 100644 index 000000000..6953ec61d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unknown-depth-state.asm.vk.frag @@ -0,0 +1,13 @@ +#version 450 + +layout(binding = 0) uniform sampler2DShadow uShadow; +uniform sampler2DShadow SPIRV_Cross_CombineduTextureuSampler; + 
+layout(location = 0) in vec3 vUV; +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = texture(uShadow, vec3(vUV.xy, vUV.z)) + texture(SPIRV_Cross_CombineduTextureuSampler, vec3(vUV.xy, vUV.z)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unknown-depth-state.asm.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unknown-depth-state.asm.vk.frag.vk new file mode 100644 index 000000000..2f997036f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unknown-depth-state.asm.vk.frag.vk @@ -0,0 +1,14 @@ +#version 450 + +layout(set = 0, binding = 0) uniform sampler2DShadow uShadow; +layout(set = 0, binding = 1) uniform texture2D uTexture; +layout(set = 0, binding = 2) uniform samplerShadow uSampler; + +layout(location = 0) in vec3 vUV; +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = texture(uShadow, vec3(vUV.xy, vUV.z)) + texture(sampler2DShadow(uTexture, uSampler), vec3(vUV.xy, vUV.z)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..eb7e8a912 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/unreachable.asm.frag @@ -0,0 +1,23 @@ +#version 450 + +layout(location = 0) flat in int counter; +layout(location = 0) out vec4 FragColor; + +void main() +{ + bool _29; + for (;;) + { + _29 = counter == 10; + if (_29) + { + break; + } + else + { + break; + } + } + FragColor = mix(vec4(30.0), vec4(10.0), bvec4(_29)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/vector-shuffle-oom.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/vector-shuffle-oom.asm.frag new file mode 100644 index 000000000..270c779aa --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/frag/vector-shuffle-oom.asm.frag @@ -0,0 +1,289 @@ +#version 450 + +struct _28 +{ + vec4 _m0; +}; + +layout(binding = 0, std140) uniform _6_7 +{ + vec4 _m0; + float _m1; + vec4 _m2; +} _7; + +layout(binding = 2, std140) uniform _10_11 +{ + vec3 _m0; + vec3 _m1; + float _m2; + vec3 _m3; + float _m4; + vec3 _m5; + float _m6; + vec3 _m7; + float _m8; + vec3 _m9; + float _m10; + vec3 _m11; + float _m12; + vec2 _m13; + vec2 _m14; + vec3 _m15; + float _m16; + float _m17; + float _m18; + float _m19; + float _m20; + vec4 _m21; + vec4 _m22; + layout(row_major) mat4 _m23; + vec4 _m24; +} _11; + +layout(binding = 1, std140) uniform _18_19 +{ + layout(row_major) mat4 _m0; + layout(row_major) mat4 _m1; + layout(row_major) mat4 _m2; + layout(row_major) mat4 _m3; + vec4 _m4; + vec4 _m5; + float _m6; + float _m7; + float _m8; + float _m9; + vec3 _m10; + float _m11; + vec3 _m12; + float _m13; + vec3 _m14; + float _m15; + vec3 _m16; + float _m17; + float _m18; + float _m19; + vec2 _m20; + vec2 _m21; + vec2 _m22; + vec4 _m23; + vec2 _m24; + vec2 _m25; + vec2 _m26; + vec3 _m27; + float _m28; + float _m29; + float _m30; + float _m31; + float _m32; + vec2 _m33; + float _m34; + float _m35; + vec3 _m36; + layout(row_major) mat4 _m37[2]; + vec4 _m38[2]; +} _19; + +uniform sampler2D SPIRV_Cross_Combined; +uniform sampler2D SPIRV_Cross_Combined_1; +uniform sampler2D SPIRV_Cross_Combined_2; + +layout(location = 0) out vec4 _5; + +_28 _74; + +void main() +{ + vec2 _82 = gl_FragCoord.xy * _19._m23.xy; + vec4 _88 = _7._m2 * _7._m0.xyxy; + vec2 _95 = _88.xy; + vec2 _96 = _88.zw; + vec2 _97 = clamp(_82 + (vec2(0.0, -2.0) * _7._m0.xy), _95, _96); + vec3 _109 
= _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _97, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _113 = textureLod(SPIRV_Cross_Combined_1, _97, 0.0); + float _114 = _113.y; + vec3 _129; + if (_114 > 0.0) + { + _129 = _109 + (textureLod(SPIRV_Cross_Combined_2, _97, 0.0).xyz * clamp(_114 * _113.z, 0.0, 1.0)); + } + else + { + _129 = _109; + } + vec3 _130 = _129 * 0.5; + vec2 _144 = clamp(_82 + (vec2(-1.0) * _7._m0.xy), _95, _96); + vec3 _156 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _144, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _160 = textureLod(SPIRV_Cross_Combined_1, _144, 0.0); + float _161 = _160.y; + vec3 _176; + if (_161 > 0.0) + { + _176 = _156 + (textureLod(SPIRV_Cross_Combined_2, _144, 0.0).xyz * clamp(_161 * _160.z, 0.0, 1.0)); + } + else + { + _176 = _156; + } + vec3 _177 = _176 * 0.5; + vec2 _191 = clamp(_82 + (vec2(0.0, -1.0) * _7._m0.xy), _95, _96); + vec3 _203 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _191, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _207 = textureLod(SPIRV_Cross_Combined_1, _191, 0.0); + float _208 = _207.y; + vec3 _223; + if (_208 > 0.0) + { + _223 = _203 + (textureLod(SPIRV_Cross_Combined_2, _191, 0.0).xyz * clamp(_208 * _207.z, 0.0, 1.0)); + } + else + { + _223 = _203; + } + vec3 _224 = _223 * 0.75; + vec2 _238 = clamp(_82 + (vec2(1.0, -1.0) * _7._m0.xy), _95, _96); + vec3 _250 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _238, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _254 = textureLod(SPIRV_Cross_Combined_1, _238, 0.0); + float _255 = _254.y; + vec3 _270; + if (_255 > 0.0) + { + _270 = _250 + (textureLod(SPIRV_Cross_Combined_2, _238, 0.0).xyz * clamp(_255 * _254.z, 0.0, 1.0)); + } + else + { + _270 = _250; + } + vec3 _271 = _270 * 0.5; + vec2 _285 = clamp(_82 + (vec2(-2.0, 0.0) * _7._m0.xy), _95, _96); + vec3 _297 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _285, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _301 = textureLod(SPIRV_Cross_Combined_1, _285, 0.0); + float _302 = _301.y; + vec3 _317; + if (_302 > 0.0) + { + _317 = _297 + (textureLod(SPIRV_Cross_Combined_2, _285, 0.0).xyz * clamp(_302 * _301.z, 0.0, 1.0)); + } + else + { + _317 = _297; + } + vec3 _318 = _317 * 0.5; + vec2 _332 = clamp(_82 + (vec2(-1.0, 0.0) * _7._m0.xy), _95, _96); + vec3 _344 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _332, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _348 = textureLod(SPIRV_Cross_Combined_1, _332, 0.0); + float _349 = _348.y; + vec3 _364; + if (_349 > 0.0) + { + _364 = _344 + (textureLod(SPIRV_Cross_Combined_2, _332, 0.0).xyz * clamp(_349 * _348.z, 0.0, 1.0)); + } + else + { + _364 = _344; + } + vec3 _365 = _364 * 0.75; + vec2 _379 = clamp(_82, _95, _96); + vec3 _391 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _379, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _395 = textureLod(SPIRV_Cross_Combined_1, _379, 0.0); + float _396 = _395.y; + vec3 _411; + if (_396 > 0.0) + { + _411 = _391 + (textureLod(SPIRV_Cross_Combined_2, _379, 0.0).xyz * clamp(_396 * _395.z, 0.0, 1.0)); + } + else + { + _411 = _391; + } + vec3 _412 = _411 * 1.0; + vec2 _426 = clamp(_82 + (vec2(1.0, 0.0) * _7._m0.xy), _95, _96); + vec3 _438 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _426, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _442 = textureLod(SPIRV_Cross_Combined_1, _426, 0.0); + float _443 = _442.y; + vec3 _458; + if (_443 > 0.0) + { + _458 = _438 + (textureLod(SPIRV_Cross_Combined_2, _426, 0.0).xyz * clamp(_443 * _442.z, 0.0, 1.0)); + } + else + { + _458 = _438; + } + vec3 _459 = _458 * 0.75; + vec2 _473 = clamp(_82 + (vec2(2.0, 0.0) * _7._m0.xy), _95, _96); + vec3 _485 = _11._m5 * 
clamp(textureLod(SPIRV_Cross_Combined, _473, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _489 = textureLod(SPIRV_Cross_Combined_1, _473, 0.0); + float _490 = _489.y; + vec3 _505; + if (_490 > 0.0) + { + _505 = _485 + (textureLod(SPIRV_Cross_Combined_2, _473, 0.0).xyz * clamp(_490 * _489.z, 0.0, 1.0)); + } + else + { + _505 = _485; + } + vec3 _506 = _505 * 0.5; + vec2 _520 = clamp(_82 + (vec2(-1.0, 1.0) * _7._m0.xy), _95, _96); + vec3 _532 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _520, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _536 = textureLod(SPIRV_Cross_Combined_1, _520, 0.0); + float _537 = _536.y; + vec3 _552; + if (_537 > 0.0) + { + _552 = _532 + (textureLod(SPIRV_Cross_Combined_2, _520, 0.0).xyz * clamp(_537 * _536.z, 0.0, 1.0)); + } + else + { + _552 = _532; + } + vec3 _553 = _552 * 0.5; + vec2 _567 = clamp(_82 + (vec2(0.0, 1.0) * _7._m0.xy), _95, _96); + vec3 _579 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _567, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _583 = textureLod(SPIRV_Cross_Combined_1, _567, 0.0); + float _584 = _583.y; + vec3 _599; + if (_584 > 0.0) + { + _599 = _579 + (textureLod(SPIRV_Cross_Combined_2, _567, 0.0).xyz * clamp(_584 * _583.z, 0.0, 1.0)); + } + else + { + _599 = _579; + } + vec3 _600 = _599 * 0.75; + vec2 _614 = clamp(_82 + _7._m0.xy, _95, _96); + vec3 _626 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _614, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _630 = textureLod(SPIRV_Cross_Combined_1, _614, 0.0); + float _631 = _630.y; + vec3 _646; + if (_631 > 0.0) + { + _646 = _626 + (textureLod(SPIRV_Cross_Combined_2, _614, 0.0).xyz * clamp(_631 * _630.z, 0.0, 1.0)); + } + else + { + _646 = _626; + } + vec3 _647 = _646 * 0.5; + vec2 _661 = clamp(_82 + (vec2(0.0, 2.0) * _7._m0.xy), _95, _96); + vec3 _673 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _661, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _677 = textureLod(SPIRV_Cross_Combined_1, _661, 0.0); + float _678 = _677.y; + vec3 _693; + if (_678 > 0.0) + { + _693 = _673 + (textureLod(SPIRV_Cross_Combined_2, _661, 0.0).xyz * clamp(_678 * _677.z, 0.0, 1.0)); + } + else + { + _693 = _673; + } + vec3 _702 = ((((((((((((_130.xyz + _177).xyz + _224).xyz + _271).xyz + _318).xyz + _365).xyz + _412).xyz + _459).xyz + _506).xyz + _553).xyz + _600).xyz + _647).xyz + (_693 * 0.5)).xyz * vec3(0.125); + _28 _704 = _74; + _704._m0 = vec4(_702.x, _702.y, _702.z, vec4(0.0).w); + _28 _705 = _704; + _705._m0.w = 1.0; + _5 = _705._m0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/block-name-namespace.asm.geom b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/block-name-namespace.asm.geom new file mode 100644 index 000000000..bb3e7d6b0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/block-name-namespace.asm.geom @@ -0,0 +1,32 @@ +#version 450 +layout(triangles) in; +layout(max_vertices = 4, triangle_strip) out; + +layout(binding = 0, std140) uniform VertexInput +{ + vec4 a; +} VertexInput_1; + +layout(binding = 0, std430) buffer VertexInput +{ + vec4 b; +} VertexInput_2; + +layout(location = 0) out VertexInput +{ + vec4 vColor; +} VertexInput_3; + +layout(location = 0) in VertexInput +{ + vec4 vColor; +} vin[3]; + + +void main() +{ + gl_Position = (vec4(1.0) + VertexInput_1.a) + VertexInput_2.b; + VertexInput_3.vColor = vin[0].vColor; + EmitVertex(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/inout-split-access-chain-handle.asm.geom b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/inout-split-access-chain-handle.asm.geom new file mode 100644 index 
000000000..ca1381cff --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/inout-split-access-chain-handle.asm.geom @@ -0,0 +1,9 @@ +#version 440 +layout(triangles) in; +layout(max_vertices = 5, triangle_strip) out; + +void main() +{ + gl_Position = gl_in[0].gl_Position; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/split-access-chain-input.asm.geom b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/split-access-chain-input.asm.geom new file mode 100644 index 000000000..511d87fcb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/split-access-chain-input.asm.geom @@ -0,0 +1,9 @@ +#version 440 +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +void main() +{ + gl_Position = gl_in[0].gl_Position; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/store-uint-layer.invalid.asm.geom b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/store-uint-layer.invalid.asm.geom new file mode 100644 index 000000000..c768d5da8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/geom/store-uint-layer.invalid.asm.geom @@ -0,0 +1,41 @@ +#version 450 +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +struct VertexOutput +{ + vec4 pos; +}; + +struct GeometryOutput +{ + vec4 pos; + uint layer; +}; + +void _main(VertexOutput _input[3], GeometryOutput stream) +{ + GeometryOutput _output; + _output.layer = 1u; + for (int v = 0; v < 3; v++) + { + _output.pos = _input[v].pos; + gl_Position = _output.pos; + gl_Layer = int(_output.layer); + EmitVertex(); + } + EndPrimitive(); +} + +void main() +{ + VertexOutput _input[3]; + _input[0].pos = gl_in[0].gl_Position; + _input[1].pos = gl_in[1].gl_Position; + _input[2].pos = gl_in[2].gl_Position; + VertexOutput param[3] = _input; + GeometryOutput param_1; + _main(param, param_1); + GeometryOutput stream = param_1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc b/3rdparty/spirv-cross/reference/opt/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc new file mode 100644 index 000000000..8cb7a4e64 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc @@ -0,0 +1,79 @@ +#version 450 +layout(vertices = 3) out; + +struct VertexOutput +{ + vec4 pos; + vec2 uv; +}; + +struct HSOut +{ + vec4 pos; + vec2 uv; +}; + +struct HSConstantOut +{ + float EdgeTess[3]; + float InsideTess; +}; + +struct VertexOutput_1 +{ + vec2 uv; +}; + +struct HSOut_1 +{ + vec2 uv; +}; + +layout(location = 0) in VertexOutput_1 p[]; +layout(location = 0) out HSOut_1 _entryPointOutput[3]; + +HSOut _hs_main(VertexOutput p_1[3], uint i) +{ + HSOut _output; + _output.pos = p_1[i].pos; + _output.uv = p_1[i].uv; + return _output; +} + +HSConstantOut PatchHS(VertexOutput _patch[3]) +{ + HSConstantOut _output; + _output.EdgeTess[0] = (vec2(1.0) + _patch[0].uv).x; + _output.EdgeTess[1] = (vec2(1.0) + _patch[0].uv).x; + _output.EdgeTess[2] = (vec2(1.0) + _patch[0].uv).x; + _output.InsideTess = (vec2(1.0) + _patch[0].uv).x; + return _output; +} + +void main() +{ + VertexOutput p_1[3]; + p_1[0].pos = gl_in[0].gl_Position; + p_1[0].uv = p[0].uv; + p_1[1].pos = gl_in[1].gl_Position; + p_1[1].uv = p[1].uv; + p_1[2].pos = gl_in[2].gl_Position; + p_1[2].uv = p[2].uv; + uint i = gl_InvocationID; + VertexOutput param[3] = p_1; + uint param_1 = i; + HSOut flattenTemp = _hs_main(param, param_1); + 
gl_out[gl_InvocationID].gl_Position = flattenTemp.pos; + _entryPointOutput[gl_InvocationID].uv = flattenTemp.uv; + barrier(); + if (int(gl_InvocationID) == 0) + { + VertexOutput param_2[3] = p_1; + HSConstantOut _patchConstantResult = PatchHS(param_2); + gl_TessLevelOuter[0] = _patchConstantResult.EdgeTess[0]; + gl_TessLevelOuter[1] = _patchConstantResult.EdgeTess[1]; + gl_TessLevelOuter[2] = _patchConstantResult.EdgeTess[2]; + gl_TessLevelInner[0] = _patchConstantResult.InsideTess; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/empty-io.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/empty-io.asm.vert new file mode 100644 index 000000000..3819a71dd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/empty-io.asm.vert @@ -0,0 +1,14 @@ +#version 450 + +struct VSOutput +{ + int empty_struct_member; +}; + +layout(location = 0) in vec4 position; + +void main() +{ + gl_Position = position; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/global-builtin.sso.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/global-builtin.sso.asm.vert new file mode 100644 index 000000000..20cb3b170 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/global-builtin.sso.asm.vert @@ -0,0 +1,20 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +struct VSOut +{ + float a; +}; + +layout(location = 0) out VSOut _entryPointOutput; + +void main() +{ + _entryPointOutput.a = 40.0; + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant-block.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant-block.asm.vert new file mode 100644 index 000000000..9b2f05a8b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant-block.asm.vert @@ -0,0 +1,9 @@ +#version 450 + +invariant gl_Position; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant-block.sso.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant-block.sso.asm.vert new file mode 100644 index 000000000..eb8869419 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant-block.sso.asm.vert @@ -0,0 +1,17 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; + float gl_PointSize; + float gl_ClipDistance[1]; + float gl_CullDistance[1]; +}; + +invariant gl_Position; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant.asm.vert new file mode 100644 index 000000000..9b2f05a8b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant.asm.vert @@ -0,0 +1,9 @@ +#version 450 + +invariant gl_Position; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant.sso.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant.sso.asm.vert new file mode 100644 index 000000000..4f7e2f5f6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/invariant.sso.asm.vert @@ -0,0 +1,14 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +invariant gl_Position; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert 
b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert new file mode 100644 index 000000000..d001d30c0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert @@ -0,0 +1,30 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_201 +#define SPIRV_CROSS_CONSTANT_ID_201 -10 +#endif +const int _7 = SPIRV_CROSS_CONSTANT_ID_201; +#ifndef SPIRV_CROSS_CONSTANT_ID_202 +#define SPIRV_CROSS_CONSTANT_ID_202 100u +#endif +const uint _8 = SPIRV_CROSS_CONSTANT_ID_202; +const int _20 = (_7 + 2); +const uint _25 = (_8 % 5u); +const ivec4 _30 = ivec4(20, 30, _20, _20); +const ivec2 _32 = ivec2(_30.y, _30.x); +const int _33 = _30.y; + +layout(location = 0) flat out int _4; + +void main() +{ + vec4 _63 = vec4(0.0); + _63.y = float(_20); + vec4 _66 = _63; + _66.z = float(_25); + vec4 _52 = _66 + vec4(_30); + vec2 _56 = _52.xy + vec2(_32); + gl_Position = vec4(_56.x, _56.y, _52.z, _52.w); + _4 = _33; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert.vk b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert.vk new file mode 100644 index 000000000..f39495646 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert.vk @@ -0,0 +1,24 @@ +#version 450 + +layout(constant_id = 201) const int _7 = -10; +layout(constant_id = 202) const uint _8 = 100u; +const int _20 = (_7 + 2); +const uint _25 = (_8 % 5u); +const ivec4 _30 = ivec4(20, 30, _20, _20); +const ivec2 _32 = ivec2(_30.y, _30.x); +const int _33 = _30.y; + +layout(location = 0) flat out int _4; + +void main() +{ + vec4 _63 = vec4(0.0); + _63.y = float(_20); + vec4 _66 = _63; + _66.z = float(_25); + vec4 _52 = _66 + vec4(_30); + vec2 _56 = _52.xy + vec2(_32); + gl_Position = vec4(_56.x, _56.y, _52.z, _52.w); + _4 = _33; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..c25e9bbe5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,9 @@ +#version 450 + +uniform int SPIRV_Cross_BaseInstance; + +void main() +{ + gl_Position = vec4(float(uint(gl_VertexID) + uint((gl_InstanceID + SPIRV_Cross_BaseInstance)))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/atomic.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/atomic.comp new file mode 100644 index 000000000..89b1351c0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/atomic.comp @@ -0,0 +1,49 @@ +#version 310 es +#extension GL_OES_shader_image_atomic : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 2, std430) buffer SSBO +{ + uint u32; + int i32; +} ssbo; + +layout(binding = 0, r32ui) uniform highp uimage2D uImage; +layout(binding = 1, r32i) uniform highp iimage2D iImage; + +void main() +{ + uint _19 = imageAtomicAdd(uImage, ivec2(1, 5), 1u); + uint _27 = imageAtomicAdd(uImage, ivec2(1, 5), 1u); + imageStore(iImage, ivec2(1, 6), ivec4(int(_27))); + uint _32 = imageAtomicOr(uImage, ivec2(1, 5), 1u); + uint _34 = imageAtomicXor(uImage, ivec2(1, 5), 1u); + uint _36 = imageAtomicAnd(uImage, ivec2(1, 5), 1u); + uint _38 = imageAtomicMin(uImage, ivec2(1, 5), 1u); + uint _40 = imageAtomicMax(uImage, ivec2(1, 5), 1u); + uint _44 = 
imageAtomicCompSwap(uImage, ivec2(1, 5), 10u, 2u); + int _47 = imageAtomicAdd(iImage, ivec2(1, 6), 1); + int _49 = imageAtomicOr(iImage, ivec2(1, 6), 1); + int _51 = imageAtomicXor(iImage, ivec2(1, 6), 1); + int _53 = imageAtomicAnd(iImage, ivec2(1, 6), 1); + int _55 = imageAtomicMin(iImage, ivec2(1, 6), 1); + int _57 = imageAtomicMax(iImage, ivec2(1, 6), 1); + int _61 = imageAtomicCompSwap(iImage, ivec2(1, 5), 10, 2); + uint _68 = atomicAdd(ssbo.u32, 1u); + uint _70 = atomicOr(ssbo.u32, 1u); + uint _72 = atomicXor(ssbo.u32, 1u); + uint _74 = atomicAnd(ssbo.u32, 1u); + uint _76 = atomicMin(ssbo.u32, 1u); + uint _78 = atomicMax(ssbo.u32, 1u); + uint _80 = atomicExchange(ssbo.u32, 1u); + uint _82 = atomicCompSwap(ssbo.u32, 10u, 2u); + int _85 = atomicAdd(ssbo.i32, 1); + int _87 = atomicOr(ssbo.i32, 1); + int _89 = atomicXor(ssbo.i32, 1); + int _91 = atomicAnd(ssbo.i32, 1); + int _93 = atomicMin(ssbo.i32, 1); + int _95 = atomicMax(ssbo.i32, 1); + int _97 = atomicExchange(ssbo.i32, 1); + int _99 = atomicCompSwap(ssbo.i32, 10, 2); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/bake_gradient.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/bake_gradient.comp new file mode 100644 index 000000000..69634d5d8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/bake_gradient.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in; + +layout(binding = 4, std140) uniform UBO +{ + vec4 uInvSize; + vec4 uScale; +} _46; + +layout(binding = 0) uniform mediump sampler2D uHeight; +layout(binding = 1) uniform mediump sampler2D uDisplacement; +layout(binding = 2, rgba16f) uniform writeonly mediump image2D iHeightDisplacement; +layout(binding = 3, rgba16f) uniform writeonly mediump image2D iGradJacobian; + +void main() +{ + vec4 _59 = (vec2(gl_GlobalInvocationID.xy) * _46.uInvSize.xy).xyxy + (_46.uInvSize * 0.5); + vec2 _67 = _59.xy; + vec2 _128 = _59.zw; + vec2 _157 = ((textureLodOffset(uDisplacement, _128, 0.0, ivec2(1, 0)).xy - textureLodOffset(uDisplacement, _128, 0.0, ivec2(-1, 0)).xy) * 0.60000002384185791015625) * _46.uScale.z; + vec2 _161 = ((textureLodOffset(uDisplacement, _128, 0.0, ivec2(0, 1)).xy - textureLodOffset(uDisplacement, _128, 0.0, ivec2(0, -1)).xy) * 0.60000002384185791015625) * _46.uScale.z; + ivec2 _172 = ivec2(gl_GlobalInvocationID.xy); + imageStore(iHeightDisplacement, _172, vec4(textureLod(uHeight, _67, 0.0).x, 0.0, 0.0, 0.0)); + imageStore(iGradJacobian, _172, vec4((_46.uScale.xy * 0.5) * vec2(textureLodOffset(uHeight, _67, 0.0, ivec2(1, 0)).x - textureLodOffset(uHeight, _67, 0.0, ivec2(-1, 0)).x, textureLodOffset(uHeight, _67, 0.0, ivec2(0, 1)).x - textureLodOffset(uHeight, _67, 0.0, ivec2(0, -1)).x), ((1.0 + _157.x) * (1.0 + _161.y)) - (_157.y * _161.x), 0.0)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/barriers.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/barriers.comp new file mode 100644 index 000000000..a091497a4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/barriers.comp @@ -0,0 +1,28 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + memoryBarrierShared(); + memoryBarrier(); + memoryBarrierImage(); + memoryBarrierBuffer(); + groupMemoryBarrier(); + memoryBarrierShared(); + barrier(); + memoryBarrier(); + memoryBarrierShared(); + barrier(); + memoryBarrierImage(); + memoryBarrierShared(); + barrier(); + memoryBarrierBuffer(); + memoryBarrierShared(); + barrier(); + 
groupMemoryBarrier(); + memoryBarrierShared(); + barrier(); + memoryBarrierShared(); + barrier(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/basic.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/basic.comp new file mode 100644 index 000000000..f025d53c6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/basic.comp @@ -0,0 +1,28 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 in_data[]; +} _23; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _45; + +layout(binding = 2, std430) buffer SSBO3 +{ + uint counter; +} _48; + +void main() +{ + vec4 _29 = _23.in_data[gl_GlobalInvocationID.x]; + if (dot(_29, vec4(1.0, 5.0, 6.0, 2.0)) > 8.19999980926513671875) + { + uint _52 = atomicAdd(_48.counter, 1u); + _45.out_data[_52] = _29; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/bitcast-16bit-1.invalid.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/bitcast-16bit-1.invalid.comp new file mode 100644 index 000000000..7420586f0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/bitcast-16bit-1.invalid.comp @@ -0,0 +1,38 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + i16vec4 inputs[]; +} _25; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +} _39; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + f16vec2 a = int16BitsToFloat16(_25.inputs[ident].xy); + _39.outputs[ident].x = int(packFloat2x16(a + f16vec2(float16_t(1), float16_t(1)))); + _39.outputs[ident].y = packInt2x16(_25.inputs[ident].zw); + _39.outputs[ident].z = int(packUint2x16(u16vec2(_25.inputs[ident].xy))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/bitcast-16bit-2.invalid.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/bitcast-16bit-2.invalid.comp new file mode 100644 index 000000000..416bc4a2e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/bitcast-16bit-2.invalid.comp @@ -0,0 +1,43 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. 
+#endif +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) buffer SSBO1 +{ + i16vec4 outputs[]; +} _21; + +layout(binding = 0, std430) buffer SSBO0 +{ + ivec4 inputs[]; +} _29; + +layout(binding = 2, std140) uniform UBO +{ + f16vec4 const0; +} _40; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + i16vec2 _47 = unpackInt2x16(_29.inputs[ident].x) + float16BitsToInt16(_40.const0.xy); + _21.outputs[ident] = i16vec4(_47.x, _47.y, _21.outputs[ident].z, _21.outputs[ident].w); + i16vec2 _66 = i16vec2(unpackUint2x16(uint(_29.inputs[ident].y)) - float16BitsToUint16(_40.const0.zw)); + _21.outputs[ident] = i16vec4(_21.outputs[ident].x, _21.outputs[ident].y, _66.x, _66.y); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/casts.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/casts.comp new file mode 100644 index 000000000..11ef36287 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/casts.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +} _21; + +layout(binding = 0, std430) buffer SSBO0 +{ + ivec4 inputs[]; +} _27; + +void main() +{ + _21.outputs[gl_GlobalInvocationID.x] = mix(ivec4(0), ivec4(1), notEqual((_27.inputs[gl_GlobalInvocationID.x] & ivec4(3)), ivec4(uvec4(0u)))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/cfg-preserve-parameter.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/cfg-preserve-parameter.comp new file mode 100644 index 000000000..124652b32 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/cfg-preserve-parameter.comp @@ -0,0 +1,7 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/cfg.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/cfg.comp new file mode 100644 index 000000000..0b7e0c161 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/cfg.comp @@ -0,0 +1,44 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + float data; +} _11; + +float _183; + +void main() +{ + if (_11.data != 0.0) + { + _11.data = 10.0; + } + else + { + _11.data = 15.0; + } + switch (int(_11.data)) + { + case 0: + { + _11.data = 20.0; + break; + } + case 1: + { + _11.data = 30.0; + break; + } + } + float _180; + _180 = _183; + for (int _179 = 0; _179 < 20; ) + { + _180 += 10.0; + _179++; + continue; + } + _11.data = _180; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/coherent-block.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/coherent-block.comp new file mode 100644 index 000000000..bfab6bbea --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/coherent-block.comp @@ -0,0 +1,13 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) coherent restrict writeonly buffer SSBO +{ + vec4 value; +} _10; + +void main() +{ + _10.value = vec4(20.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/coherent-image.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/coherent-image.comp new file mode 100644 index 000000000..b3992f242 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/coherent-image.comp @@ -0,0 +1,15 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; 
+ +layout(binding = 1, std430) coherent restrict writeonly buffer SSBO +{ + ivec4 value; +} _10; + +layout(binding = 3, r32i) uniform coherent restrict readonly mediump iimage2D uImage; + +void main() +{ + _10.value = imageLoad(uImage, ivec2(10)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/composite-array-initialization.comp new file mode 100644 index 000000000..e91313667 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/composite-array-initialization.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 2, local_size_y = 1, local_size_z = 1) in; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 4.0 +#endif +const float X = SPIRV_CROSS_CONSTANT_ID_0; + +struct Data +{ + float a; + float b; +}; + +layout(binding = 0, std430) buffer SSBO +{ + Data outdata[]; +} _53; + +void main() +{ + Data data[2] = Data[](Data(1.0, 2.0), Data(3.0, 4.0)); + Data data2[2] = Data[](Data(X, 2.0), Data(3.0, 5.0)); + _53.outdata[gl_WorkGroupID.x].a = data[gl_LocalInvocationID.x].a + data2[gl_LocalInvocationID.x].a; + _53.outdata[gl_WorkGroupID.x].b = data[gl_LocalInvocationID.x].b + data2[gl_LocalInvocationID.x].b; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/composite-construct.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/composite-construct.comp new file mode 100644 index 000000000..0e857976c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/composite-construct.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + vec4 as[]; +} _41; + +layout(binding = 1, std430) buffer SSBO1 +{ + vec4 bs[]; +} _55; + +void main() +{ + _41.as[gl_GlobalInvocationID.x] = ((_41.as[gl_GlobalInvocationID.x] + _55.bs[gl_GlobalInvocationID.x]) + _55.bs[gl_GlobalInvocationID.x]) + vec4(10.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/culling.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/culling.comp new file mode 100644 index 000000000..f4dea4b35 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/culling.comp @@ -0,0 +1,28 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + float in_data[]; +} _22; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + float out_data[]; +} _38; + +layout(binding = 2, std430) buffer SSBO3 +{ + uint count; +} _41; + +void main() +{ + float _28 = _22.in_data[gl_GlobalInvocationID.x]; + if (_28 > 12.0) + { + uint _45 = atomicAdd(_41.count, 1u); + _38.out_data[_45] = _28; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/defer-parens.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/defer-parens.comp new file mode 100644 index 000000000..c48fb9e08 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/defer-parens.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + vec4 data; + int index; +} _13; + +void main() +{ + vec4 _17 = _13.data; + vec2 _28 = _17.yz + vec2(10.0); + _13.data = vec4(_17.x, _28, _17.w); + _13.data = (_17 + _17) + _17; + _13.data = _28.xxyy; + _13.data = vec4(_28.y); + _13.data = vec4((_17.zw + vec2(10.0))[_13.index]); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/dowhile.comp 
b/3rdparty/spirv-cross/reference/opt/shaders/comp/dowhile.comp new file mode 100644 index 000000000..4370ea307 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/dowhile.comp @@ -0,0 +1,38 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +} _28; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _52; + +void main() +{ + vec4 _57; + int _58; + _58 = 0; + _57 = _28.in_data[gl_GlobalInvocationID.x]; + vec4 _42; + for (;;) + { + _42 = _28.mvp * _57; + int _44 = _58 + 1; + if (_44 < 16) + { + _58 = _44; + _57 = _42; + } + else + { + break; + } + } + _52.out_data[gl_GlobalInvocationID.x] = _42; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/generate_height.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/generate_height.comp new file mode 100644 index 000000000..feb8d41c0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/generate_height.comp @@ -0,0 +1,56 @@ +#version 310 es +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer Distribution +{ + vec2 distribution[]; +} _137; + +layout(binding = 2, std140) uniform UBO +{ + vec4 uModTime; +} _166; + +layout(binding = 1, std430) writeonly buffer HeightmapFFT +{ + uint heights[]; +} _225; + +void main() +{ + uvec2 _264 = uvec2(64u, 1u) * gl_NumWorkGroups.xy; + uvec2 _269 = _264 - gl_GlobalInvocationID.xy; + bvec2 _271 = equal(gl_GlobalInvocationID.xy, uvec2(0u)); + uint _454; + if (_271.x) + { + _454 = 0u; + } + else + { + _454 = _269.x; + } + uint _455; + if (_271.y) + { + _455 = 0u; + } + else + { + _455 = _269.y; + } + uint _276 = _264.x; + uint _280 = (gl_GlobalInvocationID.y * _276) + gl_GlobalInvocationID.x; + uint _290 = (_455 * _276) + _454; + vec2 _297 = vec2(gl_GlobalInvocationID.xy); + vec2 _299 = vec2(_264); + float _309 = sqrt(9.81000041961669921875 * length(_166.uModTime.xy * mix(_297, _297 - _299, greaterThan(_297, _299 * 0.5)))) * _166.uModTime.z; + vec2 _316 = vec2(cos(_309), sin(_309)); + vec2 _387 = _316.xx; + vec2 _392 = _316.yy; + vec2 _395 = _392 * _137.distribution[_280].yx; + vec2 _421 = _392 * _137.distribution[_290].yx; + vec2 _429 = (_137.distribution[_290] * _387) + vec2(-_421.x, _421.y); + _225.heights[_280] = packHalf2x16(((_137.distribution[_280] * _387) + vec2(-_395.x, _395.y)) + vec2(_429.x, -_429.y)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/image.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/image.comp new file mode 100644 index 000000000..cdb57142c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/image.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, rgba8) uniform readonly mediump image2D uImageIn; +layout(binding = 1, rgba8) uniform writeonly mediump image2D uImageOut; + +void main() +{ + ivec2 _23 = ivec2(gl_GlobalInvocationID.xy); + imageStore(uImageOut, _23, imageLoad(uImageIn, _23 + imageSize(uImageIn))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/inout-struct.invalid.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/inout-struct.invalid.comp new file mode 100644 index 000000000..640e25bb9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/inout-struct.invalid.comp @@ -0,0 +1,65 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + 
+struct Foo +{ + vec4 a; + vec4 b; + vec4 c; + vec4 d; +}; + +layout(binding = 1, std430) readonly buffer SSBO2 +{ + vec4 data[]; +} indata; + +layout(binding = 0, std430) writeonly buffer SSBO +{ + vec4 data[]; +} outdata; + +layout(binding = 2, std430) readonly buffer SSBO3 +{ + Foo foos[]; +} foobar; + +void baz(inout Foo foo) +{ + uint ident = gl_GlobalInvocationID.x; + foo.a = indata.data[(4u * ident) + 0u]; + foo.b = indata.data[(4u * ident) + 1u]; + foo.c = indata.data[(4u * ident) + 2u]; + foo.d = indata.data[(4u * ident) + 3u]; +} + +void meow(inout Foo foo) +{ + foo.a += vec4(10.0); + foo.b += vec4(20.0); + foo.c += vec4(30.0); + foo.d += vec4(40.0); +} + +vec4 bar(Foo foo) +{ + return ((foo.a + foo.b) + foo.c) + foo.d; +} + +void main() +{ + Foo param; + baz(param); + Foo foo = param; + Foo param_1 = foo; + meow(param_1); + foo = param_1; + Foo param_2 = foo; + Foo param_3; + param_3.a = foobar.foos[gl_GlobalInvocationID.x].a; + param_3.b = foobar.foos[gl_GlobalInvocationID.x].b; + param_3.c = foobar.foos[gl_GlobalInvocationID.x].c; + param_3.d = foobar.foos[gl_GlobalInvocationID.x].d; + outdata.data[gl_GlobalInvocationID.x] = bar(param_2) + bar(param_3); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/insert.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/insert.comp new file mode 100644 index 000000000..5ff719449 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/insert.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) writeonly buffer SSBO +{ + vec4 out_data[]; +} _27; + +vec4 _52; + +void main() +{ + vec4 _45 = _52; + _45.x = 10.0; + vec4 _47 = _45; + _47.y = 30.0; + vec4 _49 = _47; + _49.z = 70.0; + vec4 _51 = _49; + _51.w = 90.0; + _27.out_data[gl_GlobalInvocationID.x] = _51; + _27.out_data[gl_GlobalInvocationID.x].y = 20.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/mat3.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/mat3.comp new file mode 100644 index 000000000..b1c585b84 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/mat3.comp @@ -0,0 +1,13 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + mat3 out_data[]; +} _22; + +void main() +{ + _22.out_data[gl_GlobalInvocationID.x] = mat3(vec3(10.0), vec3(20.0), vec3(40.0)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/mod.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/mod.comp new file mode 100644 index 000000000..d8ee0ff83 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/mod.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 in_data[]; +} _23; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _33; + +void main() +{ + _33.out_data[gl_GlobalInvocationID.x] = mod(_23.in_data[gl_GlobalInvocationID.x], _33.out_data[gl_GlobalInvocationID.x]); + _33.out_data[gl_GlobalInvocationID.x] = uintBitsToFloat(floatBitsToUint(_23.in_data[gl_GlobalInvocationID.x]) % floatBitsToUint(_33.out_data[gl_GlobalInvocationID.x])); + _33.out_data[gl_GlobalInvocationID.x] = intBitsToFloat(floatBitsToInt(_23.in_data[gl_GlobalInvocationID.x]) % floatBitsToInt(_33.out_data[gl_GlobalInvocationID.x])); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/modf.comp 
b/3rdparty/spirv-cross/reference/opt/shaders/comp/modf.comp new file mode 100644 index 000000000..3c8ab6ecd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/modf.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 in_data[]; +} _23; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _35; + +void main() +{ + vec4 i; + vec4 _31 = modf(_23.in_data[gl_GlobalInvocationID.x], i); + _35.out_data[gl_GlobalInvocationID.x] = _31; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/read-write-only.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/read-write-only.comp new file mode 100644 index 000000000..06227ee2c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/read-write-only.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 2, std430) restrict writeonly buffer SSBO2 +{ + vec4 data4; + vec4 data5; +} _10; + +layout(binding = 0, std430) readonly buffer SSBO0 +{ + vec4 data0; + vec4 data1; +} _15; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + vec4 data2; + vec4 data3; +} _21; + +void main() +{ + _10.data4 = _15.data0 + _21.data2; + _10.data5 = _15.data1 + _21.data3; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/rmw-matrix.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/rmw-matrix.comp new file mode 100644 index 000000000..5c4ac94bc --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + float a; + vec4 b; + mat4 c; + float a1; + vec4 b1; + mat4 c1; +} _11; + +void main() +{ + _11.a *= _11.a1; + _11.b *= _11.b1; + _11.c = _11.c * _11.c1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/rmw-opt.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/rmw-opt.comp new file mode 100644 index 000000000..342e6632d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/rmw-opt.comp @@ -0,0 +1,23 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + int a; +} _9; + +void main() +{ + _9.a += 10; + _9.a -= 10; + _9.a *= 10; + _9.a /= 10; + _9.a = _9.a << 2; + _9.a = _9.a >> 3; + _9.a &= 40; + _9.a ^= 10; + _9.a %= 40; + _9.a |= 1; + _9.a = 0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/shared.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/shared.comp new file mode 100644 index 000000000..f95cb2b8b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/shared.comp @@ -0,0 +1,23 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + float in_data[]; +} _22; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + float out_data[]; +} _44; + +shared float sShared[4]; + +void main() +{ + sShared[gl_LocalInvocationIndex] = _22.in_data[gl_GlobalInvocationID.x]; + memoryBarrierShared(); + barrier(); + _44.out_data[gl_GlobalInvocationID.x] = sShared[3u - gl_LocalInvocationIndex]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/ssbo-array.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/ssbo-array.comp new file mode 100644 index 000000000..6caf8f49f --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders/comp/ssbo-array.comp @@ -0,0 +1,13 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + vec4 data[]; +} ssbos[2]; + +void main() +{ + ssbos[1].data[gl_GlobalInvocationID.x] = ssbos[0].data[gl_GlobalInvocationID.x]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/struct-layout.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/struct-layout.comp new file mode 100644 index 000000000..0f73fa7fa --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/struct-layout.comp @@ -0,0 +1,23 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct Foo +{ + mat4 m; +}; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + Foo out_data[]; +} _23; + +layout(binding = 0, std430) readonly buffer SSBO +{ + Foo in_data[]; +} _30; + +void main() +{ + _23.out_data[gl_GlobalInvocationID.x].m = _30.in_data[gl_GlobalInvocationID.x].m * _30.in_data[gl_GlobalInvocationID.x].m; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/struct-packing.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/struct-packing.comp new file mode 100644 index 000000000..cd1eda1b3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/struct-packing.comp @@ -0,0 +1,146 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct S0 +{ + vec2 a[1]; + float b; +}; + +struct S1 +{ + vec3 a; + float b; +}; + +struct S2 +{ + vec3 a[1]; + float b; +}; + +struct S3 +{ + vec2 a; + float b; +}; + +struct S4 +{ + vec2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + float m4; + S4 m3s[8]; +}; + +struct S0_1 +{ + vec2 a[1]; + float b; +}; + +struct S1_1 +{ + vec3 a; + float b; +}; + +struct S2_1 +{ + vec3 a[1]; + float b; +}; + +struct S3_1 +{ + vec2 a; + float b; +}; + +struct S4_1 +{ + vec2 c; +}; + +struct Content_1 +{ + S0_1 m0s[1]; + S1_1 m1s[1]; + S2_1 m2s[1]; + S0_1 m0; + S1_1 m1; + S2_1 m2; + S3_1 m3; + float m4; + S4_1 m3s[8]; +}; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + mat2 m0; + mat2 m1; + mat2x3 m2[4]; + mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_430; + +layout(binding = 0, std140) restrict buffer SSBO0 +{ + Content_1 content; + Content_1 content1[2]; + Content_1 content2; + mat2 m0; + mat2 m1; + mat2x3 m2[4]; + mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_140; + +void main() +{ + ssbo_430.content.m0s[0].a[0] = ssbo_140.content.m0s[0].a[0]; + ssbo_430.content.m0s[0].b = ssbo_140.content.m0s[0].b; + ssbo_430.content.m1s[0].a = ssbo_140.content.m1s[0].a; + ssbo_430.content.m1s[0].b = ssbo_140.content.m1s[0].b; + ssbo_430.content.m2s[0].a[0] = ssbo_140.content.m2s[0].a[0]; + ssbo_430.content.m2s[0].b = ssbo_140.content.m2s[0].b; + ssbo_430.content.m0.a[0] = ssbo_140.content.m0.a[0]; + ssbo_430.content.m0.b = ssbo_140.content.m0.b; + ssbo_430.content.m1.a = ssbo_140.content.m1.a; + ssbo_430.content.m1.b = ssbo_140.content.m1.b; + ssbo_430.content.m2.a[0] = ssbo_140.content.m2.a[0]; + ssbo_430.content.m2.b = ssbo_140.content.m2.b; + ssbo_430.content.m3.a = ssbo_140.content.m3.a; + ssbo_430.content.m3.b = 
ssbo_140.content.m3.b; + ssbo_430.content.m4 = ssbo_140.content.m4; + ssbo_430.content.m3s[0].c = ssbo_140.content.m3s[0].c; + ssbo_430.content.m3s[1].c = ssbo_140.content.m3s[1].c; + ssbo_430.content.m3s[2].c = ssbo_140.content.m3s[2].c; + ssbo_430.content.m3s[3].c = ssbo_140.content.m3s[3].c; + ssbo_430.content.m3s[4].c = ssbo_140.content.m3s[4].c; + ssbo_430.content.m3s[5].c = ssbo_140.content.m3s[5].c; + ssbo_430.content.m3s[6].c = ssbo_140.content.m3s[6].c; + ssbo_430.content.m3s[7].c = ssbo_140.content.m3s[7].c; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/torture-loop.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/torture-loop.comp new file mode 100644 index 000000000..5943966c0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/torture-loop.comp @@ -0,0 +1,40 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +} _24; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _89; + +void main() +{ + vec4 _99; + _99 = _24.in_data[gl_GlobalInvocationID.x]; + for (int _93 = 0; (_93 + 1) < 10; ) + { + _99 *= 2.0; + _93 += 2; + continue; + } + vec4 _98; + _98 = _99; + vec4 _103; + for (uint _94 = 0u; _94 < 16u; _98 = _103, _94++) + { + _103 = _98; + for (uint _100 = 0u; _100 < 30u; ) + { + _103 = _24.mvp * _103; + _100++; + continue; + } + } + _89.out_data[gl_GlobalInvocationID.x] = _98; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/type-alias.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/type-alias.comp new file mode 100644 index 000000000..c0f57f4bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/type-alias.comp @@ -0,0 +1,33 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct S0 +{ + vec4 a; +}; + +struct S1 +{ + vec4 a; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + S0 s0s[]; +} _36; + +layout(binding = 1, std430) buffer SSBO1 +{ + S1 s1s[]; +} _55; + +layout(binding = 2, std430) buffer SSBO2 +{ + vec4 outputs[]; +} _66; + +void main() +{ + _66.outputs[gl_GlobalInvocationID.x] = _36.s0s[gl_GlobalInvocationID.x].a + _55.s1s[gl_GlobalInvocationID.x].a; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/comp/udiv.comp b/3rdparty/spirv-cross/reference/opt/shaders/comp/udiv.comp new file mode 100644 index 000000000..0c1f926ad --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/comp/udiv.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO2 +{ + uint outputs[]; +} _10; + +layout(binding = 0, std430) buffer SSBO +{ + uint inputs[]; +} _23; + +void main() +{ + _10.outputs[gl_GlobalInvocationID.x] = _23.inputs[gl_GlobalInvocationID.x] / 29u; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/enhanced-layouts.comp b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/enhanced-layouts.comp new file mode 100644 index 000000000..45b25064b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/enhanced-layouts.comp @@ -0,0 +1,47 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct Foo +{ + int a; + int b; + int c; +}; + +struct Foo_1 +{ + int a; + int b; + int c; +}; + +layout(binding = 1, std140) buffer SSBO1 +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + 
layout(offset = 48) int c[8]; +} ssbo1; + +layout(binding = 2, std430) buffer SSBO2 +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo_1 foo; + layout(offset = 48) int c[8]; +} ssbo2; + +layout(binding = 0, std140) uniform UBO +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + layout(offset = 48) int c[8]; +} ubo; + +void main() +{ + ssbo1.a = ssbo2.a; + ssbo1.b = ubo.b; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/extended-arithmetic.desktop.comp b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/extended-arithmetic.desktop.comp new file mode 100644 index 000000000..9c55c74db --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/extended-arithmetic.desktop.comp @@ -0,0 +1,159 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct ResType +{ + uint _m0; + uint _m1; +}; + +struct ResType_1 +{ + uvec2 _m0; + uvec2 _m1; +}; + +struct ResType_2 +{ + uvec3 _m0; + uvec3 _m1; +}; + +struct ResType_3 +{ + uvec4 _m0; + uvec4 _m1; +}; + +struct ResType_4 +{ + int _m0; + int _m1; +}; + +struct ResType_5 +{ + ivec2 _m0; + ivec2 _m1; +}; + +struct ResType_6 +{ + ivec3 _m0; + ivec3 _m1; +}; + +struct ResType_7 +{ + ivec4 _m0; + ivec4 _m1; +}; + +layout(binding = 0, std430) buffer SSBOUint +{ + uint a; + uint b; + uint c; + uint d; + uvec2 a2; + uvec2 b2; + uvec2 c2; + uvec2 d2; + uvec3 a3; + uvec3 b3; + uvec3 c3; + uvec3 d3; + uvec4 a4; + uvec4 b4; + uvec4 c4; + uvec4 d4; +} u; + +layout(binding = 1, std430) buffer SSBOInt +{ + int a; + int b; + int c; + int d; + ivec2 a2; + ivec2 b2; + ivec2 c2; + ivec2 d2; + ivec3 a3; + ivec3 b3; + ivec3 c3; + ivec3 d3; + ivec4 a4; + ivec4 b4; + ivec4 c4; + ivec4 d4; +} i; + +void main() +{ + ResType _25; + _25._m0 = uaddCarry(u.a, u.b, _25._m1); + u.d = _25._m1; + u.c = _25._m0; + ResType_1 _40; + _40._m0 = uaddCarry(u.a2, u.b2, _40._m1); + u.d2 = _40._m1; + u.c2 = _40._m0; + ResType_2 _55; + _55._m0 = uaddCarry(u.a3, u.b3, _55._m1); + u.d3 = _55._m1; + u.c3 = _55._m0; + ResType_3 _70; + _70._m0 = uaddCarry(u.a4, u.b4, _70._m1); + u.d4 = _70._m1; + u.c4 = _70._m0; + ResType _79; + _79._m0 = usubBorrow(u.a, u.b, _79._m1); + u.d = _79._m1; + u.c = _79._m0; + ResType_1 _88; + _88._m0 = usubBorrow(u.a2, u.b2, _88._m1); + u.d2 = _88._m1; + u.c2 = _88._m0; + ResType_2 _97; + _97._m0 = usubBorrow(u.a3, u.b3, _97._m1); + u.d3 = _97._m1; + u.c3 = _97._m0; + ResType_3 _106; + _106._m0 = usubBorrow(u.a4, u.b4, _106._m1); + u.d4 = _106._m1; + u.c4 = _106._m0; + ResType _116; + umulExtended(u.a, u.b, _116._m1, _116._m0); + u.d = _116._m0; + u.c = _116._m1; + ResType_1 _125; + umulExtended(u.a2, u.b2, _125._m1, _125._m0); + u.d2 = _125._m0; + u.c2 = _125._m1; + ResType_2 _134; + umulExtended(u.a3, u.b3, _134._m1, _134._m0); + u.d3 = _134._m0; + u.c3 = _134._m1; + ResType_3 _143; + umulExtended(u.a4, u.b4, _143._m1, _143._m0); + u.d4 = _143._m0; + u.c4 = _143._m1; + ResType_4 _160; + imulExtended(i.a, i.b, _160._m1, _160._m0); + i.d = _160._m0; + i.c = _160._m1; + ResType_5 _171; + imulExtended(i.a2, i.b2, _171._m1, _171._m0); + i.d2 = _171._m0; + i.c2 = _171._m1; + ResType_6 _182; + imulExtended(i.a3, i.b3, _182._m1, _182._m0); + i.d3 = _182._m0; + i.c3 = _182._m1; + ResType_7 _193; + imulExtended(i.a4, i.b4, _193._m1, _193._m0); + i.d4 = _193._m0; + i.c4 = _193._m1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/fp64.desktop.comp 
b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/fp64.desktop.comp new file mode 100644 index 000000000..3839a091f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/fp64.desktop.comp @@ -0,0 +1,63 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct M0 +{ + double v; + dvec2 b[2]; + dmat2x3 c; + dmat3x2 d; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + dvec4 a; + M0 m0; + dmat4 b; +} ssbo_0; + +layout(binding = 1, std430) buffer SSBO1 +{ + dmat4 a; + dvec4 b; + M0 m0; +} ssbo_1; + +layout(binding = 2, std430) buffer SSBO2 +{ + double a[4]; + dvec2 b[4]; +} ssbo_2; + +layout(binding = 3, std140) buffer SSBO3 +{ + double a[4]; + dvec2 b[4]; +} ssbo_3; + +void main() +{ + ssbo_0.a += dvec4(10.0lf, 20.0lf, 30.0lf, 40.0lf); + ssbo_0.a += dvec4(20.0lf); + dvec4 _40 = ssbo_0.a; + ssbo_0.a = abs(_40); + ssbo_0.a = sign(_40); + ssbo_0.a = floor(_40); + ssbo_0.a = trunc(_40); + ssbo_0.a = round(_40); + ssbo_0.a = roundEven(_40); + ssbo_0.a = ceil(_40); + ssbo_0.a = fract(_40); + ssbo_0.a = mod(_40, dvec4(20.0lf)); + ssbo_0.a = mod(_40, _40); + ssbo_0.a = min(_40, _40); + ssbo_0.a = max(_40, _40); + ssbo_0.a = clamp(_40, _40, _40); + ssbo_0.a = mix(_40, _40, _40); + ssbo_0.a = step(_40, _40); + ssbo_0.a = smoothstep(_40, _40, _40); + ssbo_1.b.x += 1.0lf; + ssbo_2.b[0].x += 1.0lf; + ssbo_3.b[0].x += 1.0lf; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp new file mode 100644 index 000000000..37b286355 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp @@ -0,0 +1,7 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/int64.desktop.comp b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/int64.desktop.comp new file mode 100644 index 000000000..702456b30 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/comp/int64.desktop.comp @@ -0,0 +1,52 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct M0 +{ + int64_t v; + i64vec2 b[2]; + uint64_t c; + uint64_t d[5]; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + i64vec4 a; + M0 m0; +} ssbo_0; + +layout(binding = 1, std430) buffer SSBO1 +{ + u64vec4 b; + M0 m0; +} ssbo_1; + +layout(binding = 2, std430) buffer SSBO2 +{ + int64_t a[4]; + i64vec2 b[4]; +} ssbo_2; + +layout(binding = 3, std140) buffer SSBO3 +{ + int64_t a[4]; + i64vec2 b[4]; +} ssbo_3; + +void main() +{ + ssbo_0.a += i64vec4(10l, 20l, 30l, 40l); + ssbo_1.b += u64vec4(999999999999999999ul, 8888888888888888ul, 77777777777777777ul, 6666666666666666ul); + ssbo_0.a += i64vec4(20l); + ssbo_0.a = abs(ssbo_0.a + i64vec4(ssbo_1.b)); + ssbo_0.a += i64vec4(1l); + ssbo_1.b += u64vec4(i64vec4(1l)); + ssbo_0.a -= i64vec4(1l); + ssbo_1.b -= u64vec4(i64vec4(1l)); + ssbo_1.b = doubleBitsToUint64(int64BitsToDouble(ssbo_0.a)); + ssbo_0.a = doubleBitsToInt64(uint64BitsToDouble(ssbo_1.b)); + ssbo_2.a[0] += 1l; + ssbo_3.a[0] += 2l; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/clip-cull-distance.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/clip-cull-distance.desktop.frag new file mode 
100644 index 000000000..3cc320550 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/clip-cull-distance.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +in float gl_ClipDistance[4]; +in float gl_CullDistance[3]; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = gl_ClipDistance[0] + gl_CullDistance[0]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..29c59012c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,37 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vInput; + +void main() +{ + FragColor = vInput; + vec4 _23 = texture(uSampler, vInput.xy); + vec4 _26 = dFdx(vInput); + vec4 _29 = dFdy(vInput); + vec4 _32 = fwidth(vInput); + vec4 _35 = dFdxCoarse(vInput); + vec4 _38 = dFdyCoarse(vInput); + vec4 _41 = fwidthCoarse(vInput); + vec4 _44 = dFdxFine(vInput); + vec4 _47 = dFdyFine(vInput); + vec4 _50 = fwidthFine(vInput); + vec2 _56 = textureQueryLod(uSampler, vInput.zw); + if (vInput.y > 10.0) + { + FragColor += _23; + FragColor += _26; + FragColor += _29; + FragColor += _32; + FragColor += _35; + FragColor += _38; + FragColor += _41; + FragColor += _44; + FragColor += _47; + FragColor += _50; + FragColor += _56.xyxy; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/depth-greater-than.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/depth-greater-than.desktop.frag new file mode 100644 index 000000000..8b7c29644 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/depth-greater-than.desktop.frag @@ -0,0 +1,9 @@ +#version 450 +layout(depth_greater) out float gl_FragDepth; +layout(early_fragment_tests) in; + +void main() +{ + gl_FragDepth = 0.5; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/depth-less-than.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/depth-less-than.desktop.frag new file mode 100644 index 000000000..44752eb8f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/depth-less-than.desktop.frag @@ -0,0 +1,9 @@ +#version 450 +layout(depth_less) out float gl_FragDepth; +layout(early_fragment_tests) in; + +void main() +{ + gl_FragDepth = 0.5; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/dual-source-blending.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/dual-source-blending.desktop.frag new file mode 100644 index 000000000..3d946b04a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/dual-source-blending.desktop.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0, index = 0) out vec4 FragColor0; +layout(location = 0, index = 1) out vec4 FragColor1; + +void main() +{ + FragColor0 = vec4(1.0); + FragColor1 = vec4(2.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/fp16.invalid.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/fp16.invalid.desktop.frag new file mode 100644 index 000000000..af8882c43 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/fp16.invalid.desktop.frag @@ -0,0 
+1,161 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif + +struct ResType +{ + f16vec4 _m0; + ivec4 _m1; +}; + +layout(location = 3) in f16vec4 v4; +layout(location = 2) in f16vec3 v3; +layout(location = 0) in float16_t v1; +layout(location = 1) in f16vec2 v2; + +f16mat2 test_mat2(f16vec2 a, f16vec2 b, f16vec2 c, f16vec2 d) +{ + return f16mat2(f16vec2(a), f16vec2(b)) * f16mat2(f16vec2(c), f16vec2(d)); +} + +f16mat3 test_mat3(f16vec3 a, f16vec3 b, f16vec3 c, f16vec3 d, f16vec3 e, f16vec3 f) +{ + return f16mat3(f16vec3(a), f16vec3(b), f16vec3(c)) * f16mat3(f16vec3(d), f16vec3(e), f16vec3(f)); +} + +void test_constants() +{ + float16_t a = 1.0hf; + float16_t b = 1.5hf; + float16_t c = -1.5hf; + float16_t d = (0.0hf / 0.0hf); + float16_t e = (1.0hf / 0.0hf); + float16_t f = (-1.0hf / 0.0hf); + float16_t g = 1014.0hf; + float16_t h = 9.5367431640625e-07hf; +} + +float16_t test_result() +{ + return 1.0hf; +} + +void test_conversions() +{ + float16_t one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = one != 0.0hf; + float d = float(one); + double e = double(one); + float16_t a2 = float16_t(a); + float16_t b2 = float16_t(b); + float16_t c2 = float16_t(c); + float16_t d2 = float16_t(d); + float16_t e2 = float16_t(e); +} + +void test_builtins() +{ + f16vec4 res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + res = asinh(v4); + res = acosh(v4); + res = atanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = inversesqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + res = roundEven(v4); + res = ceil(v4); + res = fract(v4); + res = mod(v4, v4); + f16vec4 tmp; + f16vec4 _231 = modf(v4, tmp); + res = _231; + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = mix(v4, v4, v4); + res = mix(v4, v4, lessThan(v4, v4)); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + bvec4 btmp = isnan(v4); + btmp = isinf(v4); + res = fma(v4, v4, v4); + ResType _275; + _275._m0 = frexp(v4, _275._m1); + ivec4 itmp = _275._m1; + res = _275._m0; + res = ldexp(res, itmp); + uint pack0 = packFloat2x16(v4.xy); + uint pack1 = packFloat2x16(v4.zw); + res = f16vec4(unpackFloat2x16(pack0), unpackFloat2x16(pack1)); + float16_t t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + f16vec3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + btmp = lessThan(v4, v4); + btmp = lessThanEqual(v4, v4); + btmp = greaterThan(v4, v4); + btmp = greaterThanEqual(v4, v4); + btmp = equal(v4, v4); + btmp = notEqual(v4, v4); + res = dFdx(v4); + res = dFdy(v4); + res = dFdxFine(v4); + res = dFdyFine(v4); + res = dFdxCoarse(v4); + res = dFdyCoarse(v4); + res = fwidth(v4); + res = fwidthFine(v4); + res = fwidthCoarse(v4); +} + +void main() +{ + f16vec2 param = v2; + f16vec2 param_1 = v2; + f16vec2 param_2 = v3.xy; + f16vec2 param_3 = v3.xy; + f16mat2 m0 = test_mat2(param, param_1, param_2, param_3); + f16vec3 param_4 = v3; + f16vec3 param_5 = v3; + f16vec3 
param_6 = v3; + f16vec3 param_7 = v4.xyz; + f16vec3 param_8 = v4.xyz; + f16vec3 param_9 = v4.yzw; + f16mat3 m1 = test_mat3(param_4, param_5, param_6, param_7, param_8, param_9); + test_constants(); + test_conversions(); + test_builtins(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag new file mode 100644 index 000000000..2d0809fdb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag @@ -0,0 +1,19 @@ +#version 450 + +layout(binding = 0, std430) buffer Foobar +{ + vec4 _data[]; +} Foobar_1; + +layout(binding = 1, std430) buffer Foobaz +{ + vec4 _data[]; +} Foobaz_1; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + _entryPointOutput = Foobar_1._data[0] + Foobaz_1._data[0]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/image-ms.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/image-ms.desktop.frag new file mode 100644 index 000000000..127698176 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/image-ms.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0, rgba8) uniform image2DMS uImage; +layout(binding = 1, rgba8) uniform image2DMSArray uImageArray; + +void main() +{ + vec4 _29 = imageLoad(uImageArray, ivec3(1, 2, 4), 3); + imageStore(uImage, ivec2(2, 3), 1, imageLoad(uImage, ivec2(1, 2), 2)); + imageStore(uImageArray, ivec3(2, 3, 7), 1, _29); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/image-query.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/image-query.desktop.frag new file mode 100644 index 000000000..05ce10adf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/image-query.desktop.frag @@ -0,0 +1,6 @@ +#version 450 + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/in-block-qualifiers.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/in-block-qualifiers.frag new file mode 100644 index 000000000..d4622801d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/in-block-qualifiers.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in VertexData +{ + flat float f; + centroid vec4 g; + flat int h; + float i; +} vin; + +layout(location = 4) flat in float f; +layout(location = 5) centroid in vec4 g; +layout(location = 6) flat in int h; +layout(location = 7) sample in float i; + +void main() +{ + FragColor = ((((((vec4(vin.f) + vin.g) + vec4(float(vin.h))) + vec4(vin.i)) + vec4(f)) + g) + vec4(float(h))) + vec4(i); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/layout-component.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/layout-component.desktop.frag new file mode 100644 index 000000000..13f17feee --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/layout-component.desktop.frag @@ -0,0 +1,16 @@ +#version 450 + +layout(location = 0) out vec2 FragColor; +layout(location = 0, component = 0) in vec2 v0; +layout(location = 0, component = 2) in float v1; +in Vertex +{ + layout(location = 1, component = 2) float v3; +} _20; + + +void main() +{ + FragColor = (v0 + vec2(v1)) + vec2(_20.v3); +} + diff --git 
a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/query-levels.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/query-levels.desktop.frag new file mode 100644 index 000000000..4a80cbf81 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/query-levels.desktop.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(float(textureQueryLevels(uSampler))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/query-lod.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/query-lod.desktop.frag new file mode 100644 index 000000000..f43543b8c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/query-lod.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTexCoord; + +void main() +{ + FragColor = textureQueryLod(uSampler, vTexCoord).xyxy; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/sampler-ms-query.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/sampler-ms-query.desktop.frag new file mode 100644 index 000000000..4c30ed152 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/sampler-ms-query.desktop.frag @@ -0,0 +1,14 @@ +#version 450 + +layout(binding = 0) uniform sampler2DMS uSampler; +layout(binding = 1) uniform sampler2DMSArray uSamplerArray; +layout(binding = 2, rgba8) uniform readonly writeonly image2DMS uImage; +layout(binding = 3, rgba8) uniform readonly writeonly image2DMSArray uImageArray; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(float(((textureSamples(uSampler) + textureSamples(uSamplerArray)) + imageSamples(uImage)) + imageSamples(uImageArray))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag new file mode 100644 index 000000000..d5e45bda4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag @@ -0,0 +1,26 @@ +#version 450 + +layout(binding = 0) uniform sampler1DShadow uShadow1D; +layout(binding = 1) uniform sampler2DShadow uShadow2D; +layout(binding = 2) uniform sampler1D uSampler1D; +layout(binding = 3) uniform sampler2D uSampler2D; +layout(binding = 4) uniform sampler3D uSampler3D; + +layout(location = 0) out float FragColor; +layout(location = 1) in vec4 vClip4; +layout(location = 2) in vec2 vClip2; +layout(location = 0) in vec3 vClip3; + +void main() +{ + vec4 _20 = vClip4; + _20.y = vClip4.w; + FragColor = textureProj(uShadow1D, vec4(_20.x, 0.0, vClip4.z, _20.y)); + vec4 _30 = vClip4; + _30.z = vClip4.w; + FragColor = textureProj(uShadow2D, vec4(_30.xy, vClip4.z, _30.z)); + FragColor = textureProj(uSampler1D, vClip2).x; + FragColor = textureProj(uSampler2D, vClip3).x; + FragColor = textureProj(uSampler3D, vClip4).x; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/geom/basic.desktop.sso.geom b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/geom/basic.desktop.sso.geom new file mode 100644 index 000000000..8e51cfa36 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/geom/basic.desktop.sso.geom @@ -0,0 +1,36 @@ 
+#version 450 +layout(invocations = 4, triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[]; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + float _37 = float(gl_InvocationID); + vNormal = vin[0].normal + vec3(_37); + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal + vec3(4.0 * _37); + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal + vec3(2.0 * _37); + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/geom/viewport-index.desktop.geom b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/geom/viewport-index.desktop.geom new file mode 100644 index 000000000..773aeb8bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/geom/viewport-index.desktop.geom @@ -0,0 +1,9 @@ +#version 450 +layout(triangles) in; +layout(max_vertices = 4, triangle_strip) out; + +void main() +{ + gl_ViewportIndex = 1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/tesc/basic.desktop.sso.tesc b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/tesc/basic.desktop.sso.tesc new file mode 100644 index 000000000..5e958256a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/tesc/basic.desktop.sso.tesc @@ -0,0 +1,27 @@ +#version 450 +layout(vertices = 1) out; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[gl_MaxPatchVertices]; + +out gl_PerVertex +{ + vec4 gl_Position; +} gl_out[1]; + +layout(location = 0) patch out vec3 vFoo; + +void main() +{ + gl_TessLevelInner[0] = 8.8999996185302734375; + gl_TessLevelInner[1] = 6.900000095367431640625; + gl_TessLevelOuter[0] = 8.8999996185302734375; + gl_TessLevelOuter[1] = 6.900000095367431640625; + gl_TessLevelOuter[2] = 3.900000095367431640625; + gl_TessLevelOuter[3] = 4.900000095367431640625; + vFoo = vec3(1.0); + gl_out[gl_InvocationID].gl_Position = gl_in[0].gl_Position + gl_in[1].gl_Position; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/tese/triangle.desktop.sso.tese b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/tese/triangle.desktop.sso.tese new file mode 100644 index 000000000..31027dae8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/tese/triangle.desktop.sso.tese @@ -0,0 +1,18 @@ +#version 450 +layout(triangles, cw, fractional_even_spacing) in; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[gl_MaxPatchVertices]; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = ((gl_in[0].gl_Position * gl_TessCoord.x) + (gl_in[1].gl_Position * gl_TessCoord.y)) + (gl_in[2].gl_Position * gl_TessCoord.z); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/basic.desktop.sso.vert b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/basic.desktop.sso.vert new file mode 100644 index 000000000..5f527e08c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/basic.desktop.sso.vert @@ -0,0 +1,22 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; +} _16; + +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = _16.uMVP * aVertex; + vNormal = aNormal; +} + diff 
--git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert new file mode 100644 index 000000000..a7c5d761c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert @@ -0,0 +1,20 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; + float gl_PointSize; + float gl_ClipDistance[4]; + float gl_CullDistance[3]; +}; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_ClipDistance[2] = 0.0; + gl_ClipDistance[3] = 0.0; + gl_CullDistance[1] = 4.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/clip-cull-distance.desktop.vert b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/clip-cull-distance.desktop.vert new file mode 100644 index 000000000..2f3d49f55 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/clip-cull-distance.desktop.vert @@ -0,0 +1,15 @@ +#version 450 + +out float gl_ClipDistance[4]; +out float gl_CullDistance[3]; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_ClipDistance[2] = 0.0; + gl_ClipDistance[3] = 0.0; + gl_CullDistance[1] = 4.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/out-block-qualifiers.vert b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/out-block-qualifiers.vert new file mode 100644 index 000000000..7c731684b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/out-block-qualifiers.vert @@ -0,0 +1,27 @@ +#version 450 + +layout(location = 0) out VertexData +{ + flat float f; + centroid vec4 g; + flat int h; + float i; +} vout; + +layout(location = 4) flat out float f; +layout(location = 5) centroid out vec4 g; +layout(location = 6) flat out int h; +layout(location = 7) out float i; + +void main() +{ + vout.f = 10.0; + vout.g = vec4(20.0); + vout.h = 20; + vout.i = 30.0; + f = 10.0; + g = vec4(20.0); + h = 20; + i = 30.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert new file mode 100644 index 000000000..6121dd8f1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert @@ -0,0 +1,8 @@ +#version 450 +#extension GL_ARB_shader_draw_parameters : require + +void main() +{ + gl_Position = vec4(float(gl_BaseVertexARB), float(gl_BaseInstanceARB), float(gl_DrawIDARB), 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert new file mode 100644 index 000000000..b6948fbc4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert @@ -0,0 +1,7 @@ +#version 460 + +void main() +{ + gl_Position = vec4(float(gl_BaseVertex), float(gl_BaseInstance), float(gl_DrawID), 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/array.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/array.flatten.vert new file mode 100644 index 000000000..de4eb3b78 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/array.flatten.vert 
@@ -0,0 +1,10 @@ +#version 310 es + +uniform vec4 UBO[56]; +layout(location = 0) in vec4 aVertex; + +void main() +{ + gl_Position = ((mat4(UBO[40], UBO[41], UBO[42], UBO[43]) * aVertex) + UBO[55]) + ((UBO[50] + UBO[45]) + vec4(UBO[54].x)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/basic.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/basic.flatten.vert new file mode 100644 index 000000000..f7eb758f2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/basic.flatten.vert @@ -0,0 +1,13 @@ +#version 310 es + +uniform vec4 UBO[4]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/copy.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/copy.flatten.vert new file mode 100644 index 000000000..33caec4f7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/copy.flatten.vert @@ -0,0 +1,27 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + vec4 Color; +}; + +uniform vec4 UBO[12]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec4 vColor; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vColor = vec4(0.0); + for (int _96 = 0; _96 < 4; ) + { + vec3 _68 = aVertex.xyz - Light(UBO[_96 * 2 + 4].xyz, UBO[_96 * 2 + 4].w, UBO[_96 * 2 + 5]).Position; + vColor += ((UBO[_96 * 2 + 5] * clamp(1.0 - (length(_68) / Light(UBO[_96 * 2 + 4].xyz, UBO[_96 * 2 + 4].w, UBO[_96 * 2 + 5]).Radius), 0.0, 1.0)) * dot(aNormal, normalize(_68))); + _96++; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/dynamic.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/dynamic.flatten.vert new file mode 100644 index 000000000..7129af286 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/dynamic.flatten.vert @@ -0,0 +1,27 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + vec4 Color; +}; + +uniform vec4 UBO[12]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec4 vColor; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vColor = vec4(0.0); + for (int _82 = 0; _82 < 4; ) + { + vec3 _54 = aVertex.xyz - UBO[_82 * 2 + 4].xyz; + vColor += ((UBO[_82 * 2 + 5] * clamp(1.0 - (length(_54) / UBO[_82 * 2 + 4].w), 0.0, 1.0)) * dot(aNormal, normalize(_54))); + _82++; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/matrix-conversion.flatten.frag b/3rdparty/spirv-cross/reference/opt/shaders/flatten/matrix-conversion.flatten.frag new file mode 100644 index 000000000..ee79bf5b8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/matrix-conversion.flatten.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform vec4 UBO[4]; +layout(location = 0) out vec3 FragColor; +layout(location = 0) flat in vec3 vNormal; + +void main() +{ + mat4 _19 = mat4(UBO[0], UBO[1], UBO[2], UBO[3]); + FragColor = mat3(_19[0].xyz, _19[1].xyz, _19[2].xyz) * vNormal; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/matrixindex.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/matrixindex.flatten.vert new file mode 100644 index 
000000000..f6d0fa486 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/matrixindex.flatten.vert @@ -0,0 +1,19 @@ +#version 310 es + +uniform vec4 UBO[14]; +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; + +void main() +{ + gl_Position = vec4(0.0); + oA = UBO[1]; + oB = vec4(UBO[4].y, UBO[5].y, UBO[6].y, UBO[7].y); + oC = UBO[9]; + oD = vec4(UBO[10].x, UBO[11].x, UBO[12].x, UBO[13].x); + oE = vec4(UBO[1].z, UBO[6].y, UBO[9].z, UBO[12].y); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag b/3rdparty/spirv-cross/reference/opt/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag new file mode 100644 index 000000000..ef6bb526a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag @@ -0,0 +1,24 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uTextures[2 * 3 * 1]; + +layout(location = 1) in vec2 vUV; +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in int vIndex; + +void main() +{ + vec4 values3[2 * 3 * 1]; + for (int z = 0; z < 2; z++) + { + for (int y = 0; y < 3; y++) + { + for (int x = 0; x < 1; x++) + { + values3[z * 3 * 1 + y * 1 + x] = texture(uTextures[z * 3 * 1 + y * 1 + x], vUV); + } + } + } + FragColor = (values3[1 * 3 * 1 + 2 * 1 + 0] + values3[0 * 3 * 1 + 2 * 1 + 0]) + values3[(vIndex + 1) * 3 * 1 + 2 * 1 + vIndex]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/multiindex.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/multiindex.flatten.vert new file mode 100644 index 000000000..3850bf6c7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/multiindex.flatten.vert @@ -0,0 +1,10 @@ +#version 310 es + +uniform vec4 UBO[15]; +layout(location = 0) in ivec2 aIndex; + +void main() +{ + gl_Position = UBO[aIndex.x * 5 + aIndex.y * 1 + 0]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/push-constant.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/push-constant.flatten.vert new file mode 100644 index 000000000..216c1f9d1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/push-constant.flatten.vert @@ -0,0 +1,13 @@ +#version 310 es + +uniform vec4 PushMe[6]; +layout(location = 1) in vec4 Pos; +layout(location = 0) out vec2 vRot; +layout(location = 0) in vec2 Rot; + +void main() +{ + gl_Position = mat4(PushMe[0], PushMe[1], PushMe[2], PushMe[3]) * Pos; + vRot = (mat2(PushMe[4].xy, PushMe[4].zw) * Rot) + vec2(PushMe[5].z); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/rowmajor.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/rowmajor.flatten.vert new file mode 100644 index 000000000..b74aa004b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/rowmajor.flatten.vert @@ -0,0 +1,10 @@ +#version 310 es + +uniform vec4 UBO[12]; +layout(location = 0) in vec4 aVertex; + +void main() +{ + gl_Position = (mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex) + (aVertex * mat4(UBO[4], UBO[5], UBO[6], UBO[7])); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/struct.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/struct.flatten.vert new file mode 100644 index 000000000..35db010c7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/struct.flatten.vert @@ -0,0 
+1,22 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + vec4 Color; +}; + +uniform vec4 UBO[6]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec4 vColor; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vColor = vec4(0.0); + vec3 _39 = aVertex.xyz - UBO[4].xyz; + vColor += ((UBO[5] * clamp(1.0 - (length(_39) / UBO[4].w), 0.0, 1.0)) * dot(aNormal, normalize(_39))); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/struct.rowmajor.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/struct.rowmajor.flatten.vert new file mode 100644 index 000000000..709d99291 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/struct.rowmajor.flatten.vert @@ -0,0 +1,21 @@ +#version 310 es + +struct Foo +{ + mat3x4 MVP0; + mat3x4 MVP1; +}; + +uniform vec4 UBO[8]; +layout(location = 0) in vec4 v0; +layout(location = 1) in vec4 v1; +layout(location = 0) out vec3 V0; +layout(location = 1) out vec3 V1; + +void main() +{ + Foo _20 = Foo(transpose(mat4x3(UBO[0].xyz, UBO[1].xyz, UBO[2].xyz, UBO[3].xyz)), transpose(mat4x3(UBO[4].xyz, UBO[5].xyz, UBO[6].xyz, UBO[7].xyz))); + V0 = v0 * _20.MVP0; + V1 = v1 * _20.MVP1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/swizzle.flatten.vert b/3rdparty/spirv-cross/reference/opt/shaders/flatten/swizzle.flatten.vert new file mode 100644 index 000000000..92afb475e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/swizzle.flatten.vert @@ -0,0 +1,21 @@ +#version 310 es + +uniform vec4 UBO[8]; +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; +layout(location = 5) out vec4 oF; + +void main() +{ + gl_Position = vec4(0.0); + oA = UBO[0]; + oB = vec4(UBO[1].xy, UBO[1].zw); + oC = vec4(UBO[2].x, UBO[3].xyz); + oD = vec4(UBO[4].xyz, UBO[4].w); + oE = vec4(UBO[5].x, UBO[5].y, UBO[5].z, UBO[5].w); + oF = vec4(UBO[6].x, UBO[6].zw, UBO[7].x); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/flatten/types.flatten.frag b/3rdparty/spirv-cross/reference/opt/shaders/flatten/types.flatten.frag new file mode 100644 index 000000000..a74327d97 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/flatten/types.flatten.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump ivec4 UBO1[2]; +uniform mediump uvec4 UBO2[2]; +uniform vec4 UBO0[2]; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = ((((vec4(UBO1[0]) + vec4(UBO1[1])) + vec4(UBO2[0])) + vec4(UBO2[1])) + UBO0[0]) + UBO0[1]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/16bit-constants.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/16bit-constants.frag new file mode 100644 index 000000000..48fbad97a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/16bit-constants.frag @@ -0,0 +1,29 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. 
+#endif +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif + +layout(location = 0) out float16_t foo; +layout(location = 1) out int16_t bar; +layout(location = 2) out uint16_t baz; + +void main() +{ + foo = 1.0hf; + bar = 2s; + baz = 3us; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..f50d0d4e4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _17[5] = float[](1.0, 2.0, 3.0, 4.0, 5.0); + +layout(location = 0) out vec4 FragColor; + +void main() +{ + for (mediump int _46 = 0; _46 < 4; ) + { + mediump int _33 = _46 + 1; + FragColor += vec4(_17[_33]); + _46 = _33; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/basic.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/basic.frag new file mode 100644 index 000000000..2a4e44042 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/basic.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTex; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; + +void main() +{ + FragColor = vColor * texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..56b4c89a5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0, std430) buffer UBO +{ + vec4 results[1024]; +} _34; + +layout(binding = 1) uniform highp isampler2D Buf; + +layout(location = 0) flat in mediump int vIn; +layout(location = 1) flat in mediump int vIn2; +layout(location = 0) out vec4 FragColor; + +void main() +{ + mediump int _40 = texelFetch(Buf, ivec2(gl_FragCoord.xy), 0).x % 16; + FragColor = (_34.results[_40] + _34.results[_40]) + _34.results[(vIn * vIn) + (vIn2 * vIn2)]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/composite-extract-forced-temporary.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/composite-extract-forced-temporary.frag new file mode 100644 index 000000000..eb59732fd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/composite-extract-forced-temporary.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D Texture; + +layout(location = 0) in vec2 vTexCoord; +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec4 _19 = texture(Texture, vTexCoord); + float _22 = _19.x; + FragColor = vec4(_22 * _22); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/constant-array.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/constant-array.frag new file mode 100644 index 000000000..749fc8098 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/constant-array.frag @@ -0,0 +1,22 @@ 
+#version 310 es +precision mediump float; +precision highp int; + +const vec4 _37[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0)); +const vec4 _55[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0))); + +struct Foobar +{ + float a; + float b; +}; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int index; + +void main() +{ + Foobar indexable[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0)); + FragColor = ((_37[index] + _55[index][index + 1]) + vec4(30.0)) + vec4(indexable[index].a + indexable[index].b); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/constant-composites.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/constant-composites.frag new file mode 100644 index 000000000..d4ab613e8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/constant-composites.frag @@ -0,0 +1,22 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _16[4] = float[](1.0, 4.0, 3.0, 2.0); + +struct Foo +{ + float a; + float b; +}; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int line; + +void main() +{ + Foo foos[2] = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0)); + FragColor = vec4(_16[line]); + FragColor += vec4(foos[line].a * foos[1 - line].a); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/false-loop-init.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/false-loop-init.frag new file mode 100644 index 000000000..7ded0eab4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/false-loop-init.frag @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 result; +layout(location = 0) in vec4 accum; + +void main() +{ + result = vec4(0.0); + for (mediump int _48 = 0; _48 < 4; ) + { + result += accum; + _48 += int((accum.y > 10.0) ? 
40u : 30u); + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/flush_params.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/flush_params.frag new file mode 100644 index 000000000..16b499414 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/flush_params.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(10.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/for-loop-init.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/for-loop-init.frag new file mode 100644 index 000000000..3aee71c7a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/for-loop-init.frag @@ -0,0 +1,67 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out mediump int FragColor; + +void main() +{ + mediump int _145; + for (;;) + { + FragColor = 16; + _145 = 0; + for (; _145 < 25; ) + { + FragColor += 10; + _145++; + continue; + } + for (mediump int _146 = 1; _146 < 30; ) + { + FragColor += 11; + _146++; + continue; + } + mediump int _147; + _147 = 0; + for (; _147 < 20; ) + { + FragColor += 12; + _147++; + continue; + } + mediump int _62 = _147 + 3; + FragColor += _62; + if (_62 == 40) + { + for (mediump int _151 = 0; _151 < 40; ) + { + FragColor += 13; + _151++; + continue; + } + break; + } + FragColor += _62; + mediump ivec2 _148; + _148 = ivec2(0); + for (; _148.x < 10; ) + { + FragColor += _148.y; + mediump ivec2 _144 = _148; + _144.x = _148.x + 4; + _148 = _144; + continue; + } + for (mediump int _150 = _62; _150 < 40; ) + { + FragColor += _150; + _150++; + continue; + } + FragColor += _62; + break; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/frexp-modf.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/frexp-modf.frag new file mode 100644 index 000000000..25f3360aa --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/frexp-modf.frag @@ -0,0 +1,33 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct ResType +{ + highp float _m0; + int _m1; +}; + +struct ResType_1 +{ + highp vec2 _m0; + ivec2 _m1; +}; + +layout(location = 0) in float v0; +layout(location = 1) in vec2 v1; +layout(location = 0) out float FragColor; + +void main() +{ + ResType _22; + _22._m0 = frexp(v0 + 1.0, _22._m1); + ResType_1 _35; + _35._m0 = frexp(v1, _35._m1); + float r0; + float _41 = modf(v0, r0); + vec2 r1; + vec2 _45 = modf(v1, r1); + FragColor = ((((_22._m0 + _35._m0.x) + _35._m0.y) + _41) + _45.x) + _45.y; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/front-facing.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/front-facing.frag new file mode 100644 index 000000000..cc9aecc8b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/front-facing.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + if (gl_FrontFacing) + { + FragColor = vA; + } + else + { + FragColor = vB; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/gather-dref.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/gather-dref.frag new file mode 100644 index 000000000..5416f79cb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/gather-dref.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp 
int; + +layout(binding = 0) uniform mediump sampler2DShadow uT; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec3 vUV; + +void main() +{ + FragColor = textureGather(uT, vUV.xy, vUV.z); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/ground.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/ground.frag new file mode 100644 index 000000000..f59a402fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/ground.frag @@ -0,0 +1,35 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 4, std140) uniform GlobalPSData +{ + vec4 g_CamPos; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_ResolutionParams; + vec4 g_TimeParams; + vec4 g_FogColor_Distance; +} _101; + +layout(binding = 2) uniform mediump sampler2D TexNormalmap; + +layout(location = 3) out vec4 LightingOut; +layout(location = 2) out vec4 NormalOut; +layout(location = 1) out vec4 SpecularOut; +layout(location = 0) out vec4 AlbedoOut; +layout(location = 0) in vec2 TexCoord; +layout(location = 1) in vec3 EyeVec; + +void main() +{ + vec3 _68 = normalize((texture(TexNormalmap, TexCoord).xyz * 2.0) - vec3(1.0)); + float _113 = smoothstep(0.0, 0.1500000059604644775390625, (_101.g_CamPos.y + EyeVec.y) * 0.004999999888241291046142578125); + float _125 = smoothstep(0.699999988079071044921875, 0.75, _68.y); + vec3 _130 = mix(vec3(0.100000001490116119384765625), mix(vec3(0.100000001490116119384765625, 0.300000011920928955078125, 0.100000001490116119384765625), vec3(0.800000011920928955078125), vec3(_113)), vec3(_125)); + LightingOut = vec4(0.0); + NormalOut = vec4((_68 * 0.5) + vec3(0.5), 0.0); + SpecularOut = vec4(1.0 - (_125 * _113), 0.0, 0.0, 0.0); + AlbedoOut = vec4(_130 * _130, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/helper-invocation.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/helper-invocation.frag new file mode 100644 index 000000000..759a21bdc --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/helper-invocation.frag @@ -0,0 +1,23 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uSampler; + +layout(location = 0) in vec2 vUV; +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec4 _51; + if (!gl_HelperInvocation) + { + _51 = textureLod(uSampler, vUV, 0.0); + } + else + { + _51 = vec4(1.0); + } + FragColor = _51; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag new file mode 100644 index 000000000..91d7e37cd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag @@ -0,0 +1,28 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int vA; +layout(location = 1) flat in mediump int vB; + +void main() +{ + FragColor = vec4(0.0); + mediump int _58; + for (mediump int _57 = 0, _60 = 0; _57 < vA; FragColor += vec4(1.0), _60 = _58, _57 += (_58 + 10)) + { + if ((vA + _57) == 20) + { + _58 = 50; + continue; + } + else + { + _58 = ((vB + _57) == 40) ? 
60 : _60; + continue; + } + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/image-load-store-uint-coord.asm.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/image-load-store-uint-coord.asm.frag new file mode 100644 index 000000000..5dfb4d002 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/image-load-store-uint-coord.asm.frag @@ -0,0 +1,17 @@ +#version 450 + +layout(binding = 1, rgba32f) uniform image2D RWIm; +layout(binding = 0, rgba32f) uniform writeonly imageBuffer RWBuf; +layout(binding = 1) uniform sampler2D ROIm; +layout(binding = 0) uniform samplerBuffer ROBuf; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + imageStore(RWIm, ivec2(uvec2(10u)), vec4(10.0, 0.5, 8.0, 2.0)); + vec4 _69 = imageLoad(RWIm, ivec2(uvec2(30u))); + imageStore(RWBuf, int(80u), _69); + _entryPointOutput = (_69 + texelFetch(ROIm, ivec2(uvec2(50u, 60u)), 0)) + texelFetch(ROBuf, int(80u)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/loop-dominator-and-switch-default.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/loop-dominator-and-switch-default.frag new file mode 100644 index 000000000..a9457f22d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/loop-dominator-and-switch-default.frag @@ -0,0 +1,68 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 fragColor; + +vec4 _80; + +void main() +{ + mediump int _18 = int(_80.x); + vec4 _82; + _82 = _80; + vec4 _89; + for (mediump int _81 = 0; _81 < _18; _82 = _89, _81++) + { + vec4 _83; + switch (_18) + { + case 0: + { + vec4 _74 = _82; + _74.y = 0.0; + _83 = _74; + break; + } + case 1: + { + vec4 _76 = _82; + _76.y = 1.0; + _83 = _76; + break; + } + default: + { + mediump int _84; + vec4 _88; + _88 = _82; + _84 = 0; + mediump int _50; + for (;;) + { + _50 = _84 + 1; + if (_84 < _18) + { + vec4 _72 = _88; + _72.y = _88.y + 0.5; + _88 = _72; + _84 = _50; + continue; + } + else + { + break; + } + } + _89 = _88; + continue; + } + } + vec4 _79 = _83; + _79.y = _83.y + 0.5; + _89 = _79; + continue; + } + fragColor = _82; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/lut-promotion.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/lut-promotion.frag new file mode 100644 index 000000000..2f57d84fd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/lut-promotion.frag @@ -0,0 +1,41 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _16[16] = float[](1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0); +const vec4 _60[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); +const vec4 _104[4] = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + +layout(location = 0) out float FragColor; +layout(location = 0) flat in mediump int index; + +void main() +{ + FragColor = _16[index]; + if (index < 10) + { + FragColor += _16[index ^ 1]; + } + else + { + FragColor += _16[index & 1]; + } + bool _63 = index > 30; + if (_63) + { + FragColor += _60[index & 3].y; + } + else + { + FragColor += _60[index & 1].x; + } + vec4 foobar[4] = _60; + if (_63) + { + foobar[1].z = 20.0; + } + mediump int _91 = index & 3; + FragColor += foobar[_91].z; + FragColor += _104[_91].z; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/mix.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/mix.frag new file mode 100644 index 000000000..f791d45ce --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders/frag/mix.frag @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn0; +layout(location = 1) in vec4 vIn1; +layout(location = 2) in float vIn2; +layout(location = 3) in float vIn3; + +void main() +{ + FragColor = vec4(vIn0.x, vIn1.y, vIn0.z, vIn0.w); + FragColor = vec4(vIn3); + FragColor = vIn0.xyzw; + FragColor = vec4(vIn2); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/partial-write-preserve.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/partial-write-preserve.frag new file mode 100644 index 000000000..d6c1fe8b4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/partial-write-preserve.frag @@ -0,0 +1,8 @@ +#version 310 es +precision mediump float; +precision highp int; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/pls.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/pls.frag new file mode 100644 index 000000000..1cafdbd36 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/pls.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 PLSOut0; +layout(location = 0) in vec4 PLSIn0; +layout(location = 1) out vec4 PLSOut1; +layout(location = 1) in vec4 PLSIn1; +layout(location = 2) out vec4 PLSOut2; +layout(location = 2) in vec4 PLSIn2; +layout(location = 3) out vec4 PLSOut3; +layout(location = 3) in vec4 PLSIn3; + +void main() +{ + PLSOut0 = PLSIn0 * 2.0; + PLSOut1 = PLSIn1 * 6.0; + PLSOut2 = PLSIn2 * 7.0; + PLSOut3 = PLSIn3 * 4.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/sample-parameter.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/sample-parameter.frag new file mode 100644 index 000000000..3c130e68d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/sample-parameter.frag @@ -0,0 +1,13 @@ +#version 310 es +#extension GL_OES_sample_variables : require +precision mediump float; +precision highp int; + +layout(location = 0) out vec2 FragColor; + +void main() +{ + FragColor = (gl_SamplePosition + vec2(float(gl_SampleMaskIn[0]))) + vec2(float(gl_SampleID)); + gl_SampleMask[0] = 1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler-ms.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler-ms.frag new file mode 100644 index 000000000..d78b805d0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler-ms.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2DMS uSampler; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec2 _17 = ivec2(gl_FragCoord.xy); + FragColor = ((texelFetch(uSampler, _17, 0) + texelFetch(uSampler, _17, 1)) + texelFetch(uSampler, _17, 2)) + texelFetch(uSampler, _17, 3); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler-proj.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler-proj.frag new file mode 100644 index 000000000..865dec6c8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler-proj.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTex; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vTex; + +void main() +{ + highp vec4 _19 = vTex; + _19.z = vTex.w; + FragColor = textureProj(uTex, _19.xyz); +} + 
diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler.frag new file mode 100644 index 000000000..2a4e44042 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/sampler.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTex; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; + +void main() +{ + FragColor = vColor * texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..4177f9e99 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/switch-unsigned-case.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0, std140) uniform Buff +{ + mediump uint TestVal; +} _15; + +layout(location = 0) out vec4 fsout_Color; + +void main() +{ + fsout_Color = vec4(1.0); + switch (_15.TestVal) + { + case 0u: + { + fsout_Color = vec4(0.100000001490116119384765625); + break; + } + case 1u: + { + fsout_Color = vec4(0.20000000298023223876953125); + break; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/swizzle.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/swizzle.frag new file mode 100644 index 000000000..a229e5b0d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/swizzle.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D samp; + +layout(location = 0) out vec4 FragColor; +layout(location = 2) in vec2 vUV; +layout(location = 1) in vec3 vNormal; + +void main() +{ + FragColor = vec4(texture(samp, vUV).xyz, 1.0); + FragColor = vec4(texture(samp, vUV).xz, 1.0, 4.0); + FragColor = vec4(texture(samp, vUV).xx, texture(samp, vUV + vec2(0.100000001490116119384765625)).yy); + FragColor = vec4(vNormal, 1.0); + FragColor = vec4(vNormal + vec3(1.7999999523162841796875), 1.0); + FragColor = vec4(vUV, vUV + vec2(1.7999999523162841796875)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..416f764d4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/texel-fetch-offset.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTexture; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + mediump ivec2 _22 = ivec2(gl_FragCoord.xy); + FragColor = texelFetchOffset(uTexture, _22, 0, ivec2(1)); + FragColor += texelFetchOffset(uTexture, _22, 0, ivec2(-1, 1)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/ubo_layout.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/ubo_layout.frag new file mode 100644 index 000000000..4b66e1396 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/ubo_layout.frag @@ -0,0 +1,31 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct Str +{ + mat4 foo; +}; + +struct Str_1 +{ + mat4 foo; +}; + +layout(binding = 0, std140) uniform UBO1 +{ + layout(row_major) Str foo; +} ubo1; + +layout(binding = 1, std140) uniform UBO2 +{ + Str_1 foo; +} ubo0; + +layout(location = 0) out vec4 
FragColor; + +void main() +{ + FragColor = ubo1.foo.foo[0] + ubo0.foo.foo[0]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/frag/unary-enclose.frag b/3rdparty/spirv-cross/reference/opt/shaders/frag/unary-enclose.frag new file mode 100644 index 000000000..e7b0bf534 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/frag/unary-enclose.frag @@ -0,0 +1,12 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn; + +void main() +{ + FragColor = vIn; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/basic.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/basic.geom new file mode 100644 index 000000000..f91136f60 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/basic.geom @@ -0,0 +1,27 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(invocations = 4, triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + float _37 = float(gl_InvocationID); + vNormal = vin[0].normal + vec3(_37); + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal + vec3(4.0 * _37); + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal + vec3(2.0 * _37); + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/lines-adjacency.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/lines-adjacency.geom new file mode 100644 index 000000000..46a21e9fb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/lines-adjacency.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(lines_adjacency) in; +layout(max_vertices = 3, line_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[4]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/lines.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/lines.geom new file mode 100644 index 000000000..c5aaa53d3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/lines.geom @@ -0,0 +1,23 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(lines) in; +layout(max_vertices = 2, line_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[2]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/points.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/points.geom new file mode 100644 index 000000000..4d59137c3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/points.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(points) in; +layout(max_vertices = 3, points) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[1]; + + +void main() +{ + 
gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/single-invocation.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/single-invocation.geom new file mode 100644 index 000000000..fdccacc04 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/single-invocation.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/triangles-adjacency.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/triangles-adjacency.geom new file mode 100644 index 000000000..e9e6857a1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/triangles-adjacency.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(triangles_adjacency) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[6]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/geom/triangles.geom b/3rdparty/spirv-cross/reference/opt/shaders/geom/triangles.geom new file mode 100644 index 000000000..fdccacc04 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/geom/triangles.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/explicit-lod.legacy.frag b/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/explicit-lod.legacy.frag new file mode 100644 index 000000000..6e8dbf1a9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/explicit-lod.legacy.frag @@ -0,0 +1,12 @@ +#version 100 +#extension GL_EXT_shader_texture_lod : require +precision mediump float; +precision highp int; + +uniform mediump sampler2D tex; + +void main() +{ + gl_FragData[0] = texture2DLodEXT(tex, vec2(0.4000000059604644775390625, 0.60000002384185791015625), 0.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/io-blocks.legacy.frag 
b/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/io-blocks.legacy.frag new file mode 100644 index 000000000..d5a60d53e --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/io-blocks.legacy.frag @@ -0,0 +1,12 @@ +#version 100 +precision mediump float; +precision highp int; + +varying vec4 vin_color; +varying highp vec3 vin_normal; + +void main() +{ + gl_FragData[0] = vin_color + vin_normal.xyzz; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/struct-varying.legacy.frag b/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/struct-varying.legacy.frag new file mode 100644 index 000000000..e131f2e21 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/struct-varying.legacy.frag @@ -0,0 +1,18 @@ +#version 100 +precision mediump float; +precision highp int; + +struct Inputs +{ + highp vec4 a; + highp vec2 b; +}; + +varying highp vec4 vin_a; +varying highp vec2 vin_b; + +void main() +{ + gl_FragData[0] = ((((Inputs(vin_a, vin_b).a + Inputs(vin_a, vin_b).b.xxyy) + Inputs(vin_a, vin_b).a) + Inputs(vin_a, vin_b).b.yyxx) + vin_a) + vin_b.xxyy; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/implicit-lod.legacy.vert b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/implicit-lod.legacy.vert new file mode 100644 index 000000000..6e4410744 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/implicit-lod.legacy.vert @@ -0,0 +1,9 @@ +#version 100 + +uniform mediump sampler2D tex; + +void main() +{ + gl_Position = texture2D(tex, vec2(0.4000000059604644775390625, 0.60000002384185791015625)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/io-block.legacy.vert b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/io-block.legacy.vert new file mode 100644 index 000000000..3c518dc79 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/io-block.legacy.vert @@ -0,0 +1,13 @@ +#version 100 + +attribute vec4 Position; +varying vec4 vout_color; +varying vec3 vout_normal; + +void main() +{ + gl_Position = Position; + vout_color = vec4(1.0); + vout_normal = vec3(0.5); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/struct-varying.legacy.vert b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/struct-varying.legacy.vert new file mode 100644 index 000000000..01a3d7353 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/struct-varying.legacy.vert @@ -0,0 +1,29 @@ +#version 100 + +struct Output +{ + vec4 a; + vec2 b; +}; + +varying vec4 vout_a; +varying vec2 vout_b; + +void main() +{ + { + Output vout = Output(vec4(0.5), vec2(0.25)); + vout_a = vout.a; + vout_b = vout.b; + } + { + Output vout = Output(vec4(0.5), vec2(0.25)); + vout_a = vout.a; + vout_b = vout.b; + } + vout_a = Output(vout_a, vout_b).a; + vout_b = Output(vout_a, vout_b).b; + vout_a.x = 1.0; + vout_b.y = 1.0; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/transpose.legacy.vert b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/transpose.legacy.vert new file mode 100644 index 000000000..0d30c0e24 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/legacy/vert/transpose.legacy.vert @@ -0,0 +1,18 @@ +#version 100 + +struct Buffer +{ + mat4 MVPRowMajor; + mat4 MVPColMajor; + mat4 M; +}; + +uniform Buffer _13; + +attribute vec4 Position; + +void main() +{ + gl_Position = (((_13.M * (Position * _13.MVPRowMajor)) + (_13.M * (_13.MVPColMajor * Position))) + (_13.M * 
(_13.MVPRowMajor * Position))) + (_13.M * (Position * _13.MVPColMajor)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tesc/basic.tesc b/3rdparty/spirv-cross/reference/opt/shaders/tesc/basic.tesc new file mode 100644 index 000000000..6019151ad --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tesc/basic.tesc @@ -0,0 +1,17 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(vertices = 1) out; + +layout(location = 0) patch out vec3 vFoo; + +void main() +{ + gl_TessLevelInner[0] = 8.8999996185302734375; + gl_TessLevelInner[1] = 6.900000095367431640625; + gl_TessLevelOuter[0] = 8.8999996185302734375; + gl_TessLevelOuter[1] = 6.900000095367431640625; + gl_TessLevelOuter[2] = 3.900000095367431640625; + gl_TessLevelOuter[3] = 4.900000095367431640625; + vFoo = vec3(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tesc/water_tess.tesc b/3rdparty/spirv-cross/reference/opt/shaders/tesc/water_tess.tesc new file mode 100644 index 000000000..69307d3c9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tesc/water_tess.tesc @@ -0,0 +1,79 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(vertices = 1) out; + +layout(std140) uniform UBO +{ + vec4 uScale; + vec3 uCamPos; + vec2 uPatchSize; + vec2 uMaxTessLevel; + float uDistanceMod; + vec4 uFrustum[6]; +} _41; + +layout(location = 1) patch out vec2 vOutPatchPosBase; +layout(location = 2) patch out vec4 vPatchLods; +layout(location = 0) in vec2 vPatchPosBase[]; + +void main() +{ + vec2 _430 = (vPatchPosBase[0] - vec2(10.0)) * _41.uScale.xy; + vec2 _440 = ((vPatchPosBase[0] + _41.uPatchSize) + vec2(10.0)) * _41.uScale.xy; + vec3 _445 = vec3(_430.x, -10.0, _430.y); + vec3 _450 = vec3(_440.x, 10.0, _440.y); + vec4 _466 = vec4((_445 + _450) * 0.5, 1.0); + vec3 _513 = vec3(length(_450 - _445) * (-0.5)); + bool _515 = any(lessThanEqual(vec3(dot(_41.uFrustum[0], _466), dot(_41.uFrustum[1], _466), dot(_41.uFrustum[2], _466)), _513)); + bool _525; + if (!_515) + { + _525 = any(lessThanEqual(vec3(dot(_41.uFrustum[3], _466), dot(_41.uFrustum[4], _466), dot(_41.uFrustum[5], _466)), _513)); + } + else + { + _525 = _515; + } + if (!(!_525)) + { + gl_TessLevelOuter[0] = -1.0; + gl_TessLevelOuter[1] = -1.0; + gl_TessLevelOuter[2] = -1.0; + gl_TessLevelOuter[3] = -1.0; + gl_TessLevelInner[0] = -1.0; + gl_TessLevelInner[1] = -1.0; + } + else + { + vOutPatchPosBase = vPatchPosBase[0]; + vec2 _678 = (vPatchPosBase[0] + (vec2(-0.5) * _41.uPatchSize)) * _41.uScale.xy; + vec2 _706 = (vPatchPosBase[0] + (vec2(0.5, -0.5) * _41.uPatchSize)) * _41.uScale.xy; + float _725 = clamp(log2((length(_41.uCamPos - vec3(_706.x, 0.0, _706.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x); + vec2 _734 = (vPatchPosBase[0] + (vec2(1.5, -0.5) * _41.uPatchSize)) * _41.uScale.xy; + vec2 _762 = (vPatchPosBase[0] + (vec2(-0.5, 0.5) * _41.uPatchSize)) * _41.uScale.xy; + float _781 = clamp(log2((length(_41.uCamPos - vec3(_762.x, 0.0, _762.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x); + vec2 _790 = (vPatchPosBase[0] + (vec2(0.5) * _41.uPatchSize)) * _41.uScale.xy; + float _809 = clamp(log2((length(_41.uCamPos - vec3(_790.x, 0.0, _790.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x); + vec2 _818 = (vPatchPosBase[0] + (vec2(1.5, 0.5) * _41.uPatchSize)) * _41.uScale.xy; + float _837 = clamp(log2((length(_41.uCamPos - vec3(_818.x, 0.0, _818.y)) + 
9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x); + vec2 _846 = (vPatchPosBase[0] + (vec2(-0.5, 1.5) * _41.uPatchSize)) * _41.uScale.xy; + vec2 _874 = (vPatchPosBase[0] + (vec2(0.5, 1.5) * _41.uPatchSize)) * _41.uScale.xy; + float _893 = clamp(log2((length(_41.uCamPos - vec3(_874.x, 0.0, _874.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x); + vec2 _902 = (vPatchPosBase[0] + (vec2(1.5) * _41.uPatchSize)) * _41.uScale.xy; + float _612 = dot(vec4(_781, _809, clamp(log2((length(_41.uCamPos - vec3(_846.x, 0.0, _846.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x), _893), vec4(0.25)); + float _618 = dot(vec4(clamp(log2((length(_41.uCamPos - vec3(_678.x, 0.0, _678.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x), _725, _781, _809), vec4(0.25)); + float _624 = dot(vec4(_725, clamp(log2((length(_41.uCamPos - vec3(_734.x, 0.0, _734.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x), _809, _837), vec4(0.25)); + float _630 = dot(vec4(_809, _837, _893, clamp(log2((length(_41.uCamPos - vec3(_902.x, 0.0, _902.y)) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod), 0.0, _41.uMaxTessLevel.x)), vec4(0.25)); + vec4 _631 = vec4(_612, _618, _624, _630); + vPatchLods = _631; + vec4 _928 = exp2(-min(_631, _631.yzwx)) * _41.uMaxTessLevel.y; + gl_TessLevelOuter[0] = _928.x; + gl_TessLevelOuter[1] = _928.y; + gl_TessLevelOuter[2] = _928.z; + gl_TessLevelOuter[3] = _928.w; + float _935 = _41.uMaxTessLevel.y * exp2(-min(min(min(_612, _618), min(_624, _630)), _809)); + gl_TessLevelInner[0] = _935; + gl_TessLevelInner[1] = _935; + } +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/ccw.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/ccw.tese new file mode 100644 index 000000000..a2a4508ac --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/ccw.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, ccw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/cw.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/cw.tese new file mode 100644 index 000000000..95781493d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/cw.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/equal.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/equal.tese new file mode 100644 index 000000000..6d30518a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/equal.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, equal_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/fractional_even.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/fractional_even.tese new file mode 100644 index 000000000..95781493d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/fractional_even.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git 
a/3rdparty/spirv-cross/reference/opt/shaders/tese/fractional_odd.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/fractional_odd.tese new file mode 100644 index 000000000..608c19aba --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/fractional_odd.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_odd_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/input-array.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/input-array.tese new file mode 100644 index 000000000..8a1aaf9fd --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/input-array.tese @@ -0,0 +1,11 @@ +#version 450 +layout(quads, ccw, fractional_odd_spacing) in; + +layout(location = 0) in vec4 Floats[]; +layout(location = 2) in vec4 Floats2[]; + +void main() +{ + gl_Position = (Floats[0] * gl_TessCoord.x) + (Floats2[1] * gl_TessCoord.y); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/line.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/line.tese new file mode 100644 index 000000000..8b6ad8da2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/line.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(isolines, point_mode, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/triangle.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/triangle.tese new file mode 100644 index 000000000..95781493d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/triangle.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/tese/water_tess.tese b/3rdparty/spirv-cross/reference/opt/shaders/tese/water_tess.tese new file mode 100644 index 000000000..a2aa10447 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/tese/water_tess.tese @@ -0,0 +1,36 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(quads, cw, fractional_even_spacing) in; + +layout(binding = 1, std140) uniform UBO +{ + mat4 uMVP; + vec4 uScale; + vec2 uInvScale; + vec3 uCamPos; + vec2 uPatchSize; + vec2 uInvHeightmapSize; +} _31; + +layout(binding = 0) uniform mediump sampler2D uHeightmapDisplacement; + +layout(location = 0) patch in vec2 vOutPatchPosBase; +layout(location = 1) patch in vec4 vPatchLods; +layout(location = 1) out vec4 vGradNormalTex; +layout(location = 0) out vec3 vWorld; + +void main() +{ + vec2 _201 = vOutPatchPosBase + (gl_TessCoord.xy * _31.uPatchSize); + vec2 _214 = mix(vPatchLods.yx, vPatchLods.zw, vec2(gl_TessCoord.x)); + float _221 = mix(_214.x, _214.y, gl_TessCoord.y); + mediump float _223 = floor(_221); + vec2 _125 = _201 * _31.uInvHeightmapSize; + vec2 _141 = _31.uInvHeightmapSize * exp2(_223); + vGradNormalTex = vec4(_125 + (_31.uInvHeightmapSize * 0.5), _125 * _31.uScale.zw); + mediump vec3 _253 = mix(textureLod(uHeightmapDisplacement, _125 + (_141 * 0.5), _223).xyz, textureLod(uHeightmapDisplacement, _125 + (_141 * 1.0), _223 + 1.0).xyz, vec3(_221 - _223)); + vec2 _171 = (_201 * _31.uScale.xy) + _253.yz; + vWorld = vec3(_171.x, _253.x, _171.y); + gl_Position = _31.uMVP * vec4(vWorld, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/basic.vert 
b/3rdparty/spirv-cross/reference/opt/shaders/vert/basic.vert new file mode 100644 index 000000000..05504eb2f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/basic.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(std140) uniform UBO +{ + mat4 uMVP; +} _16; + +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = _16.uMVP * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/ground.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/ground.vert new file mode 100644 index 000000000..c82c1037b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/ground.vert @@ -0,0 +1,87 @@ +#version 310 es + +struct PatchData +{ + vec4 Position; + vec4 LODs; +}; + +layout(binding = 0, std140) uniform PerPatch +{ + PatchData Patches[256]; +} _53; + +layout(binding = 2, std140) uniform GlobalGround +{ + vec4 GroundScale; + vec4 GroundPosition; + vec4 InvGroundSize_PatchScale; +} _156; + +layout(binding = 0, std140) uniform GlobalVSData +{ + vec4 g_ViewProj_Row0; + vec4 g_ViewProj_Row1; + vec4 g_ViewProj_Row2; + vec4 g_ViewProj_Row3; + vec4 g_CamPos; + vec4 g_CamRight; + vec4 g_CamUp; + vec4 g_CamFront; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_TimeParams; + vec4 g_ResolutionParams; + vec4 g_CamAxisRight; + vec4 g_FogColor_Distance; + vec4 g_ShadowVP_Row0; + vec4 g_ShadowVP_Row1; + vec4 g_ShadowVP_Row2; + vec4 g_ShadowVP_Row3; +} _236; + +layout(binding = 1) uniform mediump sampler2D TexLOD; +layout(binding = 0) uniform mediump sampler2D TexHeightmap; + +layout(location = 1) in vec4 LODWeights; +uniform int SPIRV_Cross_BaseInstance; +layout(location = 0) in vec2 Position; +layout(location = 1) out vec3 EyeVec; +layout(location = 0) out vec2 TexCoord; + +void main() +{ + float _300 = all(equal(LODWeights, vec4(0.0))) ? 
_53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.w : dot(LODWeights, _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].LODs); + float _302 = floor(_300); + uint _307 = uint(_302); + uvec2 _309 = uvec2(Position); + uvec2 _316 = (uvec2(1u) << uvec2(_307, _307 + 1u)) - uvec2(1u); + uint _382; + if (_309.x < 32u) + { + _382 = _316.x; + } + else + { + _382 = 0u; + } + uint _383; + if (_309.y < 32u) + { + _383 = _316.y; + } + else + { + _383 = 0u; + } + vec4 _344 = vec4((_309 + uvec2(_382, _383)).xyxy & (~_316).xxyy); + vec2 _173 = ((_53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.xz * _156.InvGroundSize_PatchScale.zw) + mix(_344.xy, _344.zw, vec2(_300 - _302))) * _156.InvGroundSize_PatchScale.xy; + mediump float _360 = textureLod(TexLOD, _173, 0.0).x * 7.96875; + float _362 = floor(_360); + vec2 _185 = _156.InvGroundSize_PatchScale.xy * exp2(_362); + vec3 _230 = (vec3(_173.x, mix(textureLod(TexHeightmap, _173 + (_185 * 0.5), _362).x, textureLod(TexHeightmap, _173 + (_185 * 1.0), _362 + 1.0).x, _360 - _362), _173.y) * _156.GroundScale.xyz) + _156.GroundPosition.xyz; + EyeVec = _230 - _236.g_CamPos.xyz; + TexCoord = _173 + (_156.InvGroundSize_PatchScale.xy * 0.5); + gl_Position = (((_236.g_ViewProj_Row0 * _230.x) + (_236.g_ViewProj_Row1 * _230.y)) + (_236.g_ViewProj_Row2 * _230.z)) + _236.g_ViewProj_Row3; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/invariant.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/invariant.vert new file mode 100644 index 000000000..648ea2947 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/invariant.vert @@ -0,0 +1,19 @@ +#version 310 es + +invariant gl_Position; + +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; +layout(location = 2) in vec4 vInput2; +layout(location = 0) invariant out vec4 vColor; + +void main() +{ + vec4 _20 = vInput1 * vInput2; + vec4 _21 = vInput0 + _20; + gl_Position = _21; + vec4 _27 = vInput0 - vInput1; + vec4 _29 = _27 * vInput2; + vColor = _29; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/ocean.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/ocean.vert new file mode 100644 index 000000000..8f82c316d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/ocean.vert @@ -0,0 +1,119 @@ +#version 310 es + +struct PatchData +{ + vec4 Position; + vec4 LODs; +}; + +layout(binding = 0, std140) uniform Offsets +{ + PatchData Patches[256]; +} _53; + +layout(binding = 4, std140) uniform GlobalOcean +{ + vec4 OceanScale; + vec4 OceanPosition; + vec4 InvOceanSize_PatchScale; + vec4 NormalTexCoordScale; +} _180; + +layout(binding = 0, std140) uniform GlobalVSData +{ + vec4 g_ViewProj_Row0; + vec4 g_ViewProj_Row1; + vec4 g_ViewProj_Row2; + vec4 g_ViewProj_Row3; + vec4 g_CamPos; + vec4 g_CamRight; + vec4 g_CamUp; + vec4 g_CamFront; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_TimeParams; + vec4 g_ResolutionParams; + vec4 g_CamAxisRight; + vec4 g_FogColor_Distance; + vec4 g_ShadowVP_Row0; + vec4 g_ShadowVP_Row1; + vec4 g_ShadowVP_Row2; + vec4 g_ShadowVP_Row3; +} _273; + +layout(binding = 1) uniform mediump sampler2D TexLOD; +layout(binding = 0) uniform mediump sampler2D TexDisplacement; + +layout(location = 1) in vec4 LODWeights; +uniform int SPIRV_Cross_BaseInstance; +layout(location = 0) in vec4 Position; +layout(location = 0) out vec3 EyeVec; +layout(location = 1) out vec4 TexCoord; + +uvec4 _474; + +void main() +{ + float _350 = all(equal(LODWeights, vec4(0.0))) ? 
_53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.w : dot(LODWeights, _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].LODs); + float _352 = floor(_350); + uint _357 = uint(_352); + uvec4 _359 = uvec4(Position); + uvec2 _366 = (uvec2(1u) << uvec2(_357, _357 + 1u)) - uvec2(1u); + bool _369 = _359.x < 32u; + uint _465; + if (_369) + { + _465 = _366.x; + } + else + { + _465 = 0u; + } + uvec4 _443 = _474; + _443.x = _465; + bool _379 = _359.y < 32u; + uint _468; + if (_379) + { + _468 = _366.x; + } + else + { + _468 = 0u; + } + uvec4 _447 = _443; + _447.y = _468; + uint _470; + if (_369) + { + _470 = _366.y; + } + else + { + _470 = 0u; + } + uvec4 _451 = _447; + _451.z = _470; + uint _472; + if (_379) + { + _472 = _366.y; + } + else + { + _472 = 0u; + } + uvec4 _455 = _451; + _455.w = _472; + vec4 _415 = vec4((_359.xyxy + _455) & (~_366).xxyy); + vec2 _197 = ((_53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.xz * _180.InvOceanSize_PatchScale.zw) + mix(_415.xy, _415.zw, vec2(_350 - _352))) * _180.InvOceanSize_PatchScale.xy; + vec2 _204 = _197 * _180.NormalTexCoordScale.zw; + mediump float _431 = textureLod(TexLOD, _197, 0.0).x * 7.96875; + float _433 = floor(_431); + vec2 _220 = (_180.InvOceanSize_PatchScale.xy * exp2(_433)) * _180.NormalTexCoordScale.zw; + vec3 _267 = ((vec3(_197.x, 0.0, _197.y) + mix(textureLod(TexDisplacement, _204 + (_220 * 0.5), _433).yxz, textureLod(TexDisplacement, _204 + (_220 * 1.0), _433 + 1.0).yxz, vec3(_431 - _433))) * _180.OceanScale.xyz) + _180.OceanPosition.xyz; + EyeVec = _267 - _273.g_CamPos.xyz; + TexCoord = vec4(_204, _204 * _180.NormalTexCoordScale.xy) + ((_180.InvOceanSize_PatchScale.xyxy * 0.5) * _180.NormalTexCoordScale.zwzw); + gl_Position = (((_273.g_ViewProj_Row0 * _267.x) + (_273.g_ViewProj_Row1 * _267.y)) + (_273.g_ViewProj_Row2 * _267.z)) + _273.g_ViewProj_Row3; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..25fc9495d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/read-from-row-major-array.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(binding = 0, std140) uniform Block +{ + layout(row_major) mat2x3 var[3][4]; +} _104; + +layout(location = 0) in vec4 a_position; +layout(location = 0) out mediump float v_vtxResult; + +void main() +{ + gl_Position = a_position; + v_vtxResult = ((float(abs(_104.var[0][0][0].x - 2.0) < 0.0500000007450580596923828125) * float(abs(_104.var[0][0][0].y - 6.0) < 0.0500000007450580596923828125)) * float(abs(_104.var[0][0][0].z - (-6.0)) < 0.0500000007450580596923828125)) * ((float(abs(_104.var[0][0][1].x) < 0.0500000007450580596923828125) * float(abs(_104.var[0][0][1].y - 5.0) < 0.0500000007450580596923828125)) * float(abs(_104.var[0][0][1].z - 5.0) < 0.0500000007450580596923828125)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/return-array.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/return-array.vert new file mode 100644 index 000000000..b78ca5f1b --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/return-array.vert @@ -0,0 +1,9 @@ +#version 310 es + +layout(location = 1) in vec4 vInput1; + +void main() +{ + gl_Position = vec4(10.0) + vInput1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/texture_buffer.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/texture_buffer.vert new file mode 100644 index 000000000..e9442ce11 --- 
/dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/texture_buffer.vert @@ -0,0 +1,11 @@ +#version 310 es +#extension GL_OES_texture_buffer : require + +layout(binding = 4) uniform highp samplerBuffer uSamp; +layout(binding = 5, rgba32f) uniform readonly highp imageBuffer uSampo; + +void main() +{ + gl_Position = texelFetch(uSamp, 10) + imageLoad(uSampo, 100); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vert/ubo.vert b/3rdparty/spirv-cross/reference/opt/shaders/vert/ubo.vert new file mode 100644 index 000000000..4e7236b29 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vert/ubo.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(binding = 0, std140) uniform UBO +{ + mat4 mvp; +} _16; + +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = _16.mvp * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp new file mode 100644 index 000000000..9344a7a24 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp @@ -0,0 +1,46 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 100 +#endif +const int a = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 200 +#endif +const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 300 +#endif +const int c = SPIRV_CROSS_CONSTANT_ID_2; +const int _18 = (c + 50); +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 400 +#endif +const int e = SPIRV_CROSS_CONSTANT_ID_3; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(binding = 0, std430) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[_18]; +} _22; + +void main() +{ + _22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp.vk new file mode 100644 index 000000000..d9a2822f7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp.vk @@ -0,0 +1,34 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(constant_id = 0) const int a = 100; +layout(constant_id = 1) const int b = 200; +layout(constant_id = 2) const int c = 300; +const int _18 = (c + 50); +layout(constant_id = 3) const int e = 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(set = 1, binding = 0, std430) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[_18]; +} _22; + +void main() +{ + _22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp new file mode 100644 index 000000000..888f4b164 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp @@ -0,0 +1,34 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 2 +#endif +const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1 +#endif +const int a = SPIRV_CROSS_CONSTANT_ID_0; +const uint _21 = (uint(a) + 0u); +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 1u +#endif +const uint _27 = gl_WorkGroupSize.x; +const uint _28 = (_21 + _27); +const uint _29 = gl_WorkGroupSize.y; +const uint _30 = (_28 + _29); +const int _32 = (1 - a); + +layout(local_size_x = SPIRV_CROSS_CONSTANT_ID_10, local_size_y = 20, local_size_z = 1) in; + +layout(binding = 0, std430) writeonly buffer SSBO +{ + int v[]; +} _17; + +void main() +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + _17.v[_30] = b + spec_const_array_size[_32]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp.vk new file mode 100644 index 000000000..bdf72dff4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp.vk @@ -0,0 +1,24 @@ +#version 450 +layout(local_size_x_id = 10, local_size_y = 20, local_size_z = 1) in; + +layout(constant_id = 1) const int b = 2; +layout(constant_id = 0) const int a = 1; +const uint _21 = (uint(a) + 0u); +const uint _27 = gl_WorkGroupSize.x; +const uint _28 = (_21 + _27); +const uint _29 = gl_WorkGroupSize.y; +const uint _30 = (_28 + _29); +const int _32 = (1 - a); + +layout(set = 1, binding = 0, std430) writeonly buffer SSBO +{ + int v[]; +} _17; + +void main() +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + _17.v[_30] = b + spec_const_array_size[_32]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp.vk new file mode 100644 index 000000000..6d288574f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp.vk @@ -0,0 +1,110 @@ +#version 450 +#extension GL_KHR_shader_subgroup_basic : require +#extension GL_KHR_shader_subgroup_ballot : require +#extension GL_KHR_shader_subgroup_shuffle : require +#extension GL_KHR_shader_subgroup_shuffle_relative : require +#extension GL_KHR_shader_subgroup_vote : require +#extension GL_KHR_shader_subgroup_arithmetic : require +#extension GL_KHR_shader_subgroup_clustered : require +#extension GL_KHR_shader_subgroup_quad : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(set = 0, binding = 0, std430) buffer SSBO +{ + float FragColor; +} _9; + +void main() +{ + _9.FragColor = float(gl_NumSubgroups); + _9.FragColor = float(gl_SubgroupID); + _9.FragColor = float(gl_SubgroupSize); + _9.FragColor = float(gl_SubgroupInvocationID); + subgroupMemoryBarrier(); + subgroupBarrier(); + subgroupMemoryBarrier(); + subgroupMemoryBarrierBuffer(); + subgroupMemoryBarrierShared(); + subgroupMemoryBarrierImage(); + bool elected = subgroupElect(); + _9.FragColor = vec4(gl_SubgroupEqMask).x; + _9.FragColor = vec4(gl_SubgroupGeMask).x; + _9.FragColor = vec4(gl_SubgroupGtMask).x; + _9.FragColor = vec4(gl_SubgroupLeMask).x; + _9.FragColor = vec4(gl_SubgroupLtMask).x; + vec4 broadcasted = 
subgroupBroadcast(vec4(10.0), 8u); + vec3 first = subgroupBroadcastFirst(vec3(20.0)); + uvec4 ballot_value = subgroupBallot(true); + bool inverse_ballot_value = subgroupInverseBallot(ballot_value); + bool bit_extracted = subgroupBallotBitExtract(uvec4(10u), 8u); + uint bit_count = subgroupBallotBitCount(ballot_value); + uint inclusive_bit_count = subgroupBallotInclusiveBitCount(ballot_value); + uint exclusive_bit_count = subgroupBallotExclusiveBitCount(ballot_value); + uint lsb = subgroupBallotFindLSB(ballot_value); + uint msb = subgroupBallotFindMSB(ballot_value); + uint shuffled = subgroupShuffle(10u, 8u); + uint shuffled_xor = subgroupShuffleXor(30u, 8u); + uint shuffled_up = subgroupShuffleUp(20u, 4u); + uint shuffled_down = subgroupShuffleDown(20u, 4u); + bool has_all = subgroupAll(true); + bool has_any = subgroupAny(true); + bool has_equal = subgroupAllEqual(true); + vec4 added = subgroupAdd(vec4(20.0)); + ivec4 iadded = subgroupAdd(ivec4(20)); + vec4 multiplied = subgroupMul(vec4(20.0)); + ivec4 imultiplied = subgroupMul(ivec4(20)); + vec4 lo = subgroupMin(vec4(20.0)); + vec4 hi = subgroupMax(vec4(20.0)); + ivec4 slo = subgroupMin(ivec4(20)); + ivec4 shi = subgroupMax(ivec4(20)); + uvec4 ulo = subgroupMin(uvec4(20u)); + uvec4 uhi = subgroupMax(uvec4(20u)); + uvec4 anded = subgroupAnd(ballot_value); + uvec4 ored = subgroupOr(ballot_value); + uvec4 xored = subgroupXor(ballot_value); + added = subgroupInclusiveAdd(added); + iadded = subgroupInclusiveAdd(iadded); + multiplied = subgroupInclusiveMul(multiplied); + imultiplied = subgroupInclusiveMul(imultiplied); + lo = subgroupInclusiveMin(lo); + hi = subgroupInclusiveMax(hi); + slo = subgroupInclusiveMin(slo); + shi = subgroupInclusiveMax(shi); + ulo = subgroupInclusiveMin(ulo); + uhi = subgroupInclusiveMax(uhi); + anded = subgroupInclusiveAnd(anded); + ored = subgroupInclusiveOr(ored); + xored = subgroupInclusiveXor(ored); + added = subgroupExclusiveAdd(lo); + added = subgroupExclusiveAdd(multiplied); + multiplied = subgroupExclusiveMul(multiplied); + iadded = subgroupExclusiveAdd(imultiplied); + imultiplied = subgroupExclusiveMul(imultiplied); + lo = subgroupExclusiveMin(lo); + hi = subgroupExclusiveMax(hi); + ulo = subgroupExclusiveMin(ulo); + uhi = subgroupExclusiveMax(uhi); + slo = subgroupExclusiveMin(slo); + shi = subgroupExclusiveMax(shi); + anded = subgroupExclusiveAnd(anded); + ored = subgroupExclusiveOr(ored); + xored = subgroupExclusiveXor(ored); + added = subgroupClusteredAdd(added, 4u); + multiplied = subgroupClusteredMul(multiplied, 4u); + iadded = subgroupClusteredAdd(iadded, 4u); + imultiplied = subgroupClusteredMul(imultiplied, 4u); + lo = subgroupClusteredMin(lo, 4u); + hi = subgroupClusteredMax(hi, 4u); + ulo = subgroupClusteredMin(ulo, 4u); + uhi = subgroupClusteredMax(uhi, 4u); + slo = subgroupClusteredMin(slo, 4u); + shi = subgroupClusteredMax(shi, 4u); + anded = subgroupClusteredAnd(anded, 4u); + ored = subgroupClusteredOr(ored, 4u); + xored = subgroupClusteredXor(xored, 4u); + vec4 swap_horiz = subgroupQuadSwapHorizontal(vec4(20.0)); + vec4 swap_vertical = subgroupQuadSwapVertical(vec4(20.0)); + vec4 swap_diagonal = subgroupQuadSwapDiagonal(vec4(20.0)); + vec4 quad_broadcast = subgroupQuadBroadcast(vec4(20.0), 3u); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag new file mode 100644 index 000000000..f0729fdcd --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2DShadow SPIRV_Cross_CombineduDepthuSampler; +uniform mediump sampler2D SPIRV_Cross_CombineduDepthuSampler1; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = texture(SPIRV_Cross_CombineduDepthuSampler, vec3(vec3(1.0).xy, 1.0)) + texture(SPIRV_Cross_CombineduDepthuSampler1, vec2(1.0)).x; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag.vk new file mode 100644 index 000000000..b6ad1e39c --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag.vk @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 2) uniform mediump texture2D uDepth; +layout(set = 0, binding = 0) uniform mediump samplerShadow uSampler; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = texture(sampler2DShadow(uDepth, uSampler), vec3(vec3(1.0).xy, 1.0)) + texture(sampler2D(uDepth, uSampler1), vec2(1.0)).x; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler.vk.frag new file mode 100644 index 000000000..29c247d6d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler.vk.frag @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2D SPIRV_Cross_CombineduTexture0uSampler0; +uniform mediump sampler2D SPIRV_Cross_CombineduTexture1uSampler1; +uniform mediump sampler2D SPIRV_Cross_CombineduTexture1uSampler0; +uniform mediump sampler2D SPIRV_Cross_CombineduTexture0uSampler1; + +layout(location = 0) in vec2 vTex; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = ((((texture(SPIRV_Cross_CombineduTexture0uSampler0, vTex) + texture(SPIRV_Cross_CombineduTexture1uSampler1, vTex)) + (texture(SPIRV_Cross_CombineduTexture0uSampler0, vTex) + texture(SPIRV_Cross_CombineduTexture1uSampler0, vTex))) + (texture(SPIRV_Cross_CombineduTexture0uSampler1, vTex) + texture(SPIRV_Cross_CombineduTexture1uSampler1, vTex))) + (texture(SPIRV_Cross_CombineduTexture0uSampler0, vTex) + texture(SPIRV_Cross_CombineduTexture0uSampler1, vTex))) + (texture(SPIRV_Cross_CombineduTexture1uSampler0, vTex) + texture(SPIRV_Cross_CombineduTexture1uSampler1, vTex)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler.vk.frag.vk new file mode 100644 index 000000000..7a543c416 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/combined-texture-sampler.vk.frag.vk @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 2) uniform mediump texture2D uTexture0; +layout(set = 0, binding = 3) uniform mediump texture2D uTexture1; +layout(set = 0, binding = 0) uniform mediump sampler uSampler0; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; + +layout(location = 0) in vec2 vTex; +layout(location = 0) out vec4 
FragColor; + +void main() +{ + FragColor = ((((texture(sampler2D(uTexture0, uSampler0), vTex) + texture(sampler2D(uTexture1, uSampler1), vTex)) + (texture(sampler2D(uTexture0, uSampler0), vTex) + texture(sampler2D(uTexture1, uSampler0), vTex))) + (texture(sampler2D(uTexture0, uSampler1), vTex) + texture(sampler2D(uTexture1, uSampler1), vTex))) + (texture(sampler2D(uTexture0, uSampler0), vTex) + texture(sampler2D(uTexture0, uSampler1), vTex))) + (texture(sampler2D(uTexture1, uSampler0), vTex) + texture(sampler2D(uTexture1, uSampler1), vTex)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/desktop-mediump.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/desktop-mediump.vk.frag new file mode 100644 index 000000000..8f7508ee8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/desktop-mediump.vk.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 F; +layout(location = 1) flat in ivec4 I; +layout(location = 2) flat in uvec4 U; + +void main() +{ + FragColor = (F + vec4(I)) + vec4(U); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/desktop-mediump.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/desktop-mediump.vk.frag.vk new file mode 100644 index 000000000..4c0506b11 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/desktop-mediump.vk.frag.vk @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) out mediump vec4 FragColor; +layout(location = 0) in mediump vec4 F; +layout(location = 1) flat in mediump ivec4 I; +layout(location = 2) flat in mediump uvec4 U; + +void main() +{ + FragColor = (F + vec4(I)) + vec4(U); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment-ms.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment-ms.vk.frag new file mode 100644 index 000000000..ea460c1fa --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment-ms.vk.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0) uniform sampler2DMS uSubpass0; +layout(binding = 1) uniform sampler2DMS uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = (texelFetch(uSubpass0, ivec2(gl_FragCoord.xy), 1) + texelFetch(uSubpass1, ivec2(gl_FragCoord.xy), 2)) + texelFetch(uSubpass0, ivec2(gl_FragCoord.xy), gl_SampleID); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment-ms.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment-ms.vk.frag.vk new file mode 100644 index 000000000..462df22a1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment-ms.vk.frag.vk @@ -0,0 +1,12 @@ +#version 450 + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform subpassInputMS uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = (subpassLoad(uSubpass0, 1) + subpassLoad(uSubpass1, 2)) + subpassLoad(uSubpass0, gl_SampleID); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment.vk.frag new file mode 100644 index 000000000..8d216b2c4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment.vk.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump 
float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uSubpass0; +layout(binding = 1) uniform mediump sampler2D uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texelFetch(uSubpass0, ivec2(gl_FragCoord.xy), 0) + texelFetch(uSubpass1, ivec2(gl_FragCoord.xy), 0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment.vk.frag.vk new file mode 100644 index 000000000..c8b5d9a70 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/input-attachment.vk.frag.vk @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform mediump subpassInput uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform mediump subpassInput uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = subpassLoad(uSubpass0) + subpassLoad(uSubpass1); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/push-constant.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/push-constant.vk.frag new file mode 100644 index 000000000..c04a7ca48 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/push-constant.vk.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct PushConstants +{ + vec4 value0; + vec4 value1; +}; + +uniform PushConstants push; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; + +void main() +{ + FragColor = (vColor + push.value0) + push.value1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/push-constant.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/push-constant.vk.frag.vk new file mode 100644 index 000000000..6cec90f19 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/push-constant.vk.frag.vk @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(push_constant, std430) uniform PushConstants +{ + vec4 value0; + vec4 value1; +} push; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; + +void main() +{ + FragColor = (vColor + push.value0) + push.value1; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag new file mode 100644 index 000000000..c17c8e6d6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSamp; +uniform sampler2D SPIRV_Cross_CombineduTuS; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texture(uSamp, vec2(0.5)) + texture(SPIRV_Cross_CombineduTuS, vec2(0.5)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag.vk new file mode 100644 index 000000000..5a5ec2029 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag.vk @@ -0,0 +1,13 @@ +#version 450 + +layout(set = 0, binding = 0) uniform sampler2D uSamp; +layout(set = 0, binding = 1) uniform texture2D uT; +layout(set = 
0, binding = 2) uniform sampler uS; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texture(uSamp, vec2(0.5)) + texture(sampler2D(uT, uS), vec2(0.5)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag new file mode 100644 index 000000000..df2994efb --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2D SPIRV_Cross_CombineduTextureuSampler[4]; +uniform mediump sampler2DArray SPIRV_Cross_CombineduTextureArrayuSampler[4]; +uniform mediump samplerCube SPIRV_Cross_CombineduTextureCubeuSampler[4]; +uniform mediump sampler3D SPIRV_Cross_CombineduTexture3DuSampler[4]; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec2 _95 = (vTex + (vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler[1], 0)))) + (vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler[2], 1))); + FragColor = ((((texture(SPIRV_Cross_CombineduTextureuSampler[2], _95) + texture(SPIRV_Cross_CombineduTextureuSampler[1], _95)) + texture(SPIRV_Cross_CombineduTextureuSampler[1], _95)) + texture(SPIRV_Cross_CombineduTextureArrayuSampler[3], vTex3)) + texture(SPIRV_Cross_CombineduTextureCubeuSampler[1], vTex3)) + texture(SPIRV_Cross_CombineduTexture3DuSampler[2], vTex3); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag.vk new file mode 100644 index 000000000..d275a0f40 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag.vk @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 1) uniform mediump texture2D uTexture[4]; +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray[4]; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube[4]; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D[4]; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec2 _95 = (vTex + (vec2(1.0) / vec2(textureSize(sampler2D(uTexture[1], uSampler), 0)))) + (vec2(1.0) / vec2(textureSize(sampler2D(uTexture[2], uSampler), 1))); + FragColor = ((((texture(sampler2D(uTexture[2], uSampler), _95) + texture(sampler2D(uTexture[1], uSampler), _95)) + texture(sampler2D(uTexture[1], uSampler), _95)) + texture(sampler2DArray(uTextureArray[3], uSampler), vTex3)) + texture(samplerCube(uTextureCube[1], uSampler), vTex3)) + texture(sampler3D(uTexture3D[2], uSampler), vTex3); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture.vk.frag new file mode 100644 index 000000000..aad1e4366 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture.vk.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2D 
SPIRV_Cross_CombineduTextureuSampler; +uniform mediump sampler2DArray SPIRV_Cross_CombineduTextureArrayuSampler; +uniform mediump samplerCube SPIRV_Cross_CombineduTextureCubeuSampler; +uniform mediump sampler3D SPIRV_Cross_CombineduTexture3DuSampler; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec2 _73 = (vTex + (vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler, 0)))) + (vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler, 1))); + FragColor = (((texture(SPIRV_Cross_CombineduTextureuSampler, _73) + texture(SPIRV_Cross_CombineduTextureuSampler, _73)) + texture(SPIRV_Cross_CombineduTextureArrayuSampler, vTex3)) + texture(SPIRV_Cross_CombineduTextureCubeuSampler, vTex3)) + texture(SPIRV_Cross_CombineduTexture3DuSampler, vTex3); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture.vk.frag.vk new file mode 100644 index 000000000..b79374aba --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/separate-sampler-texture.vk.frag.vk @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 1) uniform mediump texture2D uTexture; +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec2 _73 = (vTex + (vec2(1.0) / vec2(textureSize(sampler2D(uTexture, uSampler), 0)))) + (vec2(1.0) / vec2(textureSize(sampler2D(uTexture, uSampler), 1))); + FragColor = (((texture(sampler2D(uTexture, uSampler), _73) + texture(sampler2D(uTexture, uSampler), _73)) + texture(sampler2DArray(uTextureArray, uSampler), vTex3)) + texture(samplerCube(uTextureCube, uSampler), vTex3)) + texture(sampler3D(uTexture3D, uSampler), vTex3); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-block-size.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-block-size.vk.frag new file mode 100644 index 000000000..19ea6ae06 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-block-size.vk.frag @@ -0,0 +1,22 @@ +#version 310 es +precision mediump float; +precision highp int; + +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 2 +#endif +const int Value = SPIRV_CROSS_CONSTANT_ID_10; + +layout(binding = 0, std140) uniform SpecConstArray +{ + vec4 samples[Value]; +} _15; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int Index; + +void main() +{ + FragColor = _15.samples[Index]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-block-size.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-block-size.vk.frag.vk new file mode 100644 index 000000000..133761a83 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-block-size.vk.frag.vk @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(constant_id = 10) const int Value = 2; + +layout(set = 0, binding = 0, std140) uniform 
SpecConstArray +{ + vec4 samples[Value]; +} _15; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int Index; + +void main() +{ + FragColor = _15.samples[Index]; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-ternary.vk.frag b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-ternary.vk.frag new file mode 100644 index 000000000..e03dfcb9a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-ternary.vk.frag @@ -0,0 +1,16 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 10u +#endif +const uint s = SPIRV_CROSS_CONSTANT_ID_0; +const bool _13 = (s > 20u); +const uint _16 = _13 ? 30u : 50u; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = float(_16); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-ternary.vk.frag.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-ternary.vk.frag.vk new file mode 100644 index 000000000..59d3b99b9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/frag/spec-constant-ternary.vk.frag.vk @@ -0,0 +1,13 @@ +#version 450 + +layout(constant_id = 0) const uint s = 10u; +const bool _13 = (s > 20u); +const uint _16 = _13 ? 30u : 50u; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = float(_16); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/multiview.nocompat.vk.vert.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/multiview.nocompat.vk.vert.vk new file mode 100644 index 000000000..90055473d --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/multiview.nocompat.vk.vert.vk @@ -0,0 +1,15 @@ +#version 310 es +#extension GL_EXT_multiview : require + +layout(set = 0, binding = 0, std140) uniform MVPs +{ + mat4 MVP[2]; +} _19; + +layout(location = 0) in vec4 Position; + +void main() +{ + gl_Position = _19.MVP[gl_ViewIndex] * Position; +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/small-storage.vk.vert b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/small-storage.vk.vert new file mode 100644 index 000000000..2ade81104 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/small-storage.vk.vert @@ -0,0 +1,63 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif +#extension GL_EXT_shader_8bit_storage : require +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. 
+#endif + +layout(binding = 0, std140) uniform block +{ + i16vec2 a; + u16vec2 b; + i8vec2 c; + u8vec2 d; + f16vec2 e; +} _26; + +layout(binding = 1, std430) readonly buffer storage +{ + i16vec3 f; + u16vec3 g; + i8vec3 h; + u8vec3 i; + f16vec3 j; +} _53; + +struct pushconst +{ + i16vec4 k; + u16vec4 l; + i8vec4 m; + u8vec4 n; + f16vec4 o; +}; + +uniform pushconst _76; + +layout(location = 0) out i16vec4 p; +layout(location = 0, component = 0) in int16_t foo; +layout(location = 1) out u16vec4 q; +layout(location = 0, component = 1) in uint16_t bar; +layout(location = 2) out f16vec4 r; +layout(location = 1) in float16_t baz; + +void main() +{ + p = i16vec4((((ivec4(int(foo)) + ivec4(ivec2(_26.a), ivec2(_26.c))) - ivec4(ivec3(_53.f) / ivec3(_53.h), 1)) + ivec4(_76.k)) + ivec4(_76.m)); + q = u16vec4((((uvec4(uint(bar)) + uvec4(uvec2(_26.b), uvec2(_26.d))) - uvec4(uvec3(_53.g) / uvec3(_53.i), 1u)) + uvec4(_76.l)) + uvec4(_76.n)); + r = f16vec4(((vec4(float(baz)) + vec4(vec2(_26.e), 0.0, 1.0)) - vec4(vec3(_53.j), 1.0)) + vec4(_76.o)); + gl_Position = vec4(0.0, 0.0, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/small-storage.vk.vert.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/small-storage.vk.vert.vk new file mode 100644 index 000000000..c283d6064 --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/small-storage.vk.vert.vk @@ -0,0 +1,59 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif +#extension GL_EXT_shader_8bit_storage : require +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. 
+#endif + +layout(set = 0, binding = 0, std140) uniform block +{ + i16vec2 a; + u16vec2 b; + i8vec2 c; + u8vec2 d; + f16vec2 e; +} _26; + +layout(set = 0, binding = 1, std430) readonly buffer storage +{ + i16vec3 f; + u16vec3 g; + i8vec3 h; + u8vec3 i; + f16vec3 j; +} _53; + +layout(push_constant, std430) uniform pushconst +{ + i16vec4 k; + u16vec4 l; + i8vec4 m; + u8vec4 n; + f16vec4 o; +} _76; + +layout(location = 0) out i16vec4 p; +layout(location = 0, component = 0) in int16_t foo; +layout(location = 1) out u16vec4 q; +layout(location = 0, component = 1) in uint16_t bar; +layout(location = 2) out f16vec4 r; +layout(location = 1) in float16_t baz; + +void main() +{ + p = i16vec4((((ivec4(int(foo)) + ivec4(ivec2(_26.a), ivec2(_26.c))) - ivec4(ivec3(_53.f) / ivec3(_53.h), 1)) + ivec4(_76.k)) + ivec4(_76.m)); + q = u16vec4((((uvec4(uint(bar)) + uvec4(uvec2(_26.b), uvec2(_26.d))) - uvec4(uvec3(_53.g) / uvec3(_53.i), 1u)) + uvec4(_76.l)) + uvec4(_76.n)); + r = f16vec4(((vec4(float(baz)) + vec4(vec2(_26.e), 0.0, 1.0)) - vec4(vec3(_53.j), 1.0)) + vec4(_76.o)); + gl_Position = vec4(0.0, 0.0, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/vulkan-vertex.vk.vert b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/vulkan-vertex.vk.vert new file mode 100644 index 000000000..60ba1882f --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/vulkan-vertex.vk.vert @@ -0,0 +1,9 @@ +#version 310 es + +uniform int SPIRV_Cross_BaseInstance; + +void main() +{ + gl_Position = vec4(1.0, 2.0, 3.0, 4.0) * float(gl_VertexID + (gl_InstanceID + SPIRV_Cross_BaseInstance)); +} + diff --git a/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/vulkan-vertex.vk.vert.vk b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/vulkan-vertex.vk.vert.vk new file mode 100644 index 000000000..8c4930d7a --- /dev/null +++ b/3rdparty/spirv-cross/reference/opt/shaders/vulkan/vert/vulkan-vertex.vk.vert.vk @@ -0,0 +1,7 @@ +#version 310 es + +void main() +{ + gl_Position = vec4(1.0, 2.0, 3.0, 4.0) * float(gl_VertexIndex + gl_InstanceIndex); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/asm/comp/specialization-constant-workgroup.nofxc.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/asm/comp/specialization-constant-workgroup.nofxc.asm.comp new file mode 100644 index 000000000..8c239b319 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/asm/comp/specialization-constant-workgroup.nofxc.asm.comp @@ -0,0 +1,22 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 9u +#endif +static const uint _5 = SPIRV_CROSS_CONSTANT_ID_10; +#ifndef SPIRV_CROSS_CONSTANT_ID_12 +#define SPIRV_CROSS_CONSTANT_ID_12 4u +#endif +static const uint _6 = SPIRV_CROSS_CONSTANT_ID_12; +static const uint3 gl_WorkGroupSize = uint3(_5, 20u, _6); + +RWByteAddressBuffer _4 : register(u0); + +void comp_main() +{ + _4.Store(0, asuint(asfloat(_4.Load(0)) + 1.0f)); +} + +[numthreads(SPIRV_CROSS_CONSTANT_ID_10, 20, SPIRV_CROSS_CONSTANT_ID_12)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/asm/vert/empty-struct-composite.asm.vert b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/asm/vert/empty-struct-composite.asm.vert new file mode 100644 index 000000000..ba1f57674 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/asm/vert/empty-struct-composite.asm.vert @@ -0,0 +1,15 @@ +struct Test +{ + int empty_struct_member; +}; + +void vert_main() +{ + Test _14 = { 0 }; 
+ Test t = _14; +} + +void main() +{ + vert_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/comp/bitfield.comp b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/comp/bitfield.comp new file mode 100644 index 000000000..6839d9569 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/comp/bitfield.comp @@ -0,0 +1,113 @@ +uint SPIRV_Cross_bitfieldInsert(uint Base, uint Insert, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : (((1u << Count) - 1) << (Offset & 31)); + return (Base & ~Mask) | ((Insert << Offset) & Mask); +} + +uint2 SPIRV_Cross_bitfieldInsert(uint2 Base, uint2 Insert, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : (((1u << Count) - 1) << (Offset & 31)); + return (Base & ~Mask) | ((Insert << Offset) & Mask); +} + +uint3 SPIRV_Cross_bitfieldInsert(uint3 Base, uint3 Insert, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : (((1u << Count) - 1) << (Offset & 31)); + return (Base & ~Mask) | ((Insert << Offset) & Mask); +} + +uint4 SPIRV_Cross_bitfieldInsert(uint4 Base, uint4 Insert, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : (((1u << Count) - 1) << (Offset & 31)); + return (Base & ~Mask) | ((Insert << Offset) & Mask); +} + +uint SPIRV_Cross_bitfieldUExtract(uint Base, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : ((1 << Count) - 1); + return (Base >> Offset) & Mask; +} + +uint2 SPIRV_Cross_bitfieldUExtract(uint2 Base, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : ((1 << Count) - 1); + return (Base >> Offset) & Mask; +} + +uint3 SPIRV_Cross_bitfieldUExtract(uint3 Base, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : ((1 << Count) - 1); + return (Base >> Offset) & Mask; +} + +uint4 SPIRV_Cross_bitfieldUExtract(uint4 Base, uint Offset, uint Count) +{ + uint Mask = Count == 32 ? 0xffffffff : ((1 << Count) - 1); + return (Base >> Offset) & Mask; +} + +int SPIRV_Cross_bitfieldSExtract(int Base, int Offset, int Count) +{ + int Mask = Count == 32 ? -1 : ((1 << Count) - 1); + int Masked = (Base >> Offset) & Mask; + int ExtendShift = (32 - Count) & 31; + return (Masked << ExtendShift) >> ExtendShift; +} + +int2 SPIRV_Cross_bitfieldSExtract(int2 Base, int Offset, int Count) +{ + int Mask = Count == 32 ? -1 : ((1 << Count) - 1); + int2 Masked = (Base >> Offset) & Mask; + int ExtendShift = (32 - Count) & 31; + return (Masked << ExtendShift) >> ExtendShift; +} + +int3 SPIRV_Cross_bitfieldSExtract(int3 Base, int Offset, int Count) +{ + int Mask = Count == 32 ? -1 : ((1 << Count) - 1); + int3 Masked = (Base >> Offset) & Mask; + int ExtendShift = (32 - Count) & 31; + return (Masked << ExtendShift) >> ExtendShift; +} + +int4 SPIRV_Cross_bitfieldSExtract(int4 Base, int Offset, int Count) +{ + int Mask = Count == 32 ? 
-1 : ((1 << Count) - 1); + int4 Masked = (Base >> Offset) & Mask; + int ExtendShift = (32 - Count) & 31; + return (Masked << ExtendShift) >> ExtendShift; +} + +void comp_main() +{ + int signed_value = 0; + uint unsigned_value = 0u; + int3 signed_values = int3(0, 0, 0); + uint3 unsigned_values = uint3(0u, 0u, 0u); + int s = SPIRV_Cross_bitfieldSExtract(signed_value, 5, 20); + uint u = SPIRV_Cross_bitfieldUExtract(unsigned_value, 6, 21); + s = int(SPIRV_Cross_bitfieldInsert(s, 40, 5, 4)); + u = SPIRV_Cross_bitfieldInsert(u, 60u, 5, 4); + u = reversebits(u); + s = reversebits(s); + int v0 = countbits(u); + int v1 = countbits(s); + int v2 = firstbithigh(u); + int v3 = firstbitlow(s); + int3 s_1 = SPIRV_Cross_bitfieldSExtract(signed_values, 5, 20); + uint3 u_1 = SPIRV_Cross_bitfieldUExtract(unsigned_values, 6, 21); + s_1 = int3(SPIRV_Cross_bitfieldInsert(s_1, int3(40, 40, 40), 5, 4)); + u_1 = SPIRV_Cross_bitfieldInsert(u_1, uint3(60u, 60u, 60u), 5, 4); + u_1 = reversebits(u_1); + s_1 = reversebits(s_1); + int3 v0_1 = countbits(u_1); + int3 v1_1 = countbits(s_1); + int3 v2_1 = firstbithigh(u_1); + int3 v3_1 = firstbitlow(s_1); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/frag/spec-constant.frag b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/frag/spec-constant.frag new file mode 100644 index 000000000..89a5f5ca8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl-no-opt/frag/spec-constant.frag @@ -0,0 +1,142 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 1.0f +#endif +static const float a = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 2.0f +#endif +static const float b = SPIRV_CROSS_CONSTANT_ID_2; +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 3 +#endif +static const int c = SPIRV_CROSS_CONSTANT_ID_3; +static const uint _18 = (uint(c) + 0u); +static const int _21 = (-c); +static const int _23 = (~c); +#ifndef SPIRV_CROSS_CONSTANT_ID_4 +#define SPIRV_CROSS_CONSTANT_ID_4 4 +#endif +static const int d = SPIRV_CROSS_CONSTANT_ID_4; +static const int _26 = (c + d); +static const int _28 = (c - d); +static const int _30 = (c * d); +static const int _32 = (c / d); +#ifndef SPIRV_CROSS_CONSTANT_ID_5 +#define SPIRV_CROSS_CONSTANT_ID_5 5u +#endif +static const uint e = SPIRV_CROSS_CONSTANT_ID_5; +#ifndef SPIRV_CROSS_CONSTANT_ID_6 +#define SPIRV_CROSS_CONSTANT_ID_6 6u +#endif +static const uint f = SPIRV_CROSS_CONSTANT_ID_6; +static const uint _36 = (e / f); +static const int _38 = (c % d); +static const uint _40 = (e % f); +static const int _42 = (c >> d); +static const uint _44 = (e >> f); +static const int _46 = (c << d); +static const int _48 = (c | d); +static const int _50 = (c ^ d); +static const int _52 = (c & d); +#ifndef SPIRV_CROSS_CONSTANT_ID_7 +#define SPIRV_CROSS_CONSTANT_ID_7 false +#endif +static const bool g = SPIRV_CROSS_CONSTANT_ID_7; +#ifndef SPIRV_CROSS_CONSTANT_ID_8 +#define SPIRV_CROSS_CONSTANT_ID_8 true +#endif +static const bool h = SPIRV_CROSS_CONSTANT_ID_8; +static const bool _58 = (g || h); +static const bool _60 = (g && h); +static const bool _62 = (!g); +static const bool _64 = (g == h); +static const bool _66 = (g != h); +static const bool _68 = (c == d); +static const bool _70 = (c != d); +static const bool _72 = (c < d); +static const bool _74 = (e < f); +static const bool _76 = (c > d); +static const bool _78 = (e > f); +static const bool _80 = (c <= d); +static const bool _82 = 
(e <= f); +static const bool _84 = (c >= d); +static const bool _86 = (e >= f); +static const int _92 = int(e + 0u); +static const bool _94 = (c != int(0u)); +static const bool _96 = (e != 0u); +static const int _100 = int(g); +static const uint _103 = uint(g); +static const int _111 = (c + 3); +static const int _118 = (c + 2); +static const int _124 = (d + 2); + +struct Foo +{ + float elems[_124]; +}; + +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + float t0 = a; + float t1 = b; + uint c0 = _18; + int c1 = _21; + int c2 = _23; + int c3 = _26; + int c4 = _28; + int c5 = _30; + int c6 = _32; + uint c7 = _36; + int c8 = _38; + uint c9 = _40; + int c10 = _42; + uint c11 = _44; + int c12 = _46; + int c13 = _48; + int c14 = _50; + int c15 = _52; + bool c16 = _58; + bool c17 = _60; + bool c18 = _62; + bool c19 = _64; + bool c20 = _66; + bool c21 = _68; + bool c22 = _70; + bool c23 = _72; + bool c24 = _74; + bool c25 = _76; + bool c26 = _78; + bool c27 = _80; + bool c28 = _82; + bool c29 = _84; + bool c30 = _86; + int c31 = c8 + c3; + int c32 = _92; + bool c33 = _94; + bool c34 = _96; + int c35 = _100; + uint c36 = _103; + float c37 = float(g); + float vec0[_111][8]; + vec0[0][0] = 10.0f; + float vec1[_118]; + vec1[0] = 20.0f; + Foo foo; + foo.elems[c] = 10.0f; + FragColor = (((t0 + t1).xxxx + vec0[0][0].xxxx) + vec1[0].xxxx) + foo.elems[c].xxxx; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp new file mode 100644 index 000000000..b8265fc99 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp @@ -0,0 +1,19 @@ +RWByteAddressBuffer _4 : register(u0); + +void comp_main() +{ + uint _21 = _4.Load(_4.Load(0) * 4 + 4); + for (uint _23 = 0u; _23 < 64u; ) + { + _4.Store(_23 * 4 + 4, 0u); + _23++; + continue; + } + _4.Store(_4.Load(0) * 4 + 4, _21); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..919fe9bab --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,24 @@ +RWByteAddressBuffer u0_counter : register(u1); +RWBuffer u0 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + uint _29; + u0_counter.InterlockedAdd(0, -1, _29); + float4 r0; + r0.x = asfloat(_29); + u0[(uint(asint(r0.x)) * 1u) + (uint(0) >> 2u)] = uint(int(gl_GlobalInvocationID.x)).x; +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..35960b0a6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,24 @@ +RWByteAddressBuffer u0_counter : register(u1); +RWBuffer u0 : register(u0); + +static 
uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + uint _29; + u0_counter.InterlockedAdd(0, 1, _29); + float4 r0; + r0.x = asfloat(_29); + u0[(uint(asint(r0.x)) * 1u) + (uint(0) >> 2u)] = uint(int(gl_GlobalInvocationID.x)).x; +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..a12274c01 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,45 @@ +struct A +{ + int a; + int b; +}; + +struct A_1 +{ + int a; + int b; +}; + +RWByteAddressBuffer C1 : register(u1); +cbuffer C2 : register(b2) +{ + A_1 C2_1_Data[1024] : packoffset(c0); +}; + +RWByteAddressBuffer C3 : register(u0); +cbuffer B : register(b3) +{ + A_1 C4_Data[1024] : packoffset(c0); +}; + + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + C1.Store(gl_GlobalInvocationID.x * 8 + 0, uint(C2_1_Data[gl_GlobalInvocationID.x].a)); + C1.Store(gl_GlobalInvocationID.x * 8 + 4, uint(C2_1_Data[gl_GlobalInvocationID.x].b)); + C3.Store(gl_GlobalInvocationID.x * 8 + 0, uint(C4_Data[gl_GlobalInvocationID.x].a)); + C3.Store(gl_GlobalInvocationID.x * 8 + 4, uint(C4_Data[gl_GlobalInvocationID.x].b)); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/control-flow-hints.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/control-flow-hints.asm.comp new file mode 100644 index 000000000..970010034 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/control-flow-hints.asm.comp @@ -0,0 +1,41 @@ +RWByteAddressBuffer bar : register(u0); +RWByteAddressBuffer foo : register(u1); + +void _main() +{ + [unroll] + for (int i = 0; i < 16; i++) + { + bar.Store4(i * 16 + 0, asuint(asfloat(foo.Load4(i * 16 + 0)))); + } + [loop] + for (int i_1 = 0; i_1 < 16; i_1++) + { + bar.Store4((15 - i_1) * 16 + 0, asuint(asfloat(foo.Load4(i_1 * 16 + 0)))); + } + float v = asfloat(bar.Load(160)); + float w = asfloat(foo.Load(160)); + [branch] + if (v > 10.0f) + { + foo.Store4(320, asuint(5.0f.xxxx)); + } + float value = 20.0f; + [flatten] + if (w > 40.0f) + { + value = 20.0f; + } + foo.Store4(320, asuint(value.xxxx)); +} + +void comp_main() +{ + _main(); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..44bde09f2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,33 @@ +ByteAddressBuffer ssbo : register(t1); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void Load(uint size) +{ + int byteAddrTemp = int(size >> uint(2)); + uint4 data = uint4(ssbo.Load(byteAddrTemp * 4 + 0), ssbo.Load((byteAddrTemp + 1) * 4 + 0), 
ssbo.Load((byteAddrTemp + 2) * 4 + 0), ssbo.Load((byteAddrTemp + 3) * 4 + 0)); +} + +void _main(uint3 id) +{ + uint param = 4u; + Load(param); +} + +void comp_main() +{ + uint3 id = gl_GlobalInvocationID; + uint3 param = id; + _main(param); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp new file mode 100644 index 000000000..c567fbaf1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp @@ -0,0 +1,32 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1u +#endif +static const uint _3 = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 3u +#endif +static const uint _4 = SPIRV_CROSS_CONSTANT_ID_2; +static const uint3 gl_WorkGroupSize = uint3(_3, 2u, _4); + +RWByteAddressBuffer _8 : register(u0); +RWByteAddressBuffer _9 : register(u1); + +static uint3 gl_WorkGroupID; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; +}; + +static uint3 _22 = gl_WorkGroupSize; + +void comp_main() +{ + _8.Store(gl_WorkGroupID.x * 4 + 0, asuint(asfloat(_9.Load(gl_WorkGroupID.x * 4 + 0)) + asfloat(_8.Load(gl_WorkGroupID.x * 4 + 0)))); +} + +[numthreads(SPIRV_CROSS_CONSTANT_ID_0, 2, SPIRV_CROSS_CONSTANT_ID_2)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag new file mode 100644 index 000000000..3d5d6288f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag @@ -0,0 +1,32 @@ +cbuffer _4_5 : register(b0) +{ + column_major float2x4 _5_m0 : packoffset(c0); + float4 _5_m1 : packoffset(c4); +}; + + +static float2 _3; + +struct SPIRV_Cross_Output +{ + float2 _3 : SV_Target0; +}; + +float2 _23() +{ + float2 _25 = mul(_5_m0, _5_m1); + return _25; +} + +void frag_main() +{ + _3 = _23(); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output._3 = _3; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag new file mode 100644 index 000000000..3951fd511 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag @@ -0,0 +1,30 @@ +Texture2D uTex : register(t1); +SamplerState uSampler : register(s0); + +static float4 FragColor; +static float2 vUV; + +struct SPIRV_Cross_Input +{ + float2 vUV : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uTex.Sample(uSampler, vUV); + FragColor += uTex.Sample(uSampler, vUV, int2(1, 1)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUV = stage_input.vUV; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/empty-struct.asm.frag new 
file mode 100644 index 000000000..38d12cd63 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/empty-struct.asm.frag @@ -0,0 +1,27 @@ +struct EmptyStructTest +{ + int empty_struct_member; +}; + +float GetValue(EmptyStructTest self) +{ + return 0.0f; +} + +float GetValue_1(EmptyStructTest self) +{ + return 0.0f; +} + +void frag_main() +{ + EmptyStructTest _23 = { 0 }; + EmptyStructTest emptyStruct; + float value = GetValue(emptyStruct); + value = GetValue_1(_23); +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/frem.asm.frag new file mode 100644 index 000000000..67998c56a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/frem.asm.frag @@ -0,0 +1,29 @@ +static float4 FragColor; +static float4 vA; +static float4 vB; + +struct SPIRV_Cross_Input +{ + float4 vA : TEXCOORD0; + float4 vB : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = fmod(vA, vB); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vA = stage_input.vA; + vB = stage_input.vB; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..e8978b6cd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,47 @@ +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float4 foo(float4 foo_1) +{ + return foo_1 + 1.0f.xxxx; +} + +float4 foo(float3 foo_1) +{ + return foo_1.xyzz + 1.0f.xxxx; +} + +float4 foo_1(float4 foo_2) +{ + return foo_2 + 2.0f.xxxx; +} + +float4 foo(float2 foo_2) +{ + return foo_2.xyxy + 2.0f.xxxx; +} + +void frag_main() +{ + float4 foo_3 = 1.0f.xxxx; + float4 foo_2 = foo(foo_3); + float3 foo_5 = 1.0f.xxx; + float4 foo_4 = foo(foo_5); + float4 foo_7 = 1.0f.xxxx; + float4 foo_6 = foo_1(foo_7); + float2 foo_9 = 1.0f.xx; + float4 foo_8 = foo(foo_9); + FragColor = ((foo_2 + foo_4) + foo_6) + foo_8; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..ed53720d9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,31 @@ +Texture2D uTexture : register(t0); +SamplerState _uTexture_sampler : register(s0); + +static int2 Size; + +struct SPIRV_Cross_Output +{ + int2 Size : SV_Target0; +}; + +uint2 SPIRV_Cross_textureSize(Texture2D Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +void frag_main() +{ + uint _19_dummy_parameter; + uint _20_dummy_parameter; + Size = int2(SPIRV_Cross_textureSize(uTexture, uint(0), _19_dummy_parameter)) + int2(SPIRV_Cross_textureSize(uTexture, uint(1), _20_dummy_parameter)); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.Size = Size; + return stage_output; +} diff --git 
a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..f668d6344 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,56 @@ +Texture2D uImage : register(t0); +SamplerState _uImage_sampler : register(s0); + +static float4 v0; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 v0 : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + int i = 0; + float phi; + float4 _36; + phi = 1.0f; + _36 = float4(1.0f, 2.0f, 1.0f, 2.0f); + for (;;) + { + FragColor = _36; + if (i < 4) + { + if (v0[i] > 0.0f) + { + float2 _48 = phi.xx; + i++; + phi += 2.0f; + _36 = uImage.SampleLevel(_uImage_sampler, _48, 0.0f); + continue; + } + else + { + break; + } + } + else + { + break; + } + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + v0 = stage_input.v0; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..d20cf995a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,19 @@ +static float3 FragColor; + +struct SPIRV_Cross_Output +{ + float3 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float3(asfloat(0x7f800000u), asfloat(0xff800000u), asfloat(0x7fc00000u)); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..a7aec01ba --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,55 @@ +static const float _46[16] = { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }; +static const float4 _76[4] = { 0.0f.xxxx, 1.0f.xxxx, 8.0f.xxxx, 5.0f.xxxx }; +static const float4 _90[4] = { 20.0f.xxxx, 30.0f.xxxx, 50.0f.xxxx, 60.0f.xxxx }; + +static float FragColor; +static int index; + +struct SPIRV_Cross_Input +{ + nointerpolation int index : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + float4 foobar[4] = _76; + float4 baz[4] = _76; + FragColor = _46[index]; + if (index < 10) + { + FragColor += _46[index ^ 1]; + } + else + { + FragColor += _46[index & 1]; + } + if (index > 30) + { + FragColor += _76[index & 3].y; + } + else + { + FragColor += _76[index & 1].x; + } + if (index > 30) + { + foobar[1].z = 20.0f; + } + FragColor += foobar[index & 3].z; + baz = _90; + FragColor += baz[index & 3].z; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + index = stage_input.index; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/pass-by-value.asm.frag new file mode 100644 index 
000000000..ab0471a07 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,30 @@ +cbuffer Registers +{ + float registers_foo : packoffset(c0); +}; + + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float add_value(float v, float w) +{ + return v + w; +} + +void frag_main() +{ + FragColor = add_value(10.0f, registers_foo); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/srem.asm.frag new file mode 100644 index 000000000..db5e71745 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/srem.asm.frag @@ -0,0 +1,29 @@ +static float4 FragColor; +static int4 vA; +static int4 vB; + +struct SPIRV_Cross_Input +{ + nointerpolation int4 vA : TEXCOORD0; + nointerpolation int4 vB : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float4(vA - vB * (vA / vB)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vA = stage_input.vA; + vB = stage_input.vB; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..695d5fe9d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,29 @@ +Texture2D uTexture : register(t0); +SamplerState _uTexture_sampler : register(s0); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uTexture.Load(int3(int2(gl_FragCoord.xy), 0)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag new file mode 100644 index 000000000..027dd00c5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag @@ -0,0 +1,41 @@ +Texture2D uShadow : register(t0); +SamplerComparisonState _uShadow_sampler : register(s0); +Texture2D uTexture : register(t1); +SamplerComparisonState uSampler : register(s2); + +static float3 vUV; +static float FragColor; + +struct SPIRV_Cross_Input +{ + float3 vUV : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float sample_combined() +{ + return uShadow.SampleCmp(_uShadow_sampler, vUV.xy, vUV.z); +} + +float sample_separate() +{ + return uTexture.SampleCmp(uSampler, vUV.xy, vUV.z); +} + +void frag_main() +{ + FragColor = sample_combined() + sample_separate(); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUV = stage_input.vUV; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git 
a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..c2fa519df --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/frag/unreachable.asm.frag @@ -0,0 +1,44 @@ +static int counter; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + nointerpolation int counter : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float4 _21; + +void frag_main() +{ + float4 _24; + _24 = _21; + float4 _33; + for (;;) + { + if (counter == 10) + { + _33 = 10.0f.xxxx; + break; + } + else + { + _33 = 30.0f.xxxx; + break; + } + } + FragColor = _33; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + counter = stage_input.counter; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert new file mode 100644 index 000000000..ddec81f20 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert @@ -0,0 +1,50 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_201 +#define SPIRV_CROSS_CONSTANT_ID_201 -10 +#endif +static const int _7 = SPIRV_CROSS_CONSTANT_ID_201; +#ifndef SPIRV_CROSS_CONSTANT_ID_202 +#define SPIRV_CROSS_CONSTANT_ID_202 100u +#endif +static const uint _8 = SPIRV_CROSS_CONSTANT_ID_202; +#ifndef SPIRV_CROSS_CONSTANT_ID_200 +#define SPIRV_CROSS_CONSTANT_ID_200 3.141590118408203125f +#endif +static const float _9 = SPIRV_CROSS_CONSTANT_ID_200; +static const int _20 = (_7 + 2); +static const uint _25 = (_8 % 5u); +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 int4(20, 30, _20, _20) +#endif +static const int4 _30 = SPIRV_CROSS_CONSTANT_ID_0; +static const int2 _32 = int2(_30.y, _30.x); +static const int _33 = _30.y; + +static float4 gl_Position; +static int _4; + +struct SPIRV_Cross_Output +{ + nointerpolation int _4 : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + float4 pos = 0.0f.xxxx; + pos.y += float(_20); + pos.z += float(_25); + pos += float4(_30); + float2 _56 = pos.xy + float2(_32); + pos = float4(_56.x, _56.y, pos.z, pos.w); + gl_Position = pos; + _4 = _33; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output._4 = _4; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..a18c6e705 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,37 @@ +static float4 gl_Position; +static int gl_VertexIndex; +static int gl_InstanceIndex; +struct SPIRV_Cross_Input +{ + uint gl_VertexIndex : SV_VertexID; + uint gl_InstanceIndex : SV_InstanceID; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +float4 _main(uint vid, uint iid) +{ + return float(vid + iid).xxxx; +} + +void vert_main() +{ + uint vid = uint(gl_VertexIndex); + uint iid = uint(gl_InstanceIndex); + uint param = vid; + uint param_1 = iid; + gl_Position = _main(param, param_1); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + 
gl_VertexIndex = int(stage_input.gl_VertexIndex); + gl_InstanceIndex = int(stage_input.gl_InstanceIndex); + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..8d5e771fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static int gl_VertexID; +static int gl_InstanceID; +struct SPIRV_Cross_Input +{ + uint gl_VertexID : SV_VertexID; + uint gl_InstanceID : SV_InstanceID; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = float(gl_VertexID + gl_InstanceID).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_VertexID = int(stage_input.gl_VertexID); + gl_InstanceID = int(stage_input.gl_InstanceID); + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/access-chains.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/access-chains.comp new file mode 100644 index 000000000..924e91912 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/access-chains.comp @@ -0,0 +1,21 @@ +RWByteAddressBuffer wo : register(u1); +ByteAddressBuffer ro : register(t0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + wo.Store4(gl_GlobalInvocationID.x * 64 + 272, asuint(asfloat(ro.Load4(gl_GlobalInvocationID.x * 64 + 160)))); + wo.Store4(gl_GlobalInvocationID.x * 16 + 480, asuint(asfloat(ro.Load4(gl_GlobalInvocationID.x * 16 + 480)))); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/address-buffers.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/address-buffers.comp new file mode 100644 index 000000000..a252fc8ae --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/address-buffers.comp @@ -0,0 +1,15 @@ +RWByteAddressBuffer WriteOnly : register(u2); +ByteAddressBuffer ReadOnly : register(t0); +RWByteAddressBuffer ReadWrite : register(u1); + +void comp_main() +{ + WriteOnly.Store4(0, asuint(asfloat(ReadOnly.Load4(0)))); + ReadWrite.Store4(0, asuint(asfloat(ReadWrite.Load4(0)) + 10.0f.xxxx)); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/atomic.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/atomic.comp new file mode 100644 index 000000000..72e15bf77 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/atomic.comp @@ -0,0 +1,89 @@ +RWByteAddressBuffer ssbo : register(u2); +RWTexture2D uImage : register(u0); +RWTexture2D iImage : register(u1); + +groupshared int int_atomic; +groupshared uint uint_atomic; +groupshared int int_atomic_array[1]; +groupshared uint uint_atomic_array[1]; + +void comp_main() +{ + uint _19; + InterlockedAdd(uImage[int2(1, 5)], 1u, _19); + uint _27; + InterlockedAdd(uImage[int2(1, 5)], 1u, _27); + iImage[int2(1, 6)] = int(_27).x; + uint _32; + InterlockedOr(uImage[int2(1, 5)], 1u, _32); + uint _34; + 
InterlockedXor(uImage[int2(1, 5)], 1u, _34); + uint _36; + InterlockedAnd(uImage[int2(1, 5)], 1u, _36); + uint _38; + InterlockedMin(uImage[int2(1, 5)], 1u, _38); + uint _40; + InterlockedMax(uImage[int2(1, 5)], 1u, _40); + uint _44; + InterlockedCompareExchange(uImage[int2(1, 5)], 10u, 2u, _44); + int _47; + InterlockedAdd(iImage[int2(1, 6)], 1, _47); + int _49; + InterlockedOr(iImage[int2(1, 6)], 1, _49); + int _51; + InterlockedXor(iImage[int2(1, 6)], 1, _51); + int _53; + InterlockedAnd(iImage[int2(1, 6)], 1, _53); + int _55; + InterlockedMin(iImage[int2(1, 6)], 1, _55); + int _57; + InterlockedMax(iImage[int2(1, 6)], 1, _57); + int _61; + InterlockedCompareExchange(iImage[int2(1, 5)], 10, 2, _61); + uint _68; + ssbo.InterlockedAdd(0, 1u, _68); + uint _70; + ssbo.InterlockedOr(0, 1u, _70); + uint _72; + ssbo.InterlockedXor(0, 1u, _72); + uint _74; + ssbo.InterlockedAnd(0, 1u, _74); + uint _76; + ssbo.InterlockedMin(0, 1u, _76); + uint _78; + ssbo.InterlockedMax(0, 1u, _78); + uint _80; + ssbo.InterlockedExchange(0, 1u, _80); + uint _82; + ssbo.InterlockedCompareExchange(0, 10u, 2u, _82); + int _85; + ssbo.InterlockedAdd(4, 1, _85); + int _87; + ssbo.InterlockedOr(4, 1, _87); + int _89; + ssbo.InterlockedXor(4, 1, _89); + int _91; + ssbo.InterlockedAnd(4, 1, _91); + int _93; + ssbo.InterlockedMin(4, 1, _93); + int _95; + ssbo.InterlockedMax(4, 1, _95); + int _97; + ssbo.InterlockedExchange(4, 1, _97); + int _99; + ssbo.InterlockedCompareExchange(4, 10, 2, _99); + int _102; + InterlockedAdd(int_atomic, 10, _102); + uint _105; + InterlockedAdd(uint_atomic, 10u, _105); + int _110; + InterlockedAdd(int_atomic_array[0], 10, _110); + uint _115; + InterlockedAdd(uint_atomic_array[0], 10u, _115); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/barriers.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/barriers.comp new file mode 100644 index 000000000..15af8ac11 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/barriers.comp @@ -0,0 +1,81 @@ +static const uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +void barrier_shared() +{ + GroupMemoryBarrier(); +} + +void full_barrier() +{ + AllMemoryBarrier(); +} + +void image_barrier() +{ + DeviceMemoryBarrier(); +} + +void buffer_barrier() +{ + DeviceMemoryBarrier(); +} + +void group_barrier() +{ + AllMemoryBarrier(); +} + +void barrier_shared_exec() +{ + GroupMemoryBarrierWithGroupSync(); +} + +void full_barrier_exec() +{ + AllMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); +} + +void image_barrier_exec() +{ + DeviceMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); +} + +void buffer_barrier_exec() +{ + DeviceMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); +} + +void group_barrier_exec() +{ + AllMemoryBarrier(); + GroupMemoryBarrierWithGroupSync(); +} + +void exec_barrier() +{ + GroupMemoryBarrierWithGroupSync(); +} + +void comp_main() +{ + barrier_shared(); + full_barrier(); + image_barrier(); + buffer_barrier(); + group_barrier(); + barrier_shared_exec(); + full_barrier_exec(); + image_barrier_exec(); + buffer_barrier_exec(); + group_barrier_exec(); + exec_barrier(); +} + +[numthreads(4, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/builtins.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/builtins.comp new file mode 100644 index 000000000..5d84883b2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/builtins.comp @@ -0,0 +1,32 @@ +static const uint3 
gl_WorkGroupSize = uint3(8u, 4u, 2u); + +static uint3 gl_WorkGroupID; +static uint3 gl_LocalInvocationID; +static uint3 gl_GlobalInvocationID; +static uint gl_LocalInvocationIndex; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; + uint3 gl_LocalInvocationID : SV_GroupThreadID; + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; + uint gl_LocalInvocationIndex : SV_GroupIndex; +}; + +void comp_main() +{ + uint3 local_id = gl_LocalInvocationID; + uint3 global_id = gl_GlobalInvocationID; + uint local_index = gl_LocalInvocationIndex; + uint3 work_group_size = gl_WorkGroupSize; + uint3 work_group_id = gl_WorkGroupID; +} + +[numthreads(8, 4, 2)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + gl_LocalInvocationID = stage_input.gl_LocalInvocationID; + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + gl_LocalInvocationIndex = stage_input.gl_LocalInvocationIndex; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/composite-array-initialization.comp new file mode 100644 index 000000000..675913cf1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/composite-array-initialization.comp @@ -0,0 +1,62 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 4.0f +#endif +static const float X = SPIRV_CROSS_CONSTANT_ID_0; +static const uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u); + +struct Data +{ + float a; + float b; +}; + +static const Data _21 = { 1.0f, 2.0f }; +static const Data _24 = { 3.0f, 4.0f }; +static const Data _25[2] = { { 1.0f, 2.0f }, { 3.0f, 4.0f } }; +static const Data _30 = { 3.0f, 5.0f }; + +RWByteAddressBuffer _61 : register(u0); + +static uint3 gl_WorkGroupID; +static uint3 gl_LocalInvocationID; +static uint gl_LocalInvocationIndex; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; + uint3 gl_LocalInvocationID : SV_GroupThreadID; + uint gl_LocalInvocationIndex : SV_GroupIndex; +}; + +static Data data[2]; +static Data data2[2]; + +Data combine(Data a, Data b) +{ + Data _46 = { a.a + b.a, a.b + b.b }; + return _46; +} + +void comp_main() +{ + data = _25; + Data _28 = { X, 2.0f }; + Data _31[2] = { _28, _30 }; + data2 = _31; + if (gl_LocalInvocationIndex == 0u) + { + Data param = data[gl_LocalInvocationID.x]; + Data param_1 = data2[gl_LocalInvocationID.x]; + Data _79 = combine(param, param_1); + _61.Store(gl_WorkGroupID.x * 8 + 0, asuint(_79.a)); + _61.Store(gl_WorkGroupID.x * 8 + 4, asuint(_79.b)); + } +} + +[numthreads(2, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + gl_LocalInvocationID = stage_input.gl_LocalInvocationID; + gl_LocalInvocationIndex = stage_input.gl_LocalInvocationIndex; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/globallycoherent.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/globallycoherent.comp new file mode 100644 index 000000000..69886256f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/globallycoherent.comp @@ -0,0 +1,18 @@ +globallycoherent RWByteAddressBuffer _29 : register(u3); +ByteAddressBuffer _33 : register(t2); +RWTexture2D uImageIn : register(u0); +globallycoherent RWTexture2D uImageOut : register(u1); + +void comp_main() +{ + int2 coord = int2(9, 7); + float4 indata = uImageIn[coord].xxxx; + uImageOut[coord] = indata.x; + _29.Store(0, asuint(asfloat(_33.Load(0)))); +} + +[numthreads(1, 1, 1)] +void 
main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/image.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/image.comp new file mode 100644 index 000000000..c8504e636 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/image.comp @@ -0,0 +1,71 @@ +RWTexture2D uImageInF : register(u0); +RWTexture2D uImageOutF : register(u1); +RWTexture2D uImageInI : register(u2); +RWTexture2D uImageOutI : register(u3); +RWTexture2D uImageInU : register(u4); +RWTexture2D uImageOutU : register(u5); +RWBuffer uImageInBuffer : register(u6); +RWBuffer uImageOutBuffer : register(u7); +RWTexture2D uImageInF2 : register(u8); +RWTexture2D uImageOutF2 : register(u9); +RWTexture2D uImageInI2 : register(u10); +RWTexture2D uImageOutI2 : register(u11); +RWTexture2D uImageInU2 : register(u12); +RWTexture2D uImageOutU2 : register(u13); +RWBuffer uImageInBuffer2 : register(u14); +RWBuffer uImageOutBuffer2 : register(u15); +RWTexture2D uImageInF4 : register(u16); +RWTexture2D uImageOutF4 : register(u17); +RWTexture2D uImageInI4 : register(u18); +RWTexture2D uImageOutI4 : register(u19); +RWTexture2D uImageInU4 : register(u20); +RWTexture2D uImageOutU4 : register(u21); +RWBuffer uImageInBuffer4 : register(u22); +RWBuffer uImageOutBuffer4 : register(u23); +RWTexture2D uImageNoFmtF : register(u24); +RWTexture2D uImageNoFmtU : register(u25); +RWTexture2D uImageNoFmtI : register(u26); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + float4 f = uImageInF[int2(gl_GlobalInvocationID.xy)].xxxx; + uImageOutF[int2(gl_GlobalInvocationID.xy)] = f.x; + int4 i = uImageInI[int2(gl_GlobalInvocationID.xy)].xxxx; + uImageOutI[int2(gl_GlobalInvocationID.xy)] = i.x; + uint4 u = uImageInU[int2(gl_GlobalInvocationID.xy)].xxxx; + uImageOutU[int2(gl_GlobalInvocationID.xy)] = u.x; + float4 b = uImageInBuffer[int(gl_GlobalInvocationID.x)].xxxx; + uImageOutBuffer[int(gl_GlobalInvocationID.x)] = b.x; + float4 f2 = uImageInF2[int2(gl_GlobalInvocationID.xy)].xyyy; + uImageOutF2[int2(gl_GlobalInvocationID.xy)] = f2.xy; + int4 i2 = uImageInI2[int2(gl_GlobalInvocationID.xy)].xyyy; + uImageOutI2[int2(gl_GlobalInvocationID.xy)] = i2.xy; + uint4 u2 = uImageInU2[int2(gl_GlobalInvocationID.xy)].xyyy; + uImageOutU2[int2(gl_GlobalInvocationID.xy)] = u2.xy; + float4 b2 = uImageInBuffer2[int(gl_GlobalInvocationID.x)].xyyy; + uImageOutBuffer2[int(gl_GlobalInvocationID.x)] = b2.xy; + float4 f4 = uImageInF4[int2(gl_GlobalInvocationID.xy)]; + uImageOutF4[int2(gl_GlobalInvocationID.xy)] = f4; + int4 i4 = uImageInI4[int2(gl_GlobalInvocationID.xy)]; + uImageOutI4[int2(gl_GlobalInvocationID.xy)] = i4; + uint4 u4 = uImageInU4[int2(gl_GlobalInvocationID.xy)]; + uImageOutU4[int2(gl_GlobalInvocationID.xy)] = u4; + float4 b4 = uImageInBuffer4[int(gl_GlobalInvocationID.x)]; + uImageOutBuffer4[int(gl_GlobalInvocationID.x)] = b4; + uImageNoFmtF[int2(gl_GlobalInvocationID.xy)] = b2; + uImageNoFmtU[int2(gl_GlobalInvocationID.xy)] = u4; + uImageNoFmtI[int2(gl_GlobalInvocationID.xy)] = i4; +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/inverse.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/inverse.comp new file mode 100644 index 000000000..3be954a6f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/inverse.comp @@ -0,0 +1,122 @@ 
+RWByteAddressBuffer _15 : register(u0); +ByteAddressBuffer _20 : register(t1); + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float2x2 SPIRV_Cross_Inverse(float2x2 m) +{ + float2x2 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = m[1][1]; + adj[0][1] = -m[0][1]; + + adj[1][0] = -m[1][0]; + adj[1][1] = m[0][0]; + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the determinant of a 2x2 matrix. +float SPIRV_Cross_Det2x2(float a1, float a2, float b1, float b2) +{ + return a1 * b2 - b1 * a2; +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float3x3 SPIRV_Cross_Inverse(float3x3 m) +{ + float3x3 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = SPIRV_Cross_Det2x2(m[1][1], m[1][2], m[2][1], m[2][2]); + adj[0][1] = -SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[2][1], m[2][2]); + adj[0][2] = SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[1][1], m[1][2]); + + adj[1][0] = -SPIRV_Cross_Det2x2(m[1][0], m[1][2], m[2][0], m[2][2]); + adj[1][1] = SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[2][0], m[2][2]); + adj[1][2] = -SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[1][0], m[1][2]); + + adj[2][0] = SPIRV_Cross_Det2x2(m[1][0], m[1][1], m[2][0], m[2][1]); + adj[2][1] = -SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[2][0], m[2][1]); + adj[2][2] = SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[1][0], m[1][1]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the determinant of a 3x3 matrix. +float SPIRV_Cross_Det3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3) +{ + return a1 * SPIRV_Cross_Det2x2(b2, b3, c2, c3) - b1 * SPIRV_Cross_Det2x2(a2, a3, c2, c3) + c1 * SPIRV_Cross_Det2x2(a2, a3, b2, b3); +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float4x4 SPIRV_Cross_Inverse(float4x4 m) +{ + float4x4 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. 
+ adj[0][0] = SPIRV_Cross_Det3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][1] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][2] = SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]); + adj[0][3] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]); + + adj[1][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]); + adj[1][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]); + + adj[2][0] = SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][1] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][2] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]); + adj[2][3] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]); + + adj[3][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]); + adj[3][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] * m[3][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? 
(adj * (1.0f / det)) : m; +} + +void comp_main() +{ + float2x2 _23 = asfloat(uint2x2(_20.Load2(0), _20.Load2(8))); + float2x2 _24 = SPIRV_Cross_Inverse(_23); + _15.Store2(0, asuint(_24[0])); + _15.Store2(8, asuint(_24[1])); + float3x3 _29 = asfloat(uint3x3(_20.Load3(16), _20.Load3(32), _20.Load3(48))); + float3x3 _30 = SPIRV_Cross_Inverse(_29); + _15.Store3(16, asuint(_30[0])); + _15.Store3(32, asuint(_30[1])); + _15.Store3(48, asuint(_30[2])); + float4x4 _35 = asfloat(uint4x4(_20.Load4(64), _20.Load4(80), _20.Load4(96), _20.Load4(112))); + float4x4 _36 = SPIRV_Cross_Inverse(_35); + _15.Store4(64, asuint(_36[0])); + _15.Store4(80, asuint(_36[1])); + _15.Store4(96, asuint(_36[2])); + _15.Store4(112, asuint(_36[3])); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/num-workgroups-alone.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/num-workgroups-alone.comp new file mode 100644 index 000000000..dee39e3d5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/num-workgroups-alone.comp @@ -0,0 +1,17 @@ +RWByteAddressBuffer _10 : register(u0); +cbuffer SPIRV_Cross_NumWorkgroups : register(b0) +{ + uint3 SPIRV_Cross_NumWorkgroups_1_count : packoffset(c0); +}; + + +void comp_main() +{ + _10.Store3(0, SPIRV_Cross_NumWorkgroups_1_count); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/num-workgroups-with-builtins.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/num-workgroups-with-builtins.comp new file mode 100644 index 000000000..1c98e5e56 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/num-workgroups-with-builtins.comp @@ -0,0 +1,24 @@ +RWByteAddressBuffer _10 : register(u0); +cbuffer SPIRV_Cross_NumWorkgroups : register(b0) +{ + uint3 SPIRV_Cross_NumWorkgroups_1_count : packoffset(c0); +}; + + +static uint3 gl_WorkGroupID; +struct SPIRV_Cross_Input +{ + uint3 gl_WorkGroupID : SV_GroupID; +}; + +void comp_main() +{ + _10.Store3(0, SPIRV_Cross_NumWorkgroups_1_count + gl_WorkGroupID); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_WorkGroupID = stage_input.gl_WorkGroupID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/rmw-matrix.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/rmw-matrix.comp new file mode 100644 index 000000000..ed6666935 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ +RWByteAddressBuffer _11 : register(u0); + +void comp_main() +{ + _11.Store(0, asuint(asfloat(_11.Load(0)) * asfloat(_11.Load(96)))); + _11.Store4(16, asuint(asfloat(_11.Load4(16)) * asfloat(_11.Load4(112)))); + float4x4 _35 = asfloat(uint4x4(_11.Load4(128), _11.Load4(144), _11.Load4(160), _11.Load4(176))); + float4x4 _37 = asfloat(uint4x4(_11.Load4(32), _11.Load4(48), _11.Load4(64), _11.Load4(80))); + float4x4 _38 = mul(_35, _37); + _11.Store4(32, asuint(_38[0])); + _11.Store4(48, asuint(_38[1])); + _11.Store4(64, asuint(_38[2])); + _11.Store4(80, asuint(_38[3])); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/rwbuffer-matrix.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/rwbuffer-matrix.comp new file mode 100644 index 000000000..e79829283 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/rwbuffer-matrix.comp @@ -0,0 +1,137 @@ +RWByteAddressBuffer _28 : register(u0); +cbuffer UBO : 
register(b1) +{ + int _68_index0 : packoffset(c0); + int _68_index1 : packoffset(c0.y); +}; + + +void row_to_col() +{ + float4x4 _55 = asfloat(uint4x4(_28.Load(64), _28.Load(80), _28.Load(96), _28.Load(112), _28.Load(68), _28.Load(84), _28.Load(100), _28.Load(116), _28.Load(72), _28.Load(88), _28.Load(104), _28.Load(120), _28.Load(76), _28.Load(92), _28.Load(108), _28.Load(124))); + _28.Store4(0, asuint(_55[0])); + _28.Store4(16, asuint(_55[1])); + _28.Store4(32, asuint(_55[2])); + _28.Store4(48, asuint(_55[3])); + float2x2 _58 = asfloat(uint2x2(_28.Load(144), _28.Load(152), _28.Load(148), _28.Load(156))); + _28.Store2(128, asuint(_58[0])); + _28.Store2(136, asuint(_58[1])); + float2x3 _61 = asfloat(uint2x3(_28.Load(192), _28.Load(200), _28.Load(208), _28.Load(196), _28.Load(204), _28.Load(212))); + _28.Store3(160, asuint(_61[0])); + _28.Store3(176, asuint(_61[1])); + float3x2 _64 = asfloat(uint3x2(_28.Load(240), _28.Load(256), _28.Load(244), _28.Load(260), _28.Load(248), _28.Load(264))); + _28.Store2(216, asuint(_64[0])); + _28.Store2(224, asuint(_64[1])); + _28.Store2(232, asuint(_64[2])); +} + +void col_to_row() +{ + float4x4 _34 = asfloat(uint4x4(_28.Load4(0), _28.Load4(16), _28.Load4(32), _28.Load4(48))); + _28.Store(64, asuint(_34[0].x)); + _28.Store(68, asuint(_34[1].x)); + _28.Store(72, asuint(_34[2].x)); + _28.Store(76, asuint(_34[3].x)); + _28.Store(80, asuint(_34[0].y)); + _28.Store(84, asuint(_34[1].y)); + _28.Store(88, asuint(_34[2].y)); + _28.Store(92, asuint(_34[3].y)); + _28.Store(96, asuint(_34[0].z)); + _28.Store(100, asuint(_34[1].z)); + _28.Store(104, asuint(_34[2].z)); + _28.Store(108, asuint(_34[3].z)); + _28.Store(112, asuint(_34[0].w)); + _28.Store(116, asuint(_34[1].w)); + _28.Store(120, asuint(_34[2].w)); + _28.Store(124, asuint(_34[3].w)); + float2x2 _40 = asfloat(uint2x2(_28.Load2(128), _28.Load2(136))); + _28.Store(144, asuint(_40[0].x)); + _28.Store(148, asuint(_40[1].x)); + _28.Store(152, asuint(_40[0].y)); + _28.Store(156, asuint(_40[1].y)); + float2x3 _46 = asfloat(uint2x3(_28.Load3(160), _28.Load3(176))); + _28.Store(192, asuint(_46[0].x)); + _28.Store(196, asuint(_46[1].x)); + _28.Store(200, asuint(_46[0].y)); + _28.Store(204, asuint(_46[1].y)); + _28.Store(208, asuint(_46[0].z)); + _28.Store(212, asuint(_46[1].z)); + float3x2 _52 = asfloat(uint3x2(_28.Load2(216), _28.Load2(224), _28.Load2(232))); + _28.Store(240, asuint(_52[0].x)); + _28.Store(244, asuint(_52[1].x)); + _28.Store(248, asuint(_52[2].x)); + _28.Store(256, asuint(_52[0].y)); + _28.Store(260, asuint(_52[1].y)); + _28.Store(264, asuint(_52[2].y)); +} + +void write_dynamic_index_row() +{ + _28.Store(_68_index0 * 4 + _68_index1 * 16 + 64, asuint(1.0f)); + _28.Store(_68_index0 * 4 + _68_index1 * 8 + 144, asuint(2.0f)); + _28.Store(_68_index0 * 4 + _68_index1 * 8 + 192, asuint(3.0f)); + _28.Store(_68_index0 * 4 + _68_index1 * 16 + 240, asuint(4.0f)); + _28.Store(_68_index0 * 4 + 64, asuint(1.0f.x)); + _28.Store(_68_index0 * 4 + 80, asuint(1.0f.xxxx.y)); + _28.Store(_68_index0 * 4 + 96, asuint(1.0f.xxxx.z)); + _28.Store(_68_index0 * 4 + 112, asuint(1.0f.xxxx.w)); + _28.Store(_68_index0 * 4 + 144, asuint(2.0f.x)); + _28.Store(_68_index0 * 4 + 152, asuint(2.0f.xx.y)); + _28.Store(_68_index0 * 4 + 192, asuint(3.0f.x)); + _28.Store(_68_index0 * 4 + 200, asuint(3.0f.xxx.y)); + _28.Store(_68_index0 * 4 + 208, asuint(3.0f.xxx.z)); + _28.Store(_68_index0 * 4 + 240, asuint(4.0f.x)); + _28.Store(_68_index0 * 4 + 256, asuint(4.0f.xx.y)); +} + +void write_dynamic_index_col() +{ + _28.Store(_68_index0 * 16 + 
_68_index1 * 4 + 0, asuint(1.0f)); + _28.Store(_68_index0 * 8 + _68_index1 * 4 + 128, asuint(2.0f)); + _28.Store(_68_index0 * 16 + _68_index1 * 4 + 160, asuint(3.0f)); + _28.Store(_68_index0 * 8 + _68_index1 * 4 + 216, asuint(4.0f)); + _28.Store4(_68_index0 * 16 + 0, asuint(1.0f.xxxx)); + _28.Store2(_68_index0 * 8 + 128, asuint(2.0f.xx)); + _28.Store3(_68_index0 * 16 + 160, asuint(3.0f.xxx)); + _28.Store2(_68_index0 * 8 + 216, asuint(4.0f.xx)); +} + +void read_dynamic_index_row() +{ + float a0 = asfloat(_28.Load(_68_index0 * 4 + _68_index1 * 16 + 64)); + float a1 = asfloat(_28.Load(_68_index0 * 4 + _68_index1 * 8 + 144)); + float a2 = asfloat(_28.Load(_68_index0 * 4 + _68_index1 * 8 + 192)); + float a3 = asfloat(_28.Load(_68_index0 * 4 + _68_index1 * 16 + 240)); + float4 v0 = asfloat(uint4(_28.Load(_68_index0 * 4 + 64), _28.Load(_68_index0 * 4 + 80), _28.Load(_68_index0 * 4 + 96), _28.Load(_68_index0 * 4 + 112))); + float2 v1 = asfloat(uint2(_28.Load(_68_index0 * 4 + 144), _28.Load(_68_index0 * 4 + 152))); + float3 v2 = asfloat(uint3(_28.Load(_68_index0 * 4 + 192), _28.Load(_68_index0 * 4 + 200), _28.Load(_68_index0 * 4 + 208))); + float2 v3 = asfloat(uint2(_28.Load(_68_index0 * 4 + 240), _28.Load(_68_index0 * 4 + 256))); +} + +void read_dynamic_index_col() +{ + float a0 = asfloat(_28.Load(_68_index0 * 16 + _68_index1 * 4 + 0)); + float a1 = asfloat(_28.Load(_68_index0 * 8 + _68_index1 * 4 + 128)); + float a2 = asfloat(_28.Load(_68_index0 * 16 + _68_index1 * 4 + 160)); + float a3 = asfloat(_28.Load(_68_index0 * 8 + _68_index1 * 4 + 216)); + float4 v0 = asfloat(_28.Load4(_68_index0 * 16 + 0)); + float2 v1 = asfloat(_28.Load2(_68_index0 * 8 + 128)); + float3 v2 = asfloat(_28.Load3(_68_index0 * 16 + 160)); + float2 v3 = asfloat(_28.Load2(_68_index0 * 8 + 216)); +} + +void comp_main() +{ + row_to_col(); + col_to_row(); + write_dynamic_index_row(); + write_dynamic_index_col(); + read_dynamic_index_row(); + read_dynamic_index_col(); +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/shared.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/shared.comp new file mode 100644 index 000000000..5d1290038 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/shared.comp @@ -0,0 +1,31 @@ +static const uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +ByteAddressBuffer _22 : register(t0); +RWByteAddressBuffer _44 : register(u1); + +static uint3 gl_GlobalInvocationID; +static uint gl_LocalInvocationIndex; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; + uint gl_LocalInvocationIndex : SV_GroupIndex; +}; + +groupshared float sShared[4]; + +void comp_main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = asfloat(_22.Load(ident * 4 + 0)); + sShared[gl_LocalInvocationIndex] = idata; + GroupMemoryBarrierWithGroupSync(); + _44.Store(ident * 4 + 0, asuint(sShared[(4u - gl_LocalInvocationIndex) - 1u])); +} + +[numthreads(4, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + gl_LocalInvocationIndex = stage_input.gl_LocalInvocationIndex; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/spec-constant-op-member-array.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/spec-constant-op-member-array.comp new file mode 100644 index 000000000..ad815b282 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/spec-constant-op-member-array.comp @@ -0,0 +1,49 @@ +#ifndef 
SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 100 +#endif +static const int a = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 200 +#endif +static const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 300 +#endif +static const int c = SPIRV_CROSS_CONSTANT_ID_2; +static const int _18 = (c + 50); +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 400 +#endif +static const int e = SPIRV_CROSS_CONSTANT_ID_3; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +RWByteAddressBuffer _22 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + _22.Store(gl_GlobalInvocationID.x * 4 + 2800, uint(int(_22.Load(gl_GlobalInvocationID.x * 4 + 2800)) + (int(_22.Load(gl_GlobalInvocationID.x * 4 + 2400)) + e))); +} + +[numthreads(1, 1, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/spec-constant-work-group-size.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/spec-constant-work-group-size.comp new file mode 100644 index 000000000..55ebf32bb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/spec-constant-work-group-size.comp @@ -0,0 +1,43 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 2 +#endif +static const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1 +#endif +static const int a = SPIRV_CROSS_CONSTANT_ID_0; +static const uint _26 = (uint(a) + 0u); +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 1u +#endif +static const uint _27 = SPIRV_CROSS_CONSTANT_ID_10; +static const uint3 gl_WorkGroupSize = uint3(_27, 20u, 1u); +static const uint _32 = gl_WorkGroupSize.x; +static const uint _33 = (_26 + _32); +static const uint _34 = gl_WorkGroupSize.y; +static const uint _35 = (_33 + _34); +static const int _42 = (1 - a); + +RWByteAddressBuffer _23 : register(u0); + +static uint3 gl_GlobalInvocationID; +struct SPIRV_Cross_Input +{ + uint3 gl_GlobalInvocationID : SV_DispatchThreadID; +}; + +void comp_main() +{ + int spec_const_array_size[b]; + spec_const_array_size[0] = 10; + spec_const_array_size[1] = 40; + spec_const_array_size[a] = a; + _23.Store((_35 + gl_GlobalInvocationID.x) * 4 + 0, uint(b + spec_const_array_size[_42])); +} + +[numthreads(SPIRV_CROSS_CONSTANT_ID_10, 20, 1)] +void main(SPIRV_Cross_Input stage_input) +{ + gl_GlobalInvocationID = stage_input.gl_GlobalInvocationID; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/ssbo-array.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/ssbo-array.comp new file mode 100644 index 000000000..90927421c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/ssbo-array.comp @@ -0,0 +1,11 @@ +RWByteAddressBuffer ssbo0 : register(u0); + +void comp_main() +{ +} + +[numthreads(1, 1, 1)] +void main() +{ + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp new file mode 100644 index 000000000..b87574f1a --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp @@ -0,0 +1,93 @@ +RWByteAddressBuffer _9 : register(u0, space0); + +static uint4 gl_SubgroupEqMask; +static uint4 gl_SubgroupGeMask; +static uint4 gl_SubgroupGtMask; +static uint4 gl_SubgroupLeMask; +static uint4 gl_SubgroupLtMask; +void comp_main() +{ + _9.Store(0, asuint(float(WaveGetLaneCount()))); + _9.Store(0, asuint(float(WaveGetLaneIndex()))); + bool elected = WaveIsFirstLane(); + _9.Store(0, asuint(float4(gl_SubgroupEqMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupGeMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupGtMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupLeMask).x)); + _9.Store(0, asuint(float4(gl_SubgroupLtMask).x)); + float4 broadcasted = WaveReadLaneAt(10.0f.xxxx, 8u); + float3 first = WaveReadLaneFirst(20.0f.xxx); + uint4 ballot_value = WaveActiveBallot(true); + uint bit_count = countbits(ballot_value.x) + countbits(ballot_value.y) + countbits(ballot_value.z) + countbits(ballot_value.w); + bool has_all = WaveActiveAllTrue(true); + bool has_any = WaveActiveAnyTrue(true); + bool has_equal = WaveActiveAllEqualBool(true); + float4 added = WaveActiveSum(20.0f.xxxx); + int4 iadded = WaveActiveSum(int4(20, 20, 20, 20)); + float4 multiplied = WaveActiveProduct(20.0f.xxxx); + int4 imultiplied = WaveActiveProduct(int4(20, 20, 20, 20)); + float4 lo = WaveActiveMin(20.0f.xxxx); + float4 hi = WaveActiveMax(20.0f.xxxx); + int4 slo = WaveActiveMin(int4(20, 20, 20, 20)); + int4 shi = WaveActiveMax(int4(20, 20, 20, 20)); + uint4 ulo = WaveActiveMin(uint4(20u, 20u, 20u, 20u)); + uint4 uhi = WaveActiveMax(uint4(20u, 20u, 20u, 20u)); + uint4 anded = WaveActiveBitAnd(ballot_value); + uint4 ored = WaveActiveBitOr(ballot_value); + uint4 xored = WaveActiveBitXor(ballot_value); + added = WavePrefixSum(added) + added; + iadded = WavePrefixSum(iadded) + iadded; + multiplied = WavePrefixProduct(multiplied) * multiplied; + imultiplied = WavePrefixProduct(imultiplied) * imultiplied; + added = WavePrefixSum(multiplied); + multiplied = WavePrefixProduct(multiplied); + iadded = WavePrefixSum(imultiplied); + imultiplied = WavePrefixProduct(imultiplied); + float4 swap_horiz = QuadReadAcrossX(20.0f.xxxx); + float4 swap_vertical = QuadReadAcrossY(20.0f.xxxx); + float4 swap_diagonal = QuadReadAcrossDiagonal(20.0f.xxxx); + float4 quad_broadcast = QuadReadLaneAt(20.0f.xxxx, 3u); +} + +[numthreads(1, 1, 1)] +void main() +{ + gl_SubgroupEqMask = 1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96)); + if (WaveGetLaneIndex() >= 32) gl_SubgroupEqMask.x = 0; + if (WaveGetLaneIndex() >= 64 || WaveGetLaneIndex() < 32) gl_SubgroupEqMask.y = 0; + if (WaveGetLaneIndex() >= 96 || WaveGetLaneIndex() < 64) gl_SubgroupEqMask.z = 0; + if (WaveGetLaneIndex() < 96) gl_SubgroupEqMask.w = 0; + gl_SubgroupGeMask = ~((1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u); + if (WaveGetLaneIndex() >= 32) gl_SubgroupGeMask.x = 0u; + if (WaveGetLaneIndex() >= 64) gl_SubgroupGeMask.y = 0u; + if (WaveGetLaneIndex() >= 96) gl_SubgroupGeMask.z = 0u; + if (WaveGetLaneIndex() < 32) gl_SubgroupGeMask.y = ~0u; + if (WaveGetLaneIndex() < 64) gl_SubgroupGeMask.z = ~0u; + if (WaveGetLaneIndex() < 96) gl_SubgroupGeMask.w = ~0u; + uint gt_lane_index = WaveGetLaneIndex() + 1; + gl_SubgroupGtMask = ~((1u << (gt_lane_index - uint4(0, 32, 64, 96))) - 1u); + if (gt_lane_index >= 32) gl_SubgroupGtMask.x = 0u; + if (gt_lane_index >= 64) gl_SubgroupGtMask.y = 0u; + if (gt_lane_index >= 96) gl_SubgroupGtMask.z = 0u; + if (gt_lane_index >= 128) gl_SubgroupGtMask.w = 
0u; + if (gt_lane_index < 32) gl_SubgroupGtMask.y = ~0u; + if (gt_lane_index < 64) gl_SubgroupGtMask.z = ~0u; + if (gt_lane_index < 96) gl_SubgroupGtMask.w = ~0u; + uint le_lane_index = WaveGetLaneIndex() + 1; + gl_SubgroupLeMask = (1u << (le_lane_index - uint4(0, 32, 64, 96))) - 1u; + if (le_lane_index >= 32) gl_SubgroupLeMask.x = ~0u; + if (le_lane_index >= 64) gl_SubgroupLeMask.y = ~0u; + if (le_lane_index >= 96) gl_SubgroupLeMask.z = ~0u; + if (le_lane_index >= 128) gl_SubgroupLeMask.w = ~0u; + if (le_lane_index < 32) gl_SubgroupLeMask.y = 0u; + if (le_lane_index < 64) gl_SubgroupLeMask.z = 0u; + if (le_lane_index < 96) gl_SubgroupLeMask.w = 0u; + gl_SubgroupLtMask = (1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u; + if (WaveGetLaneIndex() >= 32) gl_SubgroupLtMask.x = ~0u; + if (WaveGetLaneIndex() >= 64) gl_SubgroupLtMask.y = ~0u; + if (WaveGetLaneIndex() >= 96) gl_SubgroupLtMask.z = ~0u; + if (WaveGetLaneIndex() < 32) gl_SubgroupLtMask.y = 0u; + if (WaveGetLaneIndex() < 64) gl_SubgroupLtMask.z = 0u; + if (WaveGetLaneIndex() < 96) gl_SubgroupLtMask.w = 0u; + comp_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..407fa2bda --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,30 @@ +static const float _17[5] = { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f }; + +static float4 FragColor; +static float4 v0; + +struct SPIRV_Cross_Input +{ + float4 v0 : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + for (int i = 0; i < 4; i++, FragColor += _17[i].xxxx) + { + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + v0 = stage_input.v0; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic-color-3comp.sm30.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic-color-3comp.sm30.frag new file mode 100644 index 000000000..d3697d650 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic-color-3comp.sm30.frag @@ -0,0 +1,26 @@ +static float3 FragColor; +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : COLOR0; +}; + +void frag_main() +{ + FragColor = vColor.xyz; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = float4(FragColor, 0.0); + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic-color-3comp.sm50.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic-color-3comp.sm50.frag new file mode 100644 index 000000000..52f6fed6c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic-color-3comp.sm50.frag @@ -0,0 +1,26 @@ +static float3 FragColor; +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float3 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vColor.xyz; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git 
a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic.frag new file mode 100644 index 000000000..6d067041c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/basic.frag @@ -0,0 +1,32 @@ +Texture2D uTex : register(t0); +SamplerState _uTex_sampler : register(s0); + +static float4 FragColor; +static float4 vColor; +static float2 vTex; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; + float2 vTex : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vColor * uTex.Sample(_uTex_sampler, vTex); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + vTex = stage_input.vTex; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/bit-conversions.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/bit-conversions.frag new file mode 100644 index 000000000..2ed359bfc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/bit-conversions.frag @@ -0,0 +1,27 @@ +static float2 value; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 value : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + int i = asint(value.x); + FragColor = float4(1.0f, 0.0f, asfloat(i), 1.0f); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + value = stage_input.value; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/boolean-mix.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/boolean-mix.frag new file mode 100644 index 000000000..f3e84898d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/boolean-mix.frag @@ -0,0 +1,27 @@ +static float2 FragColor; +static float2 x0; + +struct SPIRV_Cross_Input +{ + float2 x0 : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float2 FragColor : SV_Target0; +}; + +void frag_main() +{ + bool2 _27 = (x0.x > x0.y).xx; + FragColor = float2(_27.x ? float2(1.0f, 0.0f).x : float2(0.0f, 1.0f).x, _27.y ? 
float2(1.0f, 0.0f).y : float2(0.0f, 1.0f).y); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + x0 = stage_input.x0; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/builtins.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/builtins.frag new file mode 100644 index 000000000..922eca7c2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/builtins.frag @@ -0,0 +1,33 @@ +static float4 gl_FragCoord; +static float gl_FragDepth; +static float4 FragColor; +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; + float gl_FragDepth : SV_Depth; +}; + +void frag_main() +{ + FragColor = gl_FragCoord + vColor; + gl_FragDepth = 0.5f; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + vColor = stage_input.vColor; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_FragDepth = gl_FragDepth; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/bvec-operations.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/bvec-operations.frag new file mode 100644 index 000000000..2398d8c6a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/bvec-operations.frag @@ -0,0 +1,29 @@ +static float2 value; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 value : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + bool2 _25 = bool2(value.x == 0.0f, value.y == 0.0f); + bool2 bools1 = bool2(!_25.x, !_25.y); + bool2 bools2 = bool2(value.x <= float2(1.5f, 0.5f).x, value.y <= float2(1.5f, 0.5f).y); + FragColor = float4(1.0f, 0.0f, float(bools1.x), float(bools2.x)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + value = stage_input.value; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/clip-cull-distance.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/clip-cull-distance.frag new file mode 100644 index 000000000..52f1ac30b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/clip-cull-distance.frag @@ -0,0 +1,30 @@ +static float gl_ClipDistance[2]; +static float gl_CullDistance[1]; +static float FragColor; + +struct SPIRV_Cross_Input +{ + float2 gl_ClipDistance0 : SV_ClipDistance0; + float gl_CullDistance0 : SV_CullDistance0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = (gl_ClipDistance[0] + gl_CullDistance[0]) + gl_ClipDistance[1]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_ClipDistance[0] = stage_input.gl_ClipDistance0.x; + gl_ClipDistance[1] = stage_input.gl_ClipDistance0.y; + gl_CullDistance[0] = stage_input.gl_CullDistance0.x; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/combined-texture-sampler-parameter.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/combined-texture-sampler-parameter.frag new file mode 100644 index 000000000..7fcff423b --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/combined-texture-sampler-parameter.frag @@ -0,0 +1,44 @@ +Texture2D uSampler : register(t0); +SamplerState _uSampler_sampler : register(s0); +Texture2D uSamplerShadow : register(t1); +SamplerComparisonState _uSamplerShadow_sampler : register(s1); + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float4 samp2(Texture2D s, SamplerState _s_sampler) +{ + return s.Sample(_s_sampler, 1.0f.xx) + s.Load(int3(int2(10, 10), 0)); +} + +float4 samp3(Texture2D s, SamplerState _s_sampler) +{ + return samp2(s, _s_sampler); +} + +float samp4(Texture2D s, SamplerComparisonState _s_sampler) +{ + return s.SampleCmp(_s_sampler, 1.0f.xxx.xy, 1.0f.xxx.z); +} + +float samp(Texture2D s0, SamplerState _s0_sampler, Texture2D s1, SamplerComparisonState _s1_sampler) +{ + return samp3(s0, _s0_sampler).x + samp4(s1, _s1_sampler); +} + +void frag_main() +{ + FragColor = samp(uSampler, _uSampler_sampler, uSamplerShadow, _uSamplerShadow_sampler); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/combined-texture-sampler-shadow.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/combined-texture-sampler-shadow.frag new file mode 100644 index 000000000..af5b0b557 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/combined-texture-sampler-shadow.frag @@ -0,0 +1,40 @@ +Texture2D uDepth : register(t2); +SamplerComparisonState uSampler : register(s0); +SamplerState uSampler1 : register(s1); + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float samp2(Texture2D t, SamplerComparisonState s) +{ + return t.SampleCmp(s, 1.0f.xxx.xy, 1.0f.xxx.z); +} + +float samp3(Texture2D t, SamplerState s) +{ + return t.Sample(s, 1.0f.xx).x; +} + +float samp(Texture2D t, SamplerComparisonState s, SamplerState s1) +{ + float r0 = samp2(t, s); + float r1 = samp3(t, s1); + return r0 + r1; +} + +void frag_main() +{ + FragColor = samp(uDepth, uSampler, uSampler1); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..d5ccb9b98 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,40 @@ +RWByteAddressBuffer _34 : register(u0); +Texture2D Buf : register(t1); +SamplerState _Buf_sampler : register(s1); + +static float4 gl_FragCoord; +static int vIn; +static int vIn2; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + nointerpolation int vIn : TEXCOORD0; + nointerpolation int vIn2 : TEXCOORD1; + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + int4 coords = Buf.Load(int3(int2(gl_FragCoord.xy), 0)); + float4 foo = asfloat(_34.Load4((coords.x % 16) * 16 + 0)); + int c = vIn * vIn; + int d = vIn2 * vIn2; + FragColor = (foo + foo) + asfloat(_34.Load4((c + d) * 16 + 0)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + vIn = stage_input.vIn; + vIn2 = stage_input.vIn2; + frag_main(); + SPIRV_Cross_Output 
stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag new file mode 100644 index 000000000..d330706c7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag @@ -0,0 +1,44 @@ +struct CBO_1 +{ + float4 a; + float4 b; + float4 c; + float4 d; +}; + +ConstantBuffer cbo[2][4] : register(b4, space0); +cbuffer PushMe +{ + float4 push_a : packoffset(c0); + float4 push_b : packoffset(c1); + float4 push_c : packoffset(c2); + float4 push_d : packoffset(c3); +}; + + +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = cbo[1][2].a; + FragColor += cbo[1][2].b; + FragColor += cbo[1][2].c; + FragColor += cbo[1][2].d; + FragColor += push_a; + FragColor += push_b; + FragColor += push_c; + FragColor += push_d; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/constant-composites.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/constant-composites.frag new file mode 100644 index 000000000..2613e1c2c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/constant-composites.frag @@ -0,0 +1,43 @@ +struct Foo +{ + float a; + float b; +}; + +static const float _16[4] = { 1.0f, 4.0f, 3.0f, 2.0f }; +static const Foo _24 = { 10.0f, 20.0f }; +static const Foo _27 = { 30.0f, 40.0f }; +static const Foo _28[2] = { { 10.0f, 20.0f }, { 30.0f, 40.0f } }; + +static float4 FragColor; +static int _line; + +struct SPIRV_Cross_Input +{ + nointerpolation int _line : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +static float lut[4]; +static Foo foos[2]; + +void frag_main() +{ + lut = _16; + foos = _28; + FragColor = lut[_line].xxxx; + FragColor += (foos[_line].a * foos[1 - _line].a).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + _line = stage_input._line; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..322102ce8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,55 @@ +Texture2D uSampler : register(t0); +SamplerState _uSampler_sampler : register(s0); + +static float4 FragColor; +static float4 vInput; + +struct SPIRV_Cross_Input +{ + float4 vInput : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = vInput; + float4 t = uSampler.Sample(_uSampler_sampler, vInput.xy); + float4 d0 = ddx(vInput); + float4 d1 = ddy(vInput); + float4 d2 = fwidth(vInput); + float4 d3 = ddx_coarse(vInput); + float4 d4 = ddy_coarse(vInput); + float4 d5 = fwidth(vInput); + float4 d6 = ddx_fine(vInput); + float4 d7 = ddy_fine(vInput); + float4 d8 = fwidth(vInput); + float _56_tmp = uSampler.CalculateLevelOfDetail(_uSampler_sampler, vInput.zw); + float2 lod = float2(_56_tmp, _56_tmp); + if (vInput.y > 10.0f) + { + FragColor += t; + FragColor 
+= d0; + FragColor += d1; + FragColor += d2; + FragColor += d3; + FragColor += d4; + FragColor += d5; + FragColor += d6; + FragColor += d7; + FragColor += d8; + FragColor += lod.xyxy; + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vInput = stage_input.vInput; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/depth-greater-than.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/depth-greater-than.frag new file mode 100644 index 000000000..b9f50db00 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/depth-greater-than.frag @@ -0,0 +1,19 @@ +static float gl_FragDepth; +struct SPIRV_Cross_Output +{ + float gl_FragDepth : SV_DepthGreaterEqual; +}; + +void frag_main() +{ + gl_FragDepth = 0.5f; +} + +[earlydepthstencil] +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_FragDepth = gl_FragDepth; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/depth-less-than.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/depth-less-than.frag new file mode 100644 index 000000000..a702fd9f8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/depth-less-than.frag @@ -0,0 +1,19 @@ +static float gl_FragDepth; +struct SPIRV_Cross_Output +{ + float gl_FragDepth : SV_DepthLessEqual; +}; + +void frag_main() +{ + gl_FragDepth = 0.5f; +} + +[earlydepthstencil] +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_FragDepth = gl_FragDepth; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/early-fragment-test.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/early-fragment-test.frag new file mode 100644 index 000000000..ae2569d5c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/early-fragment-test.frag @@ -0,0 +1,9 @@ +void frag_main() +{ +} + +[earlydepthstencil] +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/fp16-packing.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/fp16-packing.frag new file mode 100644 index 000000000..d87828225 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/fp16-packing.frag @@ -0,0 +1,44 @@ +static float2 FP32Out; +static uint FP16; +static uint FP16Out; +static float2 FP32; + +struct SPIRV_Cross_Input +{ + nointerpolation uint FP16 : TEXCOORD0; + nointerpolation float2 FP32 : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float2 FP32Out : SV_Target0; + uint FP16Out : SV_Target1; +}; + +uint SPIRV_Cross_packHalf2x16(float2 value) +{ + uint2 Packed = f32tof16(value); + return Packed.x | (Packed.y << 16); +} + +float2 SPIRV_Cross_unpackHalf2x16(uint value) +{ + return f16tof32(uint2(value & 0xffff, value >> 16)); +} + +void frag_main() +{ + FP32Out = SPIRV_Cross_unpackHalf2x16(FP16); + FP16Out = SPIRV_Cross_packHalf2x16(FP32); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + FP16 = stage_input.FP16; + FP32 = stage_input.FP32; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FP32Out = FP32Out; + stage_output.FP16Out = FP16Out; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/fp16.invalid.desktop.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/fp16.invalid.desktop.frag new file mode 100644 index 000000000..e10d6724e --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/fp16.invalid.desktop.frag @@ -0,0 +1,179 @@ +static min16float4 v4; +static min16float3 v3; +static min16float v1; +static min16float2 v2; +static float o1; +static float2 o2; +static float3 o3; +static float4 o4; + +struct SPIRV_Cross_Input +{ + min16float v1 : TEXCOORD0; + min16float2 v2 : TEXCOORD1; + min16float3 v3 : TEXCOORD2; + min16float4 v4 : TEXCOORD3; +}; + +struct SPIRV_Cross_Output +{ + float o1 : SV_Target0; + float2 o2 : SV_Target1; + float3 o3 : SV_Target2; + float4 o4 : SV_Target3; +}; + +float mod(float x, float y) +{ + return x - y * floor(x / y); +} + +float2 mod(float2 x, float2 y) +{ + return x - y * floor(x / y); +} + +float3 mod(float3 x, float3 y) +{ + return x - y * floor(x / y); +} + +float4 mod(float4 x, float4 y) +{ + return x - y * floor(x / y); +} + +uint SPIRV_Cross_packFloat2x16(min16float2 value) +{ + uint2 Packed = f32tof16(value); + return Packed.x | (Packed.y << 16); +} + +min16float2 SPIRV_Cross_unpackFloat2x16(uint value) +{ + return min16float2(f16tof32(uint2(value & 0xffff, value >> 16))); +} + +void test_constants() +{ + min16float a = min16float(1.0); + min16float b = min16float(1.5); + min16float c = min16float(-1.5); + min16float d = min16float(0.0 / 0.0); + min16float e = min16float(1.0 / 0.0); + min16float f = min16float(-1.0 / 0.0); + min16float g = min16float(1014.0); + min16float h = min16float(9.5367431640625e-07); +} + +min16float test_result() +{ + return min16float(1.0); +} + +void test_conversions() +{ + min16float one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = one != min16float(0.0); + float d = float(one); + double e = double(one); + min16float a2 = min16float(a); + min16float b2 = min16float(b); + min16float c2 = min16float(c); + min16float d2 = min16float(d); + min16float e2 = min16float(e); +} + +void test_builtins() +{ + min16float4 res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan2(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = rsqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + res = ceil(v4); + res = frac(v4); + res = mod(v4, v4); + min16float4 tmp; + min16float4 _144 = modf(v4, tmp); + res = _144; + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = lerp(v4, v4, v4); + bool4 _164 = bool4(v4.x < v4.x, v4.y < v4.y, v4.z < v4.z, v4.w < v4.w); + res = min16float4(_164.x ? v4.x : v4.x, _164.y ? v4.y : v4.y, _164.z ? v4.z : v4.z, _164.w ? 
v4.w : v4.w); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + bool4 btmp = isnan(v4); + btmp = isinf(v4); + res = mad(v4, v4, v4); + uint pack0 = SPIRV_Cross_packFloat2x16(v4.xy); + uint pack1 = SPIRV_Cross_packFloat2x16(v4.zw); + res = min16float4(SPIRV_Cross_unpackFloat2x16(pack0), SPIRV_Cross_unpackFloat2x16(pack1)); + min16float t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + min16float3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + btmp = bool4(v4.x < v4.x, v4.y < v4.y, v4.z < v4.z, v4.w < v4.w); + btmp = bool4(v4.x <= v4.x, v4.y <= v4.y, v4.z <= v4.z, v4.w <= v4.w); + btmp = bool4(v4.x > v4.x, v4.y > v4.y, v4.z > v4.z, v4.w > v4.w); + btmp = bool4(v4.x >= v4.x, v4.y >= v4.y, v4.z >= v4.z, v4.w >= v4.w); + btmp = bool4(v4.x == v4.x, v4.y == v4.y, v4.z == v4.z, v4.w == v4.w); + btmp = bool4(v4.x != v4.x, v4.y != v4.y, v4.z != v4.z, v4.w != v4.w); + res = ddx(v4); + res = ddy(v4); + res = ddx_fine(v4); + res = ddy_fine(v4); + res = ddx_coarse(v4); + res = ddy_coarse(v4); + res = fwidth(v4); + res = fwidth(v4); + res = fwidth(v4); +} + +void frag_main() +{ + test_constants(); + test_conversions(); + test_builtins(); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + v4 = stage_input.v4; + v3 = stage_input.v3; + v1 = stage_input.v1; + v2 = stage_input.v2; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.o1 = o1; + stage_output.o2 = o2; + stage_output.o3 = o3; + stage_output.o4 = o4; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/front-facing.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/front-facing.frag new file mode 100644 index 000000000..4ed09a2bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/front-facing.frag @@ -0,0 +1,39 @@ +static bool gl_FrontFacing; +static float4 FragColor; +static float4 vA; +static float4 vB; + +struct SPIRV_Cross_Input +{ + float4 vA : TEXCOORD0; + float4 vB : TEXCOORD1; + bool gl_FrontFacing : SV_IsFrontFace; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + if (gl_FrontFacing) + { + FragColor = vA; + } + else + { + FragColor = vB; + } +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FrontFacing = stage_input.gl_FrontFacing; + vA = stage_input.vA; + vB = stage_input.vB; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/image-query-selective.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/image-query-selective.frag new file mode 100644 index 000000000..25c12da66 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/image-query-selective.frag @@ -0,0 +1,146 @@ +Texture1D uSampler1DUint : register(t0); +SamplerState _uSampler1DUint_sampler : register(s0); +Texture1D uSampler1DInt : register(t0); +SamplerState _uSampler1DInt_sampler : register(s0); +Texture1D uSampler1DFloat : register(t0); +SamplerState _uSampler1DFloat_sampler : register(s0); +Texture2DArray uSampler2DArray : register(t2); +SamplerState _uSampler2DArray_sampler : register(s2); +Texture3D uSampler3D : register(t3); +SamplerState _uSampler3D_sampler : register(s3); +TextureCube uSamplerCube : register(t4); +SamplerState _uSamplerCube_sampler : register(s4); +TextureCubeArray uSamplerCubeArray : register(t5); +SamplerState _uSamplerCubeArray_sampler : register(s5); 
+Buffer uSamplerBuffer : register(t6); +Texture2DMS uSamplerMS : register(t7); +SamplerState _uSamplerMS_sampler : register(s7); +Texture2DMSArray uSamplerMSArray : register(t8); +SamplerState _uSamplerMSArray_sampler : register(s8); +Texture2D uSampler2D : register(t1); +SamplerState _uSampler2D_sampler : register(s1); + +uint SPIRV_Cross_textureSize(Texture1D Tex, uint Level, out uint Param) +{ + uint ret; + Tex.GetDimensions(Level, ret.x, Param); + return ret; +} + +uint SPIRV_Cross_textureSize(Texture1D Tex, uint Level, out uint Param) +{ + uint ret; + Tex.GetDimensions(Level, ret.x, Param); + return ret; +} + +uint SPIRV_Cross_textureSize(Texture1D Tex, uint Level, out uint Param) +{ + uint ret; + Tex.GetDimensions(Level, ret.x, Param); + return ret; +} + +uint2 SPIRV_Cross_textureSize(Texture2D Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(Texture2DArray Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(Texture3D Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param); + return ret; +} + +uint SPIRV_Cross_textureSize(Buffer Tex, uint Level, out uint Param) +{ + uint ret; + Tex.GetDimensions(ret.x); + Param = 0u; + return ret; +} + +uint2 SPIRV_Cross_textureSize(TextureCube Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(TextureCubeArray Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param); + return ret; +} + +uint2 SPIRV_Cross_textureSize(Texture2DMS Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(ret.x, ret.y, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(Texture2DMSArray Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(ret.x, ret.y, ret.z, Param); + return ret; +} + +void frag_main() +{ + uint _17_dummy_parameter; + int a = int(SPIRV_Cross_textureSize(uSampler1DUint, uint(0), _17_dummy_parameter)); + uint _24_dummy_parameter; + a = int(SPIRV_Cross_textureSize(uSampler1DInt, uint(0), _24_dummy_parameter)); + uint _32_dummy_parameter; + a = int(SPIRV_Cross_textureSize(uSampler1DFloat, uint(0), _32_dummy_parameter)); + uint _42_dummy_parameter; + int3 c = int3(SPIRV_Cross_textureSize(uSampler2DArray, uint(0), _42_dummy_parameter)); + uint _50_dummy_parameter; + int3 d = int3(SPIRV_Cross_textureSize(uSampler3D, uint(0), _50_dummy_parameter)); + uint _60_dummy_parameter; + int2 e = int2(SPIRV_Cross_textureSize(uSamplerCube, uint(0), _60_dummy_parameter)); + uint _68_dummy_parameter; + int3 f = int3(SPIRV_Cross_textureSize(uSamplerCubeArray, uint(0), _68_dummy_parameter)); + uint _76_dummy_parameter; + int g = int(SPIRV_Cross_textureSize(uSamplerBuffer, 0u, _76_dummy_parameter)); + uint _84_dummy_parameter; + int2 h = int2(SPIRV_Cross_textureSize(uSamplerMS, 0u, _84_dummy_parameter)); + uint _92_dummy_parameter; + int3 i = int3(SPIRV_Cross_textureSize(uSamplerMSArray, 0u, _92_dummy_parameter)); + int _100; + SPIRV_Cross_textureSize(uSampler2D, 0u, _100); + int l1 = int(_100); + int _104; + SPIRV_Cross_textureSize(uSampler2DArray, 0u, _104); + int l2 = int(_104); + int _108; + SPIRV_Cross_textureSize(uSampler3D, 0u, _108); + int l3 = int(_108); + int _112; + SPIRV_Cross_textureSize(uSamplerCube, 0u, _112); + int l4 = int(_112); + int 
_116; + SPIRV_Cross_textureSize(uSamplerMS, 0u, _116); + int s0 = int(_116); + int _120; + SPIRV_Cross_textureSize(uSamplerMSArray, 0u, _120); + int s1 = int(_120); +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/image-query.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/image-query.frag new file mode 100644 index 000000000..71cefc103 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/image-query.frag @@ -0,0 +1,132 @@ +Texture1D uSampler1D : register(t0); +SamplerState _uSampler1D_sampler : register(s0); +Texture2D uSampler2D : register(t1); +SamplerState _uSampler2D_sampler : register(s1); +Texture2DArray uSampler2DArray : register(t2); +SamplerState _uSampler2DArray_sampler : register(s2); +Texture3D uSampler3D : register(t3); +SamplerState _uSampler3D_sampler : register(s3); +TextureCube uSamplerCube : register(t4); +SamplerState _uSamplerCube_sampler : register(s4); +TextureCubeArray uSamplerCubeArray : register(t5); +SamplerState _uSamplerCubeArray_sampler : register(s5); +Buffer uSamplerBuffer : register(t6); +Texture2DMS uSamplerMS : register(t7); +SamplerState _uSamplerMS_sampler : register(s7); +Texture2DMSArray uSamplerMSArray : register(t8); +SamplerState _uSamplerMSArray_sampler : register(s8); + +uint SPIRV_Cross_textureSize(Texture1D Tex, uint Level, out uint Param) +{ + uint ret; + Tex.GetDimensions(Level, ret.x, Param); + return ret; +} + +uint2 SPIRV_Cross_textureSize(Texture2D Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(Texture2DArray Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(Texture3D Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param); + return ret; +} + +uint SPIRV_Cross_textureSize(Buffer Tex, uint Level, out uint Param) +{ + uint ret; + Tex.GetDimensions(ret.x); + Param = 0u; + return ret; +} + +uint2 SPIRV_Cross_textureSize(TextureCube Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(TextureCubeArray Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param); + return ret; +} + +uint2 SPIRV_Cross_textureSize(Texture2DMS Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(ret.x, ret.y, Param); + return ret; +} + +uint3 SPIRV_Cross_textureSize(Texture2DMSArray Tex, uint Level, out uint Param) +{ + uint3 ret; + Tex.GetDimensions(ret.x, ret.y, ret.z, Param); + return ret; +} + +void frag_main() +{ + uint _17_dummy_parameter; + int a = int(SPIRV_Cross_textureSize(uSampler1D, uint(0), _17_dummy_parameter)); + uint _27_dummy_parameter; + int2 b = int2(SPIRV_Cross_textureSize(uSampler2D, uint(0), _27_dummy_parameter)); + uint _37_dummy_parameter; + int3 c = int3(SPIRV_Cross_textureSize(uSampler2DArray, uint(0), _37_dummy_parameter)); + uint _45_dummy_parameter; + int3 d = int3(SPIRV_Cross_textureSize(uSampler3D, uint(0), _45_dummy_parameter)); + uint _53_dummy_parameter; + int2 e = int2(SPIRV_Cross_textureSize(uSamplerCube, uint(0), _53_dummy_parameter)); + uint _61_dummy_parameter; + int3 f = int3(SPIRV_Cross_textureSize(uSamplerCubeArray, uint(0), _61_dummy_parameter)); + uint _69_dummy_parameter; + int g = int(SPIRV_Cross_textureSize(uSamplerBuffer, 0u, 
_69_dummy_parameter)); + uint _77_dummy_parameter; + int2 h = int2(SPIRV_Cross_textureSize(uSamplerMS, 0u, _77_dummy_parameter)); + uint _85_dummy_parameter; + int3 i = int3(SPIRV_Cross_textureSize(uSamplerMSArray, 0u, _85_dummy_parameter)); + int _89; + SPIRV_Cross_textureSize(uSampler1D, 0u, _89); + int l0 = int(_89); + int _93; + SPIRV_Cross_textureSize(uSampler2D, 0u, _93); + int l1 = int(_93); + int _97; + SPIRV_Cross_textureSize(uSampler2DArray, 0u, _97); + int l2 = int(_97); + int _101; + SPIRV_Cross_textureSize(uSampler3D, 0u, _101); + int l3 = int(_101); + int _105; + SPIRV_Cross_textureSize(uSamplerCube, 0u, _105); + int l4 = int(_105); + int _109; + SPIRV_Cross_textureSize(uSamplerCubeArray, 0u, _109); + int l5 = int(_109); + int _113; + SPIRV_Cross_textureSize(uSamplerMS, 0u, _113); + int s0 = int(_113); + int _117; + SPIRV_Cross_textureSize(uSamplerMSArray, 0u, _117); + int s1 = int(_117); +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/input-attachment-ms.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/input-attachment-ms.frag new file mode 100644 index 000000000..130b79965 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/input-attachment-ms.frag @@ -0,0 +1,37 @@ +Texture2DMS uSubpass0 : register(t0); +Texture2DMS uSubpass1 : register(t1); + +static float4 gl_FragCoord; +static int gl_SampleID; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; + uint gl_SampleID : SV_SampleIndex; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float4 load_subpasses(Texture2DMS uInput) +{ + return uInput.Load(int2(gl_FragCoord.xy), gl_SampleID); +} + +void frag_main() +{ + FragColor = (uSubpass0.Load(int2(gl_FragCoord.xy), 1) + uSubpass1.Load(int2(gl_FragCoord.xy), 2)) + load_subpasses(uSubpass0); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + gl_SampleID = stage_input.gl_SampleID; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/input-attachment.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/input-attachment.frag new file mode 100644 index 000000000..0b815ae08 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/input-attachment.frag @@ -0,0 +1,34 @@ +Texture2D uSubpass0 : register(t0); +Texture2D uSubpass1 : register(t1); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float4 load_subpasses(Texture2D uInput) +{ + return uInput.Load(int3(int2(gl_FragCoord.xy), 0)); +} + +void frag_main() +{ + FragColor = uSubpass0.Load(int3(int2(gl_FragCoord.xy), 0)) + load_subpasses(uSubpass1); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/io-block.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/io-block.frag new file mode 100644 index 000000000..52c1f518b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/io-block.frag @@ -0,0 +1,28 @@ +static float4 FragColor; + +struct VertexOut +{ + float4 a : TEXCOORD1; + float4 b : TEXCOORD2; +}; + 
+static VertexOut _12; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = _12.a + _12.b; +} + +SPIRV_Cross_Output main(in VertexOut stage_input_12) +{ + _12 = stage_input_12; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/lut-promotion.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/lut-promotion.frag new file mode 100644 index 000000000..d148bc12c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/lut-promotion.frag @@ -0,0 +1,55 @@ +static const float _16[16] = { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }; +static const float4 _60[4] = { 0.0f.xxxx, 1.0f.xxxx, 8.0f.xxxx, 5.0f.xxxx }; +static const float4 _104[4] = { 20.0f.xxxx, 30.0f.xxxx, 50.0f.xxxx, 60.0f.xxxx }; + +static float FragColor; +static int index; + +struct SPIRV_Cross_Input +{ + nointerpolation int index : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = _16[index]; + if (index < 10) + { + FragColor += _16[index ^ 1]; + } + else + { + FragColor += _16[index & 1]; + } + if (index > 30) + { + FragColor += _60[index & 3].y; + } + else + { + FragColor += _60[index & 1].x; + } + float4 foobar[4] = _60; + if (index > 30) + { + foobar[1].z = 20.0f; + } + FragColor += foobar[index & 3].z; + float4 baz[4] = _60; + baz = _104; + FragColor += baz[index & 3].z; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + index = stage_input.index; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/matrix-input.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/matrix-input.frag new file mode 100644 index 000000000..92d87d396 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/matrix-input.frag @@ -0,0 +1,26 @@ +static float4 FragColor; +static float4x4 m; + +struct SPIRV_Cross_Input +{ + float4x4 m : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = ((m[0] + m[1]) + m[2]) + m[3]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + m = stage_input.m; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/mod.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/mod.frag new file mode 100644 index 000000000..1da8f21e4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/mod.frag @@ -0,0 +1,71 @@ +static float4 a4; +static float4 b4; +static float3 a3; +static float3 b3; +static float2 a2; +static float2 b2; +static float a1; +static float b1; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 a4 : TEXCOORD0; + float3 a3 : TEXCOORD1; + float2 a2 : TEXCOORD2; + float a1 : TEXCOORD3; + float4 b4 : TEXCOORD4; + float3 b3 : TEXCOORD5; + float2 b2 : TEXCOORD6; + float b1 : TEXCOORD7; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float mod(float x, float y) +{ + return x - y * floor(x / y); +} + +float2 mod(float2 x, float2 y) +{ + return x - y * floor(x / y); +} + +float3 mod(float3 x, float3 y) +{ + return x - y * floor(x / y); +} + +float4 mod(float4 x, float4 y) +{ + return x - y * floor(x / y); +} + 
+void frag_main() +{ + float4 m0 = mod(a4, b4); + float3 m1 = mod(a3, b3); + float2 m2 = mod(a2, b2); + float m3 = mod(a1, b1); + FragColor = ((m0 + m1.xyzx) + m2.xyxy) + m3.xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a4 = stage_input.a4; + b4 = stage_input.b4; + a3 = stage_input.a3; + b3 = stage_input.b3; + a2 = stage_input.a2; + b2 = stage_input.b2; + a1 = stage_input.a1; + b1 = stage_input.b1; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/mrt.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/mrt.frag new file mode 100644 index 000000000..e69e91196 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/mrt.frag @@ -0,0 +1,31 @@ +static float4 RT0; +static float4 RT1; +static float4 RT2; +static float4 RT3; + +struct SPIRV_Cross_Output +{ + float4 RT0 : SV_Target0; + float4 RT1 : SV_Target1; + float4 RT2 : SV_Target2; + float4 RT3 : SV_Target3; +}; + +void frag_main() +{ + RT0 = 1.0f.xxxx; + RT1 = 2.0f.xxxx; + RT2 = 3.0f.xxxx; + RT3 = 4.0f.xxxx; +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.RT0 = RT0; + stage_output.RT1 = RT1; + stage_output.RT2 = RT2; + stage_output.RT3 = RT3; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/no-return.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/no-return.frag new file mode 100644 index 000000000..3b50282fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/no-return.frag @@ -0,0 +1,8 @@ +void frag_main() +{ +} + +void main() +{ + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/no-return2.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/no-return2.frag new file mode 100644 index 000000000..a22ffa772 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/no-return2.frag @@ -0,0 +1,17 @@ +static float4 vColor; + +struct SPIRV_Cross_Input +{ + float4 vColor : TEXCOORD0; +}; + +void frag_main() +{ + float4 v = vColor; +} + +void main(SPIRV_Cross_Input stage_input) +{ + vColor = stage_input.vColor; + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/partial-write-preserve.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/partial-write-preserve.frag new file mode 100644 index 000000000..bb5b9056e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/partial-write-preserve.frag @@ -0,0 +1,76 @@ +struct B +{ + float a; + float b; +}; + +static const B _80 = { 10.0f, 20.0f }; + +cbuffer UBO : register(b0) +{ + int _42_some_value : packoffset(c0); +}; + + +void partial_inout(inout float4 x) +{ + x.x = 10.0f; +} + +void complete_inout(out float4 x) +{ + x = 50.0f.xxxx; +} + +void branchy_inout(inout float4 v) +{ + v.y = 20.0f; + if (_42_some_value == 20) + { + v = 50.0f.xxxx; + } +} + +void branchy_inout_2(out float4 v) +{ + if (_42_some_value == 20) + { + v = 50.0f.xxxx; + } + else + { + v = 70.0f.xxxx; + } + v.y = 20.0f; +} + +void partial_inout(inout B b) +{ + b.b = 40.0f; +} + +void frag_main() +{ + float4 a = 10.0f.xxxx; + float4 param = a; + partial_inout(param); + a = param; + float4 param_1; + complete_inout(param_1); + a = param_1; + float4 param_2 = a; + branchy_inout(param_2); + a = param_2; + float4 param_3; + branchy_inout_2(param_3); + a = param_3; + B b = _80; + B param_4 = b; + partial_inout(param_4); + b = param_4; +} + +void main() +{ + frag_main(); +} 
diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/point-coord-compat.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/point-coord-compat.frag new file mode 100644 index 000000000..629153982 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/point-coord-compat.frag @@ -0,0 +1,19 @@ +static float2 FragColor; + +struct SPIRV_Cross_Output +{ + float2 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float2(0.5f, 0.5f); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/query-lod.desktop.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/query-lod.desktop.frag new file mode 100644 index 000000000..fd95798bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/query-lod.desktop.frag @@ -0,0 +1,30 @@ +Texture2D uSampler : register(t0); +SamplerState _uSampler_sampler : register(s0); + +static float4 FragColor; +static float2 vTexCoord; + +struct SPIRV_Cross_Input +{ + float2 vTexCoord : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + float _19_tmp = uSampler.CalculateLevelOfDetail(_uSampler_sampler, vTexCoord); + FragColor = float2(_19_tmp, _19_tmp).xyxy; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vTexCoord = stage_input.vTexCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/resources.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/resources.frag new file mode 100644 index 000000000..2a3b2be51 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/resources.frag @@ -0,0 +1,44 @@ +cbuffer CBuffer : register(b3) +{ + float4 cbuf_a : packoffset(c0); +}; + +cbuffer PushMe +{ + float4 registers_d : packoffset(c0); +}; + +Texture2D uSampledImage : register(t4); +SamplerState _uSampledImage_sampler : register(s4); +Texture2D uTexture : register(t5); +SamplerState uSampler : register(s6); + +static float2 vTex; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 vTex : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + float4 c0 = uSampledImage.Sample(_uSampledImage_sampler, vTex); + float4 c1 = uTexture.Sample(uSampler, vTex); + float4 c2 = cbuf_a + registers_d; + FragColor = (c0 + c1) + c2; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vTex = stage_input.vTex; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/row-major-layout-in-struct.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/row-major-layout-in-struct.frag new file mode 100644 index 000000000..5fc45b2b3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/row-major-layout-in-struct.frag @@ -0,0 +1,47 @@ +struct NonFoo +{ + float4x4 v; + float4x4 w; +}; + +struct Foo +{ + row_major float4x4 v; + row_major float4x4 w; +}; + +cbuffer UBO : register(b0) +{ + Foo _17_foo : packoffset(c0); +}; + + +static float4 FragColor; +static float4 vUV; + +struct SPIRV_Cross_Input +{ + float4 vUV : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + NonFoo f; + f.v = _17_foo.v; + f.w = _17_foo.w; + 
FragColor = mul(mul(vUV, f.w), f.v); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUV = stage_input.vUV; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sample-cmp-level-zero.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sample-cmp-level-zero.frag new file mode 100644 index 000000000..b52b1df55 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sample-cmp-level-zero.frag @@ -0,0 +1,66 @@ +Texture2D uSampler2D : register(t0); +SamplerComparisonState _uSampler2D_sampler : register(s0); +Texture2DArray uSampler2DArray : register(t1); +SamplerComparisonState _uSampler2DArray_sampler : register(s1); +TextureCube uSamplerCube : register(t2); +SamplerComparisonState _uSamplerCube_sampler : register(s2); +TextureCubeArray uSamplerCubeArray : register(t3); +SamplerComparisonState _uSamplerCubeArray_sampler : register(s3); + +static float3 vUVRef; +static float4 vDirRef; +static float FragColor; + +struct SPIRV_Cross_Input +{ + float3 vUVRef : TEXCOORD0; + float4 vDirRef : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float SPIRV_Cross_projectTextureCoordinate(float2 coord) +{ + return coord.x / coord.y; +} + +float2 SPIRV_Cross_projectTextureCoordinate(float3 coord) +{ + return float2(coord.x, coord.y) / coord.z; +} + +float3 SPIRV_Cross_projectTextureCoordinate(float4 coord) +{ + return float3(coord.x, coord.y, coord.z) / coord.w; +} + +void frag_main() +{ + float s0 = uSampler2D.SampleCmp(_uSampler2D_sampler, vUVRef.xy, vUVRef.z, int2(-1, -1)); + float s1 = uSampler2DArray.SampleCmp(_uSampler2DArray_sampler, vDirRef.xyz, vDirRef.w, int2(-1, -1)); + float s2 = uSamplerCube.SampleCmp(_uSamplerCube_sampler, vDirRef.xyz, vDirRef.w); + float s3 = uSamplerCubeArray.SampleCmp(_uSamplerCubeArray_sampler, vDirRef, 0.5f); + float l0 = uSampler2D.SampleCmpLevelZero(_uSampler2D_sampler, vUVRef.xy, vUVRef.z, int2(-1, -1)); + float l1 = uSampler2DArray.SampleCmpLevelZero(_uSampler2DArray_sampler, vDirRef.xyz, vDirRef.w, int2(-1, -1)); + float l2 = uSamplerCube.SampleCmpLevelZero(_uSamplerCube_sampler, vDirRef.xyz, vDirRef.w); + float4 _80 = vDirRef; + _80.z = vDirRef.w; + float p0 = uSampler2D.SampleCmp(_uSampler2D_sampler, SPIRV_Cross_projectTextureCoordinate(_80.xyz), vDirRef.z, int2(1, 1)); + float4 _87 = vDirRef; + _87.z = vDirRef.w; + float p1 = uSampler2D.SampleCmpLevelZero(_uSampler2D_sampler, SPIRV_Cross_projectTextureCoordinate(_87.xyz), vDirRef.z, int2(1, 1)); + FragColor = (((((((s0 + s1) + s2) + s3) + l0) + l1) + l2) + p0) + p1; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vUVRef = stage_input.vUVRef; + vDirRef = stage_input.vDirRef; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sampler-array.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sampler-array.frag new file mode 100644 index 000000000..e941357d2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sampler-array.frag @@ -0,0 +1,44 @@ +Texture2D uCombined[4] : register(t0); +SamplerState _uCombined_sampler[4] : register(s0); +Texture2D uTex[4] : register(t4); +SamplerState uSampler[4] : register(s8); +RWTexture2D uImage[8] : register(u12); + +static float4 gl_FragCoord; +static float2 vTex; +static int vIndex; + +struct SPIRV_Cross_Input +{ + float2 vTex : 
TEXCOORD0; + nointerpolation int vIndex : TEXCOORD1; + float4 gl_FragCoord : SV_Position; +}; + +float4 sample_in_function(Texture2D samp, SamplerState _samp_sampler) +{ + return samp.Sample(_samp_sampler, vTex); +} + +float4 sample_in_function2(Texture2D tex, SamplerState samp) +{ + return tex.Sample(samp, vTex); +} + +void frag_main() +{ + float4 color = uCombined[vIndex].Sample(_uCombined_sampler[vIndex], vTex); + color += uTex[vIndex].Sample(uSampler[vIndex], vTex); + int _72 = vIndex + 1; + color += sample_in_function(uCombined[_72], _uCombined_sampler[_72]); + color += sample_in_function2(uTex[vIndex + 1], uSampler[vIndex + 1]); + uImage[vIndex][int2(gl_FragCoord.xy)] = color; +} + +void main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + vTex = stage_input.vTex; + vIndex = stage_input.vIndex; + frag_main(); +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sampler-image-arrays.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sampler-image-arrays.frag new file mode 100644 index 000000000..856f04cf4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/sampler-image-arrays.frag @@ -0,0 +1,54 @@ +Texture2D uSampler[4] : register(t0); +SamplerState _uSampler_sampler[4] : register(s0); +Texture2D uTextures[4] : register(t8); +SamplerState uSamplers[4] : register(s4); + +static int vIndex; +static float2 vTex; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + nointerpolation float2 vTex : TEXCOORD0; + nointerpolation int vIndex : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float4 sample_from_global() +{ + return uSampler[vIndex].Sample(_uSampler_sampler[vIndex], vTex + 0.100000001490116119384765625f.xx); +} + +float4 sample_from_argument(Texture2D samplers[4], SamplerState _samplers_sampler[4]) +{ + return samplers[vIndex].Sample(_samplers_sampler[vIndex], vTex + 0.20000000298023223876953125f.xx); +} + +float4 sample_single_from_argument(Texture2D samp, SamplerState _samp_sampler) +{ + return samp.Sample(_samp_sampler, vTex + 0.300000011920928955078125f.xx); +} + +void frag_main() +{ + FragColor = 0.0f.xxxx; + FragColor += uTextures[2].Sample(uSamplers[1], vTex); + FragColor += uSampler[vIndex].Sample(_uSampler_sampler[vIndex], vTex); + FragColor += sample_from_global(); + FragColor += sample_from_argument(uSampler, _uSampler_sampler); + FragColor += sample_single_from_argument(uSampler[3], _uSampler_sampler[3]); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vIndex = stage_input.vIndex; + vTex = stage_input.vTex; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag new file mode 100644 index 000000000..08d8b0e18 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag @@ -0,0 +1,32 @@ +uniform sampler2D uSamp; +uniform sampler2D SPIRV_Cross_CombineduTuS; + +static float4 FragColor; + +struct SPIRV_Cross_Output +{ + float4 FragColor : COLOR0; +}; + +float4 samp(sampler2D uSamp_1) +{ + return tex2D(uSamp_1, 0.5f.xx); +} + +float4 samp_1(sampler2D SPIRV_Cross_CombinedTS) +{ + return tex2D(SPIRV_Cross_CombinedTS, 0.5f.xx); +} + +void frag_main() +{ + FragColor = samp(uSamp) + samp_1(SPIRV_Cross_CombineduTuS); +} + +SPIRV_Cross_Output main() +{ + 
frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = float4(FragColor); + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/spec-constant-block-size.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/spec-constant-block-size.frag new file mode 100644 index 000000000..415886dd3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/spec-constant-block-size.frag @@ -0,0 +1,37 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 2 +#endif +static const int Value = SPIRV_CROSS_CONSTANT_ID_10; + +cbuffer SpecConstArray : register(b0) +{ + float4 _15_samples[Value] : packoffset(c0); +}; + + +static float4 FragColor; +static int Index; + +struct SPIRV_Cross_Input +{ + nointerpolation int Index : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = _15_samples[Index]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + Index = stage_input.Index; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/spec-constant-ternary.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/spec-constant-ternary.frag new file mode 100644 index 000000000..58735aad0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/spec-constant-ternary.frag @@ -0,0 +1,26 @@ +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 10u +#endif +static const uint s = SPIRV_CROSS_CONSTANT_ID_0; +static const bool _13 = (s > 20u); +static const uint _16 = _13 ? 30u : 50u; + +static float FragColor; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = float(_16); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..d7ec92f0a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/switch-unsigned-case.frag @@ -0,0 +1,38 @@ +cbuffer Buff : register(b0) +{ + uint _15_TestVal : packoffset(c0); +}; + + +static float4 fsout_Color; + +struct SPIRV_Cross_Output +{ + float4 fsout_Color : SV_Target0; +}; + +void frag_main() +{ + fsout_Color = 1.0f.xxxx; + switch (_15_TestVal) + { + case 0u: + { + fsout_Color = 0.100000001490116119384765625f.xxxx; + break; + } + case 1u: + { + fsout_Color = 0.20000000298023223876953125f.xxxx; + break; + } + } +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.fsout_Color = fsout_Color; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/swizzle-scalar.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/swizzle-scalar.frag new file mode 100644 index 000000000..ab310b82f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/swizzle-scalar.frag @@ -0,0 +1,41 @@ +static float4 Float; +static float vFloat; +static int4 Int; +static int vInt; +static float4 Float2; +static int4 Int2; + +struct SPIRV_Cross_Input +{ + nointerpolation float vFloat : TEXCOORD0; + nointerpolation int vInt : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 Float : SV_Target0; + int4 Int : SV_Target1; + float4 Float2 : SV_Target2; + int4 Int2 : 
SV_Target3; +}; + +void frag_main() +{ + Float = vFloat.xxxx * 2.0f; + Int = vInt.xxxx * int4(2, 2, 2, 2); + Float2 = 10.0f.xxxx; + Int2 = int4(10, 10, 10, 10); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vFloat = stage_input.vFloat; + vInt = stage_input.vInt; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.Float = Float; + stage_output.Int = Int; + stage_output.Float2 = Float2; + stage_output.Int2 = Int2; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/tex-sampling-ms.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/tex-sampling-ms.frag new file mode 100644 index 000000000..143531538 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/tex-sampling-ms.frag @@ -0,0 +1,32 @@ +Texture2DMS uTex : register(t0); +SamplerState _uTex_sampler : register(s0); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uTex.Load(int2(gl_FragCoord.xy), 0); + FragColor += uTex.Load(int2(gl_FragCoord.xy), 1); + FragColor += uTex.Load(int2(gl_FragCoord.xy), 2); + FragColor += uTex.Load(int2(gl_FragCoord.xy), 3); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/tex-sampling.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/tex-sampling.frag new file mode 100644 index 000000000..7fa60957d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/tex-sampling.frag @@ -0,0 +1,118 @@ +Texture1D tex1d : register(t0); +SamplerState _tex1d_sampler : register(s0); +Texture2D tex2d : register(t1); +SamplerState _tex2d_sampler : register(s1); +Texture3D tex3d : register(t2); +SamplerState _tex3d_sampler : register(s2); +TextureCube texCube : register(t3); +SamplerState _texCube_sampler : register(s3); +Texture1D tex1dShadow : register(t4); +SamplerComparisonState _tex1dShadow_sampler : register(s4); +Texture2D tex2dShadow : register(t5); +SamplerComparisonState _tex2dShadow_sampler : register(s5); +TextureCube texCubeShadow : register(t6); +SamplerComparisonState _texCubeShadow_sampler : register(s6); +Texture1DArray tex1dArray : register(t7); +SamplerState _tex1dArray_sampler : register(s7); +Texture2DArray tex2dArray : register(t8); +SamplerState _tex2dArray_sampler : register(s8); +TextureCubeArray texCubeArray : register(t9); +SamplerState _texCubeArray_sampler : register(s9); +Texture2D separateTex2d : register(t12); +SamplerState samplerNonDepth : register(s11); +Texture2D separateTex2dDepth : register(t13); +SamplerComparisonState samplerDepth : register(s10); + +static float texCoord1d; +static float2 texCoord2d; +static float3 texCoord3d; +static float4 texCoord4d; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float texCoord1d : TEXCOORD0; + float2 texCoord2d : TEXCOORD1; + float3 texCoord3d : TEXCOORD2; + float4 texCoord4d : TEXCOORD3; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +float SPIRV_Cross_projectTextureCoordinate(float2 coord) +{ + return coord.x / coord.y; +} + +float2 SPIRV_Cross_projectTextureCoordinate(float3 coord) +{ + return float2(coord.x, coord.y) / coord.z; +} + +float3 SPIRV_Cross_projectTextureCoordinate(float4 coord) +{ + 
return float3(coord.x, coord.y, coord.z) / coord.w; +} + +void frag_main() +{ + float4 texcolor = tex1d.Sample(_tex1d_sampler, texCoord1d); + texcolor += tex1d.Sample(_tex1d_sampler, texCoord1d, 1); + texcolor += tex1d.SampleLevel(_tex1d_sampler, texCoord1d, 2.0f); + texcolor += tex1d.SampleGrad(_tex1d_sampler, texCoord1d, 1.0f, 2.0f); + texcolor += tex1d.Sample(_tex1d_sampler, SPIRV_Cross_projectTextureCoordinate(float2(texCoord1d, 2.0f))); + texcolor += tex1d.SampleBias(_tex1d_sampler, texCoord1d, 1.0f); + texcolor += tex2d.Sample(_tex2d_sampler, texCoord2d); + texcolor += tex2d.Sample(_tex2d_sampler, texCoord2d, int2(1, 2)); + texcolor += tex2d.SampleLevel(_tex2d_sampler, texCoord2d, 2.0f); + texcolor += tex2d.SampleGrad(_tex2d_sampler, texCoord2d, float2(1.0f, 2.0f), float2(3.0f, 4.0f)); + texcolor += tex2d.Sample(_tex2d_sampler, SPIRV_Cross_projectTextureCoordinate(float3(texCoord2d, 2.0f))); + texcolor += tex2d.SampleBias(_tex2d_sampler, texCoord2d, 1.0f); + texcolor += tex3d.Sample(_tex3d_sampler, texCoord3d); + texcolor += tex3d.Sample(_tex3d_sampler, texCoord3d, int3(1, 2, 3)); + texcolor += tex3d.SampleLevel(_tex3d_sampler, texCoord3d, 2.0f); + texcolor += tex3d.SampleGrad(_tex3d_sampler, texCoord3d, float3(1.0f, 2.0f, 3.0f), float3(4.0f, 5.0f, 6.0f)); + texcolor += tex3d.Sample(_tex3d_sampler, SPIRV_Cross_projectTextureCoordinate(float4(texCoord3d, 2.0f))); + texcolor += tex3d.SampleBias(_tex3d_sampler, texCoord3d, 1.0f); + texcolor += texCube.Sample(_texCube_sampler, texCoord3d); + texcolor += texCube.SampleLevel(_texCube_sampler, texCoord3d, 2.0f); + texcolor += texCube.SampleBias(_texCube_sampler, texCoord3d, 1.0f); + float3 _170 = float3(texCoord1d, 0.0f, 0.0f); + texcolor.w += tex1dShadow.SampleCmp(_tex1dShadow_sampler, _170.x, _170.z); + float3 _188 = float3(texCoord2d, 0.0f); + texcolor.w += tex2dShadow.SampleCmp(_tex2dShadow_sampler, _188.xy, _188.z); + float4 _204 = float4(texCoord3d, 0.0f); + texcolor.w += texCubeShadow.SampleCmp(_texCubeShadow_sampler, _204.xyz, _204.w); + texcolor += tex1dArray.Sample(_tex1dArray_sampler, texCoord2d); + texcolor += tex2dArray.Sample(_tex2dArray_sampler, texCoord3d); + texcolor += texCubeArray.Sample(_texCubeArray_sampler, texCoord4d); + texcolor += tex2d.GatherRed(_tex2d_sampler, texCoord2d); + texcolor += tex2d.GatherRed(_tex2d_sampler, texCoord2d); + texcolor += tex2d.GatherGreen(_tex2d_sampler, texCoord2d); + texcolor += tex2d.GatherBlue(_tex2d_sampler, texCoord2d); + texcolor += tex2d.GatherAlpha(_tex2d_sampler, texCoord2d); + texcolor += tex2d.GatherRed(_tex2d_sampler, texCoord2d, int2(1, 1)); + texcolor += tex2d.GatherRed(_tex2d_sampler, texCoord2d, int2(1, 1)); + texcolor += tex2d.GatherGreen(_tex2d_sampler, texCoord2d, int2(1, 1)); + texcolor += tex2d.GatherBlue(_tex2d_sampler, texCoord2d, int2(1, 1)); + texcolor += tex2d.GatherAlpha(_tex2d_sampler, texCoord2d, int2(1, 1)); + texcolor += tex2d.Load(int3(int2(1, 2), 0)); + texcolor += separateTex2d.Sample(samplerNonDepth, texCoord2d); + texcolor.w += separateTex2dDepth.SampleCmp(samplerDepth, texCoord3d.xy, texCoord3d.z); + FragColor = texcolor; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + texCoord1d = stage_input.texCoord1d; + texCoord2d = stage_input.texCoord2d; + texCoord3d = stage_input.texCoord3d; + texCoord4d = stage_input.texCoord4d; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texel-fetch-offset.frag 
b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..f2a02e162 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texel-fetch-offset.frag @@ -0,0 +1,30 @@ +Texture2D uTexture : register(t0); +SamplerState _uTexture_sampler : register(s0); + +static float4 gl_FragCoord; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float4 gl_FragCoord : SV_Position; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = uTexture.Load(int3(int2(gl_FragCoord.xy), 0), int2(1, 1)); + FragColor += uTexture.Load(int3(int2(gl_FragCoord.xy), 0), int2(-1, 1)); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_FragCoord = stage_input.gl_FragCoord; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texture-proj-shadow.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texture-proj-shadow.frag new file mode 100644 index 000000000..4beaa1176 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texture-proj-shadow.frag @@ -0,0 +1,66 @@ +Texture1D uShadow1D : register(t0); +SamplerComparisonState _uShadow1D_sampler : register(s0); +Texture2D uShadow2D : register(t1); +SamplerComparisonState _uShadow2D_sampler : register(s1); +Texture1D uSampler1D : register(t2); +SamplerState _uSampler1D_sampler : register(s2); +Texture2D uSampler2D : register(t3); +SamplerState _uSampler2D_sampler : register(s3); +Texture3D uSampler3D : register(t4); +SamplerState _uSampler3D_sampler : register(s4); + +static float FragColor; +static float4 vClip4; +static float2 vClip2; +static float3 vClip3; + +struct SPIRV_Cross_Input +{ + float3 vClip3 : TEXCOORD0; + float4 vClip4 : TEXCOORD1; + float2 vClip2 : TEXCOORD2; +}; + +struct SPIRV_Cross_Output +{ + float FragColor : SV_Target0; +}; + +float SPIRV_Cross_projectTextureCoordinate(float2 coord) +{ + return coord.x / coord.y; +} + +float2 SPIRV_Cross_projectTextureCoordinate(float3 coord) +{ + return float2(coord.x, coord.y) / coord.z; +} + +float3 SPIRV_Cross_projectTextureCoordinate(float4 coord) +{ + return float3(coord.x, coord.y, coord.z) / coord.w; +} + +void frag_main() +{ + float4 _20 = vClip4; + _20.y = vClip4.w; + FragColor = uShadow1D.SampleCmp(_uShadow1D_sampler, SPIRV_Cross_projectTextureCoordinate(_20.xy), vClip4.z); + float4 _30 = vClip4; + _30.z = vClip4.w; + FragColor = uShadow2D.SampleCmp(_uShadow2D_sampler, SPIRV_Cross_projectTextureCoordinate(_30.xyz), vClip4.z); + FragColor = uSampler1D.Sample(_uSampler1D_sampler, SPIRV_Cross_projectTextureCoordinate(vClip2)).x; + FragColor = uSampler2D.Sample(_uSampler2D_sampler, SPIRV_Cross_projectTextureCoordinate(vClip3)).x; + FragColor = uSampler3D.Sample(_uSampler3D_sampler, SPIRV_Cross_projectTextureCoordinate(vClip4)).x; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vClip4 = stage_input.vClip4; + vClip2 = stage_input.vClip2; + vClip3 = stage_input.vClip3; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texture-size-combined-image-sampler.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texture-size-combined-image-sampler.frag new file mode 100644 index 000000000..d5c373746 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/texture-size-combined-image-sampler.frag @@ -0,0 +1,30 @@ +Texture2D uTex : register(t0); +SamplerState uSampler : register(s1); + +static int2 FooOut; + +struct SPIRV_Cross_Output +{ + int2 FooOut : SV_Target0; +}; + +uint2 SPIRV_Cross_textureSize(Texture2D Tex, uint Level, out uint Param) +{ + uint2 ret; + Tex.GetDimensions(Level, ret.x, ret.y, Param); + return ret; +} + +void frag_main() +{ + uint _23_dummy_parameter; + FooOut = int2(SPIRV_Cross_textureSize(uTex, uint(0), _23_dummy_parameter)); +} + +SPIRV_Cross_Output main() +{ + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FooOut = FooOut; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/unary-enclose.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/unary-enclose.frag new file mode 100644 index 000000000..597c57f80 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/unary-enclose.frag @@ -0,0 +1,32 @@ +static float4 FragColor; +static float4 vIn; +static int4 vIn1; + +struct SPIRV_Cross_Input +{ + float4 vIn : TEXCOORD0; + nointerpolation int4 vIn1 : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + FragColor = -(-vIn); + int4 a = ~(~vIn1); + bool b = false; + b = !(!b); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vIn = stage_input.vIn; + vIn1 = stage_input.vIn1; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/unorm-snorm-packing.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/unorm-snorm-packing.frag new file mode 100644 index 000000000..57b595063 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/unorm-snorm-packing.frag @@ -0,0 +1,109 @@ +static float4 FP32Out; +static uint UNORM8; +static uint SNORM8; +static uint UNORM16; +static uint SNORM16; +static uint UNORM8Out; +static float4 FP32; +static uint SNORM8Out; +static uint UNORM16Out; +static uint SNORM16Out; + +struct SPIRV_Cross_Input +{ + nointerpolation uint SNORM8 : TEXCOORD0; + nointerpolation uint UNORM8 : TEXCOORD1; + nointerpolation uint SNORM16 : TEXCOORD2; + nointerpolation uint UNORM16 : TEXCOORD3; + nointerpolation float4 FP32 : TEXCOORD4; +}; + +struct SPIRV_Cross_Output +{ + float4 FP32Out : SV_Target0; + uint UNORM8Out : SV_Target1; + uint SNORM8Out : SV_Target2; + uint UNORM16Out : SV_Target3; + uint SNORM16Out : SV_Target4; +}; + +uint SPIRV_Cross_packUnorm4x8(float4 value) +{ + uint4 Packed = uint4(round(saturate(value) * 255.0)); + return Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24); +} + +float4 SPIRV_Cross_unpackUnorm4x8(uint value) +{ + uint4 Packed = uint4(value & 0xff, (value >> 8) & 0xff, (value >> 16) & 0xff, value >> 24); + return float4(Packed) / 255.0; +} + +uint SPIRV_Cross_packSnorm4x8(float4 value) +{ + int4 Packed = int4(round(clamp(value, -1.0, 1.0) * 127.0)) & 0xff; + return uint(Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24)); +} + +float4 SPIRV_Cross_unpackSnorm4x8(uint value) +{ + int SignedValue = int(value); + int4 Packed = int4(SignedValue << 24, SignedValue << 16, SignedValue << 8, SignedValue) >> 24; + return clamp(float4(Packed) / 127.0, -1.0, 1.0); +} + +uint SPIRV_Cross_packUnorm2x16(float2 value) +{ + uint2 Packed = uint2(round(saturate(value) * 65535.0)); + return Packed.x | (Packed.y << 16); +} + +float2 
SPIRV_Cross_unpackUnorm2x16(uint value) +{ + uint2 Packed = uint2(value & 0xffff, value >> 16); + return float2(Packed) / 65535.0; +} + +uint SPIRV_Cross_packSnorm2x16(float2 value) +{ + int2 Packed = int2(round(clamp(value, -1.0, 1.0) * 32767.0)) & 0xffff; + return uint(Packed.x | (Packed.y << 16)); +} + +float2 SPIRV_Cross_unpackSnorm2x16(uint value) +{ + int SignedValue = int(value); + int2 Packed = int2(SignedValue << 16, SignedValue) >> 16; + return clamp(float2(Packed) / 32767.0, -1.0, 1.0); +} + +void frag_main() +{ + FP32Out = SPIRV_Cross_unpackUnorm4x8(UNORM8); + FP32Out = SPIRV_Cross_unpackSnorm4x8(SNORM8); + float2 _21 = SPIRV_Cross_unpackUnorm2x16(UNORM16); + FP32Out = float4(_21.x, _21.y, FP32Out.z, FP32Out.w); + float2 _26 = SPIRV_Cross_unpackSnorm2x16(SNORM16); + FP32Out = float4(_26.x, _26.y, FP32Out.z, FP32Out.w); + UNORM8Out = SPIRV_Cross_packUnorm4x8(FP32); + SNORM8Out = SPIRV_Cross_packSnorm4x8(FP32); + UNORM16Out = SPIRV_Cross_packUnorm2x16(FP32.xy); + SNORM16Out = SPIRV_Cross_packSnorm2x16(FP32.zw); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + UNORM8 = stage_input.UNORM8; + SNORM8 = stage_input.SNORM8; + UNORM16 = stage_input.UNORM16; + SNORM16 = stage_input.SNORM16; + FP32 = stage_input.FP32; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FP32Out = FP32Out; + stage_output.UNORM8Out = UNORM8Out; + stage_output.SNORM8Out = SNORM8Out; + stage_output.UNORM16Out = UNORM16Out; + stage_output.SNORM16Out = SNORM16Out; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/frag/various-glsl-ops.frag b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/various-glsl-ops.frag new file mode 100644 index 000000000..f0b345482 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/frag/various-glsl-ops.frag @@ -0,0 +1,28 @@ +static float2 interpolant; +static float4 FragColor; + +struct SPIRV_Cross_Input +{ + float2 interpolant : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float4 FragColor : SV_Target0; +}; + +void frag_main() +{ + float4 color = float4(0.0f, 0.0f, 0.0f, EvaluateAttributeSnapped(interpolant, 0.100000001490116119384765625f.xx).x); + color += float4(0.0f, 0.0f, 0.0f, ddx_coarse(interpolant.x)); + FragColor = color; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + interpolant = stage_input.interpolant; + frag_main(); + SPIRV_Cross_Output stage_output; + stage_output.FragColor = FragColor; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/basic.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/basic.vert new file mode 100644 index 000000000..f357d907c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/basic.vert @@ -0,0 +1,39 @@ +cbuffer UBO +{ + row_major float4x4 _16_uMVP : packoffset(c0); +}; + + +static float4 gl_Position; +static float4 aVertex; +static float3 vNormal; +static float3 aNormal; + +struct SPIRV_Cross_Input +{ + float4 aVertex : TEXCOORD0; + float3 aNormal : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float3 vNormal : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = mul(aVertex, _16_uMVP); + vNormal = aNormal; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + aVertex = stage_input.aVertex; + aNormal = stage_input.aNormal; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.vNormal = vNormal; + return stage_output; +} diff --git 
a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/clip-cull-distance.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/clip-cull-distance.vert new file mode 100644 index 000000000..7e0d104ac --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/clip-cull-distance.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static float gl_ClipDistance[2]; +static float gl_CullDistance[1]; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; + float2 gl_ClipDistance0 : SV_ClipDistance0; + float gl_CullDistance0 : SV_CullDistance0; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + gl_ClipDistance[0] = 0.0f; + gl_ClipDistance[1] = 0.0f; + gl_CullDistance[0] = 4.0f; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.gl_ClipDistance0.x = gl_ClipDistance[0]; + stage_output.gl_ClipDistance0.y = gl_ClipDistance[1]; + stage_output.gl_CullDistance0.x = gl_CullDistance[0]; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/instancing.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/instancing.vert new file mode 100644 index 000000000..48b2df20d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/instancing.vert @@ -0,0 +1,28 @@ +static float4 gl_Position; +static int gl_VertexIndex; +static int gl_InstanceIndex; +struct SPIRV_Cross_Input +{ + uint gl_VertexIndex : SV_VertexID; + uint gl_InstanceIndex : SV_InstanceID; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = float(gl_VertexIndex + gl_InstanceIndex).xxxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + gl_VertexIndex = int(stage_input.gl_VertexIndex); + gl_InstanceIndex = int(stage_input.gl_InstanceIndex); + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/locations.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/locations.vert new file mode 100644 index 000000000..b06b204bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/locations.vert @@ -0,0 +1,75 @@ +struct Foo +{ + float3 a; + float3 b; + float3 c; +}; + +static float4 gl_Position; +static float4 Input2; +static float4 Input4; +static float4 Input0; +static float vLocation0; +static float vLocation1; +static float vLocation2[2]; +static Foo vLocation4; +static float vLocation9; + +struct VertexOut +{ + float3 color : TEXCOORD7; + float3 foo : TEXCOORD8; +}; + +static VertexOut vout; + +struct SPIRV_Cross_Input +{ + float4 Input0 : TEXCOORD0; + float4 Input2 : TEXCOORD2; + float4 Input4 : TEXCOORD4; +}; + +struct SPIRV_Cross_Output +{ + float vLocation0 : TEXCOORD0; + float vLocation1 : TEXCOORD1; + float vLocation2[2] : TEXCOORD2; + Foo vLocation4 : TEXCOORD4; + float vLocation9 : TEXCOORD9; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = ((1.0f.xxxx + Input2) + Input4) + Input0; + vLocation0 = 0.0f; + vLocation1 = 1.0f; + vLocation2[0] = 2.0f; + vLocation2[1] = 2.0f; + Foo foo; + foo.a = 1.0f.xxx; + foo.b = 1.0f.xxx; + foo.c = 1.0f.xxx; + vLocation4 = foo; + vLocation9 = 9.0f; + vout.color = 2.0f.xxx; + vout.foo = 4.0f.xxx; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input, out VertexOut stage_outputvout) +{ + Input2 = stage_input.Input2; + Input4 = stage_input.Input4; + Input0 = stage_input.Input0; + vert_main(); + 
stage_outputvout = vout; + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.vLocation0 = vLocation0; + stage_output.vLocation1 = vLocation1; + stage_output.vLocation2 = vLocation2; + stage_output.vLocation4 = vLocation4; + stage_output.vLocation9 = vLocation9; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/matrix-attribute.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/matrix-attribute.vert new file mode 100644 index 000000000..a3d0eef56 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/matrix-attribute.vert @@ -0,0 +1,35 @@ +static float4 gl_Position; +static float4x4 m; +static float3 pos; + +struct SPIRV_Cross_Input +{ + float3 pos : TEXCOORD0; + float4 m_0 : TEXCOORD1_0; + float4 m_1 : TEXCOORD1_1; + float4 m_2 : TEXCOORD1_2; + float4 m_3 : TEXCOORD1_3; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = mul(float4(pos, 1.0f), m); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + m[0] = stage_input.m_0; + m[1] = stage_input.m_1; + m[2] = stage_input.m_2; + m[3] = stage_input.m_3; + pos = stage_input.pos; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/matrix-output.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/matrix-output.vert new file mode 100644 index 000000000..dc776cb5e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/matrix-output.vert @@ -0,0 +1,23 @@ +static float4 gl_Position; +static float4x4 m; + +struct SPIRV_Cross_Output +{ + float4x4 m : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + m = float4x4(float4(1.0f, 0.0f, 0.0f, 0.0f), float4(0.0f, 1.0f, 0.0f, 0.0f), float4(0.0f, 0.0f, 1.0f, 0.0f), float4(0.0f, 0.0f, 0.0f, 1.0f)); +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.m = m; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/no-input.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/no-input.vert new file mode 100644 index 000000000..c98544dbe --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/no-input.vert @@ -0,0 +1,18 @@ +static float4 gl_Position; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/point-size-compat.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/point-size-compat.vert new file mode 100644 index 000000000..95f45d02f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/point-size-compat.vert @@ -0,0 +1,20 @@ +static float4 gl_Position; +static float gl_PointSize; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + gl_PointSize = 1.0f; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/qualifiers.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/qualifiers.vert 
new file mode 100644 index 000000000..13ee2a8c1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/qualifiers.vert @@ -0,0 +1,50 @@ +static float4 gl_Position; +static float vFlat; +static float vCentroid; +static float vSample; +static float vNoperspective; + +struct Block +{ + nointerpolation float vFlat : TEXCOORD4; + centroid float vCentroid : TEXCOORD5; + sample float vSample : TEXCOORD6; + noperspective float vNoperspective : TEXCOORD7; +}; + +static Block vout; + +struct SPIRV_Cross_Output +{ + nointerpolation float vFlat : TEXCOORD0; + centroid float vCentroid : TEXCOORD1; + sample float vSample : TEXCOORD2; + noperspective float vNoperspective : TEXCOORD3; + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = 1.0f.xxxx; + vFlat = 0.0f; + vCentroid = 1.0f; + vSample = 2.0f; + vNoperspective = 3.0f; + vout.vFlat = 0.0f; + vout.vCentroid = 1.0f; + vout.vSample = 2.0f; + vout.vNoperspective = 3.0f; +} + +SPIRV_Cross_Output main(out Block stage_outputvout) +{ + vert_main(); + stage_outputvout = vout; + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.vFlat = vFlat; + stage_output.vCentroid = vCentroid; + stage_output.vSample = vSample; + stage_output.vNoperspective = vNoperspective; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..f656c0a79 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/read-from-row-major-array.vert @@ -0,0 +1,65 @@ +cbuffer Block : register(b0) +{ + column_major float2x3 _104_var[3][4] : packoffset(c0); +}; + + +static float4 gl_Position; +static float4 a_position; +static float v_vtxResult; + +struct SPIRV_Cross_Input +{ + float4 a_position : TEXCOORD0; +}; + +struct SPIRV_Cross_Output +{ + float v_vtxResult : TEXCOORD0; + float4 gl_Position : SV_Position; +}; + +float compare_float(float a, float b) +{ + return float(abs(a - b) < 0.0500000007450580596923828125f); +} + +float compare_vec3(float3 a, float3 b) +{ + float param = a.x; + float param_1 = b.x; + float param_2 = a.y; + float param_3 = b.y; + float param_4 = a.z; + float param_5 = b.z; + return (compare_float(param, param_1) * compare_float(param_2, param_3)) * compare_float(param_4, param_5); +} + +float compare_mat2x3(float2x3 a, float2x3 b) +{ + float3 param = a[0]; + float3 param_1 = b[0]; + float3 param_2 = a[1]; + float3 param_3 = b[1]; + return compare_vec3(param, param_1) * compare_vec3(param_2, param_3); +} + +void vert_main() +{ + gl_Position = a_position; + float result = 1.0f; + float2x3 param = _104_var[0][0]; + float2x3 param_1 = float2x3(float3(2.0f, 6.0f, -6.0f), float3(0.0f, 5.0f, 5.0f)); + result *= compare_mat2x3(param, param_1); + v_vtxResult = result; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a_position = stage_input.a_position; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + stage_output.v_vtxResult = v_vtxResult; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/return-array.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/return-array.vert new file mode 100644 index 000000000..83e3a2812 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/return-array.vert @@ -0,0 +1,48 @@ +static const float4 _20[2] = { 10.0f.xxxx, 20.0f.xxxx }; + +static float4 gl_Position; 
+static float4 vInput0; +static float4 vInput1; + +struct SPIRV_Cross_Input +{ + float4 vInput0 : TEXCOORD0; + float4 vInput1 : TEXCOORD1; +}; + +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void test(out float4 SPIRV_Cross_return_value[2]) +{ + SPIRV_Cross_return_value = _20; +} + +void test2(out float4 SPIRV_Cross_return_value[2]) +{ + float4 foobar[2]; + foobar[0] = vInput0; + foobar[1] = vInput1; + SPIRV_Cross_return_value = foobar; +} + +void vert_main() +{ + float4 _42[2]; + test(_42); + float4 _44[2]; + test2(_44); + gl_Position = _42[0] + _44[1]; +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + vInput0 = stage_input.vInput0; + vInput1 = stage_input.vInput1; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/sampler-buffers.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/sampler-buffers.vert new file mode 100644 index 000000000..a4329dbf3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/sampler-buffers.vert @@ -0,0 +1,27 @@ +Buffer uFloatSampler : register(t1); +Buffer uIntSampler : register(t2); +Buffer uUintSampler : register(t3); + +static float4 gl_Position; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +float4 sample_from_function(Buffer s0, Buffer s1, Buffer s2) +{ + return (s0.Load(20) + asfloat(s1.Load(40))) + asfloat(s2.Load(60)); +} + +void vert_main() +{ + gl_Position = sample_from_function(uFloatSampler, uIntSampler, uUintSampler); +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/struct-composite-decl.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/struct-composite-decl.vert new file mode 100644 index 000000000..5b2c5824f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/struct-composite-decl.vert @@ -0,0 +1,50 @@ +struct VOut +{ + float4 a; + float4 b; + float4 c; + float4 d; +}; + +static VOut vout; +static float4 a; +static float4 b; +static float4 c; +static float4 d; + +struct SPIRV_Cross_Input +{ + float4 a : TEXCOORD0; + float4 b : TEXCOORD1; + float4 c : TEXCOORD2; + float4 d : TEXCOORD3; +}; + +struct SPIRV_Cross_Output +{ + VOut vout : TEXCOORD0; +}; + +void emit_result(VOut v) +{ + vout = v; +} + +void vert_main() +{ + VOut _26 = { a, b, c, d }; + VOut param = _26; + emit_result(param); +} + +SPIRV_Cross_Output main(SPIRV_Cross_Input stage_input) +{ + a = stage_input.a; + b = stage_input.b; + c = stage_input.c; + d = stage_input.d; + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.vout = vout; + return stage_output; +} diff --git a/3rdparty/spirv-cross/reference/shaders-hlsl/vert/texture_buffer.vert b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/texture_buffer.vert new file mode 100644 index 000000000..1c92f6fe6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-hlsl/vert/texture_buffer.vert @@ -0,0 +1,21 @@ +Buffer uSamp : register(t4); +RWBuffer uSampo : register(u5); + +static float4 gl_Position; +struct SPIRV_Cross_Output +{ + float4 gl_Position : SV_Position; +}; + +void vert_main() +{ + gl_Position = uSamp.Load(10) + uSampo[100]; +} + +SPIRV_Cross_Output main() +{ + vert_main(); + SPIRV_Cross_Output stage_output; + stage_output.gl_Position = gl_Position; + return stage_output; +} diff --git 
a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/comp/variable-pointers.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/comp/variable-pointers.asm.comp
new file mode 100644
index 000000000..90c1429e5
--- /dev/null
+++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/comp/variable-pointers.asm.comp
@@ -0,0 +1,70 @@
+#pragma clang diagnostic ignored "-Wmissing-prototypes"
+
+#include <metal_stdlib>
+#include <simd/simd.h>
+
+using namespace metal;
+
+struct foo
+{
+    int a[128];
+    uint b;
+    float2 c;
+};
+
+struct bar
+{
+    int d;
+};
+
+struct baz
+{
+    int e[128];
+};
+
+device int* select_buffer(device foo& buf, device baz& buf2, constant bar& cb)
+{
+    return (cb.d != 0) ? &buf.a[0u] : &buf2.e[0u];
+}
+
+device int* select_buffer_null(device foo& buf, constant bar& cb)
+{
+    return (cb.d != 0) ? &buf.a[0u] : nullptr;
+}
+
+threadgroup int* select_tgsm(constant bar& cb, threadgroup int (&tgsm)[128])
+{
+    return (cb.d != 0) ? &tgsm[0u] : nullptr;
+}
+
+kernel void main0(device foo& buf [[buffer(0)]], constant bar& cb [[buffer(3)]], device baz& buf2 [[buffer(4)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]])
+{
+    threadgroup int tgsm[128];
+    device int* sbuf = select_buffer(buf, buf2, cb);
+    device int* sbuf2 = select_buffer_null(buf, cb);
+    threadgroup int* stgsm = select_tgsm(cb, tgsm);
+    threadgroup int* cur = stgsm;
+    device int* _73;
+    _73 = &buf.a[0u];
+    int _77;
+    for (;;)
+    {
+        threadgroup int* _76 = cur;
+        _77 = *_73;
+        if (_77 != 0)
+        {
+            int _81 = *_76;
+            int _82 = _77 + _81;
+            *_73 = _82;
+            *_76 = _82;
+            cur = &_76[1u];
+            _73 = &_73[1u];
+            continue;
+        }
+        else
+        {
+            break;
+        }
+    }
+}
+
diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag
new file mode 100644
index 000000000..d02aeb6f2
--- /dev/null
+++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag
@@ -0,0 +1,236 @@
+#include <metal_stdlib>
+#include <simd/simd.h>
+
+using namespace metal;
+
+struct VertexOutput
+{
+    float4 HPosition;
+    float4 Uv_EdgeDistance1;
+    float4 UvStuds_EdgeDistance2;
+    float4 Color;
+    float4 LightPosition_Fog;
+    float4 View_Depth;
+    float4 Normal_SpecPower;
+    float3 Tangent;
+    float4 PosLightSpace_Reflectance;
+    float studIndex;
+};
+
+struct Surface
+{
+    float3 albedo;
+    float3 normal;
+    float specular;
+    float gloss;
+    float reflectance;
+    float opacity;
+};
+
+struct SurfaceInput
+{
+    float4 Color;
+    float2 Uv;
+    float2 UvStuds;
+};
+
+struct Globals
+{
+    float4x4 ViewProjection;
+    float4 ViewRight;
+    float4 ViewUp;
+    float4 ViewDir;
+    float3 CameraPosition;
+    float3 AmbientColor;
+    float3 Lamp0Color;
+    float3 Lamp0Dir;
+    float3 Lamp1Color;
+    float4 FogParams;
+    float3 FogColor;
+    float4 LightBorder;
+    float4 LightConfig0;
+    float4 LightConfig1;
+    float4 LightConfig2;
+    float4 LightConfig3;
+    float4 RefractionBias_FadeDistance_GlowFactor;
+    float4 OutlineBrightness_ShadowInfo;
+    float4 ShadowMatrix0;
+    float4 ShadowMatrix1;
+    float4 ShadowMatrix2;
+};
+
+struct CB0
+{
+    Globals CB0;
+};
+
+struct Params
+{
+    float4 LqmatFarTilingFactor;
+};
+
+struct CB2
+{
+    Params CB2;
+};
+
+constant VertexOutput _121 = {};
+constant SurfaceInput _122 = {};
+constant float2 _123 = {};
+constant float4 _124 = {};
+constant Surface _125 = {};
+constant float4 _192 = {};
+constant float4 _219 = {};
+constant float4 _297 = {};
+
+struct main0_out
+{
+    float4 _entryPointOutput [[color(0)]];
+};
+
+struct main0_in
+{
+    float4 
IN_Uv_EdgeDistance1 [[user(locn0)]]; + float4 IN_UvStuds_EdgeDistance2 [[user(locn1)]]; + float4 IN_Color [[user(locn2)]]; + float4 IN_LightPosition_Fog [[user(locn3)]]; + float4 IN_View_Depth [[user(locn4)]]; + float4 IN_Normal_SpecPower [[user(locn5)]]; + float3 IN_Tangent [[user(locn6)]]; + float4 IN_PosLightSpace_Reflectance [[user(locn7)]]; + float IN_studIndex [[user(locn8)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant CB0& _19 [[buffer(0)]], texture2d StudsMapTexture [[texture(0)]], texture2d ShadowMapTexture [[texture(1)]], texturecube EnvironmentMapTexture [[texture(2)]], texture2d DiffuseMapTexture [[texture(3)]], texture2d NormalMapTexture [[texture(4)]], texture2d SpecularMapTexture [[texture(5)]], texture3d LightMapTexture [[texture(6)]], texture2d NormalDetailMapTexture [[texture(8)]], sampler StudsMapSampler [[sampler(0)]], sampler ShadowMapSampler [[sampler(1)]], sampler EnvironmentMapSampler [[sampler(2)]], sampler DiffuseMapSampler [[sampler(3)]], sampler NormalMapSampler [[sampler(4)]], sampler SpecularMapSampler [[sampler(5)]], sampler LightMapSampler [[sampler(6)]], sampler NormalDetailMapSampler [[sampler(8)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + VertexOutput _128 = _121; + _128.HPosition = gl_FragCoord; + VertexOutput _130 = _128; + _130.Uv_EdgeDistance1 = in.IN_Uv_EdgeDistance1; + VertexOutput _132 = _130; + _132.UvStuds_EdgeDistance2 = in.IN_UvStuds_EdgeDistance2; + VertexOutput _134 = _132; + _134.Color = in.IN_Color; + VertexOutput _136 = _134; + _136.LightPosition_Fog = in.IN_LightPosition_Fog; + VertexOutput _138 = _136; + _138.View_Depth = in.IN_View_Depth; + VertexOutput _140 = _138; + _140.Normal_SpecPower = in.IN_Normal_SpecPower; + VertexOutput _142 = _140; + _142.Tangent = in.IN_Tangent; + VertexOutput _144 = _142; + _144.PosLightSpace_Reflectance = in.IN_PosLightSpace_Reflectance; + VertexOutput _146 = _144; + _146.studIndex = in.IN_studIndex; + SurfaceInput _147 = _122; + _147.Color = in.IN_Color; + SurfaceInput _149 = _147; + _149.Uv = in.IN_Uv_EdgeDistance1.xy; + SurfaceInput _151 = _149; + _151.UvStuds = in.IN_UvStuds_EdgeDistance2.xy; + SurfaceInput _156 = _151; + _156.UvStuds.y = (fract(_151.UvStuds.y) + in.IN_studIndex) * 0.25; + float _163 = _146.View_Depth.w * _19.CB0.RefractionBias_FadeDistance_GlowFactor.y; + float _165 = fast::clamp(1.0 - _163, 0.0, 1.0); + float2 _166 = in.IN_Uv_EdgeDistance1.xy * 1.0; + bool _173; + float4 _193; + do + { + _173 = 0.0 == 0.0; + if (_173) + { + _193 = DiffuseMapTexture.sample(DiffuseMapSampler, _166); + break; + } + else + { + float _180 = 1.0 / (1.0 - 0.0); + _193 = mix(DiffuseMapTexture.sample(DiffuseMapSampler, (_166 * 0.25)), DiffuseMapTexture.sample(DiffuseMapSampler, _166), float4(fast::clamp((fast::clamp(1.0 - (_146.View_Depth.w * 0.00333332992158830165863037109375), 0.0, 1.0) * _180) - (0.0 * _180), 0.0, 1.0))); + break; + } + _193 = _192; + break; + } while (false); + float4 _194 = _193 * 1.0; + float4 _220; + do + { + if (_173) + { + _220 = NormalMapTexture.sample(NormalMapSampler, _166); + break; + } + else + { + float _207 = 1.0 / (1.0 - 0.0); + _220 = mix(NormalMapTexture.sample(NormalMapSampler, (_166 * 0.25)), NormalMapTexture.sample(NormalMapSampler, _166), float4(fast::clamp((_165 * _207) - (0.0 * _207), 0.0, 1.0))); + break; + } + _220 = _219; + break; + } while (false); + float2 _223 = float2(1.0); + float2 _224 = (_220.wy * 2.0) - _223; + float3 _232 = float3(_224, sqrt(fast::clamp(1.0 + dot(-_224, _224), 0.0, 1.0))); + float2 _240 = 
(NormalDetailMapTexture.sample(NormalDetailMapSampler, (_166 * 0.0)).wy * 2.0) - _223; + float2 _252 = _232.xy + (float3(_240, sqrt(fast::clamp(1.0 + dot(-_240, _240), 0.0, 1.0))).xy * 0.0); + float3 _253 = float3(_252.x, _252.y, _232.z); + float2 _255 = _253.xy * _165; + float3 _256 = float3(_255.x, _255.y, _253.z); + float3 _271 = ((in.IN_Color.xyz * _194.xyz) * (1.0 + (_256.x * 0.300000011920928955078125))) * (StudsMapTexture.sample(StudsMapSampler, _156.UvStuds).x * 2.0); + float4 _298; + do + { + if (0.75 == 0.0) + { + _298 = SpecularMapTexture.sample(SpecularMapSampler, _166); + break; + } + else + { + float _285 = 1.0 / (1.0 - 0.75); + _298 = mix(SpecularMapTexture.sample(SpecularMapSampler, (_166 * 0.25)), SpecularMapTexture.sample(SpecularMapSampler, _166), float4(fast::clamp((_165 * _285) - (0.75 * _285), 0.0, 1.0))); + break; + } + _298 = _297; + break; + } while (false); + float2 _303 = mix(float2(0.800000011920928955078125, 120.0), (_298.xy * float2(2.0, 256.0)) + float2(0.0, 0.00999999977648258209228515625), float2(_165)); + Surface _304 = _125; + _304.albedo = _271; + Surface _305 = _304; + _305.normal = _256; + float _306 = _303.x; + Surface _307 = _305; + _307.specular = _306; + float _308 = _303.y; + Surface _309 = _307; + _309.gloss = _308; + float _312 = (_298.xy.y * _165) * 0.0; + Surface _313 = _309; + _313.reflectance = _312; + float4 _318 = float4(_271, _146.Color.w); + float3 _329 = normalize(((in.IN_Tangent * _313.normal.x) + (cross(in.IN_Normal_SpecPower.xyz, in.IN_Tangent) * _313.normal.y)) + (in.IN_Normal_SpecPower.xyz * _313.normal.z)); + float3 _332 = -_19.CB0.Lamp0Dir; + float _333 = dot(_329, _332); + float _357 = fast::clamp(dot(step(_19.CB0.LightConfig3.xyz, abs(in.IN_LightPosition_Fog.xyz - _19.CB0.LightConfig2.xyz)), float3(1.0)), 0.0, 1.0); + float4 _368 = mix(LightMapTexture.sample(LightMapSampler, (in.IN_LightPosition_Fog.xyz.yzx - (in.IN_LightPosition_Fog.xyz.yzx * _357))), _19.CB0.LightBorder, float4(_357)); + float2 _376 = ShadowMapTexture.sample(ShadowMapSampler, in.IN_PosLightSpace_Reflectance.xyz.xy).xy; + float _392 = (1.0 - (((step(_376.x, in.IN_PosLightSpace_Reflectance.xyz.z) * fast::clamp(9.0 - (20.0 * abs(in.IN_PosLightSpace_Reflectance.xyz.z - 0.5)), 0.0, 1.0)) * _376.y) * _19.CB0.OutlineBrightness_ShadowInfo.w)) * _368.w; + float3 _403 = mix(_318.xyz, EnvironmentMapTexture.sample(EnvironmentMapSampler, reflect(-in.IN_View_Depth.xyz, _329)).xyz, float3(_312)); + float4 _404 = float4(_403.x, _403.y, _403.z, _318.w); + float3 _422 = (((_19.CB0.AmbientColor + (((_19.CB0.Lamp0Color * fast::clamp(_333, 0.0, 1.0)) + (_19.CB0.Lamp1Color * fast::max(-_333, 0.0))) * _392)) + _368.xyz) * _404.xyz) + (_19.CB0.Lamp0Color * (((step(0.0, _333) * _306) * _392) * pow(fast::clamp(dot(_329, normalize(_332 + normalize(in.IN_View_Depth.xyz))), 0.0, 1.0), _308))); + float4 _425 = float4(_422.x, _422.y, _422.z, _124.w); + _425.w = _404.w; + float2 _435 = fast::min(in.IN_Uv_EdgeDistance1.wz, in.IN_UvStuds_EdgeDistance2.wz); + float _439 = fast::min(_435.x, _435.y) / _163; + float3 _445 = _425.xyz * fast::clamp((fast::clamp((_163 * _19.CB0.OutlineBrightness_ShadowInfo.x) + _19.CB0.OutlineBrightness_ShadowInfo.y, 0.0, 1.0) * (1.5 - _439)) + _439, 0.0, 1.0); + float4 _446 = float4(_445.x, _445.y, _445.z, _425.w); + float3 _453 = mix(_19.CB0.FogColor, _446.xyz, float3(fast::clamp(_146.LightPosition_Fog.w, 0.0, 1.0))); + out._entryPointOutput = float4(_453.x, _453.y, _453.z, _446.w); + return out; +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/frag/texture-access.swizzle.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/frag/texture-access.swizzle.asm.frag new file mode 100644 index 000000000..f418c072a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/frag/texture-access.swizzle.asm.frag @@ -0,0 +1,184 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct spvAux +{ + uint swizzleConst[1]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +enum class spvSwizzle : uint +{ + none = 0, + zero, + one, + red, + green, + blue, + alpha +}; + +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type& x) +{ + return static_cast(x); +} +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type&& x) +{ + return static_cast(x); +} + +template +inline T spvGetSwizzle(vec x, T c, spvSwizzle s) +{ + switch (s) + { + case spvSwizzle::none: + return c; + case spvSwizzle::zero: + return 0; + case spvSwizzle::one: + return 1; + case spvSwizzle::red: + return x.r; + case spvSwizzle::green: + return x.g; + case spvSwizzle::blue: + return x.b; + case spvSwizzle::alpha: + return x.a; + } +} + +// Wrapper function that swizzles texture samples and fetches. +template +inline vec spvTextureSwizzle(vec x, uint s) +{ + if (!s) + return x; + return vec(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) & 0xFF)), spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF))); +} + +template +inline T spvTextureSwizzle(T x, uint s) +{ + return spvTextureSwizzle(vec(x, 0, 0, 1), s).x; +} + +// Wrapper function that swizzles texture gathers. +template +inline vec spvGatherSwizzle(sampler s, thread Tex& t, Ts... params, component c, uint sw) METAL_CONST_ARG(c) +{ + if (sw) + { + switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF)) + { + case spvSwizzle::none: + break; + case spvSwizzle::zero: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + case spvSwizzle::red: + return t.gather(s, spvForward(params)..., component::x); + case spvSwizzle::green: + return t.gather(s, spvForward(params)..., component::y); + case spvSwizzle::blue: + return t.gather(s, spvForward(params)..., component::z); + case spvSwizzle::alpha: + return t.gather(s, spvForward(params)..., component::w); + } + } + switch (c) + { + case component::x: + return t.gather(s, spvForward(params)..., component::x); + case component::y: + return t.gather(s, spvForward(params)..., component::y); + case component::z: + return t.gather(s, spvForward(params)..., component::z); + case component::w: + return t.gather(s, spvForward(params)..., component::w); + } +} + +// Wrapper function that swizzles depth texture gathers. +template +inline vec spvGatherCompareSwizzle(sampler s, thread Tex& t, Ts... 
params, uint sw) +{ + if (sw) + { + switch (spvSwizzle(sw & 0xFF)) + { + case spvSwizzle::none: + case spvSwizzle::red: + break; + case spvSwizzle::zero: + case spvSwizzle::green: + case spvSwizzle::blue: + case spvSwizzle::alpha: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + } + } + return t.gather_compare(s, spvForward(params)...); +} + +fragment void main0(constant spvAux& spvAuxBuffer [[buffer(0)]], texture1d tex1d [[texture(0)]], texture2d tex2d [[texture(1)]], texture3d tex3d [[texture(2)]], texturecube texCube [[texture(3)]], texture2d_array tex2dArray [[texture(4)]], texturecube_array texCubeArray [[texture(5)]], texture2d texBuffer [[texture(6)]], depth2d depth2d [[texture(7)]], depthcube depthCube [[texture(8)]], depth2d_array depth2dArray [[texture(9)]], depthcube_array depthCubeArray [[texture(10)]], sampler tex1dSamp [[sampler(0)]], sampler tex2dSamp [[sampler(1)]], sampler tex3dSamp [[sampler(2)]], sampler texCubeSamp [[sampler(3)]], sampler tex2dArraySamp [[sampler(4)]], sampler texCubeArraySamp [[sampler(5)]], sampler depth2dSamp [[sampler(7)]], sampler depthCubeSamp [[sampler(8)]], sampler depth2dArraySamp [[sampler(9)]], sampler depthCubeArraySamp [[sampler(10)]]) +{ + float4 c = spvTextureSwizzle(tex1d.sample(tex1dSamp, 0.0), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSamp, float2(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSamp, float3(0.0)), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(texCube.sample(texCubeSamp, float3(0.0)), spvAuxBuffer.swizzleConst[3]); + c = spvTextureSwizzle(tex2dArray.sample(tex2dArraySamp, float3(0.0).xy, uint(round(float3(0.0).z))), spvAuxBuffer.swizzleConst[4]); + c = spvTextureSwizzle(texCubeArray.sample(texCubeArraySamp, float4(0.0).xyz, uint(round(float4(0.0).w))), spvAuxBuffer.swizzleConst[5]); + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSamp, float3(0.0, 0.0, 1.0).xy, float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[7]); + c.x = spvTextureSwizzle(depthCube.sample_compare(depthCubeSamp, float4(0.0, 0.0, 0.0, 1.0).xyz, float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[8]); + c.x = spvTextureSwizzle(depth2dArray.sample_compare(depth2dArraySamp, float4(0.0, 0.0, 0.0, 1.0).xy, uint(round(float4(0.0, 0.0, 0.0, 1.0).z)), float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[9]); + c.x = spvTextureSwizzle(depthCubeArray.sample_compare(depthCubeArraySamp, float4(0.0).xyz, uint(round(float4(0.0).w)), 1.0), spvAuxBuffer.swizzleConst[10]); + c = spvTextureSwizzle(tex1d.sample(tex1dSamp, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSamp, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSamp, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[2]); + float4 _152 = float4(0.0, 0.0, 1.0, 1.0); + _152.z = float4(0.0, 0.0, 1.0, 1.0).w; + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSamp, _152.xy / _152.z, float4(0.0, 0.0, 1.0, 1.0).z), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.sample(tex1dSamp, 0.0), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSamp, float2(0.0), level(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSamp, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(texCube.sample(texCubeSamp, float3(0.0), level(0.0)), 
spvAuxBuffer.swizzleConst[3]); + c = spvTextureSwizzle(tex2dArray.sample(tex2dArraySamp, float3(0.0).xy, uint(round(float3(0.0).z)), level(0.0)), spvAuxBuffer.swizzleConst[4]); + c = spvTextureSwizzle(texCubeArray.sample(texCubeArraySamp, float4(0.0).xyz, uint(round(float4(0.0).w)), level(0.0)), spvAuxBuffer.swizzleConst[5]); + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSamp, float3(0.0, 0.0, 1.0).xy, float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.sample(tex1dSamp, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSamp, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSamp, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w, level(0.0)), spvAuxBuffer.swizzleConst[2]); + float4 _202 = float4(0.0, 0.0, 1.0, 1.0); + _202.z = float4(0.0, 0.0, 1.0, 1.0).w; + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSamp, _202.xy / _202.z, float4(0.0, 0.0, 1.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.read(uint(0)), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.read(uint2(int2(0)), 0), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.read(uint3(int3(0)), 0), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(tex2dArray.read(uint2(int3(0).xy), uint(int3(0).z), 0), spvAuxBuffer.swizzleConst[4]); + c = texBuffer.read(spvTexelBufferCoord(0)); + c = spvGatherSwizzle, float2, int2>(tex2dSamp, tex2d, float2(0.0), int2(0), component::x, spvAuxBuffer.swizzleConst[1]); + c = spvGatherSwizzle, float3>(texCubeSamp, texCube, float3(0.0), component::y, spvAuxBuffer.swizzleConst[3]); + c = spvGatherSwizzle, float2, uint, int2>(tex2dArraySamp, tex2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), int2(0), component::z, spvAuxBuffer.swizzleConst[4]); + c = spvGatherSwizzle, float3, uint>(texCubeArraySamp, texCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), component::w, spvAuxBuffer.swizzleConst[5]); + c = spvGatherCompareSwizzle, float2, float>(depth2dSamp, depth2d, float2(0.0), 1.0, spvAuxBuffer.swizzleConst[7]); + c = spvGatherCompareSwizzle, float3, float>(depthCubeSamp, depthCube, float3(0.0), 1.0, spvAuxBuffer.swizzleConst[8]); + c = spvGatherCompareSwizzle, float2, uint, float>(depth2dArraySamp, depth2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), 1.0, spvAuxBuffer.swizzleConst[9]); + c = spvGatherCompareSwizzle, float3, uint, float>(depthCubeArraySamp, depthCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), 1.0, spvAuxBuffer.swizzleConst[10]); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/vert/empty-struct-composite.asm.vert b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/vert/empty-struct-composite.asm.vert new file mode 100644 index 000000000..e9cd6a540 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/vert/empty-struct-composite.asm.vert @@ -0,0 +1,16 @@ +#include +#include + +using namespace metal; + +struct Test +{ + int empty_struct_member; +}; + +vertex void main0() +{ + Test _14 = Test{ 0 }; + Test t = _14; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/bitfield.comp b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/bitfield.comp new file mode 100644 index 000000000..62ef02c99 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/bitfield.comp @@ -0,0 +1,47 @@ +#pragma clang 
diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +// Implementation of the GLSL findLSB() function +template +T findLSB(T x) +{ + return select(ctz(x), T(-1), x == T(0)); +} + +// Implementation of the signed GLSL findMSB() function +template +T findSMSB(T x) +{ + T v = select(x, T(-1) - x, x < T(0)); + return select(clz(T(0)) - (clz(v) + T(1)), T(-1), v == T(0)); +} + +// Implementation of the unsigned GLSL findMSB() function +template +T findUMSB(T x) +{ + return select(clz(T(0)) - (clz(x) + T(1)), T(-1), x == T(0)); +} + +kernel void main0() +{ + int signed_value = 0; + uint unsigned_value = 0u; + int s = extract_bits(signed_value, 5, 20); + uint u = extract_bits(unsigned_value, 6, 21); + s = insert_bits(s, 40, 5, 4); + u = insert_bits(u, 60u, 5, 4); + u = reverse_bits(u); + s = reverse_bits(s); + int v0 = popcount(u); + int v1 = popcount(s); + int v2 = findUMSB(u); + int v3 = findSMSB(s); + int v4 = findLSB(u); + int v5 = findLSB(s); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/loop.comp b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/loop.comp new file mode 100644 index 000000000..d7677fb43 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/loop.comp @@ -0,0 +1,107 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4x4 mvp; + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _24 [[buffer(0)]], device SSBO2& _177 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + float4 idat = _24.in_data[ident]; + int k = 0; + uint i = 0u; + if (idat.y == 20.0) + { + do + { + k *= 2; + i++; + } while (i < ident); + } + switch (k) + { + case 10: + { + for (;;) + { + i++; + if (i > 10u) + { + break; + } + continue; + } + break; + } + default: + { + for (;;) + { + i += 2u; + if (i > 20u) + { + break; + } + continue; + } + break; + } + } + while (k < 10) + { + idat *= 2.0; + k++; + } + for (uint i_1 = 0u; i_1 < 16u; i_1++, k++) + { + for (uint j = 0u; j < 30u; j++) + { + idat = _24.mvp * idat; + } + } + k = 0; + for (;;) + { + k++; + if (k > 10) + { + k += 2; + } + else + { + k += 3; + continue; + } + k += 10; + continue; + } + k = 0; + do + { + k++; + } while (k > 10); + int l = 0; + for (;;) + { + if (l == 5) + { + l++; + continue; + } + idat += float4(1.0); + l++; + continue; + } + _177.out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/return.comp b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/return.comp new file mode 100644 index 000000000..4015ddb37 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/comp/return.comp @@ -0,0 +1,36 @@ +#include +#include + +using namespace metal; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(device SSBO2& _27 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + if (ident == 2u) + { + _27.out_data[ident] = float4(20.0); + } + else + { + if (ident == 4u) + { + _27.out_data[ident] = float4(10.0); + return; + } + } + for (int i = 0; i < 20; i++) + { + if (i == 10) + { + break; + } + return; + } + _27.out_data[ident] = float4(10.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/in_block_assign.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/in_block_assign.frag new file mode 100644 index 000000000..427c689c4 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/in_block_assign.frag @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct VOUT +{ + float4 a; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 VOUT_a [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + VOUT Clip = {}; + Clip.a = in.VOUT_a; + VOUT tmp = Clip; + tmp.a += float4(1.0); + out.FragColor = tmp.a; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-int.swizzle.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-int.swizzle.frag new file mode 100644 index 000000000..ec765650a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-int.swizzle.frag @@ -0,0 +1,169 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct spvAux +{ + uint swizzleConst[1]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +enum class spvSwizzle : uint +{ + none = 0, + zero, + one, + red, + green, + blue, + alpha +}; + +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type& x) +{ + return static_cast(x); +} +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type&& x) +{ + return static_cast(x); +} + +template +inline T spvGetSwizzle(vec x, T c, spvSwizzle s) +{ + switch (s) + { + case spvSwizzle::none: + return c; + case spvSwizzle::zero: + return 0; + case spvSwizzle::one: + return 1; + case spvSwizzle::red: + return x.r; + case spvSwizzle::green: + return x.g; + case spvSwizzle::blue: + return x.b; + case spvSwizzle::alpha: + return x.a; + } +} + +// Wrapper function that swizzles texture samples and fetches. +template +inline vec spvTextureSwizzle(vec x, uint s) +{ + if (!s) + return x; + return vec(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) & 0xFF)), spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF))); +} + +template +inline T spvTextureSwizzle(T x, uint s) +{ + return spvTextureSwizzle(vec(x, 0, 0, 1), s).x; +} + +// Wrapper function that swizzles texture gathers. +template +inline vec spvGatherSwizzle(sampler s, thread Tex& t, Ts... 
params, component c, uint sw) METAL_CONST_ARG(c) +{ + if (sw) + { + switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF)) + { + case spvSwizzle::none: + break; + case spvSwizzle::zero: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + case spvSwizzle::red: + return t.gather(s, spvForward(params)..., component::x); + case spvSwizzle::green: + return t.gather(s, spvForward(params)..., component::y); + case spvSwizzle::blue: + return t.gather(s, spvForward(params)..., component::z); + case spvSwizzle::alpha: + return t.gather(s, spvForward(params)..., component::w); + } + } + switch (c) + { + case component::x: + return t.gather(s, spvForward(params)..., component::x); + case component::y: + return t.gather(s, spvForward(params)..., component::y); + case component::z: + return t.gather(s, spvForward(params)..., component::z); + case component::w: + return t.gather(s, spvForward(params)..., component::w); + } +} + +// Wrapper function that swizzles depth texture gathers. +template +inline vec spvGatherCompareSwizzle(sampler s, thread Tex& t, Ts... params, uint sw) +{ + if (sw) + { + switch (spvSwizzle(sw & 0xFF)) + { + case spvSwizzle::none: + case spvSwizzle::red: + break; + case spvSwizzle::zero: + case spvSwizzle::green: + case spvSwizzle::blue: + case spvSwizzle::alpha: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + } + } + return t.gather_compare(s, spvForward(params)...); +} + +fragment void main0(constant spvAux& spvAuxBuffer [[buffer(0)]], texture1d tex1d [[texture(0)]], texture2d tex2d [[texture(1)]], texture3d tex3d [[texture(2)]], texturecube texCube [[texture(3)]], texture2d_array tex2dArray [[texture(4)]], texturecube_array texCubeArray [[texture(5)]], texture2d texBuffer [[texture(6)]], sampler tex1dSmplr [[sampler(0)]], sampler tex2dSmplr [[sampler(1)]], sampler tex3dSmplr [[sampler(2)]], sampler texCubeSmplr [[sampler(3)]], sampler tex2dArraySmplr [[sampler(4)]], sampler texCubeArraySmplr [[sampler(5)]]) +{ + float4 c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0)), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[3])); + c = float4(spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z))), spvAuxBuffer.swizzleConst[4])); + c = float4(spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w))), spvAuxBuffer.swizzleConst[5])); + c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0), level(0.0)), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0), level(0.0)), 
spvAuxBuffer.swizzleConst[3])); + c = float4(spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z)), level(0.0)), spvAuxBuffer.swizzleConst[4])); + c = float4(spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w)), level(0.0)), spvAuxBuffer.swizzleConst[5])); + c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w, level(0.0)), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(tex1d.read(uint(0)), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.read(uint2(int2(0)), 0), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.read(uint3(int3(0)), 0), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(tex2dArray.read(uint2(int3(0).xy), uint(int3(0).z), 0), spvAuxBuffer.swizzleConst[4])); + c = float4(texBuffer.read(spvTexelBufferCoord(0))); + c = float4(spvGatherSwizzle, float2, int2>(tex2dSmplr, tex2d, float2(0.0), int2(0), component::x, spvAuxBuffer.swizzleConst[1])); + c = float4(spvGatherSwizzle, float3>(texCubeSmplr, texCube, float3(0.0), component::y, spvAuxBuffer.swizzleConst[3])); + c = float4(spvGatherSwizzle, float2, uint, int2>(tex2dArraySmplr, tex2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), int2(0), component::z, spvAuxBuffer.swizzleConst[4])); + c = float4(spvGatherSwizzle, float3, uint>(texCubeArraySmplr, texCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), component::w, spvAuxBuffer.swizzleConst[5])); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-leaf.swizzle.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-leaf.swizzle.frag new file mode 100644 index 000000000..b821c1c50 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-leaf.swizzle.frag @@ -0,0 +1,190 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct spvAux +{ + uint swizzleConst[1]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +enum class spvSwizzle : uint +{ + none = 0, + zero, + one, + red, + green, + blue, + alpha +}; + +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type& x) +{ + return static_cast(x); +} +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type&& x) +{ + return static_cast(x); +} + +template +inline T spvGetSwizzle(vec x, T c, spvSwizzle s) +{ + switch (s) + { + case spvSwizzle::none: + return c; + case spvSwizzle::zero: + return 0; + case spvSwizzle::one: + return 1; + case spvSwizzle::red: + return x.r; + case spvSwizzle::green: + return x.g; + case spvSwizzle::blue: + return x.b; + case spvSwizzle::alpha: + return x.a; + } +} + +// Wrapper function that swizzles texture samples and fetches. 
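+// The uint passed as 's' packs four spvSwizzle selectors, 8 bits per channel:
+// red in bits 0-7, green in 8-15, blue in 16-23 and alpha in 24-31, which is
+// why each component below is extracted with (s >> N) & 0xFF. A packed value
+// of 0 means spvSwizzle::none for every channel, so the early-out returns the
+// sample unchanged.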
+template +inline vec spvTextureSwizzle(vec x, uint s) +{ + if (!s) + return x; + return vec(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) & 0xFF)), spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF))); +} + +template +inline T spvTextureSwizzle(T x, uint s) +{ + return spvTextureSwizzle(vec(x, 0, 0, 1), s).x; +} + +// Wrapper function that swizzles texture gathers. +template +inline vec spvGatherSwizzle(sampler s, thread Tex& t, Ts... params, component c, uint sw) METAL_CONST_ARG(c) +{ + if (sw) + { + switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF)) + { + case spvSwizzle::none: + break; + case spvSwizzle::zero: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + case spvSwizzle::red: + return t.gather(s, spvForward(params)..., component::x); + case spvSwizzle::green: + return t.gather(s, spvForward(params)..., component::y); + case spvSwizzle::blue: + return t.gather(s, spvForward(params)..., component::z); + case spvSwizzle::alpha: + return t.gather(s, spvForward(params)..., component::w); + } + } + switch (c) + { + case component::x: + return t.gather(s, spvForward(params)..., component::x); + case component::y: + return t.gather(s, spvForward(params)..., component::y); + case component::z: + return t.gather(s, spvForward(params)..., component::z); + case component::w: + return t.gather(s, spvForward(params)..., component::w); + } +} + +// Wrapper function that swizzles depth texture gathers. +template +inline vec spvGatherCompareSwizzle(sampler s, thread Tex& t, Ts... params, uint sw) +{ + if (sw) + { + switch (spvSwizzle(sw & 0xFF)) + { + case spvSwizzle::none: + case spvSwizzle::red: + break; + case spvSwizzle::zero: + case spvSwizzle::green: + case spvSwizzle::blue: + case spvSwizzle::alpha: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + } + } + return t.gather_compare(s, spvForward(params)...); +} + +float4 doSwizzle(thread texture1d tex1d, thread const sampler tex1dSmplr, thread texture2d tex2d, thread const sampler tex2dSmplr, thread texture3d tex3d, thread const sampler tex3dSmplr, thread texturecube texCube, thread const sampler texCubeSmplr, thread texture2d_array tex2dArray, thread const sampler tex2dArraySmplr, thread texturecube_array texCubeArray, thread const sampler texCubeArraySmplr, thread depth2d depth2d, thread const sampler depth2dSmplr, thread depthcube depthCube, thread const sampler depthCubeSmplr, thread depth2d_array depth2dArray, thread const sampler depth2dArraySmplr, thread depthcube_array depthCubeArray, thread const sampler depthCubeArraySmplr, thread texture2d texBuffer, constant spvAux& spvAuxBuffer) +{ + float4 c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[3]); + c = spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z))), spvAuxBuffer.swizzleConst[4]); + c = spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w))), spvAuxBuffer.swizzleConst[5]); + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, float3(0.0, 0.0, 1.0).xy, float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[7]); + c.x = 
spvTextureSwizzle(depthCube.sample_compare(depthCubeSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz, float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[8]); + c.x = spvTextureSwizzle(depth2dArray.sample_compare(depth2dArraySmplr, float4(0.0, 0.0, 0.0, 1.0).xy, uint(round(float4(0.0, 0.0, 0.0, 1.0).z)), float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[9]); + c.x = spvTextureSwizzle(depthCubeArray.sample_compare(depthCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w)), 1.0), spvAuxBuffer.swizzleConst[10]); + c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[2]); + float4 _103 = float4(0.0, 0.0, 1.0, 1.0); + _103.z = float4(0.0, 0.0, 1.0, 1.0).w; + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, _103.xy / _103.z, float4(0.0, 0.0, 1.0, 1.0).z), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0), level(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[3]); + c = spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z)), level(0.0)), spvAuxBuffer.swizzleConst[4]); + c = spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w)), level(0.0)), spvAuxBuffer.swizzleConst[5]); + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, float3(0.0, 0.0, 1.0).xy, float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w, level(0.0)), spvAuxBuffer.swizzleConst[2]); + float4 _131 = float4(0.0, 0.0, 1.0, 1.0); + _131.z = float4(0.0, 0.0, 1.0, 1.0).w; + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, _131.xy / _131.z, float4(0.0, 0.0, 1.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.read(uint(0)), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.read(uint2(int2(0)), 0), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.read(uint3(int3(0)), 0), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(tex2dArray.read(uint2(int3(0).xy), uint(int3(0).z), 0), spvAuxBuffer.swizzleConst[4]); + c = texBuffer.read(spvTexelBufferCoord(0)); + c = spvGatherSwizzle, float2, int2>(tex2dSmplr, tex2d, float2(0.0), int2(0), component::x, spvAuxBuffer.swizzleConst[1]); + c = spvGatherSwizzle, float3>(texCubeSmplr, texCube, float3(0.0), component::y, spvAuxBuffer.swizzleConst[3]); + c = spvGatherSwizzle, float2, uint, int2>(tex2dArraySmplr, tex2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), int2(0), component::z, spvAuxBuffer.swizzleConst[4]); + c = spvGatherSwizzle, float3, uint>(texCubeArraySmplr, 
texCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), component::w, spvAuxBuffer.swizzleConst[5]); + c = spvGatherCompareSwizzle, float2, float>(depth2dSmplr, depth2d, float2(0.0), 1.0, spvAuxBuffer.swizzleConst[7]); + c = spvGatherCompareSwizzle, float3, float>(depthCubeSmplr, depthCube, float3(0.0), 1.0, spvAuxBuffer.swizzleConst[8]); + c = spvGatherCompareSwizzle, float2, uint, float>(depth2dArraySmplr, depth2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), 1.0, spvAuxBuffer.swizzleConst[9]); + c = spvGatherCompareSwizzle, float3, uint, float>(depthCubeArraySmplr, depthCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), 1.0, spvAuxBuffer.swizzleConst[10]); + return c; +} + +fragment void main0(constant spvAux& spvAuxBuffer [[buffer(0)]], texture1d tex1d [[texture(0)]], texture2d tex2d [[texture(1)]], texture3d tex3d [[texture(2)]], texturecube texCube [[texture(3)]], texture2d_array tex2dArray [[texture(4)]], texturecube_array texCubeArray [[texture(5)]], texture2d texBuffer [[texture(6)]], depth2d depth2d [[texture(7)]], depthcube depthCube [[texture(8)]], depth2d_array depth2dArray [[texture(9)]], depthcube_array depthCubeArray [[texture(10)]], sampler tex1dSmplr [[sampler(0)]], sampler tex2dSmplr [[sampler(1)]], sampler tex3dSmplr [[sampler(2)]], sampler texCubeSmplr [[sampler(3)]], sampler tex2dArraySmplr [[sampler(4)]], sampler texCubeArraySmplr [[sampler(5)]], sampler depth2dSmplr [[sampler(7)]], sampler depthCubeSmplr [[sampler(8)]], sampler depth2dArraySmplr [[sampler(9)]], sampler depthCubeArraySmplr [[sampler(10)]]) +{ + float4 c = doSwizzle(tex1d, tex1dSmplr, tex2d, tex2dSmplr, tex3d, tex3dSmplr, texCube, texCubeSmplr, tex2dArray, tex2dArraySmplr, texCubeArray, texCubeArraySmplr, depth2d, depth2dSmplr, depthCube, depthCubeSmplr, depth2dArray, depth2dArraySmplr, depthCubeArray, depthCubeArraySmplr, texBuffer, spvAuxBuffer); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-uint.swizzle.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-uint.swizzle.frag new file mode 100644 index 000000000..f4ff6717e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access-uint.swizzle.frag @@ -0,0 +1,169 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct spvAux +{ + uint swizzleConst[1]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +enum class spvSwizzle : uint +{ + none = 0, + zero, + one, + red, + green, + blue, + alpha +}; + +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type& x) +{ + return static_cast(x); +} +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type&& x) +{ + return static_cast(x); +} + +template +inline T spvGetSwizzle(vec x, T c, spvSwizzle s) +{ + switch (s) + { + case spvSwizzle::none: + return c; + case spvSwizzle::zero: + return 0; + case spvSwizzle::one: + return 1; + case spvSwizzle::red: + return x.r; + case spvSwizzle::green: + return x.g; + case spvSwizzle::blue: + return x.b; + case spvSwizzle::alpha: + return x.a; + } +} + +// Wrapper function that swizzles texture samples and fetches. 
+template +inline vec spvTextureSwizzle(vec x, uint s) +{ + if (!s) + return x; + return vec(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) & 0xFF)), spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF))); +} + +template +inline T spvTextureSwizzle(T x, uint s) +{ + return spvTextureSwizzle(vec(x, 0, 0, 1), s).x; +} + +// Wrapper function that swizzles texture gathers. +template +inline vec spvGatherSwizzle(sampler s, thread Tex& t, Ts... params, component c, uint sw) METAL_CONST_ARG(c) +{ + if (sw) + { + switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF)) + { + case spvSwizzle::none: + break; + case spvSwizzle::zero: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + case spvSwizzle::red: + return t.gather(s, spvForward(params)..., component::x); + case spvSwizzle::green: + return t.gather(s, spvForward(params)..., component::y); + case spvSwizzle::blue: + return t.gather(s, spvForward(params)..., component::z); + case spvSwizzle::alpha: + return t.gather(s, spvForward(params)..., component::w); + } + } + switch (c) + { + case component::x: + return t.gather(s, spvForward(params)..., component::x); + case component::y: + return t.gather(s, spvForward(params)..., component::y); + case component::z: + return t.gather(s, spvForward(params)..., component::z); + case component::w: + return t.gather(s, spvForward(params)..., component::w); + } +} + +// Wrapper function that swizzles depth texture gathers. +template +inline vec spvGatherCompareSwizzle(sampler s, thread Tex& t, Ts... params, uint sw) +{ + if (sw) + { + switch (spvSwizzle(sw & 0xFF)) + { + case spvSwizzle::none: + case spvSwizzle::red: + break; + case spvSwizzle::zero: + case spvSwizzle::green: + case spvSwizzle::blue: + case spvSwizzle::alpha: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + } + } + return t.gather_compare(s, spvForward(params)...); +} + +fragment void main0(constant spvAux& spvAuxBuffer [[buffer(0)]], texture1d tex1d [[texture(0)]], texture2d tex2d [[texture(1)]], texture3d tex3d [[texture(2)]], texturecube texCube [[texture(3)]], texture2d_array tex2dArray [[texture(4)]], texturecube_array texCubeArray [[texture(5)]], texture2d texBuffer [[texture(6)]], sampler tex1dSmplr [[sampler(0)]], sampler tex2dSmplr [[sampler(1)]], sampler tex3dSmplr [[sampler(2)]], sampler texCubeSmplr [[sampler(3)]], sampler tex2dArraySmplr [[sampler(4)]], sampler texCubeArraySmplr [[sampler(5)]]) +{ + float4 c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0)), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[3])); + c = float4(spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z))), spvAuxBuffer.swizzleConst[4])); + c = float4(spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w))), spvAuxBuffer.swizzleConst[5])); + c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[1])); + c = 
float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0), level(0.0)), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[3])); + c = float4(spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z)), level(0.0)), spvAuxBuffer.swizzleConst[4])); + c = float4(spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w)), level(0.0)), spvAuxBuffer.swizzleConst[5])); + c = float4(spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w, level(0.0)), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(tex1d.read(uint(0)), spvAuxBuffer.swizzleConst[0])); + c = float4(spvTextureSwizzle(tex2d.read(uint2(int2(0)), 0), spvAuxBuffer.swizzleConst[1])); + c = float4(spvTextureSwizzle(tex3d.read(uint3(int3(0)), 0), spvAuxBuffer.swizzleConst[2])); + c = float4(spvTextureSwizzle(tex2dArray.read(uint2(int3(0).xy), uint(int3(0).z), 0), spvAuxBuffer.swizzleConst[4])); + c = float4(texBuffer.read(spvTexelBufferCoord(0))); + c = float4(spvGatherSwizzle, float2, int2>(tex2dSmplr, tex2d, float2(0.0), int2(0), component::x, spvAuxBuffer.swizzleConst[1])); + c = float4(spvGatherSwizzle, float3>(texCubeSmplr, texCube, float3(0.0), component::y, spvAuxBuffer.swizzleConst[3])); + c = float4(spvGatherSwizzle, float2, uint, int2>(tex2dArraySmplr, tex2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), int2(0), component::z, spvAuxBuffer.swizzleConst[4])); + c = float4(spvGatherSwizzle, float3, uint>(texCubeArraySmplr, texCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), component::w, spvAuxBuffer.swizzleConst[5])); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access.swizzle.frag b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access.swizzle.frag new file mode 100644 index 000000000..7997d894e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/frag/texture-access.swizzle.frag @@ -0,0 +1,184 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct spvAux +{ + uint swizzleConst[1]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +enum class spvSwizzle : uint +{ + none = 0, + zero, + one, + red, + green, + blue, + alpha +}; + +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template struct spvRemoveReference { typedef T type; }; +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type& x) +{ + return static_cast(x); +} +template inline constexpr thread T&& spvForward(thread typename spvRemoveReference::type&& x) +{ + 
return static_cast(x); +} + +template +inline T spvGetSwizzle(vec x, T c, spvSwizzle s) +{ + switch (s) + { + case spvSwizzle::none: + return c; + case spvSwizzle::zero: + return 0; + case spvSwizzle::one: + return 1; + case spvSwizzle::red: + return x.r; + case spvSwizzle::green: + return x.g; + case spvSwizzle::blue: + return x.b; + case spvSwizzle::alpha: + return x.a; + } +} + +// Wrapper function that swizzles texture samples and fetches. +template +inline vec spvTextureSwizzle(vec x, uint s) +{ + if (!s) + return x; + return vec(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) & 0xFF)), spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF))); +} + +template +inline T spvTextureSwizzle(T x, uint s) +{ + return spvTextureSwizzle(vec(x, 0, 0, 1), s).x; +} + +// Wrapper function that swizzles texture gathers. +template +inline vec spvGatherSwizzle(sampler s, thread Tex& t, Ts... params, component c, uint sw) METAL_CONST_ARG(c) +{ + if (sw) + { + switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF)) + { + case spvSwizzle::none: + break; + case spvSwizzle::zero: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + case spvSwizzle::red: + return t.gather(s, spvForward(params)..., component::x); + case spvSwizzle::green: + return t.gather(s, spvForward(params)..., component::y); + case spvSwizzle::blue: + return t.gather(s, spvForward(params)..., component::z); + case spvSwizzle::alpha: + return t.gather(s, spvForward(params)..., component::w); + } + } + switch (c) + { + case component::x: + return t.gather(s, spvForward(params)..., component::x); + case component::y: + return t.gather(s, spvForward(params)..., component::y); + case component::z: + return t.gather(s, spvForward(params)..., component::z); + case component::w: + return t.gather(s, spvForward(params)..., component::w); + } +} + +// Wrapper function that swizzles depth texture gathers. +template +inline vec spvGatherCompareSwizzle(sampler s, thread Tex& t, Ts... 
params, uint sw) +{ + if (sw) + { + switch (spvSwizzle(sw & 0xFF)) + { + case spvSwizzle::none: + case spvSwizzle::red: + break; + case spvSwizzle::zero: + case spvSwizzle::green: + case spvSwizzle::blue: + case spvSwizzle::alpha: + return vec(0, 0, 0, 0); + case spvSwizzle::one: + return vec(1, 1, 1, 1); + } + } + return t.gather_compare(s, spvForward(params)...); +} + +fragment void main0(constant spvAux& spvAuxBuffer [[buffer(0)]], texture1d tex1d [[texture(0)]], texture2d tex2d [[texture(1)]], texture3d tex3d [[texture(2)]], texturecube texCube [[texture(3)]], texture2d_array tex2dArray [[texture(4)]], texturecube_array texCubeArray [[texture(5)]], texture2d texBuffer [[texture(6)]], depth2d depth2d [[texture(7)]], depthcube depthCube [[texture(8)]], depth2d_array depth2dArray [[texture(9)]], depthcube_array depthCubeArray [[texture(10)]], sampler tex1dSmplr [[sampler(0)]], sampler tex2dSmplr [[sampler(1)]], sampler tex3dSmplr [[sampler(2)]], sampler texCubeSmplr [[sampler(3)]], sampler tex2dArraySmplr [[sampler(4)]], sampler texCubeArraySmplr [[sampler(5)]], sampler depth2dSmplr [[sampler(7)]], sampler depthCubeSmplr [[sampler(8)]], sampler depth2dArraySmplr [[sampler(9)]], sampler depthCubeArraySmplr [[sampler(10)]]) +{ + float4 c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(texCube.sample(texCubeSmplr, float3(0.0)), spvAuxBuffer.swizzleConst[3]); + c = spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z))), spvAuxBuffer.swizzleConst[4]); + c = spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w))), spvAuxBuffer.swizzleConst[5]); + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, float3(0.0, 0.0, 1.0).xy, float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[7]); + c.x = spvTextureSwizzle(depthCube.sample_compare(depthCubeSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz, float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[8]); + c.x = spvTextureSwizzle(depth2dArray.sample_compare(depth2dArraySmplr, float4(0.0, 0.0, 0.0, 1.0).xy, uint(round(float4(0.0, 0.0, 0.0, 1.0).z)), float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[9]); + c.x = spvTextureSwizzle(depthCubeArray.sample_compare(depthCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w)), 1.0), spvAuxBuffer.swizzleConst[10]); + c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w), spvAuxBuffer.swizzleConst[2]); + float4 _100 = float4(0.0, 0.0, 1.0, 1.0); + _100.z = float4(0.0, 0.0, 1.0, 1.0).w; + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, _100.xy / _100.z, float4(0.0, 0.0, 1.0, 1.0).z), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, 0.0), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float2(0.0), level(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(texCube.sample(texCubeSmplr, 
float3(0.0), level(0.0)), spvAuxBuffer.swizzleConst[3]); + c = spvTextureSwizzle(tex2dArray.sample(tex2dArraySmplr, float3(0.0).xy, uint(round(float3(0.0).z)), level(0.0)), spvAuxBuffer.swizzleConst[4]); + c = spvTextureSwizzle(texCubeArray.sample(texCubeArraySmplr, float4(0.0).xyz, uint(round(float4(0.0).w)), level(0.0)), spvAuxBuffer.swizzleConst[5]); + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, float3(0.0, 0.0, 1.0).xy, float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.sample(tex1dSmplr, float2(0.0, 1.0).x / float2(0.0, 1.0).y), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.sample(tex2dSmplr, float3(0.0, 0.0, 1.0).xy / float3(0.0, 0.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.sample(tex3dSmplr, float4(0.0, 0.0, 0.0, 1.0).xyz / float4(0.0, 0.0, 0.0, 1.0).w, level(0.0)), spvAuxBuffer.swizzleConst[2]); + float4 _128 = float4(0.0, 0.0, 1.0, 1.0); + _128.z = float4(0.0, 0.0, 1.0, 1.0).w; + c.x = spvTextureSwizzle(depth2d.sample_compare(depth2dSmplr, _128.xy / _128.z, float4(0.0, 0.0, 1.0, 1.0).z, level(0.0)), spvAuxBuffer.swizzleConst[7]); + c = spvTextureSwizzle(tex1d.read(uint(0)), spvAuxBuffer.swizzleConst[0]); + c = spvTextureSwizzle(tex2d.read(uint2(int2(0)), 0), spvAuxBuffer.swizzleConst[1]); + c = spvTextureSwizzle(tex3d.read(uint3(int3(0)), 0), spvAuxBuffer.swizzleConst[2]); + c = spvTextureSwizzle(tex2dArray.read(uint2(int3(0).xy), uint(int3(0).z), 0), spvAuxBuffer.swizzleConst[4]); + c = texBuffer.read(spvTexelBufferCoord(0)); + c = spvGatherSwizzle, float2, int2>(tex2dSmplr, tex2d, float2(0.0), int2(0), component::x, spvAuxBuffer.swizzleConst[1]); + c = spvGatherSwizzle, float3>(texCubeSmplr, texCube, float3(0.0), component::y, spvAuxBuffer.swizzleConst[3]); + c = spvGatherSwizzle, float2, uint, int2>(tex2dArraySmplr, tex2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), int2(0), component::z, spvAuxBuffer.swizzleConst[4]); + c = spvGatherSwizzle, float3, uint>(texCubeArraySmplr, texCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), component::w, spvAuxBuffer.swizzleConst[5]); + c = spvGatherCompareSwizzle, float2, float>(depth2dSmplr, depth2d, float2(0.0), 1.0, spvAuxBuffer.swizzleConst[7]); + c = spvGatherCompareSwizzle, float3, float>(depthCubeSmplr, depthCube, float3(0.0), 1.0, spvAuxBuffer.swizzleConst[8]); + c = spvGatherCompareSwizzle, float2, uint, float>(depth2dArraySmplr, depth2dArray, float3(0.0).xy, uint(round(float3(0.0).z)), 1.0, spvAuxBuffer.swizzleConst[9]); + c = spvGatherCompareSwizzle, float3, uint, float>(depthCubeArraySmplr, depthCubeArray, float4(0.0).xyz, uint(round(float4(0.0).w)), 1.0, spvAuxBuffer.swizzleConst[10]); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl-no-opt/vert/functions_nested.vert b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/vert/functions_nested.vert new file mode 100644 index 000000000..037ead392 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl-no-opt/vert/functions_nested.vert @@ -0,0 +1,195 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct attr_desc +{ + int type; + int attribute_size; + int starting_offset; + int stride; + int swap_bytes; + int is_volatile; +}; + +struct VertexBuffer +{ + float4x4 scale_offset_mat; + uint vertex_base_index; + int4 input_attributes[16]; +}; + +struct VertexConstantsBuffer +{ + float4 vc[16]; +}; + +constant float4 _295 = {}; + +struct main0_out +{ + float4 tc0 
[[user(locn0)]]; + float4 back_color [[user(locn10)]]; + float4 gl_Position [[position]]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +attr_desc fetch_desc(thread const int& location, constant VertexBuffer& v_227) +{ + int attribute_flags = v_227.input_attributes[location].w; + attr_desc result; + result.type = v_227.input_attributes[location].x; + result.attribute_size = v_227.input_attributes[location].y; + result.starting_offset = v_227.input_attributes[location].z; + result.stride = attribute_flags & 255; + result.swap_bytes = (attribute_flags >> 8) & 1; + result.is_volatile = (attribute_flags >> 9) & 1; + return result; +} + +uint get_bits(thread const uint4& v, thread const int& swap) +{ + if (swap != 0) + { + return ((v.w | (v.z << uint(8))) | (v.y << uint(16))) | (v.x << uint(24)); + } + return ((v.x | (v.y << uint(8))) | (v.z << uint(16))) | (v.w << uint(24)); +} + +float4 fetch_attr(thread const attr_desc& desc, thread const int& vertex_id, thread const texture2d input_stream) +{ + float4 result = float4(0.0, 0.0, 0.0, 1.0); + bool reverse_order = false; + int first_byte = (vertex_id * desc.stride) + desc.starting_offset; + for (int n = 0; n < 4; n++) + { + if (n == desc.attribute_size) + { + break; + } + uint4 tmp; + switch (desc.type) + { + case 0: + { + int _131 = first_byte; + first_byte = _131 + 1; + tmp.x = input_stream.read(spvTexelBufferCoord(_131)).x; + int _138 = first_byte; + first_byte = _138 + 1; + tmp.y = input_stream.read(spvTexelBufferCoord(_138)).x; + uint4 param = tmp; + int param_1 = desc.swap_bytes; + result[n] = float(get_bits(param, param_1)); + break; + } + case 1: + { + int _156 = first_byte; + first_byte = _156 + 1; + tmp.x = input_stream.read(spvTexelBufferCoord(_156)).x; + int _163 = first_byte; + first_byte = _163 + 1; + tmp.y = input_stream.read(spvTexelBufferCoord(_163)).x; + int _170 = first_byte; + first_byte = _170 + 1; + tmp.z = input_stream.read(spvTexelBufferCoord(_170)).x; + int _177 = first_byte; + first_byte = _177 + 1; + tmp.w = input_stream.read(spvTexelBufferCoord(_177)).x; + uint4 param_2 = tmp; + int param_3 = desc.swap_bytes; + result[n] = as_type(get_bits(param_2, param_3)); + break; + } + case 2: + { + int _195 = first_byte; + first_byte = _195 + 1; + result[n] = float(input_stream.read(spvTexelBufferCoord(_195)).x); + reverse_order = desc.swap_bytes != 0; + break; + } + } + } + float4 _210; + if (reverse_order) + { + _210 = result.wzyx; + } + else + { + _210 = result; + } + return _210; +} + +float4 read_location(thread const int& location, constant VertexBuffer& v_227, thread uint& gl_VertexIndex, thread texture2d buff_in_2, thread texture2d buff_in_1) +{ + int param = location; + attr_desc desc = fetch_desc(param, v_227); + int vertex_id = gl_VertexIndex - int(v_227.vertex_base_index); + if (desc.is_volatile != 0) + { + attr_desc param_1 = desc; + int param_2 = vertex_id; + return fetch_attr(param_1, param_2, buff_in_2); + } + else + { + attr_desc param_3 = desc; + int param_4 = vertex_id; + return fetch_attr(param_3, param_4, buff_in_1); + } +} + +void vs_adjust(thread float4& dst_reg0, thread float4& dst_reg1, thread float4& dst_reg7, constant VertexBuffer& v_227, thread uint& gl_VertexIndex, thread texture2d buff_in_2, thread texture2d buff_in_1, constant VertexConstantsBuffer& v_309) +{ + int param = 3; + float4 in_diff_color = read_location(param, v_227, gl_VertexIndex, buff_in_2, buff_in_1); + int param_1 = 0; + 
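+    // Judging by the variable names below, location 0 holds the vertex position
+    // (in_pos, transformed through vc[4..6] and then vc[0..1]) and location 8
+    // supplies texcoord 0 (in_tc0).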
float4 in_pos = read_location(param_1, v_227, gl_VertexIndex, buff_in_2, buff_in_1); + int param_2 = 8; + float4 in_tc0 = read_location(param_2, v_227, gl_VertexIndex, buff_in_2, buff_in_1); + dst_reg1 = in_diff_color * v_309.vc[13]; + float4 tmp0; + tmp0.x = float4(dot(float4(in_pos.xyz, 1.0), v_309.vc[4])).x; + tmp0.y = float4(dot(float4(in_pos.xyz, 1.0), v_309.vc[5])).y; + tmp0.z = float4(dot(float4(in_pos.xyz, 1.0), v_309.vc[6])).z; + float4 tmp1; + tmp1 = float4(in_tc0.xy.x, in_tc0.xy.y, tmp1.z, tmp1.w); + tmp1.z = v_309.vc[15].x; + dst_reg7.y = float4(dot(float4(tmp1.xyz, 1.0), v_309.vc[8])).y; + dst_reg7.x = float4(dot(float4(tmp1.xyz, 1.0), v_309.vc[7])).x; + dst_reg0.y = float4(dot(float4(tmp0.xyz, 1.0), v_309.vc[1])).y; + dst_reg0.x = float4(dot(float4(tmp0.xyz, 1.0), v_309.vc[0])).x; +} + +vertex main0_out main0(constant VertexBuffer& v_227 [[buffer(0)]], constant VertexConstantsBuffer& v_309 [[buffer(1)]], texture2d buff_in_1 [[texture(3)]], texture2d buff_in_2 [[texture(4)]], uint gl_VertexIndex [[vertex_id]]) +{ + main0_out out = {}; + float4 dst_reg0 = float4(0.0, 0.0, 0.0, 1.0); + float4 dst_reg1 = float4(0.0); + float4 dst_reg7 = float4(0.0); + float4 param = dst_reg0; + float4 param_1 = dst_reg1; + float4 param_2 = dst_reg7; + vs_adjust(param, param_1, param_2, v_227, gl_VertexIndex, buff_in_2, buff_in_1, v_309); + dst_reg0 = param; + dst_reg1 = param_1; + dst_reg7 = param_2; + out.gl_Position = dst_reg0; + out.back_color = dst_reg1; + out.tc0 = dst_reg7; + out.gl_Position *= v_227.scale_offset_mat; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..95841a78a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,28 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct u0_counters +{ + uint c; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(device u0_counters& u0_counter [[buffer(0)]], texture2d u0 [[texture(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint _29 = atomic_fetch_sub_explicit((volatile device atomic_uint*)&u0_counter.c, 1, memory_order_relaxed); + float4 r0; + r0.x = as_type(_29); + u0.write(uint4(uint(int(gl_GlobalInvocationID.x))), spvTexelBufferCoord(((uint(as_type(r0.x)) * 1u) + (uint(0) >> 2u)))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..cd78fa2c6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,28 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct u0_counters +{ + uint c; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(device u0_counters& u0_counter [[buffer(0)]], texture2d u0 [[texture(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint _29 = 
atomic_fetch_add_explicit((volatile device atomic_uint*)&u0_counter.c, 1, memory_order_relaxed); + float4 r0; + r0.x = as_type<float>(_29); + u0.write(uint4(uint(int(gl_GlobalInvocationID.x))), spvTexelBufferCoord(((uint(as_type<uint>(r0.x)) * 1u) + (uint(0) >> 2u)))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_iadd.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_iadd.asm.comp new file mode 100644 index 000000000..47ce85f8f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_iadd.asm.comp @@ -0,0 +1,29 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + _6._m0 = _5._m1 + uint4(_5._m0); + _6._m0 = uint4(_5._m0) + _5._m1; + _6._m0 = _5._m1 + _5._m1; + _6._m0 = uint4(_5._m0 + _5._m0); + _6._m1 = int4(_5._m1 + _5._m1); + _6._m1 = _5._m0 + _5._m0; + _6._m1 = int4(_5._m1) + _5._m0; + _6._m1 = _5._m0 + int4(_5._m1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_sar.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_sar.asm.comp new file mode 100644 index 000000000..417683058 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_sar.asm.comp @@ -0,0 +1,31 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + int4 _22 = _5._m0; + uint4 _23 = _5._m1; + _6._m0 = uint4(int4(_23) >> _22); + _6._m0 = uint4(_22 >> int4(_23)); + _6._m0 = uint4(int4(_23) >> int4(_23)); + _6._m0 = uint4(_22 >> _22); + _6._m1 = int4(_23) >> int4(_23); + _6._m1 = _22 >> _22; + _6._m1 = int4(_23) >> _22; + _6._m1 = _22 >> int4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_sdiv.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_sdiv.asm.comp new file mode 100644 index 000000000..6b80dff31 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_sdiv.asm.comp @@ -0,0 +1,31 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + int4 _22 = _5._m0; + uint4 _23 = _5._m1; + _6._m0 = uint4(int4(_23) / _22); + _6._m0 = uint4(_22 / int4(_23)); + _6._m0 = uint4(int4(_23) / int4(_23)); + _6._m0 = uint4(_22 / _22); + _6._m1 = int4(_23) / int4(_23); + _6._m1 = _22 / _22; + _6._m1 = int4(_23) / _22; + _6._m1 = _22 / int4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_slr.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_slr.asm.comp new file mode 100644 index 000000000..1dfca3918 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/bitcast_slr.asm.comp @@ -0,0 +1,31 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct _3 +{ + int4 _m0; + uint4 _m1; +}; + +struct _4 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _3& _5 [[buffer(0)]], device _4& _6 [[buffer(1)]]) +{ + int4 _22 = _5._m0; + uint4 _23 = _5._m1; + _6._m0 = _23 >> uint4(_22); + _6._m0 = uint4(_22) >> _23; + _6._m0 = _23 >> _23; + _6._m0 = uint4(_22) >> uint4(_22); + _6._m1 = int4(_23 >> _23); + _6._m1 = int4(uint4(_22) >> uint4(_22)); + _6._m1 = int4(_23 >> 
uint4(_22)); + _6._m1 = int4(uint4(_22) >> _23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..95f2717b4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,45 @@ +#include +#include + +using namespace metal; + +struct A +{ + int a; + int b; +}; + +struct A_1 +{ + A Data[1]; +}; + +struct A_2 +{ + int a; + int b; +}; + +struct A_3 +{ + A_2 Data[1024]; +}; + +struct B +{ + A Data[1]; +}; + +struct B_1 +{ + A_2 Data[1024]; +}; + +kernel void main0(device B& C3 [[buffer(0)]], device A_1& C1 [[buffer(1)]], constant A_3& C2 [[buffer(2)]], constant B_1& C4 [[buffer(3)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + C1.Data[gl_GlobalInvocationID.x].a = C2.Data[gl_GlobalInvocationID.x].a; + C1.Data[gl_GlobalInvocationID.x].b = C2.Data[gl_GlobalInvocationID.x].b; + C3.Data[gl_GlobalInvocationID.x].a = C4.Data[gl_GlobalInvocationID.x].a; + C3.Data[gl_GlobalInvocationID.x].b = C4.Data[gl_GlobalInvocationID.x].b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp new file mode 100644 index 000000000..4cb90e202 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct cb5_struct +{ + float4 _m0[5]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(constant cb5_struct& cb0_5 [[buffer(1)]], texture2d u0 [[texture(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + float4 r0; + r0.x = as_type(int(gl_LocalInvocationID.x) << 4); + r0.y = as_type(int(gl_LocalInvocationID.x)); + uint _44 = as_type(r0.x) >> 2u; + uint4 _51 = as_type(cb0_5._m0[uint(as_type(r0.y)) + 1u]); + u0.write(_51.xxxx, spvTexelBufferCoord(_44)); + u0.write(_51.yyyy, spvTexelBufferCoord((_44 + 1u))); + u0.write(_51.zzzz, spvTexelBufferCoord((_44 + 2u))); + u0.write(_51.wwww, spvTexelBufferCoord((_44 + 3u))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/buffer-write.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/buffer-write.asm.comp new file mode 100644 index 000000000..ddf958289 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/buffer-write.asm.comp @@ -0,0 +1,23 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct cb +{ + float value; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +kernel void main0(constant cb& _6 [[buffer(7)]], texture2d _buffer [[texture(0)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + _buffer.write(_6.value, spvTexelBufferCoord(((32u * gl_WorkGroupID.x) + gl_LocalInvocationIndex))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp new 
file mode 100644 index 000000000..23b88f7a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct ssbo +{ + uint _data[1]; +}; + +void Load(thread const uint& size, const device ssbo& ssbo_1) +{ + int byteAddrTemp = int(size >> uint(2)); + uint4 data = uint4(ssbo_1._data[byteAddrTemp], ssbo_1._data[byteAddrTemp + 1], ssbo_1._data[byteAddrTemp + 2], ssbo_1._data[byteAddrTemp + 3]); +} + +void _main(thread const uint3& id, const device ssbo& ssbo_1) +{ + uint param = 4u; + Load(param, ssbo_1); +} + +kernel void main0(const device ssbo& ssbo_1 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint3 id = gl_GlobalInvocationID; + uint3 param = id; + _main(param, ssbo_1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/multiple-entry.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/multiple-entry.asm.comp new file mode 100644 index 000000000..765273326 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/multiple-entry.asm.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct _6 +{ + int4 _m0; + uint4 _m1; +}; + +struct _7 +{ + uint4 _m0; + int4 _m1; +}; + +kernel void main0(device _6& _8 [[buffer(0)]], device _7& _9 [[buffer(1)]]) +{ + _9._m0 = _8._m1 + uint4(_8._m0); + _9._m0 = uint4(_8._m0) + _8._m1; + _9._m0 = _8._m1 + _8._m1; + _9._m0 = uint4(_8._m0 + _8._m0); + _9._m1 = int4(_8._m1 + _8._m1); + _9._m1 = _8._m0 + _8._m0; + _9._m1 = int4(_8._m1) + _8._m0; + _9._m1 = _8._m0 + int4(_8._m1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/quantize.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/quantize.asm.comp new file mode 100644 index 000000000..1839ec7a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/quantize.asm.comp @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +struct SSBO0 +{ + float scalar; + float2 vec2_val; + float3 vec3_val; + float4 vec4_val; +}; + +kernel void main0(device SSBO0& _4 [[buffer(0)]]) +{ + _4.scalar = float(half(_4.scalar)); + _4.vec2_val = float2(half2(_4.vec2_val)); + _4.vec3_val = float3(half3(_4.vec3_val)); + _4.vec4_val = float4(half4(_4.vec4_val)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/relaxed-block-layout.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/relaxed-block-layout.asm.comp new file mode 100644 index 000000000..6728a4e2d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/relaxed-block-layout.asm.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct foo +{ + uint bar; + packed_float3 baz; + uchar quux; + packed_uchar4 blah; + packed_half2 wibble; +}; + +kernel void main0(device foo& _8 [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint3 gl_NumWorkGroups [[threadgroups_per_grid]]) +{ + _8.bar = gl_LocalInvocationID.x; + _8.baz = float3(gl_GlobalInvocationID); + _8.blah = uchar4(uint4(uint4(uchar4(_8.blah)).xyz + gl_WorkGroupID, 0u)); + _8.wibble = half2(float2(half2(_8.wibble)) * float2(gl_NumWorkGroups.xy)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp 
b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp new file mode 100644 index 000000000..5802ddac9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp @@ -0,0 +1,21 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant uint _5_tmp [[function_constant(10)]]; +constant uint _5 = is_function_constant_defined(_5_tmp) ? _5_tmp : 9u; +constant uint _6_tmp [[function_constant(12)]]; +constant uint _6 = is_function_constant_defined(_6_tmp) ? _6_tmp : 4u; +constant uint3 gl_WorkGroupSize = uint3(_5, 20u, _6); + +struct SSBO +{ + float a; +}; + +kernel void main0(device SSBO& _4 [[buffer(0)]]) +{ + _4.a += 1.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp new file mode 100644 index 000000000..2c9b038b2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant uint _3_tmp [[function_constant(0)]]; +constant uint _3 = is_function_constant_defined(_3_tmp) ? _3_tmp : 1u; +constant uint _4_tmp [[function_constant(2)]]; +constant uint _4 = is_function_constant_defined(_4_tmp) ? _4_tmp : 3u; +constant uint3 gl_WorkGroupSize = uint3(_3, 2u, _4); + +struct _6 +{ + float _m0[1]; +}; + +kernel void main0(device _6& _8 [[buffer(0)]], device _6& _9 [[buffer(1)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]]) +{ + _8._m0[gl_WorkGroupID.x] = _9._m0[gl_WorkGroupID.x] + _8._m0[gl_WorkGroupID.x]; + uint3 _23 = gl_WorkGroupSize; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/variable-pointers-2.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/variable-pointers-2.asm.comp new file mode 100644 index 000000000..b4e229595 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/variable-pointers-2.asm.comp @@ -0,0 +1,42 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct foo +{ + int a[128]; + uint b; + float2 c; +}; + +struct bar +{ + int d; +}; + +device foo* select_buffer(device foo& a, constant bar& cb) +{ + return (cb.d != 0) ? &a : nullptr; +} + +thread uint3* select_input(thread uint3& gl_GlobalInvocationID, thread uint3& gl_LocalInvocationID, constant bar& cb) +{ + return (cb.d != 0) ? 
&gl_GlobalInvocationID : &gl_LocalInvocationID; +} + +kernel void main0(device foo& buf [[buffer(0)]], constant bar& cb [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + device foo* _46 = select_buffer(buf, cb); + device foo* _45 = _46; + for (device int* _52 = &_45->a[0u], * _55 = &buf.a[0u]; (*_52) != (*_55); _52 = &_52[1u], _55 = &_55[1u]) + { + int _66 = ((*_52) + (*_55)) + int((*select_input(gl_GlobalInvocationID, gl_LocalInvocationID, cb)).x); + *_52 = _66; + *_55 = _66; + continue; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp new file mode 100644 index 000000000..b2f8fc424 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct foo +{ + int a; +}; + +struct bar +{ + int b; +}; + +device int* _24(device foo& a, device bar& b, thread uint3& gl_GlobalInvocationID) +{ + return (gl_GlobalInvocationID.x != 0u) ? &a.a : &b.b; +} + +kernel void main0(device foo& x [[buffer(0)]], device bar& y [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + device int* _34 = _24(x, y, gl_GlobalInvocationID); + device int* _33 = _34; + int _37 = x.a; + *_33 = 0; + y.b = _37 + _37; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp new file mode 100644 index 000000000..55afb839b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp @@ -0,0 +1,33 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u); + +struct cb1_struct +{ + float4 _m0[1]; +}; + +int2 get_texcoord(thread const int2& base, thread const int2& index, thread uint3& gl_LocalInvocationID) +{ + return (base * int3(gl_LocalInvocationID).xy) + index; +} + +kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + int2 r0 = int2(int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u))); + for (int i = 0; i < r0.y; i++) + { + for (int j = 0; j < r0.x; j++) + { + int2 param = r0; + int2 param_1 = int2(i, j); + u0.write(cb0_1._m0[0].xxxx, uint2(get_texcoord(param, param_1, gl_LocalInvocationID))); + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp new file mode 100644 index 000000000..0e3ebd72a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(16u, 16u, 1u); + +struct cb1_struct +{ + float4 _m0[1]; +}; + +kernel void main0(constant cb1_struct& cb0_1 [[buffer(0)]], texture2d u0 [[texture(1)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + int2 r0 = int2(int2(u0.get_width(), u0.get_height()) >> int2(uint2(4u))); + for (int i = 0; i < r0.y; i++) + { 
+ for (int j = 0; j < r0.x; j++) + { + u0.write(cb0_1._m0[0].xxxx, uint2(((r0 * int3(gl_LocalInvocationID).xy) + int2(i, j)))); + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag new file mode 100644 index 000000000..e420153bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag @@ -0,0 +1,23 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> uTex [[texture(1)]], sampler uSampler [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uTex.sample(uSampler, in.vUV); + out.FragColor += uTex.sample(uSampler, in.vUV, int2(1)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/default-member-names.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/default-member-names.asm.frag new file mode 100644 index 000000000..3628c4eaa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/default-member-names.asm.frag @@ -0,0 +1,40 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct _9 +{ + float _m0; +}; + +struct _10 +{ + float _m0; + float _m1; + float _m2; + float _m3; + float _m4; + float _m5; + float _m6; + float _m7; + float _m8; + float _m9; + float _m10; + float _m11; + _9 _m12; +}; + +struct main0_out +{ + float4 m_3 [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + _10 _21; + out.m_3 = float4(_21._m0, _21._m1, _21._m2, _21._m3); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..0a56f1f15 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/empty-struct.asm.frag @@ -0,0 +1,30 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct EmptyStructTest +{ + int empty_struct_member; +}; + +float GetValue(thread const EmptyStructTest& self) +{ + return 0.0; +} + +float GetValue_1(EmptyStructTest self) +{ + return 0.0; +} + +fragment void main0() +{ + EmptyStructTest _23 = EmptyStructTest{ 0 }; + EmptyStructTest emptyStruct; + float value = GetValue(emptyStruct); + value = GetValue_1(_23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag new file mode 100644 index 000000000..1eabc93a2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag @@ -0,0 +1,48 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct Foo +{ + float3 a; + float b; +}; + +struct Foo_1 +{ + packed_float3 a; + float b; +}; + +struct buf +{ + Foo_1 results[16]; + float4 bar; +}; + +struct main0_out +{ + float4 _entryPointOutput [[color(0)]]; +}; + +float4 _main(thread const float4& pos, constant buf& v_11) +{ + int _46 = int(pos.x) % 16; + Foo foo; + foo.a = v_11.results[_46].a; + foo.b = v_11.results[_46].b; + return float4(dot(foo.a, v_11.bar.xyz), foo.b, 0.0, 0.0); +} + +fragment main0_out main0(constant buf& v_11 [[buffer(0)]], float4 gl_FragCoord 
[[position]]) +{ + main0_out out = {}; + float4 pos = gl_FragCoord; + float4 param = pos; + out._entryPointOutput = _main(param, v_11); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/frem.asm.frag new file mode 100644 index 000000000..ebc73d52d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/frem.asm.frag @@ -0,0 +1,23 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vA [[user(locn0)]]; + float4 vB [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = fmod(in.vA, in.vB); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..1a6314c81 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,47 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +float4 foo(thread const float4& foo_1) +{ + return foo_1 + float4(1.0); +} + +float4 foo(thread const float3& foo_1) +{ + return foo_1.xyzz + float4(1.0); +} + +float4 foo_1(thread const float4& foo_2) +{ + return foo_2 + float4(2.0); +} + +float4 foo(thread const float2& foo_2) +{ + return foo_2.xyxy + float4(2.0); +} + +fragment main0_out main0() +{ + main0_out out = {}; + float4 foo_3 = float4(1.0); + float4 foo_2 = foo(foo_3); + float3 foo_5 = float3(1.0); + float4 foo_4 = foo(foo_5); + float4 foo_7 = float4(1.0); + float4 foo_6 = foo_1(foo_7); + float2 foo_9 = float2(1.0); + float4 foo_8 = foo(foo_9); + out.FragColor = ((foo_2 + foo_4) + foo_6) + foo_8; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..0d691b306 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,17 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + int2 Size [[color(0)]]; +}; + +fragment main0_out main0(texture2d<float> uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.Size = int2(uTexture.get_width(), uTexture.get_height()) + int2(uTexture.get_width(1), uTexture.get_height(1)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..830df0c7e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,49 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 v0 [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> uImage [[texture(0)]], sampler uImageSmplr [[sampler(0)]]) +{ + main0_out out = {}; + int i = 0; + float phi; + float4 _36; + phi = 1.0; + _36 = float4(1.0, 2.0, 1.0, 2.0); + for (;;) + { + out.FragColor = _36; + if (i < 4) + { + if (in.v0[i] > 0.0) + { + float2 _48 = float2(phi); + 
i++; + phi += 2.0; + _36 = uImage.sample(uImageSmplr, _48, level(0.0)); + continue; + } + else + { + break; + } + } + else + { + break; + } + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..8537dac19 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float3 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float3(as_type(0x7f800000u), as_type(0xff800000u), as_type(0x7fc00000u)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag new file mode 100644 index 000000000..41472adac --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag @@ -0,0 +1,47 @@ +#include +#include + +using namespace metal; + +struct Input +{ + float2 v0; + float2 v1; + float3 v2; + float4 v3; + float v4; + float v5; + float v6; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 Input_v0 [[user(locn0)]]; + float2 Input_v1 [[user(locn1), center_no_perspective]]; + float3 Input_v2 [[user(locn2), centroid_perspective]]; + float4 Input_v3 [[user(locn3), centroid_no_perspective]]; + float Input_v4 [[user(locn4), sample_perspective]]; + float Input_v5 [[user(locn5), sample_no_perspective]]; + float Input_v6 [[user(locn6), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Input inp = {}; + inp.v0 = in.Input_v0; + inp.v1 = in.Input_v1; + inp.v2 = in.Input_v2; + inp.v3 = in.Input_v3; + inp.v4 = in.Input_v4; + inp.v5 = in.Input_v5; + inp.v6 = in.Input_v6; + out.FragColor = float4(inp.v0.x + inp.v1.y, inp.v2.xy, ((inp.v3.w * inp.v4) + inp.v5) - inp.v6); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/locations-components.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/locations-components.asm.frag new file mode 100644 index 000000000..2cebdec49 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/locations-components.asm.frag @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 o0 [[color(0)]]; +}; + +struct main0_in +{ + float2 m_2 [[user(locn1)]]; + float m_3 [[user(locn1_2)]]; + float m_4 [[user(locn2), flat]]; + uint m_5 [[user(locn2_1)]]; + uint m_6 [[user(locn2_2)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float4 v1; + v1 = float4(in.m_2.x, in.m_2.y, v1.z, v1.w); + v1.z = in.m_3; + float4 v2; + v2.x = in.m_4; + v2.y = as_type(in.m_5); + v2.z = as_type(in.m_6); + float4 r0; + r0.x = as_type(as_type(v2.y) + as_type(v2.z)); + out.o0.y = float(as_type(r0.x)); + out.o0.x = v1.y + v2.x; + out.o0 = float4(out.o0.x, out.o0.y, v1.z, v1.x); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..48f3317d2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,66 @@ 
+#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float _46[16] = { 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0 }; +constant float4 _76[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; +constant float4 _90[4] = { float4(20.0), float4(30.0), float4(50.0), float4(60.0) }; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + float4 foobar[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + float4 baz[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + main0_out out = {}; + out.FragColor = _46[in.index]; + if (in.index < 10) + { + out.FragColor += _46[in.index ^ 1]; + } + else + { + out.FragColor += _46[in.index & 1]; + } + if (in.index > 30) + { + out.FragColor += _76[in.index & 3].y; + } + else + { + out.FragColor += _76[in.index & 1].x; + } + if (in.index > 30) + { + foobar[1].z = 20.0; + } + out.FragColor += foobar[in.index & 3].z; + spvArrayCopyFromConstant1(baz, _90); + out.FragColor += baz[in.index & 3].z; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag new file mode 100644 index 000000000..f597a6eb8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag @@ -0,0 +1,69 @@ +#include +#include + +using namespace metal; + +struct main0_in +{ + float v1 [[user(locn0)]]; + float2 v2 [[user(locn1)]]; + float3 v3 [[user(locn2)]]; + float4 v4 [[user(locn3)]]; + half h1 [[user(locn4)]]; + half2 h2 [[user(locn5)]]; + half3 h3 [[user(locn6)]]; + half4 h4 [[user(locn7)]]; +}; + +fragment void main0(main0_in in [[stage_in]]) +{ + float res = fast::min(in.v1, in.v1); + res = fast::max(in.v1, in.v1); + res = fast::clamp(in.v1, in.v1, in.v1); + res = precise::min(in.v1, in.v1); + res = precise::max(in.v1, in.v1); + res = precise::clamp(in.v1, in.v1, in.v1); + float2 res2 = fast::min(in.v2, in.v2); + res2 = fast::max(in.v2, in.v2); + res2 = fast::clamp(in.v2, in.v2, in.v2); + res2 = precise::min(in.v2, in.v2); + res2 = precise::max(in.v2, in.v2); + res2 = precise::clamp(in.v2, in.v2, in.v2); + float3 res3 = fast::min(in.v3, in.v3); + res3 = fast::max(in.v3, in.v3); + res3 = fast::clamp(in.v3, in.v3, in.v3); + res3 = precise::min(in.v3, in.v3); + res3 = precise::max(in.v3, in.v3); + res3 = precise::clamp(in.v3, in.v3, in.v3); + float4 res4 = fast::min(in.v4, in.v4); + res4 = fast::max(in.v4, in.v4); + res4 = fast::clamp(in.v4, in.v4, in.v4); + res4 = precise::min(in.v4, in.v4); + res4 = precise::max(in.v4, in.v4); + res4 = precise::clamp(in.v4, in.v4, in.v4); + half hres = min(in.h1, in.h1); + hres = max(in.h1, in.h1); + hres = clamp(in.h1, in.h1, in.h1); + hres = min(in.h1, in.h1); + hres = max(in.h1, in.h1); + hres = clamp(in.h1, in.h1, in.h1); + half2 hres2 = min(in.h2, in.h2); + hres2 = max(in.h2, in.h2); + hres2 = clamp(in.h2, in.h2, in.h2); + hres2 = min(in.h2, in.h2); + 
hres2 = max(in.h2, in.h2); + hres2 = clamp(in.h2, in.h2, in.h2); + half3 hres3 = min(in.h3, in.h3); + hres3 = max(in.h3, in.h3); + hres3 = clamp(in.h3, in.h3, in.h3); + hres3 = min(in.h3, in.h3); + hres3 = max(in.h3, in.h3); + hres3 = clamp(in.h3, in.h3, in.h3); + half4 hres4 = min(in.h4, in.h4); + hres4 = max(in.h4, in.h4); + hres4 = clamp(in.h4, in.h4, in.h4); + hres4 = min(in.h4, in.h4); + hres4 = max(in.h4, in.h4); + hres4 = clamp(in.h4, in.h4, in.h4); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/op-constant-null.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/op-constant-null.asm.frag new file mode 100644 index 000000000..9d5d7fb1d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/op-constant-null.asm.frag @@ -0,0 +1,29 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct D +{ + float4 a; + float b; +}; + +constant float4 _14[4] = { float4(0.0), float4(0.0), float4(0.0), float4(0.0) }; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + float a = 0.0; + float4 b = float4(0.0); + float2x3 c = float2x3(float3(0.0), float3(0.0)); + D d = D{ float4(0.0), 0.0 }; + out.FragColor = a; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/op-image-sampled-image.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/op-image-sampled-image.asm.frag new file mode 100644 index 000000000..1fd5d15e6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/op-image-sampled-image.asm.frag @@ -0,0 +1,25 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct push_cb +{ + float4 cb0[1]; +}; + +struct main0_out +{ + float4 o0 [[color(0)]]; +}; + +fragment main0_out main0(constant push_cb& _19 [[buffer(0)]], texture2d<float> t0 [[texture(2)]], sampler dummy_sampler [[sampler(4)]]) +{ + main0_out out = {}; + float4 r0; + r0 = float4(_19.cb0[0u].z, _19.cb0[0u].w, r0.z, r0.w); + r0 = float4(r0.x, r0.y, float2(0.0).x, float2(0.0).y); + out.o0 = t0.read(uint2(as_type<int2>(r0.xy)) + uint2(int2(-1, -2)), as_type<uint>(r0.w)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..6ed945ecf --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,29 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct Registers +{ + float foo; +}; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +float add_value(float v, float w) +{ + return v + w; +} + +fragment main0_out main0(constant Registers& registers [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = add_value(10.0, registers.foo); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/phi-loop-variable.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/phi-loop-variable.asm.frag new file mode 100644 index 000000000..036774d66 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/phi-loop-variable.asm.frag @@ -0,0 +1,12 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +fragment void main0() +{ + for (int _22 = 35; _22 >= 0; _22--) + { + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/srem.asm.frag new file mode 100644 index 000000000..f0cdd574d --- /dev/null 
+++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/srem.asm.frag @@ -0,0 +1,23 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int4 vA [[user(locn0)]]; + int4 vB [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = float4(in.vA - in.vB * (in.vA / in.vB)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..dd308c32a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,17 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d<float> uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = uTexture.read(uint2(int2(gl_FragCoord.xy)), 0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/undef-variable-store.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/undef-variable-store.asm.frag new file mode 100644 index 000000000..2cefeb669 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/undef-variable-store.asm.frag @@ -0,0 +1,37 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant float4 _38 = {}; +constant float4 _47 = {}; + +struct main0_out +{ + float4 _entryPointOutput [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + float4 _27; + do + { + float2 _26 = float2(0.0); + if (_26.x != 0.0) + { + _27 = float4(1.0, 0.0, 0.0, 1.0); + break; + } + else + { + _27 = float4(1.0, 1.0, 0.0, 1.0); + break; + } + _27 = _38; + break; + } while (false); + out._entryPointOutput = _27; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unknown-depth-state.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unknown-depth-state.asm.frag new file mode 100644 index 000000000..005af22e3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unknown-depth-state.asm.frag @@ -0,0 +1,34 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vUV [[user(locn0)]]; +}; + +float sample_combined(thread float3& vUV, thread depth2d<float> uShadow, thread const sampler uShadowSmplr) +{ + return uShadow.sample_compare(uShadowSmplr, vUV.xy, vUV.z); +} + +float sample_separate(thread float3& vUV, thread depth2d<float> uTexture, thread sampler uSampler) +{ + return uTexture.sample_compare(uSampler, vUV.xy, vUV.z); +} + +fragment main0_out main0(main0_in in [[stage_in]], depth2d<float> uShadow [[texture(0)]], depth2d<float> uTexture [[texture(1)]], sampler uShadowSmplr [[sampler(0)]], sampler uSampler [[sampler(2)]]) +{ + main0_out out = {}; + out.FragColor = sample_combined(in.vUV, uShadow, uShadowSmplr) + sample_separate(in.vUV, uTexture, uSampler); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unord-relational-op.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unord-relational-op.asm.frag new file mode 100644 index 000000000..8df57c55b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unord-relational-op.asm.frag @@ -0,0 +1,58 @@ 
+#include +#include + +using namespace metal; + +constant float a_tmp [[function_constant(1)]]; +constant float a = is_function_constant_defined(a_tmp) ? a_tmp : 1.0; +constant float b_tmp [[function_constant(2)]]; +constant float b = is_function_constant_defined(b_tmp) ? b_tmp : 2.0; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 c [[user(locn2)]]; + float2 d [[user(locn3)]]; + float3 e [[user(locn4)]]; + float3 f [[user(locn5)]]; + float4 g [[user(locn6)]]; + float4 h [[user(locn7)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float t0 = a; + float t1 = b; + bool c1 = (isunordered(a, b) || a == b); + bool c2 = (isunordered(a, b) || a != b); + bool c3 = (isunordered(a, b) || a < b); + bool c4 = (isunordered(a, b) || a > b); + bool c5 = (isunordered(a, b) || a <= b); + bool c6 = (isunordered(a, b) || a >= b); + bool2 c7 = (isunordered(in.c, in.d) || in.c == in.d); + bool2 c8 = (isunordered(in.c, in.d) || in.c != in.d); + bool2 c9 = (isunordered(in.c, in.d) || in.c < in.d); + bool2 c10 = (isunordered(in.c, in.d) || in.c > in.d); + bool2 c11 = (isunordered(in.c, in.d) || in.c <= in.d); + bool2 c12 = (isunordered(in.c, in.d) || in.c >= in.d); + bool3 c13 = (isunordered(in.e, in.f) || in.e == in.f); + bool3 c14 = (isunordered(in.e, in.f) || in.e != in.f); + bool3 c15 = (isunordered(in.e, in.f) || in.e < in.f); + bool3 c16 = (isunordered(in.e, in.f) || in.e > in.f); + bool3 c17 = (isunordered(in.e, in.f) || in.e <= in.f); + bool3 c18 = (isunordered(in.e, in.f) || in.e >= in.f); + bool4 c19 = (isunordered(in.g, in.h) || in.g == in.h); + bool4 c20 = (isunordered(in.g, in.h) || in.g != in.h); + bool4 c21 = (isunordered(in.g, in.h) || in.g < in.h); + bool4 c22 = (isunordered(in.g, in.h) || in.g > in.h); + bool4 c23 = (isunordered(in.g, in.h) || in.g <= in.h); + bool4 c24 = (isunordered(in.g, in.h) || in.g >= in.h); + out.FragColor = float4(t0 + t1); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..7ae4aa5e6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/unreachable.asm.frag @@ -0,0 +1,40 @@ +#include +#include + +using namespace metal; + +constant float4 _21 = {}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int counter [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float4 _24; + _24 = _21; + float4 _33; + for (;;) + { + if (in.counter == 10) + { + _33 = float4(10.0); + break; + } + else + { + _33 = float4(30.0); + break; + } + } + out.FragColor = _33; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag new file mode 100644 index 000000000..d06da7a68 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag @@ -0,0 +1,363 @@ +#include +#include + +using namespace metal; + +struct _6 +{ + float4 _m0; + float _m1; + float4 _m2; +}; + +struct _10 +{ + float3 _m0; + packed_float3 _m1; + float _m2; + packed_float3 _m3; + float _m4; + packed_float3 _m5; + float _m6; + packed_float3 _m7; + float _m8; + packed_float3 _m9; + float _m10; + packed_float3 _m11; + float _m12; + float2 _m13; + float2 _m14; + packed_float3 _m15; + float _m16; + float _m17; + float 
_m18; + float _m19; + float _m20; + float4 _m21; + float4 _m22; + float4x4 _m23; + float4 _m24; +}; + +struct _18 +{ + float4x4 _m0; + float4x4 _m1; + float4x4 _m2; + float4x4 _m3; + float4 _m4; + float4 _m5; + float _m6; + float _m7; + float _m8; + float _m9; + packed_float3 _m10; + float _m11; + packed_float3 _m12; + float _m13; + packed_float3 _m14; + float _m15; + packed_float3 _m16; + float _m17; + float _m18; + float _m19; + float2 _m20; + float2 _m21; + float2 _m22; + float4 _m23; + float2 _m24; + float2 _m25; + float2 _m26; + char pad27[8]; + packed_float3 _m27; + float _m28; + float _m29; + float _m30; + float _m31; + float _m32; + float2 _m33; + float _m34; + float _m35; + float3 _m36; + float4x4 _m37[2]; + float4 _m38[2]; +}; + +struct _20 +{ + float4 _m0; + float4 _m1; + float2 _m2; + float2 _m3; + float3 _m4; + float _m5; + float3 _m6; + float _m7; + float4 _m8; + float4 _m9; + float4 _m10; + float3 _m11; + float _m12; + float3 _m13; + float _m14; + float3 _m15; + float4 _m16; + float3 _m17; + float _m18; + float3 _m19; + float2 _m20; +}; + +struct _21 +{ + float4 _m0; +}; + +struct _28 +{ + float4 _m0; +}; + +constant _28 _74 = {}; + +struct main0_out +{ + float4 m_5 [[color(0)]]; +}; + +fragment main0_out main0(constant _6& _7 [[buffer(0)]], constant _18& _19 [[buffer(1)]], constant _10& _11 [[buffer(2)]], texture2d _14 [[texture(4)]], texture2d _12 [[texture(13)]], texture2d _8 [[texture(14)]], sampler _15 [[sampler(3)]], sampler _13 [[sampler(5)]], sampler _9 [[sampler(6)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + _28 _77 = _74; + _77._m0 = float4(0.0); + float2 _82 = gl_FragCoord.xy * _19._m23.xy; + float4 _88 = _7._m2 * _7._m0.xyxy; + float2 _97 = fast::clamp(_82 + (float3(0.0, -2.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _109 = float3(_11._m5) * fast::clamp(_8.sample(_9, _97, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _113 = _12.sample(_13, _97, level(0.0)); + float3 _129; + if (_113.y > 0.0) + { + _129 = _109 + (_14.sample(_15, _97, level(0.0)).xyz * fast::clamp(_113.y * _113.z, 0.0, 1.0)); + } + else + { + _129 = _109; + } + float3 _130 = _129 * 0.5; + float3 _133 = float4(0.0).xyz + _130; + float4 _134 = float4(_133.x, _133.y, _133.z, float4(0.0).w); + _28 _135 = _77; + _135._m0 = _134; + float2 _144 = fast::clamp(_82 + (float3(-1.0, -1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _156 = float3(_11._m5) * fast::clamp(_8.sample(_9, _144, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _160 = _12.sample(_13, _144, level(0.0)); + float3 _176; + if (_160.y > 0.0) + { + _176 = _156 + (_14.sample(_15, _144, level(0.0)).xyz * fast::clamp(_160.y * _160.z, 0.0, 1.0)); + } + else + { + _176 = _156; + } + float3 _177 = _176 * 0.5; + float3 _180 = _134.xyz + _177; + float4 _181 = float4(_180.x, _180.y, _180.z, _134.w); + _28 _182 = _135; + _182._m0 = _181; + float2 _191 = fast::clamp(_82 + (float3(0.0, -1.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _203 = float3(_11._m5) * fast::clamp(_8.sample(_9, _191, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _207 = _12.sample(_13, _191, level(0.0)); + float3 _223; + if (_207.y > 0.0) + { + _223 = _203 + (_14.sample(_15, _191, level(0.0)).xyz * fast::clamp(_207.y * _207.z, 0.0, 1.0)); + } + else + { + _223 = _203; + } + float3 _224 = _223 * 0.75; + float3 _227 = _181.xyz + _224; + float4 _228 = float4(_227.x, _227.y, _227.z, _181.w); + _28 _229 = _182; + _229._m0 = _228; + float2 _238 = fast::clamp(_82 + (float3(1.0, -1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _250 = float3(_11._m5) * 
fast::clamp(_8.sample(_9, _238, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _254 = _12.sample(_13, _238, level(0.0)); + float3 _270; + if (_254.y > 0.0) + { + _270 = _250 + (_14.sample(_15, _238, level(0.0)).xyz * fast::clamp(_254.y * _254.z, 0.0, 1.0)); + } + else + { + _270 = _250; + } + float3 _271 = _270 * 0.5; + float3 _274 = _228.xyz + _271; + float4 _275 = float4(_274.x, _274.y, _274.z, _228.w); + _28 _276 = _229; + _276._m0 = _275; + float2 _285 = fast::clamp(_82 + (float3(-2.0, 0.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _297 = float3(_11._m5) * fast::clamp(_8.sample(_9, _285, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _301 = _12.sample(_13, _285, level(0.0)); + float3 _317; + if (_301.y > 0.0) + { + _317 = _297 + (_14.sample(_15, _285, level(0.0)).xyz * fast::clamp(_301.y * _301.z, 0.0, 1.0)); + } + else + { + _317 = _297; + } + float3 _318 = _317 * 0.5; + float3 _321 = _275.xyz + _318; + float4 _322 = float4(_321.x, _321.y, _321.z, _275.w); + _28 _323 = _276; + _323._m0 = _322; + float2 _332 = fast::clamp(_82 + (float3(-1.0, 0.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _344 = float3(_11._m5) * fast::clamp(_8.sample(_9, _332, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _348 = _12.sample(_13, _332, level(0.0)); + float3 _364; + if (_348.y > 0.0) + { + _364 = _344 + (_14.sample(_15, _332, level(0.0)).xyz * fast::clamp(_348.y * _348.z, 0.0, 1.0)); + } + else + { + _364 = _344; + } + float3 _365 = _364 * 0.75; + float3 _368 = _322.xyz + _365; + float4 _369 = float4(_368.x, _368.y, _368.z, _322.w); + _28 _370 = _323; + _370._m0 = _369; + float2 _379 = fast::clamp(_82 + (float3(0.0, 0.0, 1.0).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _391 = float3(_11._m5) * fast::clamp(_8.sample(_9, _379, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _395 = _12.sample(_13, _379, level(0.0)); + float3 _411; + if (_395.y > 0.0) + { + _411 = _391 + (_14.sample(_15, _379, level(0.0)).xyz * fast::clamp(_395.y * _395.z, 0.0, 1.0)); + } + else + { + _411 = _391; + } + float3 _412 = _411 * 1.0; + float3 _415 = _369.xyz + _412; + float4 _416 = float4(_415.x, _415.y, _415.z, _369.w); + _28 _417 = _370; + _417._m0 = _416; + float2 _426 = fast::clamp(_82 + (float3(1.0, 0.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _438 = float3(_11._m5) * fast::clamp(_8.sample(_9, _426, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _442 = _12.sample(_13, _426, level(0.0)); + float3 _458; + if (_442.y > 0.0) + { + _458 = _438 + (_14.sample(_15, _426, level(0.0)).xyz * fast::clamp(_442.y * _442.z, 0.0, 1.0)); + } + else + { + _458 = _438; + } + float3 _459 = _458 * 0.75; + float3 _462 = _416.xyz + _459; + float4 _463 = float4(_462.x, _462.y, _462.z, _416.w); + _28 _464 = _417; + _464._m0 = _463; + float2 _473 = fast::clamp(_82 + (float3(2.0, 0.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _485 = float3(_11._m5) * fast::clamp(_8.sample(_9, _473, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _489 = _12.sample(_13, _473, level(0.0)); + float3 _505; + if (_489.y > 0.0) + { + _505 = _485 + (_14.sample(_15, _473, level(0.0)).xyz * fast::clamp(_489.y * _489.z, 0.0, 1.0)); + } + else + { + _505 = _485; + } + float3 _506 = _505 * 0.5; + float3 _509 = _463.xyz + _506; + float4 _510 = float4(_509.x, _509.y, _509.z, _463.w); + _28 _511 = _464; + _511._m0 = _510; + float2 _520 = fast::clamp(_82 + (float3(-1.0, 1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _532 = float3(_11._m5) * fast::clamp(_8.sample(_9, _520, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _536 = _12.sample(_13, _520, level(0.0)); + float3 _552; 
+ if (_536.y > 0.0) + { + _552 = _532 + (_14.sample(_15, _520, level(0.0)).xyz * fast::clamp(_536.y * _536.z, 0.0, 1.0)); + } + else + { + _552 = _532; + } + float3 _553 = _552 * 0.5; + float3 _556 = _510.xyz + _553; + float4 _557 = float4(_556.x, _556.y, _556.z, _510.w); + _28 _558 = _511; + _558._m0 = _557; + float2 _567 = fast::clamp(_82 + (float3(0.0, 1.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _579 = float3(_11._m5) * fast::clamp(_8.sample(_9, _567, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _583 = _12.sample(_13, _567, level(0.0)); + float3 _599; + if (_583.y > 0.0) + { + _599 = _579 + (_14.sample(_15, _567, level(0.0)).xyz * fast::clamp(_583.y * _583.z, 0.0, 1.0)); + } + else + { + _599 = _579; + } + float3 _600 = _599 * 0.75; + float3 _603 = _557.xyz + _600; + float4 _604 = float4(_603.x, _603.y, _603.z, _557.w); + _28 _605 = _558; + _605._m0 = _604; + float2 _614 = fast::clamp(_82 + (float3(1.0, 1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _626 = float3(_11._m5) * fast::clamp(_8.sample(_9, _614, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _630 = _12.sample(_13, _614, level(0.0)); + float3 _646; + if (_630.y > 0.0) + { + _646 = _626 + (_14.sample(_15, _614, level(0.0)).xyz * fast::clamp(_630.y * _630.z, 0.0, 1.0)); + } + else + { + _646 = _626; + } + float3 _647 = _646 * 0.5; + float3 _650 = _604.xyz + _647; + float4 _651 = float4(_650.x, _650.y, _650.z, _604.w); + _28 _652 = _605; + _652._m0 = _651; + float2 _661 = fast::clamp(_82 + (float3(0.0, 2.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + float3 _673 = float3(_11._m5) * fast::clamp(_8.sample(_9, _661, level(0.0)).w * _7._m1, 0.0, 1.0); + float4 _677 = _12.sample(_13, _661, level(0.0)); + float3 _693; + if (_677.y > 0.0) + { + _693 = _673 + (_14.sample(_15, _661, level(0.0)).xyz * fast::clamp(_677.y * _677.z, 0.0, 1.0)); + } + else + { + _693 = _673; + } + float3 _697 = _651.xyz + (_693 * 0.5); + float4 _698 = float4(_697.x, _697.y, _697.z, _651.w); + _28 _699 = _652; + _699._m0 = _698; + float3 _702 = _698.xyz / float3(((((((((((((0.0 + 0.5) + 0.5) + 0.75) + 0.5) + 0.5) + 0.75) + 1.0) + 0.75) + 0.5) + 0.5) + 0.75) + 0.5) + 0.5); + _28 _704 = _699; + _704._m0 = float4(_702.x, _702.y, _702.z, _698.w); + _28 _705 = _704; + _705._m0.w = 1.0; + out.m_5 = _705._m0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/packing-test.asm.vert b/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/packing-test.asm.vert new file mode 100644 index 000000000..ac2d30c8d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/packing-test.asm.vert @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct TestStruct +{ + float4x4 transforms[6]; +}; + +struct CB0 +{ + TestStruct CB0[16]; +}; + +vertex void main0() +{ +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert b/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert new file mode 100644 index 000000000..67485d732 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +constant int _7_tmp [[function_constant(201)]]; +constant int _7 = is_function_constant_defined(_7_tmp) ? _7_tmp : -10; +constant uint _8_tmp [[function_constant(202)]]; +constant uint _8 = is_function_constant_defined(_8_tmp) ? 
_8_tmp : 100u; +constant float _9_tmp [[function_constant(200)]]; +constant float _9 = is_function_constant_defined(_9_tmp) ? _9_tmp : 3.141590118408203125; +constant int _20 = (_7 + 2); +constant uint _25 = (_8 % 5u); +constant int4 _30 = int4(20, 30, _20, _20); +constant int2 _32 = int2(_30.y, _30.x); +constant int _33 = _30.y; + +struct main0_out +{ + int m_4 [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +vertex main0_out main0() +{ + main0_out out = {}; + float4 pos = float4(0.0); + pos.y += float(_20); + pos.z += float(_25); + pos += float4(_30); + float2 _56 = pos.xy + float2(_32); + pos = float4(_56.x, _56.y, pos.z, pos.w); + out.gl_Position = pos; + out.m_4 = _33; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..89ca17f98 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,28 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +float4 _main(thread const uint& vid, thread const uint& iid) +{ + return float4(float(vid + iid)); +} + +vertex main0_out main0(uint gl_VertexIndex [[vertex_id]], uint gl_InstanceIndex [[instance_id]]) +{ + main0_out out = {}; + uint vid = gl_VertexIndex; + uint iid = gl_InstanceIndex; + uint param = vid; + uint param_1 = iid; + out.gl_Position = _main(param, param_1); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/access-private-workgroup-in-function.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/access-private-workgroup-in-function.comp new file mode 100644 index 000000000..17acda967 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/access-private-workgroup-in-function.comp @@ -0,0 +1,34 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +void set_f(thread int& f) +{ + f = 40; +} + +void set_shared_u(threadgroup int& u) +{ + u = 50; +} + +kernel void main0(uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + threadgroup int u; + int f; + set_f(f); + set_shared_u(u); + if (gl_LocalInvocationIndex == 0u) + { + f = 10; + } + else + { + f = 30; + u = 20; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/atomic.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/atomic.comp new file mode 100644 index 000000000..f77922aca --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/atomic.comp @@ -0,0 +1,70 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include <metal_stdlib> +#include <simd/simd.h> +#include <metal_atomic> + +using namespace metal; + +struct SSBO +{ + uint u32; + int i32; +}; + +kernel void main0(device SSBO& ssbo [[buffer(2)]]) +{ + threadgroup uint shared_u32; + threadgroup int shared_i32; + uint _16 = atomic_fetch_add_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _18 = atomic_fetch_or_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _20 = atomic_fetch_xor_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _22 = atomic_fetch_and_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _24 = atomic_fetch_min_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint 
_26 = atomic_fetch_max_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _28 = atomic_exchange_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed); + uint _32; + do + { + _32 = 10u; + } while (!atomic_compare_exchange_weak_explicit((volatile device atomic_uint*)&ssbo.u32, &_32, 2u, memory_order_relaxed, memory_order_relaxed)); + int _36 = atomic_fetch_add_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _38 = atomic_fetch_or_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _40 = atomic_fetch_xor_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _42 = atomic_fetch_and_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _44 = atomic_fetch_min_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _46 = atomic_fetch_max_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _48 = atomic_exchange_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed); + int _52; + do + { + _52 = 10; + } while (!atomic_compare_exchange_weak_explicit((volatile device atomic_int*)&ssbo.i32, &_52, 2, memory_order_relaxed, memory_order_relaxed)); + shared_u32 = 10u; + shared_i32 = 10; + uint _57 = atomic_fetch_add_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _58 = atomic_fetch_or_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _59 = atomic_fetch_xor_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _60 = atomic_fetch_and_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _61 = atomic_fetch_min_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _62 = atomic_fetch_max_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _63 = atomic_exchange_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed); + uint _64; + do + { + _64 = 10u; + } while (!atomic_compare_exchange_weak_explicit((volatile threadgroup atomic_uint*)&shared_u32, &_64, 2u, memory_order_relaxed, memory_order_relaxed)); + int _65 = atomic_fetch_add_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _66 = atomic_fetch_or_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _67 = atomic_fetch_xor_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _68 = atomic_fetch_and_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _69 = atomic_fetch_min_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _70 = atomic_fetch_max_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _71 = atomic_exchange_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed); + int _72; + do + { + _72 = 10; + } while (!atomic_compare_exchange_weak_explicit((volatile threadgroup atomic_int*)&shared_i32, &_72, 2, memory_order_relaxed, memory_order_relaxed)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/barriers.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/barriers.comp new file mode 100644 index 000000000..3b8474c75 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/barriers.comp @@ -0,0 
+1,67 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +void barrier_shared() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +void full_barrier() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +void buffer_barrier() +{ + threadgroup_barrier(mem_flags::mem_none); +} + +void group_barrier() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +void barrier_shared_exec() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +void full_barrier_exec() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +void buffer_barrier_exec() +{ + threadgroup_barrier(mem_flags::mem_none); +} + +void group_barrier_exec() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +void exec_barrier() +{ + threadgroup_barrier(mem_flags::mem_threadgroup); +} + +kernel void main0() +{ + barrier_shared(); + full_barrier(); + buffer_barrier(); + group_barrier(); + barrier_shared_exec(); + full_barrier_exec(); + buffer_barrier_exec(); + group_barrier_exec(); + exec_barrier(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/basic.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/basic.comp new file mode 100644 index 000000000..6410894ba --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/basic.comp @@ -0,0 +1,34 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +struct SSBO3 +{ + uint counter; +}; + +kernel void main0(const device SSBO& _23 [[buffer(0)]], device SSBO2& _45 [[buffer(1)]], device SSBO3& _48 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + float4 idata = _23.in_data[ident]; + if (dot(idata, float4(1.0, 5.0, 6.0, 2.0)) > 8.19999980926513671875) + { + uint _52 = atomic_fetch_add_explicit((volatile device atomic_uint*)&_48.counter, 1u, memory_order_relaxed); + _45.out_data[_52] = idata; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/bitcast-16bit-1.invalid.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/bitcast-16bit-1.invalid.comp new file mode 100644 index 000000000..ddf38a602 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/bitcast-16bit-1.invalid.comp @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +struct SSBO0 +{ + short4 inputs[1]; +}; + +struct SSBO1 +{ + int4 outputs[1]; +}; + +kernel void main0(device SSBO0& _25 [[buffer(0)]], device SSBO1& _39 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + half2 a = as_type(_25.inputs[ident].xy); + _39.outputs[ident].x = int(as_type(a + half2(half(1), half(1)))); + _39.outputs[ident].y = as_type(_25.inputs[ident].zw); + _39.outputs[ident].z = int(as_type(ushort2(_25.inputs[ident].xy))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/bitcast-16bit-2.invalid.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/bitcast-16bit-2.invalid.comp new file mode 100644 index 000000000..1dac3969b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/bitcast-16bit-2.invalid.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct SSBO1 +{ + short4 outputs[1]; +}; + +struct SSBO0 +{ + int4 inputs[1]; +}; + +struct UBO +{ + half4 const0; +}; + +kernel void main0(device SSBO0& 
_29 [[buffer(0)]], device SSBO1& _21 [[buffer(1)]], constant UBO& _40 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + short2 _47 = as_type(_29.inputs[ident].x) + as_type(_40.const0.xy); + _21.outputs[ident] = short4(_47.x, _47.y, _21.outputs[ident].z, _21.outputs[ident].w); + short2 _66 = short2(as_type(uint(_29.inputs[ident].y)) - as_type(_40.const0.zw)); + _21.outputs[ident] = short4(_21.outputs[ident].x, _21.outputs[ident].y, _66.x, _66.y); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/builtins.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/builtins.comp new file mode 100644 index 000000000..4330d5783 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/builtins.comp @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(8u, 4u, 2u); + +kernel void main0(uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]], uint3 gl_NumWorkGroups [[threadgroups_per_grid]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]]) +{ + uint3 local_id = gl_LocalInvocationID; + uint3 global_id = gl_GlobalInvocationID; + uint local_index = gl_LocalInvocationIndex; + uint3 work_group_size = gl_WorkGroupSize; + uint3 num_work_groups = gl_NumWorkGroups; + uint3 work_group_id = gl_WorkGroupID; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/cfg-preserve-parameter.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/cfg-preserve-parameter.comp new file mode 100644 index 000000000..d65beee5d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/cfg-preserve-parameter.comp @@ -0,0 +1,78 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +void out_test_0(thread const int& cond, thread int& i) +{ + if (cond == 0) + { + i = 40; + } + else + { + i = 60; + } +} + +void out_test_1(thread const int& cond, thread int& i) +{ + switch (cond) + { + case 40: + { + i = 40; + break; + } + default: + { + i = 70; + break; + } + } +} + +void inout_test_0(thread const int& cond, thread int& i) +{ + if (cond == 0) + { + i = 40; + } +} + +void inout_test_1(thread const int& cond, thread int& i) +{ + switch (cond) + { + case 40: + { + i = 40; + break; + } + } +} + +kernel void main0() +{ + int cond = 40; + int i = 50; + int param = cond; + int param_1 = i; + out_test_0(param, param_1); + i = param_1; + int param_2 = cond; + int param_3 = i; + out_test_1(param_2, param_3); + i = param_3; + int param_4 = cond; + int param_5 = i; + inout_test_0(param_4, param_5); + i = param_5; + int param_6 = cond; + int param_7 = i; + inout_test_1(param_6, param_7); + i = param_7; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/coherent-block.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/coherent-block.comp new file mode 100644 index 000000000..963574acd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/coherent-block.comp @@ -0,0 +1,15 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 value; +}; + +kernel void main0(device SSBO& _10 [[buffer(1)]]) +{ + _10.value = float4(20.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/coherent-image.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/coherent-image.comp new file mode 100644 index 000000000..827a24712 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl/comp/coherent-image.comp @@ -0,0 +1,15 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + int4 value; +}; + +kernel void main0(device SSBO& _10 [[buffer(1)]], texture2d uImage [[texture(3)]]) +{ + _10.value = uImage.read(uint2(int2(10))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/composite-array-initialization.comp new file mode 100644 index 000000000..1a901dc8e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/composite-array-initialization.comp @@ -0,0 +1,61 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float X_tmp [[function_constant(0)]]; +constant float X = is_function_constant_defined(X_tmp) ? X_tmp : 4.0; +constant uint3 gl_WorkGroupSize = uint3(2u, 1u, 1u); + +struct Data +{ + float a; + float b; +}; + +struct Data_1 +{ + float a; + float b; +}; + +struct SSBO +{ + Data_1 outdata[1]; +}; + +constant Data _25[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } }; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +Data combine(thread const Data& a, thread const Data& b) +{ + return Data{ a.a + b.a, a.b + b.b }; +} + +kernel void main0(device SSBO& _53 [[buffer(0)]], uint3 gl_WorkGroupID [[threadgroup_position_in_grid]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + Data data[2] = { Data{ 1.0, 2.0 }, Data{ 3.0, 4.0 } }; + Data _31[2] = { Data{ X, 2.0 }, Data{ 3.0, 5.0 } }; + Data data2[2]; + spvArrayCopyFromStack1(data2, _31); + Data param = data[gl_LocalInvocationID.x]; + Data param_1 = data2[gl_LocalInvocationID.x]; + Data _73 = combine(param, param_1); + _53.outdata[gl_WorkGroupID.x].a = _73.a; + _53.outdata[gl_WorkGroupID.x].b = _73.b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/composite-construct.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/composite-construct.comp new file mode 100644 index 000000000..4b5ea37e9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/composite-construct.comp @@ -0,0 +1,48 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct SSBO0 +{ + float4 as[1]; +}; + +struct SSBO1 +{ + float4 bs[1]; +}; + +struct Composite +{ + float4 a; + float4 b; +}; + +constant float4 _43[2] = { float4(20.0), float4(40.0) }; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
+template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +kernel void main0(device SSBO0& _16 [[buffer(0)]], device SSBO1& _32 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + float4 _37[2] = { _16.as[gl_GlobalInvocationID.x], _32.bs[gl_GlobalInvocationID.x] }; + float4 values[2]; + spvArrayCopyFromStack1(values, _37); + Composite c = Composite{ values[0], _43[1] }; + _16.as[0] = values[gl_LocalInvocationIndex]; + _32.bs[1] = c.b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/copy-array-of-arrays.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/copy-array-of-arrays.comp new file mode 100644 index 000000000..cb6c328ae --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/copy-array-of-arrays.comp @@ -0,0 +1,93 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct BUF +{ + int a; + float b; + float c; +}; + +constant float _16[2] = { 1.0, 2.0 }; +constant float _19[2] = { 3.0, 4.0 }; +constant float _20[2][2] = { { 1.0, 2.0 }, { 3.0, 4.0 } }; +constant float _21[2][2][2] = { { { 1.0, 2.0 }, { 3.0, 4.0 } }, { { 1.0, 2.0 }, { 3.0, 4.0 } } }; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromStack2(thread T (&dst)[A][B], thread const T (&src)[A][B]) +{ + for (uint i = 0; i < A; i++) + { + spvArrayCopyFromStack1(dst[i], src[i]); + } +} + +template +void spvArrayCopyFromConstant2(thread T (&dst)[A][B], constant T (&src)[A][B]) +{ + for (uint i = 0; i < A; i++) + { + spvArrayCopyFromConstant1(dst[i], src[i]); + } +} + +template +void spvArrayCopyFromStack3(thread T (&dst)[A][B][C], thread const T (&src)[A][B][C]) +{ + for (uint i = 0; i < A; i++) + { + spvArrayCopyFromStack2(dst[i], src[i]); + } +} + +template +void spvArrayCopyFromConstant3(thread T (&dst)[A][B][C], constant T (&src)[A][B][C]) +{ + for (uint i = 0; i < A; i++) + { + spvArrayCopyFromConstant2(dst[i], src[i]); + } +} + +kernel void main0(device BUF& o [[buffer(0)]]) +{ + float c[2][2][2]; + spvArrayCopyFromConstant3(c, _21); + o.a = int(c[1][1][1]); + float _43[2] = { o.b, o.c }; + float _48[2] = { o.b, o.b }; + float _49[2][2]; + spvArrayCopyFromStack1(_49[0], _43); + spvArrayCopyFromStack1(_49[1], _48); + float _54[2] = { o.c, o.c }; + float _59[2] = { o.c, o.b }; + float _60[2][2]; + spvArrayCopyFromStack1(_60[0], _54); + spvArrayCopyFromStack1(_60[1], _59); + float _61[2][2][2]; + spvArrayCopyFromStack2(_61[0], _49); + spvArrayCopyFromStack2(_61[1], _60); + float d[2][2][2]; + spvArrayCopyFromStack3(d, _61); + float e[2][2][2]; + spvArrayCopyFromStack3(e, d); + o.b = e[1][0][1]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/culling.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/culling.comp new file mode 100644 index 000000000..1f6bdcbee --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/culling.comp @@ -0,0 
+1,36 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +struct SSBO +{ + float in_data[1]; +}; + +struct SSBO2 +{ + float out_data[1]; +}; + +struct SSBO3 +{ + uint count; +}; + +kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _38 [[buffer(1)]], device SSBO3& _41 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + float idata = _22.in_data[ident]; + if (idata > 12.0) + { + uint _45 = atomic_fetch_add_explicit((volatile device atomic_uint*)&_41.count, 1u, memory_order_relaxed); + _38.out_data[_45] = idata; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/defer-parens.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/defer-parens.comp new file mode 100644 index 000000000..76dce7773 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/defer-parens.comp @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 data; + int index; +}; + +kernel void main0(device SSBO& _13 [[buffer(0)]]) +{ + float4 d = _13.data; + _13.data = float4(d.x, d.yz + float2(10.0), d.w); + _13.data = (d + d) + d; + _13.data = (d.yz + float2(10.0)).xxyy; + float t = (d.yz + float2(10.0)).y; + _13.data = float4(t); + t = (d.zw + float2(10.0))[_13.index]; + _13.data = float4(t); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/dowhile.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/dowhile.comp new file mode 100644 index 000000000..3482fb355 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/dowhile.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4x4 mvp; + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _28 [[buffer(0)]], device SSBO2& _52 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + int i = 0; + float4 idat = _28.in_data[ident]; + do + { + idat = _28.mvp * idat; + i++; + } while (i < 16); + _52.out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/functions.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/functions.comp new file mode 100644 index 000000000..d8f6e55a4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/functions.comp @@ -0,0 +1,18 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +void myfunc(threadgroup int (&foo)[1337]) +{ + foo[0] = 13; +} + +kernel void main0() +{ + threadgroup int foo[1337]; + myfunc(foo); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp new file mode 100644 index 000000000..1b525c1f9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +float getB(device myBlock& myStorage, thread uint3& gl_GlobalInvocationID) +{ 
+ return myStorage.b[gl_GlobalInvocationID.x]; +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_GlobalInvocationID.x] = mod(getB(myStorage, gl_GlobalInvocationID) + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/global-invocation-id.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/global-invocation-id.comp new file mode 100644 index 000000000..fe0212ec3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/global-invocation-id.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_GlobalInvocationID.x] = mod(myStorage.b[gl_GlobalInvocationID.x] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/image-cube-array-load-store.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/image-cube-array-load-store.comp new file mode 100644 index 000000000..ef67a326f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/image-cube-array-load-store.comp @@ -0,0 +1,12 @@ +#include +#include + +using namespace metal; + +kernel void main0(texturecube_array uImageIn [[texture(0)]], texturecube_array uImageOut [[texture(1)]]) +{ + int3 coord = int3(9, 7, 11); + float4 indata = uImageIn.read(uint2(coord.xy), uint(coord.z) % 6u, uint(coord.z) / 6u); + uImageOut.write(indata, uint2(coord.xy), uint(coord.z) % 6u, uint(coord.z) / 6u); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/image.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/image.comp new file mode 100644 index 000000000..f3bc1455d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/image.comp @@ -0,0 +1,11 @@ +#include +#include + +using namespace metal; + +kernel void main0(texture2d uImageIn [[texture(0)]], texture2d uImageOut [[texture(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 v = uImageIn.read(uint2((int2(gl_GlobalInvocationID.xy) + int2(uImageIn.get_width(), uImageIn.get_height())))); + uImageOut.write(v, uint2(int2(gl_GlobalInvocationID.xy))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/insert.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/insert.comp new file mode 100644 index 000000000..0f56a6515 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/insert.comp @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 out_data[1]; +}; + +kernel void main0(device SSBO& _27 [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float4 v; + v.x = 10.0; + v.y = 30.0; + v.z = 70.0; + v.w = 90.0; + _27.out_data[gl_GlobalInvocationID.x] = v; + _27.out_data[gl_GlobalInvocationID.x].y = 20.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/inverse.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/inverse.comp new file mode 100644 index 000000000..f2f499b91 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl/comp/inverse.comp @@ -0,0 +1,123 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct MatrixOut +{ + float2x2 m2out; + float3x3 m3out; + float4x4 m4out; +}; + +struct MatrixIn +{ + float2x2 m2in; + float3x3 m3in; + float4x4 m4in; +}; + +// Returns the determinant of a 2x2 matrix. +inline float spvDet2x2(float a1, float a2, float b1, float b2) +{ + return a1 * b2 - b1 * a2; +} + +// Returns the determinant of a 3x3 matrix. +inline float spvDet3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3) +{ + return a1 * spvDet2x2(b2, b3, c2, c3) - b1 * spvDet2x2(a2, a3, c2, c3) + c1 * spvDet2x2(a2, a3, b2, b3); +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float4x4 spvInverse4x4(float4x4 m) +{ + float4x4 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = spvDet3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][1] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][2] = spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]); + adj[0][3] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]); + + adj[1][0] = -spvDet3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][1] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][2] = -spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]); + adj[1][3] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]); + + adj[2][0] = spvDet3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][1] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][2] = spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]); + adj[2][3] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]); + + adj[3][0] = -spvDet3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][1] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][2] = -spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]); + adj[3][3] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] * m[3][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. 
+float3x3 spvInverse3x3(float3x3 m) +{ + float3x3 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = spvDet2x2(m[1][1], m[1][2], m[2][1], m[2][2]); + adj[0][1] = -spvDet2x2(m[0][1], m[0][2], m[2][1], m[2][2]); + adj[0][2] = spvDet2x2(m[0][1], m[0][2], m[1][1], m[1][2]); + + adj[1][0] = -spvDet2x2(m[1][0], m[1][2], m[2][0], m[2][2]); + adj[1][1] = spvDet2x2(m[0][0], m[0][2], m[2][0], m[2][2]); + adj[1][2] = -spvDet2x2(m[0][0], m[0][2], m[1][0], m[1][2]); + + adj[2][0] = spvDet2x2(m[1][0], m[1][1], m[2][0], m[2][1]); + adj[2][1] = -spvDet2x2(m[0][0], m[0][1], m[2][0], m[2][1]); + adj[2][2] = spvDet2x2(m[0][0], m[0][1], m[1][0], m[1][1]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? (adj * (1.0f / det)) : m; +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float2x2 spvInverse2x2(float2x2 m) +{ + float2x2 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = m[1][1]; + adj[0][1] = -m[0][1]; + + adj[1][0] = -m[1][0]; + adj[1][1] = m[0][0]; + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? 
(adj * (1.0f / det)) : m; +} + +kernel void main0(device MatrixOut& _15 [[buffer(0)]], const device MatrixIn& _20 [[buffer(1)]]) +{ + _15.m2out = spvInverse2x2(_20.m2in); + _15.m3out = spvInverse3x3(_20.m3in); + _15.m4out = spvInverse4x4(_20.m4in); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/local-invocation-id.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/local-invocation-id.comp new file mode 100644 index 000000000..772e5e0d8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/local-invocation-id.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template<typename Tx, typename Ty> +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_LocalInvocationID.x] = mod(myStorage.b[gl_LocalInvocationID.x] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/local-invocation-index.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/local-invocation-index.comp new file mode 100644 index 000000000..41adbdca5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/local-invocation-index.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct myBlock +{ + int a; + float b[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template<typename Tx, typename Ty> +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_LocalInvocationIndex] = mod(myStorage.b[gl_LocalInvocationIndex] + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/mat3.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/mat3.comp new file mode 100644 index 000000000..475d163bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/mat3.comp @@ -0,0 +1,16 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO2 +{ + float3x3 out_data[1]; +}; + +kernel void main0(device SSBO2& _22 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + _22.out_data[ident] = float3x3(float3(10.0), float3(20.0), float3(40.0)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/mod.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/mod.comp new file mode 100644 index 000000000..e0d290259 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/mod.comp @@ -0,0 +1,35 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO +{ + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template<typename Tx, typename Ty> +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(const device SSBO& _23 [[buffer(0)]], device SSBO2& _33 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + float4 v =
mod(_23.in_data[ident], _33.out_data[ident]); + _33.out_data[ident] = v; + uint4 vu = as_type<uint4>(_23.in_data[ident]) % as_type<uint4>(_33.out_data[ident]); + _33.out_data[ident] = as_type<float4>(vu); + int4 vi = as_type<int4>(_23.in_data[ident]) % as_type<int4>(_33.out_data[ident]); + _33.out_data[ident] = as_type<float4>(vi); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/modf.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/modf.comp new file mode 100644 index 000000000..ef50a0213 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/modf.comp @@ -0,0 +1,24 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO +{ + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _23 [[buffer(0)]], device SSBO2& _35 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + float4 i; + float4 _31 = modf(_23.in_data[ident], i); + float4 v = _31; + _35.out_data[ident] = v; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/packing-test-1.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/packing-test-1.comp new file mode 100644 index 000000000..d98ad59aa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/packing-test-1.comp @@ -0,0 +1,38 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u); + +struct T1 +{ + float3 a; + float b; +}; + +struct T1_1 +{ + packed_float3 a; + float b; +}; + +struct Buffer0 +{ + T1_1 buf0[1]; +}; + +struct Buffer1 +{ + float buf1[1]; +}; + +kernel void main0(device Buffer0& _15 [[buffer(1)]], device Buffer1& _34 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + T1 v; + v.a = _15.buf0[0].a; + v.b = _15.buf0[0].b; + float x = v.b; + _34.buf1[gl_GlobalInvocationID.x] = x; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/packing-test-2.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/packing-test-2.comp new file mode 100644 index 000000000..dfccbf863 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/packing-test-2.comp @@ -0,0 +1,29 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(32u, 1u, 1u); + +struct T1 +{ + packed_float3 a; + float b; +}; + +struct Buffer0 +{ + T1 buf0[1]; +}; + +struct Buffer1 +{ + float buf1[1]; +}; + +kernel void main0(device Buffer0& _14 [[buffer(1)]], device Buffer1& _24 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + float x = _14.buf0[0].b; + _24.buf1[gl_GlobalInvocationID.x] = x; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/read-write-only.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/read-write-only.comp new file mode 100644 index 000000000..42c625092 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/read-write-only.comp @@ -0,0 +1,29 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct SSBO2 +{ + float4 data4; + float4 data5; +}; + +struct SSBO0 +{ + float4 data0; + float4 data1; +}; + +struct SSBO1 +{ + float4 data2; + float4 data3; +}; + +kernel void main0(const device SSBO0& _15 [[buffer(0)]], device SSBO1& _21 [[buffer(1)]], device SSBO2& _10 [[buffer(2)]]) +{ + _10.data4 = _15.data0 + _21.data2; + _10.data5 = _15.data1 + _21.data3; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/rmw-matrix.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/rmw-matrix.comp new file mode 100644 index 000000000..150db7ede --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl/comp/rmw-matrix.comp @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float a; + float4 b; + float4x4 c; + float a1; + float4 b1; + float4x4 c1; +}; + +kernel void main0(device SSBO& _11 [[buffer(0)]]) +{ + _11.a *= _11.a1; + _11.b *= _11.b1; + _11.c = _11.c * _11.c1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/rmw-opt.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/rmw-opt.comp new file mode 100644 index 000000000..060f9f9c7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/rmw-opt.comp @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + int a; +}; + +kernel void main0(device SSBO& _9 [[buffer(0)]]) +{ + _9.a += 10; + _9.a -= 10; + _9.a *= 10; + _9.a /= 10; + _9.a = _9.a << 2; + _9.a = _9.a >> 3; + _9.a &= 40; + _9.a ^= 10; + _9.a %= 40; + _9.a |= 1; + bool c = false; + bool d = true; + c = c && d; + d = d || c; + _9.a = int(c && d); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/shared-array-of-arrays.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/shared-array-of-arrays.comp new file mode 100644 index 000000000..3133e5f29 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/shared-array-of-arrays.comp @@ -0,0 +1,32 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 4u, 1u); + +struct SSBO +{ + float out_data[1]; +}; + +void work(threadgroup float (&foo)[4][4], thread uint3& gl_LocalInvocationID, thread uint& gl_LocalInvocationIndex, device SSBO& v_67, thread uint3& gl_GlobalInvocationID) +{ + foo[gl_LocalInvocationID.x][gl_LocalInvocationID.y] = float(gl_LocalInvocationIndex); + threadgroup_barrier(mem_flags::mem_threadgroup); + float x = 0.0; + x += foo[gl_LocalInvocationID.x][0]; + x += foo[gl_LocalInvocationID.x][1]; + x += foo[gl_LocalInvocationID.x][2]; + x += foo[gl_LocalInvocationID.x][3]; + v_67.out_data[gl_GlobalInvocationID.x] = x; +} + +kernel void main0(device SSBO& v_67 [[buffer(0)]], uint3 gl_LocalInvocationID [[thread_position_in_threadgroup]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + threadgroup float foo[4][4]; + work(foo, gl_LocalInvocationID, gl_LocalInvocationIndex, v_67, gl_GlobalInvocationID); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/shared.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/shared.comp new file mode 100644 index 000000000..e296190bd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/shared.comp @@ -0,0 +1,27 @@ +#include +#include + +using namespace metal; + +constant uint3 gl_WorkGroupSize = uint3(4u, 1u, 1u); + +struct SSBO +{ + float in_data[1]; +}; + +struct SSBO2 +{ + float out_data[1]; +}; + +kernel void main0(const device SSBO& _22 [[buffer(0)]], device SSBO2& _44 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]], uint gl_LocalInvocationIndex [[thread_index_in_threadgroup]]) +{ + threadgroup float sShared[4]; + uint ident = gl_GlobalInvocationID.x; + float idata = _22.in_data[ident]; + sShared[gl_LocalInvocationIndex] = idata; + threadgroup_barrier(mem_flags::mem_threadgroup); + _44.out_data[ident] = sShared[(4u - gl_LocalInvocationIndex) - 1u]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/spec-constant-op-member-array.comp 
b/3rdparty/spirv-cross/reference/shaders-msl/comp/spec-constant-op-member-array.comp new file mode 100644 index 000000000..35ebe1592 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/spec-constant-op-member-array.comp @@ -0,0 +1,46 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 100 +#endif +constant int a = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 200 +#endif +constant int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 300 +#endif +constant int c = SPIRV_CROSS_CONSTANT_ID_2; +constant int _18 = (c + 50); +constant int e_tmp [[function_constant(3)]]; +constant int e = is_function_constant_defined(e_tmp) ? e_tmp : 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +struct SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[_18]; +}; + +kernel void main0(device SSBO& _22 [[buffer(0)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/spec-constant-work-group-size.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/spec-constant-work-group-size.comp new file mode 100644 index 000000000..e46ab8939 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/spec-constant-work-group-size.comp @@ -0,0 +1,33 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 2 +#endif +constant int b = SPIRV_CROSS_CONSTANT_ID_1; +constant int a_tmp [[function_constant(0)]]; +constant int a = is_function_constant_defined(a_tmp) ? a_tmp : 1; +constant uint _21 = (uint(a) + 0u); +constant uint _22_tmp [[function_constant(10)]]; +constant uint _22 = is_function_constant_defined(_22_tmp) ? 
_22_tmp : 1u; +constant uint3 gl_WorkGroupSize = uint3(_22, 20u, 1u); +constant uint _27 = gl_WorkGroupSize.x; +constant uint _28 = (_21 + _27); +constant uint _29 = gl_WorkGroupSize.y; +constant uint _30 = (_28 + _29); +constant int _32 = (1 - a); + +struct SSBO +{ + int v[1]; +}; + +kernel void main0(device SSBO& _17 [[buffer(0)]]) +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + _17.v[_30] = b + spec_const_array_size[_32]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-layout.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-layout.comp new file mode 100644 index 000000000..3c44fe541 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-layout.comp @@ -0,0 +1,26 @@ +#include +#include + +using namespace metal; + +struct Foo +{ + float4x4 m; +}; + +struct SSBO2 +{ + Foo out_data[1]; +}; + +struct SSBO +{ + Foo in_data[1]; +}; + +kernel void main0(const device SSBO& _30 [[buffer(0)]], device SSBO2& _23 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + _23.out_data[ident].m = _30.in_data[ident].m * _30.in_data[ident].m; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-nested.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-nested.comp new file mode 100644 index 000000000..117e492d6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-nested.comp @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct s1 +{ + int a; +}; + +struct s2 +{ + s1 b; +}; + +struct s1_1 +{ + int a; +}; + +struct s2_1 +{ + s1_1 b; +}; + +struct dstbuffer +{ + s2_1 test[1]; +}; + +kernel void main0(device dstbuffer& _19 [[buffer(1)]]) +{ + s2 testVal; + testVal.b.a = 0; + _19.test[0].b.a = testVal.b.a; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-packing.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-packing.comp new file mode 100644 index 000000000..2b37844fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/struct-packing.comp @@ -0,0 +1,149 @@ +#include +#include + +using namespace metal; + +typedef float3x2 packed_float2x3; + +struct S0 +{ + float2 a[1]; + float b; +}; + +struct S1 +{ + packed_float3 a; + float b; +}; + +struct S2 +{ + float3 a[1]; + float b; +}; + +struct S3 +{ + float2 a; + float b; +}; + +struct S4 +{ + float2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + char pad7[4]; + float m4; + S4 m3s[8]; +}; + +struct SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + char pad3[8]; + float2x2 m0; + float2x2 m1; + float2x3 m2[4]; + float3x2 m3; + float2x2 m4; + float2x2 m5[9]; + packed_float2x3 m6[4][2]; + char pad10[8]; + float3x2 m7; + char pad11[8]; + float array[1]; +}; + +struct S0_1 +{ + float2 a[1]; + float b; +}; + +struct S1_1 +{ + packed_float3 a; + float b; +}; + +struct S2_1 +{ + float3 a[1]; + float b; +}; + +struct S3_1 +{ + float2 a; + float b; +}; + +struct S4_1 +{ + float2 c; +}; + +struct Content_1 +{ + S0_1 m0s[1]; + S1_1 m1s[1]; + S2_1 m2s[1]; + S0_1 m0; + S1_1 m1; + S2_1 m2; + S3_1 m3; + char pad7[4]; + float m4; + S4_1 m3s[8]; +}; + +struct SSBO0 +{ + Content_1 content; + Content_1 content1[2]; + Content_1 content2; + float array[1]; +}; + +kernel void main0(device SSBO0& ssbo_140 [[buffer(0)]], device SSBO1& ssbo_430 [[buffer(1)]]) +{ + Content_1 _60 = ssbo_140.content; + ssbo_430.content.m0s[0].a[0] = _60.m0s[0].a[0]; + 
ssbo_430.content.m0s[0].b = _60.m0s[0].b; + ssbo_430.content.m1s[0].a = _60.m1s[0].a; + ssbo_430.content.m1s[0].b = _60.m1s[0].b; + ssbo_430.content.m2s[0].a[0] = _60.m2s[0].a[0]; + ssbo_430.content.m2s[0].b = _60.m2s[0].b; + ssbo_430.content.m0.a[0] = _60.m0.a[0]; + ssbo_430.content.m0.b = _60.m0.b; + ssbo_430.content.m1.a = _60.m1.a; + ssbo_430.content.m1.b = _60.m1.b; + ssbo_430.content.m2.a[0] = _60.m2.a[0]; + ssbo_430.content.m2.b = _60.m2.b; + ssbo_430.content.m3.a = _60.m3.a; + ssbo_430.content.m3.b = _60.m3.b; + ssbo_430.content.m4 = _60.m4; + ssbo_430.content.m3s[0].c = _60.m3s[0].c; + ssbo_430.content.m3s[1].c = _60.m3s[1].c; + ssbo_430.content.m3s[2].c = _60.m3s[2].c; + ssbo_430.content.m3s[3].c = _60.m3s[3].c; + ssbo_430.content.m3s[4].c = _60.m3s[4].c; + ssbo_430.content.m3s[5].c = _60.m3s[5].c; + ssbo_430.content.m3s[6].c = _60.m3s[6].c; + ssbo_430.content.m3s[7].c = _60.m3s[7].c; + ssbo_430.content.m1.a = ssbo_430.content.m3.a * ssbo_430.m6[1][1]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/torture-loop.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/torture-loop.comp new file mode 100644 index 000000000..1b65a3afa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/torture-loop.comp @@ -0,0 +1,51 @@ +#include +#include + +using namespace metal; + +struct SSBO +{ + float4x4 mvp; + float4 in_data[1]; +}; + +struct SSBO2 +{ + float4 out_data[1]; +}; + +kernel void main0(const device SSBO& _24 [[buffer(0)]], device SSBO2& _89 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + uint ident = gl_GlobalInvocationID.x; + float4 idat = _24.in_data[ident]; + int k = 0; + for (;;) + { + int _39 = k; + int _40 = _39 + 1; + k = _40; + if (_40 < 10) + { + idat *= 2.0; + k++; + continue; + } + else + { + break; + } + } + for (uint i = 0u; i < 16u; i++, k++) + { + for (uint j = 0u; j < 30u; j++) + { + idat = _24.mvp * idat; + } + } + do + { + k++; + } while (k > 10); + _89.out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/type-alias.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/type-alias.comp new file mode 100644 index 000000000..25a49f59f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/type-alias.comp @@ -0,0 +1,63 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct S0 +{ + float4 a; +}; + +struct S1 +{ + float4 a; +}; + +struct S0_1 +{ + float4 a; +}; + +struct SSBO0 +{ + S0_1 s0s[1]; +}; + +struct S1_1 +{ + float4 a; +}; + +struct SSBO1 +{ + S1_1 s1s[1]; +}; + +struct SSBO2 +{ + float4 outputs[1]; +}; + +float4 overload(thread const S0& s0) +{ + return s0.a; +} + +float4 overload(thread const S1& s1) +{ + return s1.a; +} + +kernel void main0(device SSBO0& _36 [[buffer(0)]], device SSBO1& _55 [[buffer(1)]], device SSBO2& _66 [[buffer(2)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + S0 s0; + s0.a = _36.s0s[gl_GlobalInvocationID.x].a; + S1 s1; + s1.a = _55.s1s[gl_GlobalInvocationID.x].a; + S0 param = s0; + S1 param_1 = s1; + _66.outputs[gl_GlobalInvocationID.x] = overload(param) + overload(param_1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/udiv.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/udiv.comp new file mode 100644 index 000000000..a298ecdb7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/udiv.comp @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +struct SSBO2 +{ + uint outputs[1]; +}; + +struct 
SSBO +{ + uint inputs[1]; +}; + +kernel void main0(device SSBO& _23 [[buffer(0)]], device SSBO2& _10 [[buffer(1)]], uint3 gl_GlobalInvocationID [[thread_position_in_grid]]) +{ + _10.outputs[gl_GlobalInvocationID.x] = _23.inputs[gl_GlobalInvocationID.x] / 29u; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/comp/writable-ssbo.comp b/3rdparty/spirv-cross/reference/shaders-msl/comp/writable-ssbo.comp new file mode 100644 index 000000000..9dc53b6dd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/comp/writable-ssbo.comp @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct myBlock +{ + int a; + float b; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +kernel void main0(device myBlock& myStorage [[buffer(0)]]) +{ + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b = mod(myStorage.b + 0.0199999995529651641845703125, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp new file mode 100644 index 000000000..a37fe519a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp @@ -0,0 +1,177 @@ +#include +#include + +using namespace metal; + +struct SSBOUint +{ + uint a; + uint b; + uint c; + uint d; + uint2 a2; + uint2 b2; + uint2 c2; + uint2 d2; + uint3 a3; + uint3 b3; + uint3 c3; + uint3 d3; + uint4 a4; + uint4 b4; + uint4 c4; + uint4 d4; +}; + +struct ResType +{ + uint _m0; + uint _m1; +}; + +struct ResType_1 +{ + uint2 _m0; + uint2 _m1; +}; + +struct ResType_2 +{ + uint3 _m0; + uint3 _m1; +}; + +struct ResType_3 +{ + uint4 _m0; + uint4 _m1; +}; + +struct SSBOInt +{ + int a; + int b; + int c; + int d; + int2 a2; + int2 b2; + int2 c2; + int2 d2; + int3 a3; + int3 b3; + int3 c3; + int3 d3; + int4 a4; + int4 b4; + int4 c4; + int4 d4; +}; + +struct ResType_4 +{ + int _m0; + int _m1; +}; + +struct ResType_5 +{ + int2 _m0; + int2 _m1; +}; + +struct ResType_6 +{ + int3 _m0; + int3 _m1; +}; + +struct ResType_7 +{ + int4 _m0; + int4 _m1; +}; + +kernel void main0(device SSBOUint& u [[buffer(0)]], device SSBOInt& i [[buffer(1)]]) +{ + ResType _25; + _25._m0 = u.a + u.b; + _25._m1 = select(uint(1), uint(0), _25._m0 >= max(u.a, u.b)); + u.d = _25._m1; + u.c = _25._m0; + ResType_1 _40; + _40._m0 = u.a2 + u.b2; + _40._m1 = select(uint2(1), uint2(0), _40._m0 >= max(u.a2, u.b2)); + u.d2 = _40._m1; + u.c2 = _40._m0; + ResType_2 _55; + _55._m0 = u.a3 + u.b3; + _55._m1 = select(uint3(1), uint3(0), _55._m0 >= max(u.a3, u.b3)); + u.d3 = _55._m1; + u.c3 = _55._m0; + ResType_3 _70; + _70._m0 = u.a4 + u.b4; + _70._m1 = select(uint4(1), uint4(0), _70._m0 >= max(u.a4, u.b4)); + u.d4 = _70._m1; + u.c4 = _70._m0; + ResType _79; + _79._m0 = u.a - u.b; + _79._m1 = select(uint(1), uint(0), u.a >= u.b); + u.d = _79._m1; + u.c = _79._m0; + ResType_1 _88; + _88._m0 = u.a2 - u.b2; + _88._m1 = select(uint2(1), uint2(0), u.a2 >= u.b2); + u.d2 = _88._m1; + u.c2 = _88._m0; + ResType_2 _97; + _97._m0 = u.a3 - u.b3; + _97._m1 = select(uint3(1), uint3(0), u.a3 >= u.b3); + u.d3 = _97._m1; + u.c3 = _97._m0; + ResType_3 _106; + _106._m0 = u.a4 - u.b4; + _106._m1 = select(uint4(1), uint4(0), u.a4 >= u.b4); + u.d4 = _106._m1; + u.c4 = _106._m0; + ResType _116; + _116._m0 = u.a * u.b; + _116._m1 = mulhi(u.a, 
u.b); + u.d = _116._m0; + u.c = _116._m1; + ResType_1 _125; + _125._m0 = u.a2 * u.b2; + _125._m1 = mulhi(u.a2, u.b2); + u.d2 = _125._m0; + u.c2 = _125._m1; + ResType_2 _134; + _134._m0 = u.a3 * u.b3; + _134._m1 = mulhi(u.a3, u.b3); + u.d3 = _134._m0; + u.c3 = _134._m1; + ResType_3 _143; + _143._m0 = u.a4 * u.b4; + _143._m1 = mulhi(u.a4, u.b4); + u.d4 = _143._m0; + u.c4 = _143._m1; + ResType_4 _160; + _160._m0 = i.a * i.b; + _160._m1 = mulhi(i.a, i.b); + i.d = _160._m0; + i.c = _160._m1; + ResType_5 _171; + _171._m0 = i.a2 * i.b2; + _171._m1 = mulhi(i.a2, i.b2); + i.d2 = _171._m0; + i.c2 = _171._m1; + ResType_6 _182; + _182._m0 = i.a3 * i.b3; + _182._m1 = mulhi(i.a3, i.b3); + i.d3 = _182._m0; + i.c3 = _182._m1; + ResType_7 _193; + _193._m0 = i.a4 * i.b4; + _193._m1 = mulhi(i.a4, i.b4); + i.d4 = _193._m0; + i.c4 = _193._m1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/image-ms.desktop.frag b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/image-ms.desktop.frag new file mode 100644 index 000000000..b7b2cc158 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/image-ms.desktop.frag @@ -0,0 +1,13 @@ +#include +#include + +using namespace metal; + +fragment void main0(texture2d uImage [[texture(0)]], texture2d_array uImageArray [[texture(1)]], texture2d_ms uImageMS [[texture(2)]]) +{ + float4 a = uImageMS.read(uint2(int2(1, 2)), 2); + float4 b = uImageArray.read(uint2(int3(1, 2, 4).xy), uint(int3(1, 2, 4).z)); + uImage.write(a, uint2(int2(2, 3))); + uImageArray.write(b, uint2(int3(2, 3, 7).xy), uint(int3(2, 3, 7).z)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/query-levels.desktop.frag b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/query-levels.desktop.frag new file mode 100644 index 000000000..922796b74 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/query-levels.desktop.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = float4(float(int(uSampler.get_num_mip_levels()))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag new file mode 100644 index 000000000..a6f06064a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d_ms uSampler [[texture(0)]], texture2d_ms uImage [[texture(2)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = float4(float(int(uSampler.get_num_samples()) + int(uImage.get_num_samples()))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/basic.desktop.sso.vert b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/basic.desktop.sso.vert new file mode 100644 index 000000000..ffb435712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/basic.desktop.sso.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; 
+ float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.uMVP * in.aVertex; + out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert new file mode 100644 index 000000000..32f0d9aa0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + float gl_ClipDistance [[clip_distance]] [2]; + float gl_CullDistance[2]; +}; + +vertex main0_out main0() +{ + main0_out out = {}; + out.gl_Position = float4(10.0); + out.gl_ClipDistance[0] = 1.0; + out.gl_ClipDistance[1] = 4.0; + out.gl_CullDistance[0] = 4.0; + out.gl_CullDistance[1] = 9.0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert new file mode 100644 index 000000000..1d203ba98 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(uint gl_BaseVertex [[base_vertex]], uint gl_BaseInstance [[base_instance]]) +{ + main0_out out = {}; + out.gl_Position = float4(float(gl_BaseVertex), float(gl_BaseInstance), 0.0, 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/basic.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/flatten/basic.flatten.vert new file mode 100644 index 000000000..ffb435712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/basic.flatten.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.uMVP * in.aVertex; + out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/multiindex.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/flatten/multiindex.flatten.vert new file mode 100644 index 000000000..f4549abab --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/multiindex.flatten.vert @@ -0,0 +1,27 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4 Data[3][5]; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + int2 aIndex [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _20 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _20.Data[in.aIndex.x][in.aIndex.y]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/push-constant.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/flatten/push-constant.flatten.vert new file mode 
100644 index 000000000..8f2e8c173 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/push-constant.flatten.vert @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +struct PushMe +{ + float4x4 MVP; + float2x2 Rot; + float Arr[4]; +}; + +struct main0_out +{ + float2 vRot [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float2 Rot [[attribute(0)]]; + float4 Pos [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant PushMe& registers [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = registers.MVP * in.Pos; + out.vRot = (registers.Rot * in.Rot) + float2(registers.Arr[2]); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/rowmajor.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/flatten/rowmajor.flatten.vert new file mode 100644 index 000000000..b5df8b064 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/rowmajor.flatten.vert @@ -0,0 +1,38 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVPR; + float4x4 uMVPC; + float2x4 uMVP; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; +}; + +// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization. +float2x4 spvConvertFromRowMajor2x4(float2x4 m) +{ + return float2x4(float4(m[0][0], m[0][2], m[1][0], m[1][2]), float4(m[0][1], m[0][3], m[1][1], m[1][3])); +} + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + float2 v = in.aVertex * spvConvertFromRowMajor2x4(_18.uMVP); + out.gl_Position = (_18.uMVPR * in.aVertex) + (in.aVertex * _18.uMVPC); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/struct.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/flatten/struct.flatten.vert new file mode 100644 index 000000000..954f9255c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/struct.flatten.vert @@ -0,0 +1,40 @@ +#include +#include + +using namespace metal; + +struct Light +{ + packed_float3 Position; + float Radius; + float4 Color; +}; + +struct UBO +{ + float4x4 uMVP; + Light light; +}; + +struct main0_out +{ + float4 vColor [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _18.uMVP * in.aVertex; + out.vColor = float4(0.0); + float3 L = in.aVertex.xyz - float3(_18.light.Position); + out.vColor += ((_18.light.Color * fast::clamp(1.0 - (length(L) / _18.light.Radius), 0.0, 1.0)) * dot(in.aNormal, normalize(L))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/swizzle.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/flatten/swizzle.flatten.vert new file mode 100644 index 000000000..05a6bbaeb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/swizzle.flatten.vert @@ -0,0 +1,47 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4 A; + float2 B0; + float2 B1; + float C0; + float3 C1; + packed_float3 D0; + float D1; + float E0; + float E1; + float E2; + float E3; + float F0; + float2 F1; + float F2; +}; + +struct main0_out +{ + float4 oA [[user(locn0)]]; + float4 oB 
[[user(locn1)]]; + float4 oC [[user(locn2)]]; + float4 oD [[user(locn3)]]; + float4 oE [[user(locn4)]]; + float4 oF [[user(locn5)]]; + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(constant UBO& _22 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = float4(0.0); + out.oA = _22.A; + out.oB = float4(_22.B0, _22.B1); + out.oC = float4(_22.C0, _22.C1) + float4(_22.C1.xy, _22.C1.z, _22.C0); + out.oD = float4(_22.D0[0], _22.D0[1], _22.D0[2], _22.D1) + float4(float2(_22.D0[0], _22.D0[1]), _22.D0[2u], _22.D1); + out.oE = float4(_22.E0, _22.E1, _22.E2, _22.E3); + out.oF = float4(_22.F0, _22.F1, _22.F2); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/flatten/types.flatten.frag b/3rdparty/spirv-cross/reference/shaders-msl/flatten/types.flatten.frag new file mode 100644 index 000000000..540c5baeb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/flatten/types.flatten.frag @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct UBO1 +{ + int4 c; + int4 d; +}; + +struct UBO2 +{ + uint4 e; + uint4 f; +}; + +struct UBO0 +{ + float4 a; + float4 b; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant UBO0& _41 [[buffer(0)]], constant UBO1& _14 [[buffer(1)]], constant UBO2& _29 [[buffer(2)]]) +{ + main0_out out = {}; + out.FragColor = ((((float4(_14.c) + float4(_14.d)) + float4(_29.e)) + float4(_29.f)) + _41.a) + _41.b; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/16bit-constants.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/16bit-constants.frag new file mode 100644 index 000000000..2aae1e8aa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/16bit-constants.frag @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + half foo [[color(0)]]; + short bar [[color(1)]]; + ushort baz [[color(2)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.foo = 1.0h; + out.bar = 2; + out.baz = 3u; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..87158849b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,21 @@ +#include +#include + +using namespace metal; + +constant float _17[5] = { 1.0, 2.0, 3.0, 4.0, 5.0 }; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + for (int i = 0; i < 4; i++, out.FragColor += float4(_17[i])) + { + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/basic.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/basic.frag new file mode 100644 index 000000000..f33db61eb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/basic.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; + float2 vTex [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uTex [[texture(0)]], sampler uTexSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = in.vColor * uTex.sample(uTexSmplr, in.vTex); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/binary-func-unpack-pack-arguments.frag 
b/3rdparty/spirv-cross/reference/shaders-msl/frag/binary-func-unpack-pack-arguments.frag new file mode 100644 index 000000000..134cfe184 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/binary-func-unpack-pack-arguments.frag @@ -0,0 +1,28 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct UBO +{ + packed_float3 color; + float v; +}; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vIn [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant UBO& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = dot(in.vIn, float3(_15.color)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/binary-unpack-pack-arguments.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/binary-unpack-pack-arguments.frag new file mode 100644 index 000000000..8bd538bec --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/binary-unpack-pack-arguments.frag @@ -0,0 +1,28 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct UBO +{ + packed_float3 color; + float v; +}; + +struct main0_out +{ + float3 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vIn [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant UBO& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = cross(in.vIn, float3(_15.color) - in.vIn); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/bitcasting.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/bitcasting.frag new file mode 100644 index 000000000..475b573a2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/bitcasting.frag @@ -0,0 +1,30 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor0 [[color(0)]]; + float4 FragColor1 [[color(1)]]; +}; + +struct main0_in +{ + float4 VertGeom [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> TextureBase [[texture(0)]], texture2d<float> TextureDetail [[texture(1)]], sampler TextureBaseSmplr [[sampler(0)]], sampler TextureDetailSmplr [[sampler(1)]]) +{ + main0_out out = {}; + float4 texSample0 = TextureBase.sample(TextureBaseSmplr, in.VertGeom.xy); + float4 texSample1 = TextureDetail.sample(TextureDetailSmplr, in.VertGeom.xy, int2(3, 2)); + int4 iResult0 = as_type<int4>(texSample0); + int4 iResult1 = as_type<int4>(texSample1); + out.FragColor0 = as_type<float4>(iResult0) * as_type<float4>(iResult1); + uint4 uResult0 = as_type<uint4>(texSample0); + uint4 uResult1 = as_type<uint4>(texSample1); + out.FragColor1 = as_type<float4>(uResult0) * as_type<float4>(uResult1); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/buffer-read.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/buffer-read.frag new file mode 100644 index 000000000..fdd88b568 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/buffer-read.frag @@ -0,0 +1,25 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +fragment main0_out main0(texture2d<float> buf [[texture(0)]]) +{ + main0_out out = {}; + out.FragColor = buf.read(spvTexelBufferCoord(0)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/builtins.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/builtins.frag new file mode 100644 index
000000000..f9085252b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/builtins.frag @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; + float gl_FragDepth [[depth(any)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = gl_FragCoord + in.vColor; + out.gl_FragDepth = 0.5; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..7f39a7b94 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4 results[1024]; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int vIn [[user(locn0)]]; + int vIn2 [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], device UBO& _34 [[buffer(0)]], texture2d Buf [[texture(1)]], sampler BufSmplr [[sampler(1)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + int4 coords = Buf.read(uint2(int2(gl_FragCoord.xy)), 0); + float4 foo = _34.results[coords.x % 16]; + int c = in.vIn * in.vIn; + int d = in.vIn2 * in.vIn2; + out.FragColor = (foo + foo) + _34.results[c + d]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/composite-extract-forced-temporary.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/composite-extract-forced-temporary.frag new file mode 100644 index 000000000..dfab4d26f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/composite-extract-forced-temporary.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vTexCoord [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d Texture [[texture(0)]], sampler TextureSmplr [[sampler(0)]]) +{ + main0_out out = {}; + float f = Texture.sample(TextureSmplr, in.vTexCoord).x; + out.FragColor = float4(f * f); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/constant-array.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/constant-array.frag new file mode 100644 index 000000000..f6989a2a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/constant-array.frag @@ -0,0 +1,57 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct Foobar +{ + float a; + float b; +}; + +constant float4 _37[3] = { float4(1.0), float4(2.0), float4(3.0) }; +constant float4 _49[2] = { float4(1.0), float4(2.0) }; +constant float4 _54[2] = { float4(8.0), float4(10.0) }; +constant float4 _55[2][2] = { { float4(1.0), float4(2.0) }, { float4(8.0), float4(10.0) } }; +constant Foobar _75[2] = { Foobar{ 10.0, 40.0 }, Foobar{ 90.0, 70.0 } }; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
+template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +float4 resolve(thread const Foobar& f) +{ + return float4(f.a + f.b); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Foobar param = Foobar{ 10.0, 20.0 }; + Foobar indexable[2] = { Foobar{ 10.0, 40.0 }, Foobar{ 90.0, 70.0 } }; + Foobar param_1 = indexable[in.index]; + out.FragColor = ((_37[in.index] + _55[in.index][in.index + 1]) + resolve(param)) + resolve(param_1); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/constant-composites.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/constant-composites.frag new file mode 100644 index 000000000..2c91b1117 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/constant-composites.frag @@ -0,0 +1,49 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct Foo +{ + float a; + float b; +}; + +constant float _16[4] = { 1.0, 4.0, 3.0, 2.0 }; +constant Foo _28[2] = { Foo{ 10.0, 20.0 }, Foo{ 30.0, 40.0 } }; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int line [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template<typename T, uint N> +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template<typename T, uint N> +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float lut[4] = { 1.0, 4.0, 3.0, 2.0 }; + Foo foos[2] = { Foo{ 10.0, 20.0 }, Foo{ 30.0, 40.0 } }; + out.FragColor = float4(lut[in.line]); + out.FragColor += float4(foos[in.line].a * foos[1 - in.line].a); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..4d1016713 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,45 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vInput [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = in.vInput; + float4 t = uSampler.sample(uSamplerSmplr, in.vInput.xy); + float4 d0 = dfdx(in.vInput); + float4 d1 = dfdy(in.vInput); + float4 d2 = fwidth(in.vInput); + float4 d3 = dfdx(in.vInput); + float4 d4 = dfdy(in.vInput); + float4 d5 = fwidth(in.vInput); + float4 d6 = dfdx(in.vInput); + float4 d7 = dfdy(in.vInput); + float4 d8 = fwidth(in.vInput); + if (in.vInput.y > 10.0) + { + out.FragColor += t; + out.FragColor += d0; + out.FragColor += d1; + out.FragColor += d2; + out.FragColor += d3; + out.FragColor += d4; + out.FragColor += d5; + out.FragColor += d6; + out.FragColor += d7; + out.FragColor += d8; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/depth-greater-than.frag
b/3rdparty/spirv-cross/reference/shaders-msl/frag/depth-greater-than.frag new file mode 100644 index 000000000..5861509fc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/depth-greater-than.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float gl_FragDepth [[depth(greater)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.gl_FragDepth = 0.5; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/depth-less-than.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/depth-less-than.frag new file mode 100644 index 000000000..f1177fa64 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/depth-less-than.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float gl_FragDepth [[depth(less)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.gl_FragDepth = 0.5; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/dual-source-blending.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/dual-source-blending.frag new file mode 100644 index 000000000..37938bf8c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/dual-source-blending.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor0 [[color(0), index(0)]]; + float4 FragColor1 [[color(0), index(1)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor0 = float4(1.0); + out.FragColor1 = float4(2.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/early-fragment-tests.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/early-fragment-tests.frag new file mode 100644 index 000000000..850fdc920 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/early-fragment-tests.frag @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +[[ early_fragment_tests ]] fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/false-loop-init.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/false-loop-init.frag new file mode 100644 index 000000000..7a4d6d5a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/false-loop-init.frag @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 result [[color(0)]]; +}; + +struct main0_in +{ + float4 accum [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.result = float4(0.0); + uint j; + for (int i = 0; i < 4; i += int(j)) + { + if (in.accum.y > 10.0) + { + j = 40u; + } + else + { + j = 30u; + } + out.result += in.accum; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/flush_params.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/flush_params.frag new file mode 100644 index 000000000..e2f2a48cb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/flush_params.frag @@ -0,0 +1,38 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct Structy +{ + float4 c; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +void foo2(thread Structy& f) +{ + f.c = float4(10.0); +} + +Structy foo() +{ + Structy param; + foo2(param); + Structy f = param; + return f; +} + +fragment 
main0_out main0() +{ + main0_out out = {}; + Structy s = foo(); + out.FragColor = s.c; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/for-loop-init.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/for-loop-init.frag new file mode 100644 index 000000000..9f3191b97 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/for-loop-init.frag @@ -0,0 +1,58 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + int FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = 16; + for (int i = 0; i < 25; i++) + { + out.FragColor += 10; + } + for (int i_1 = 1, j = 4; i_1 < 30; i_1++, j += 4) + { + out.FragColor += 11; + } + int k = 0; + for (; k < 20; k++) + { + out.FragColor += 12; + } + k += 3; + out.FragColor += k; + int l; + if (k == 40) + { + l = 0; + for (; l < 40; l++) + { + out.FragColor += 13; + } + return out; + } + else + { + l = k; + out.FragColor += l; + } + int2 i_2 = int2(0); + for (; i_2.x < 10; i_2.x += 4) + { + out.FragColor += i_2.y; + } + int o = k; + for (int m = k; m < 40; m++) + { + out.FragColor += m; + } + out.FragColor += o; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/fp16-packing.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/fp16-packing.frag new file mode 100644 index 000000000..e21feb43e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/fp16-packing.frag @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float2 FP32Out [[color(0)]]; + uint FP16Out [[color(1)]]; +}; + +struct main0_in +{ + uint FP16 [[user(locn0)]]; + float2 FP32 [[user(locn1), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FP32Out = float2(as_type(in.FP16)); + out.FP16Out = as_type(half2(in.FP32)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/fp16.desktop.invalid.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/fp16.desktop.invalid.frag new file mode 100644 index 000000000..b914e4b8f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/fp16.desktop.invalid.frag @@ -0,0 +1,180 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct ResType +{ + half4 _m0; + int4 _m1; +}; + +struct main0_in +{ + half v1 [[user(locn0)]]; + half2 v2 [[user(locn1)]]; + half3 v3 [[user(locn2)]]; + half4 v4 [[user(locn3)]]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +// Implementation of the GLSL radians() function +template +T radians(T d) +{ + return d * T(0.01745329251); +} + +// Implementation of the GLSL degrees() function +template +T degrees(T r) +{ + return r * T(57.2957795131); +} + +half2x2 test_mat2(thread const half2& a, thread const half2& b, thread const half2& c, thread const half2& d) +{ + return half2x2(half2(a), half2(b)) * half2x2(half2(c), half2(d)); +} + +half3x3 test_mat3(thread const half3& a, thread const half3& b, thread const half3& c, thread const half3& d, thread const half3& e, thread const half3& f) +{ + return half3x3(half3(a), half3(b), half3(c)) * half3x3(half3(d), half3(e), half3(f)); +} + +void test_constants() +{ + half a = 1.0h; + half b = 1.5h; + half c = -1.5h; + half d = (0.0h / 0.0h); + half e = (1.0h / 0.0h); + half f = (-1.0h / 0.0h); + half g = 1014.0h; + half h = 9.5367431640625e-07h; +} 
+ +half test_result() +{ + return 1.0h; +} + +void test_conversions() +{ + half one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = one != 0.0h; + float d = float(one); + half a2 = half(a); + half b2 = half(b); + half c2 = half(c); + half d2 = half(d); +} + +void test_builtins(thread half4& v4, thread half3& v3, thread half& v1) +{ + half4 res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan2(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + res = asinh(v4); + res = acosh(v4); + res = atanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = rsqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + res = rint(v4); + res = ceil(v4); + res = fract(v4); + res = mod(v4, v4); + half4 tmp; + half4 _223 = modf(v4, tmp); + res = _223; + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = mix(v4, v4, v4); + bool4 _243 = v4 < v4; + res = half4(_243.x ? v4.x : v4.x, _243.y ? v4.y : v4.y, _243.z ? v4.z : v4.z, _243.w ? v4.w : v4.w); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + bool4 btmp = isnan(v4); + btmp = isinf(v4); + res = fma(v4, v4, v4); + ResType _267; + _267._m0 = frexp(v4, _267._m1); + int4 itmp = _267._m1; + res = _267._m0; + res = ldexp(res, itmp); + uint pack0 = as_type(v4.xy); + uint pack1 = as_type(v4.zw); + res = half4(as_type(pack0), as_type(pack1)); + half t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + half3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + btmp = v4 < v4; + btmp = v4 <= v4; + btmp = v4 > v4; + btmp = v4 >= v4; + btmp = v4 == v4; + btmp = v4 != v4; + res = dfdx(v4); + res = dfdy(v4); + res = dfdx(v4); + res = dfdy(v4); + res = dfdx(v4); + res = dfdy(v4); + res = fwidth(v4); + res = fwidth(v4); + res = fwidth(v4); +} + +fragment void main0(main0_in in [[stage_in]]) +{ + half2 param = in.v2; + half2 param_1 = in.v2; + half2 param_2 = in.v3.xy; + half2 param_3 = in.v3.xy; + half2x2 m0 = test_mat2(param, param_1, param_2, param_3); + half3 param_4 = in.v3; + half3 param_5 = in.v3; + half3 param_6 = in.v3; + half3 param_7 = in.v4.xyz; + half3 param_8 = in.v4.xyz; + half3 param_9 = in.v4.yzw; + half3x3 m1 = test_mat3(param_4, param_5, param_6, param_7, param_8, param_9); + test_constants(); + test_conversions(); + test_builtins(in.v4, in.v3, in.v1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/front-facing.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/front-facing.frag new file mode 100644 index 000000000..2f8364249 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/front-facing.frag @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vA [[user(locn0)]]; + float4 vB [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], bool gl_FrontFacing [[front_facing]]) +{ + main0_out out = {}; + if (gl_FrontFacing) + { + out.FragColor = in.vA; + } + else + { + out.FragColor = in.vB; + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/gather-dref.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/gather-dref.frag new file mode 100644 index 000000000..c5c5ccf0b --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl/frag/gather-dref.frag @@ -0,0 +1,22 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d<float> uT [[texture(0)]], sampler uTSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uT.gather_compare(uTSmplr, in.vUV.xy, in.vUV.z); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/gather-offset.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/gather-offset.frag new file mode 100644 index 000000000..02b80194b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/gather-offset.frag @@ -0,0 +1,17 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d<float> uT [[texture(0)]], sampler uTSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uT.gather(uTSmplr, float2(0.5), int2(0), component::w); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/helper-invocation.msl21.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/helper-invocation.msl21.frag new file mode 100644 index 000000000..8d32f4860 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/helper-invocation.msl21.frag @@ -0,0 +1,39 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vUV [[user(locn0)]]; +}; + +float4 foo(thread bool& gl_HelperInvocation, thread texture2d<float> uSampler, thread const sampler uSamplerSmplr, thread float2& vUV) +{ + float4 color; + if (!gl_HelperInvocation) + { + color = uSampler.sample(uSamplerSmplr, vUV, level(0.0)); + } + else + { + color = float4(1.0); + } + return color; +} + +fragment main0_out main0(main0_in in [[stage_in]], texture2d<float> uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + bool gl_HelperInvocation = simd_is_helper_thread(); + out.FragColor = foo(gl_HelperInvocation, uSampler, uSamplerSmplr, in.vUV); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/illegal-name-test-0.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/illegal-name-test-0.frag new file mode 100644 index 000000000..6b209b49d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/illegal-name-test-0.frag @@ -0,0 +1,21 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + float4 fragment0 = float4(10.0); + float4 compute0 = float4(10.0); + float4 kernel0 = float4(10.0); + float4 vertex0 = float4(10.0); + out.FragColor = ((fragment0 + compute0) + kernel0) + vertex0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/in_block.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/in_block.frag new file mode 100644 index 000000000..8178c9a4e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/in_block.frag @@ -0,0 +1,32 @@ +#include <metal_stdlib> +#include <simd/simd.h> + +using namespace metal; + +struct VertexOut +{ + float4 color; + float4 color2; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 VertexOut_color [[user(locn2)]]; + float4 VertexOut_color2 [[user(locn3)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) 
+{ + main0_out out = {}; + VertexOut inputs = {}; + inputs.color = in.VertexOut_color; + inputs.color2 = in.VertexOut_color2; + out.FragColor = inputs.color + inputs.color2; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/in_mat.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/in_mat.frag new file mode 100644 index 000000000..f0f4c4eee --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/in_mat.frag @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 outFragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 inPos [[user(locn0)]]; + float3 inNormal [[user(locn1)]]; + float4 inInvModelView_0 [[user(locn2)]]; + float4 inInvModelView_1 [[user(locn3)]]; + float4 inInvModelView_2 [[user(locn4)]]; + float4 inInvModelView_3 [[user(locn5)]]; + float inLodBias [[user(locn6)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texturecube samplerColor [[texture(1)]], sampler samplerColorSmplr [[sampler(1)]]) +{ + main0_out out = {}; + float4x4 inInvModelView = {}; + inInvModelView[0] = in.inInvModelView_0; + inInvModelView[1] = in.inInvModelView_1; + inInvModelView[2] = in.inInvModelView_2; + inInvModelView[3] = in.inInvModelView_3; + float3 cI = normalize(in.inPos); + float3 cR = reflect(cI, normalize(in.inNormal)); + cR = float3((inInvModelView * float4(cR, 0.0)).xyz); + cR.x *= (-1.0); + out.outFragColor = samplerColor.sample(samplerColorSmplr, cR, bias(in.inLodBias)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/input-attachment-ms.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/input-attachment-ms.frag new file mode 100644 index 000000000..d38712e91 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/input-attachment-ms.frag @@ -0,0 +1,24 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +float4 load_subpasses(thread const texture2d_ms uInput, thread uint& gl_SampleID, thread float4& gl_FragCoord) +{ + return uInput.read(uint2(gl_FragCoord.xy), gl_SampleID); +} + +fragment main0_out main0(texture2d_ms uSubpass0 [[texture(0)]], texture2d_ms uSubpass1 [[texture(1)]], uint gl_SampleID [[sample_id]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = (uSubpass0.read(uint2(gl_FragCoord.xy), 1) + uSubpass1.read(uint2(gl_FragCoord.xy), 2)) + load_subpasses(uSubpass0, gl_SampleID, gl_FragCoord); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/input-attachment.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/input-attachment.frag new file mode 100644 index 000000000..3cc929182 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/input-attachment.frag @@ -0,0 +1,24 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +float4 load_subpasses(thread const texture2d uInput, thread float4& gl_FragCoord) +{ + return uInput.read(uint2(gl_FragCoord.xy), 0); +} + +fragment main0_out main0(texture2d uSubpass0 [[texture(0)]], texture2d uSubpass1 [[texture(1)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = uSubpass0.read(uint2(gl_FragCoord.xy), 0) + load_subpasses(uSubpass1, gl_FragCoord); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/interpolation-qualifiers-block.frag 
b/3rdparty/spirv-cross/reference/shaders-msl/frag/interpolation-qualifiers-block.frag new file mode 100644 index 000000000..2b420195f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/interpolation-qualifiers-block.frag @@ -0,0 +1,47 @@ +#include +#include + +using namespace metal; + +struct Input +{ + float2 v0; + float2 v1; + float3 v2; + float4 v3; + float v4; + float v5; + float v6; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 Input_v0 [[user(locn0), centroid_no_perspective]]; + float2 Input_v1 [[user(locn1), centroid_no_perspective]]; + float3 Input_v2 [[user(locn2), centroid_no_perspective]]; + float4 Input_v3 [[user(locn3), centroid_no_perspective]]; + float Input_v4 [[user(locn4), centroid_no_perspective]]; + float Input_v5 [[user(locn5), centroid_no_perspective]]; + float Input_v6 [[user(locn6), centroid_no_perspective]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Input inp = {}; + inp.v0 = in.Input_v0; + inp.v1 = in.Input_v1; + inp.v2 = in.Input_v2; + inp.v3 = in.Input_v3; + inp.v4 = in.Input_v4; + inp.v5 = in.Input_v5; + inp.v6 = in.Input_v6; + out.FragColor = float4(inp.v0.x + inp.v1.y, inp.v2.xy, ((inp.v3.w * inp.v4) + inp.v5) - inp.v6); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/interpolation-qualifiers.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/interpolation-qualifiers.frag new file mode 100644 index 000000000..aff6e1b0f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/interpolation-qualifiers.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 v0 [[user(locn0)]]; + float2 v1 [[user(locn1), center_no_perspective]]; + float3 v2 [[user(locn2), centroid_perspective]]; + float4 v3 [[user(locn3), centroid_no_perspective]]; + float v4 [[user(locn4), sample_perspective]]; + float v5 [[user(locn5), sample_no_perspective]]; + float v6 [[user(locn6), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = float4(in.v0.x + in.v1.y, in.v2.xy, ((in.v3.w * in.v4) + in.v5) - in.v6); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/lut-promotion.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/lut-promotion.frag new file mode 100644 index 000000000..f7e51edb8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/lut-promotion.frag @@ -0,0 +1,66 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float _16[16] = { 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0 }; +constant float4 _60[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; +constant float4 _104[4] = { float4(20.0), float4(30.0), float4(50.0), float4(60.0) }; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. 
+template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = _16[in.index]; + if (in.index < 10) + { + out.FragColor += _16[in.index ^ 1]; + } + else + { + out.FragColor += _16[in.index & 1]; + } + if (in.index > 30) + { + out.FragColor += _60[in.index & 3].y; + } + else + { + out.FragColor += _60[in.index & 1].x; + } + float4 foobar[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + if (in.index > 30) + { + foobar[1].z = 20.0; + } + out.FragColor += foobar[in.index & 3].z; + float4 baz[4] = { float4(0.0), float4(1.0), float4(8.0), float4(5.0) }; + spvArrayCopyFromConstant1(baz, _104); + out.FragColor += baz[in.index & 3].z; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/mix.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/mix.frag new file mode 100644 index 000000000..ad7c5adee --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/mix.frag @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vIn0 [[user(locn0)]]; + float4 vIn1 [[user(locn1)]]; + float vIn2 [[user(locn2)]]; + float vIn3 [[user(locn3)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + bool4 l = bool4(false, true, false, false); + out.FragColor = float4(l.x ? in.vIn1.x : in.vIn0.x, l.y ? in.vIn1.y : in.vIn0.y, l.z ? in.vIn1.z : in.vIn0.z, l.w ? in.vIn1.w : in.vIn0.w); + bool f = true; + out.FragColor = float4(f ? in.vIn3 : in.vIn2); + bool4 _37 = bool4(f); + out.FragColor = float4(_37.x ? in.vIn0.x : in.vIn1.x, _37.y ? in.vIn0.y : in.vIn1.y, _37.z ? in.vIn0.z : in.vIn1.z, _37.w ? in.vIn0.w : in.vIn1.w); + out.FragColor = float4(f ? 
in.vIn2 : in.vIn3); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/mrt-array.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/mrt-array.frag new file mode 100644 index 000000000..daf7edb4c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/mrt-array.frag @@ -0,0 +1,53 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor_0 [[color(0)]]; + float4 FragColor_1 [[color(1)]]; + float4 FragColor_2 [[color(2)]]; + float4 FragColor_3 [[color(3)]]; +}; + +struct main0_in +{ + float4 vA [[user(locn0)]]; + float4 vB [[user(locn1)]]; +}; + +// Implementation of the GLSL mod() function, which is slightly different than Metal fmod() +template +Tx mod(Tx x, Ty y) +{ + return x - y * floor(x / y); +} + +void write_deeper_in_function(thread float4 (&FragColor)[4], thread float4& vA, thread float4& vB) +{ + FragColor[3] = vA * vB; +} + +void write_in_function(thread float4 (&FragColor)[4], thread float4& vA, thread float4& vB) +{ + FragColor[2] = vA - vB; + write_deeper_in_function(FragColor, vA, vB); +} + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float4 FragColor[4] = {}; + FragColor[0] = mod(in.vA, in.vB); + FragColor[1] = in.vA + in.vB; + write_in_function(FragColor, in.vA, in.vB); + out.FragColor_0 = FragColor[0]; + out.FragColor_1 = FragColor[1]; + out.FragColor_2 = FragColor[2]; + out.FragColor_3 = FragColor[3]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/packed-expression-vector-shuffle.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/packed-expression-vector-shuffle.frag new file mode 100644 index 000000000..dc8947425 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/packed-expression-vector-shuffle.frag @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + packed_float3 color; + float v; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant UBO& _15 [[buffer(0)]]) +{ + main0_out out = {}; + float4 f = float4(1.0); + f = float4(_15.color[0], _15.color[1], _15.color[2], f.w); + out.FragColor = f; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/packing-test-3.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/packing-test-3.frag new file mode 100644 index 000000000..9c59bc164 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/packing-test-3.frag @@ -0,0 +1,54 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct VertexOutput +{ + float4 HPosition; +}; + +struct TestStruct +{ + float3 position; + float radius; +}; + +struct TestStruct_1 +{ + packed_float3 position; + float radius; +}; + +struct CB0 +{ + TestStruct_1 CB0[16]; +}; + +struct main0_out +{ + float4 _entryPointOutput [[color(0)]]; +}; + +float4 _main(thread const VertexOutput& IN, constant CB0& v_26) +{ + TestStruct st; + st.position = v_26.CB0[1].position; + st.radius = v_26.CB0[1].radius; + float4 col = float4(st.position, st.radius); + return col; +} + +fragment main0_out main0(constant CB0& v_26 [[buffer(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + VertexOutput IN; + IN.HPosition = gl_FragCoord; + VertexOutput param = IN; + VertexOutput param_1 = param; + out._entryPointOutput = _main(param_1, v_26); + return out; +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders-msl/frag/pls.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/pls.frag new file mode 100644 index 000000000..ee774a04a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/pls.frag @@ -0,0 +1,31 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 PLSOut0 [[color(0)]]; + float4 PLSOut1 [[color(1)]]; + float4 PLSOut2 [[color(2)]]; + float4 PLSOut3 [[color(3)]]; +}; + +struct main0_in +{ + float4 PLSIn0 [[user(locn0)]]; + float4 PLSIn1 [[user(locn1)]]; + float4 PLSIn2 [[user(locn2)]]; + float4 PLSIn3 [[user(locn3)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.PLSOut0 = in.PLSIn0 * 2.0; + out.PLSOut1 = in.PLSIn1 * 6.0; + out.PLSOut2 = in.PLSIn2 * 7.0; + out.PLSOut3 = in.PLSIn3 * 4.0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/readonly-ssbo.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/readonly-ssbo.frag new file mode 100644 index 000000000..771c225d6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/readonly-ssbo.frag @@ -0,0 +1,29 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct SSBO +{ + float4 v; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +float4 read_from_function(const device SSBO& v_13) +{ + return v_13.v; +} + +fragment main0_out main0(const device SSBO& v_13 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = v_13.v + read_from_function(v_13); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-depth-separate-image-sampler.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-depth-separate-image-sampler.frag new file mode 100644 index 000000000..ae8434474 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-depth-separate-image-sampler.frag @@ -0,0 +1,29 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +float sample_depth_from_function(thread const depth2d uT, thread const sampler uS) +{ + return uT.sample_compare(uS, float3(0.5).xy, float3(0.5).z); +} + +float sample_color_from_function(thread const texture2d uT, thread const sampler uS) +{ + return uT.sample(uS, float2(0.5)).x; +} + +fragment main0_out main0(depth2d uDepth [[texture(0)]], texture2d uColor [[texture(1)]], sampler uSampler [[sampler(2)]], sampler uSamplerShadow [[sampler(3)]]) +{ + main0_out out = {}; + out.FragColor = sample_depth_from_function(uDepth, uSamplerShadow) + sample_color_from_function(uColor, uSampler); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-mask.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-mask.frag new file mode 100644 index 000000000..6a282395d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-mask.frag @@ -0,0 +1,19 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; + uint gl_SampleMask [[sample_mask]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float4(1.0); + out.gl_SampleMask = 0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-position-func.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-position-func.frag new file mode 100644 index 000000000..06fa53063 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-position-func.frag @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int index [[user(locn0)]]; +}; + +float4 getColor(thread const int& i, thread float2& gl_SamplePosition) +{ + return float4(gl_SamplePosition, float(i), 1.0); +} + +fragment main0_out main0(main0_in in [[stage_in]], uint gl_SampleID [[sample_id]]) +{ + main0_out out = {}; + float2 gl_SamplePosition = get_sample_position(gl_SampleID); + int param = in.index; + out.FragColor = getColor(param, gl_SamplePosition); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-position.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-position.frag new file mode 100644 index 000000000..8d26acb9f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sample-position.frag @@ -0,0 +1,18 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(uint gl_SampleID [[sample_id]]) +{ + main0_out out = {}; + float2 gl_SamplePosition = get_sample_position(gl_SampleID); + out.FragColor = float4(gl_SamplePosition, float(gl_SampleID), 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-1d-lod.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-1d-lod.frag new file mode 100644 index 000000000..96914f805 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-1d-lod.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float vTex [[user(locn0), flat]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture1d uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor += ((uSampler.sample(uSamplerSmplr, in.vTex) + uSampler.sample(uSamplerSmplr, in.vTex)) + uSampler.sample(uSamplerSmplr, in.vTex)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-compare-cascade-gradient.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-compare-cascade-gradient.frag new file mode 100644 index 000000000..18c2d4237 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-compare-cascade-gradient.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d_array uTex [[texture(0)]], sampler uShadow [[sampler(1)]]) +{ + main0_out out = {}; + out.FragColor = uTex.sample_compare(uShadow, in.vUV.xy, uint(round(in.vUV.z)), in.vUV.w, level(0)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag new file mode 100644 index 000000000..f01965c9e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vUV [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d_array uTex 
[[texture(0)]], sampler uShadow [[sampler(1)]]) +{ + main0_out out = {}; + out.FragColor = uTex.sample_compare(uShadow, in.vUV.xy, uint(round(in.vUV.z)), in.vUV.w, gradient2d(float2(0.0), float2(0.0))); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-image-arrays.msl2.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-image-arrays.msl2.frag new file mode 100644 index 000000000..af5112c19 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-image-arrays.msl2.frag @@ -0,0 +1,45 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float2 vTex [[user(locn0), flat]]; + int vIndex [[user(locn1)]]; +}; + +float4 sample_from_global(thread int& vIndex, thread float2& vTex, thread const array, 4> uSampler, thread const array uSamplerSmplr) +{ + return uSampler[vIndex].sample(uSamplerSmplr[vIndex], (vTex + float2(0.100000001490116119384765625))); +} + +float4 sample_from_argument(thread const array, 4> samplers, thread const array samplersSmplr, thread int& vIndex, thread float2& vTex) +{ + return samplers[vIndex].sample(samplersSmplr[vIndex], (vTex + float2(0.20000000298023223876953125))); +} + +float4 sample_single_from_argument(thread const texture2d samp, thread const sampler sampSmplr, thread float2& vTex) +{ + return samp.sample(sampSmplr, (vTex + float2(0.300000011920928955078125))); +} + +fragment main0_out main0(main0_in in [[stage_in]], array, 4> uSampler [[texture(0)]], array, 4> uTextures [[texture(8)]], array uSamplerSmplr [[sampler(0)]], array uSamplers [[sampler(4)]]) +{ + main0_out out = {}; + out.FragColor = float4(0.0); + out.FragColor += uTextures[2].sample(uSamplers[1], in.vTex); + out.FragColor += uSampler[in.vIndex].sample(uSamplerSmplr[in.vIndex], in.vTex); + out.FragColor += sample_from_global(in.vIndex, in.vTex, uSampler, uSamplerSmplr); + out.FragColor += sample_from_argument(uSampler, uSamplerSmplr, in.vIndex, in.vTex); + out.FragColor += sample_single_from_argument(uSampler[3], uSamplerSmplr[3], in.vTex); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-ms.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-ms.frag new file mode 100644 index 000000000..1ceb3f96b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler-ms.frag @@ -0,0 +1,18 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(texture2d_ms uSampler [[texture(0)]], sampler uSamplerSmplr [[sampler(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + int2 coord = int2(gl_FragCoord.xy); + out.FragColor = ((uSampler.read(uint2(coord), 0) + uSampler.read(uint2(coord), 1)) + uSampler.read(uint2(coord), 2)) + uSampler.read(uint2(coord), 3); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler.frag new file mode 100644 index 000000000..395854699 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/sampler.frag @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; + float2 vTex [[user(locn1)]]; +}; + +float4 sample_texture(thread const texture2d tex, 
thread const sampler texSmplr, thread const float2& uv) +{ + return tex.sample(texSmplr, uv); +} + +fragment main0_out main0(main0_in in [[stage_in]], texture2d uTex [[texture(0)]], sampler uTexSmplr [[sampler(0)]]) +{ + main0_out out = {}; + float2 param = in.vTex; + out.FragColor = in.vColor * sample_texture(uTex, uTexSmplr, param); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/separate-image-sampler-argument.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/separate-image-sampler-argument.frag new file mode 100644 index 000000000..c0c2ea11e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/separate-image-sampler-argument.frag @@ -0,0 +1,24 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +float4 samp(thread const texture2d t, thread const sampler s) +{ + return t.sample(s, float2(0.5)); +} + +fragment main0_out main0(texture2d uDepth [[texture(1)]], sampler uSampler [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = samp(uDepth, uSampler); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/shadow-compare-global-alias.invalid.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/shadow-compare-global-alias.invalid.frag new file mode 100644 index 000000000..2dd2d32df --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/shadow-compare-global-alias.invalid.frag @@ -0,0 +1,53 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vUV [[user(locn0)]]; +}; + +float Samp(thread const float3& uv, thread depth2d uTex, thread sampler uSamp) +{ + return uTex.sample_compare(uSamp, uv.xy, uv.z); +} + +float Samp2(thread const float3& uv, thread depth2d uSampler, thread const sampler uSamplerSmplr, thread float3& vUV) +{ + return uSampler.sample_compare(uSamplerSmplr, vUV.xy, vUV.z); +} + +float Samp3(thread const depth2d uT, thread const sampler uS, thread const float3& uv, thread float3& vUV) +{ + return uT.sample_compare(uS, vUV.xy, vUV.z); +} + +float Samp4(thread const depth2d uS, thread const sampler uSSmplr, thread const float3& uv, thread float3& vUV) +{ + return uS.sample_compare(uSSmplr, vUV.xy, vUV.z); +} + +fragment main0_out main0(main0_in in [[stage_in]], depth2d uSampler [[texture(0)]], depth2d uTex [[texture(1)]], sampler uSamplerSmplr [[sampler(0)]], sampler uSamp [[sampler(2)]]) +{ + main0_out out = {}; + out.FragColor = uSampler.sample_compare(uSamplerSmplr, in.vUV.xy, in.vUV.z); + out.FragColor += uTex.sample_compare(uSamp, in.vUV.xy, in.vUV.z); + float3 param = in.vUV; + out.FragColor += Samp(param, uTex, uSamp); + float3 param_1 = in.vUV; + out.FragColor += Samp2(param_1, uSampler, uSamplerSmplr, in.vUV); + float3 param_2 = in.vUV; + out.FragColor += Samp3(uTex, uSamp, param_2, in.vUV); + float3 param_3 = in.vUV; + out.FragColor += Samp4(uSampler, uSamplerSmplr, param_3, in.vUV); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/spec-constant-block-size.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/spec-constant-block-size.frag new file mode 100644 index 000000000..36456b814 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/spec-constant-block-size.frag @@ -0,0 +1,32 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_10 
+#define SPIRV_CROSS_CONSTANT_ID_10 2 +#endif +constant int Value = SPIRV_CROSS_CONSTANT_ID_10; + +struct SpecConstArray +{ + float4 samples[Value]; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int Index [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant SpecConstArray& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = _15.samples[in.Index]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/spec-constant-ternary.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/spec-constant-ternary.frag new file mode 100644 index 000000000..5ab6b4fcb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/spec-constant-ternary.frag @@ -0,0 +1,22 @@ +#include +#include + +using namespace metal; + +constant uint s_tmp [[function_constant(0)]]; +constant uint s = is_function_constant_defined(s_tmp) ? s_tmp : 10u; +constant bool _13 = (s > 20u); +constant uint _16 = _13 ? 30u : 50u; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = float(_16); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..4cd2b6852 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/switch-unsigned-case.frag @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct Buff +{ + uint TestVal; +}; + +struct main0_out +{ + float4 fsout_Color [[color(0)]]; +}; + +fragment main0_out main0(constant Buff& _15 [[buffer(0)]]) +{ + main0_out out = {}; + out.fsout_Color = float4(1.0); + switch (_15.TestVal) + { + case 0u: + { + out.fsout_Color = float4(0.100000001490116119384765625); + break; + } + case 1u: + { + out.fsout_Color = float4(0.20000000298023223876953125); + break; + } + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/swizzle.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/swizzle.frag new file mode 100644 index 000000000..7a0494e06 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/swizzle.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vNormal [[user(locn1)]]; + float2 vUV [[user(locn2)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d samp [[texture(0)]], sampler sampSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = float4(samp.sample(sampSmplr, in.vUV).xyz, 1.0); + out.FragColor = float4(samp.sample(sampSmplr, in.vUV).xz, 1.0, 4.0); + out.FragColor = float4(samp.sample(sampSmplr, in.vUV).xx, samp.sample(sampSmplr, (in.vUV + float2(0.100000001490116119384765625))).yy); + out.FragColor = float4(in.vNormal, 1.0); + out.FragColor = float4(in.vNormal + float3(1.7999999523162841796875), 1.0); + out.FragColor = float4(in.vUV, in.vUV + float2(1.7999999523162841796875)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..4d4301e1c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/texel-fetch-offset.frag @@ -0,0 +1,18 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out 
main0(texture2d uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]], float4 gl_FragCoord [[position]]) +{ + main0_out out = {}; + out.FragColor = uTexture.read(uint2(int2(gl_FragCoord.xy)) + uint2(int2(1)), 0); + out.FragColor += uTexture.read(uint2(int2(gl_FragCoord.xy)) + uint2(int2(-1, 1)), 0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/texture-multisample-array.msl21.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/texture-multisample-array.msl21.frag new file mode 100644 index 000000000..ed1e81f3e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/texture-multisample-array.msl21.frag @@ -0,0 +1,23 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + int3 vCoord [[user(locn0)]]; + int vSample [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], texture2d_ms_array uTexture [[texture(0)]], sampler uTextureSmplr [[sampler(0)]]) +{ + main0_out out = {}; + out.FragColor = uTexture.read(uint2(in.vCoord.xy), uint(in.vCoord.z), in.vSample); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/texture-proj-shadow.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/texture-proj-shadow.frag new file mode 100644 index 000000000..c5ab0ee00 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/texture-proj-shadow.frag @@ -0,0 +1,29 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vClip3 [[user(locn0)]]; + float4 vClip4 [[user(locn1)]]; + float2 vClip2 [[user(locn2)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], depth2d uShadow2D [[texture(1)]], texture1d uSampler1D [[texture(2)]], texture2d uSampler2D [[texture(3)]], texture3d uSampler3D [[texture(4)]], sampler uShadow2DSmplr [[sampler(1)]], sampler uSampler1DSmplr [[sampler(2)]], sampler uSampler2DSmplr [[sampler(3)]], sampler uSampler3DSmplr [[sampler(4)]]) +{ + main0_out out = {}; + float4 _20 = in.vClip4; + _20.z = in.vClip4.w; + out.FragColor = uShadow2D.sample_compare(uShadow2DSmplr, _20.xy / _20.z, in.vClip4.z); + out.FragColor = uSampler1D.sample(uSampler1DSmplr, in.vClip2.x / in.vClip2.y).x; + out.FragColor = uSampler2D.sample(uSampler2DSmplr, in.vClip3.xy / in.vClip3.z).x; + out.FragColor = uSampler3D.sample(uSampler3DSmplr, in.vClip4.xyz / in.vClip4.w).x; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/ubo_layout.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/ubo_layout.frag new file mode 100644 index 000000000..0bc27462b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/ubo_layout.frag @@ -0,0 +1,37 @@ +#include +#include + +using namespace metal; + +struct Str +{ + float4x4 foo; +}; + +struct UBO1 +{ + Str foo; +}; + +struct Str_1 +{ + float4x4 foo; +}; + +struct UBO2 +{ + Str_1 foo; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0(constant UBO1& ubo1 [[buffer(0)]], constant UBO2& ubo0 [[buffer(1)]]) +{ + main0_out out = {}; + out.FragColor = transpose(ubo1.foo.foo)[0] + ubo0.foo.foo[0]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/unary-enclose.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/unary-enclose.frag new file mode 100644 index 000000000..c33269f2b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/unary-enclose.frag @@ -0,0 +1,26 @@ +#include +#include + 
+using namespace metal; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vIn [[user(locn0)]]; + int4 vIn1 [[user(locn1)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.FragColor = -(-in.vIn); + int4 a = ~(~in.vIn1); + bool b = false; + b = !(!b); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/frag/write-depth-in-function.frag b/3rdparty/spirv-cross/reference/shaders-msl/frag/write-depth-in-function.frag new file mode 100644 index 000000000..6837a9b3e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/frag/write-depth-in-function.frag @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float FragColor [[color(0)]]; + float gl_FragDepth [[depth(any)]]; +}; + +void set_output_depth(thread float& gl_FragDepth) +{ + gl_FragDepth = 0.20000000298023223876953125; +} + +fragment main0_out main0() +{ + main0_out out = {}; + out.FragColor = 1.0; + set_output_depth(out.gl_FragDepth); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/legacy/vert/transpose.legacy.vert b/3rdparty/spirv-cross/reference/shaders-msl/legacy/vert/transpose.legacy.vert new file mode 100644 index 000000000..3837c8bc0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/legacy/vert/transpose.legacy.vert @@ -0,0 +1,33 @@ +#include +#include + +using namespace metal; + +struct Buffer +{ + float4x4 MVPRowMajor; + float4x4 MVPColMajor; + float4x4 M; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 Position [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant Buffer& _13 [[buffer(0)]]) +{ + main0_out out = {}; + float4 c0 = _13.M * (in.Position * _13.MVPRowMajor); + float4 c1 = _13.M * (_13.MVPColMajor * in.Position); + float4 c2 = _13.M * (_13.MVPRowMajor * in.Position); + float4 c3 = _13.M * (in.Position * _13.MVPColMajor); + out.gl_Position = ((c0 + c1) + c2) + c3; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/basic.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/basic.vert new file mode 100644 index 000000000..ffb435712 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/basic.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.uMVP * in.aVertex; + out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/copy.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/copy.flatten.vert new file mode 100644 index 000000000..a87b44782 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/copy.flatten.vert @@ -0,0 +1,54 @@ +#include +#include + +using namespace metal; + +struct Light +{ + packed_float3 Position; + float Radius; + float4 Color; +}; + +struct UBO +{ + float4x4 uMVP; + Light lights[4]; +}; + +struct Light_1 +{ + float3 Position; + float Radius; + float4 Color; +}; + +struct main0_out +{ + float4 vColor [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex 
[[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _21 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _21.uMVP * in.aVertex; + out.vColor = float4(0.0); + for (int i = 0; i < 4; i++) + { + Light_1 light; + light.Position = _21.lights[i].Position; + light.Radius = _21.lights[i].Radius; + light.Color = _21.lights[i].Color; + float3 L = in.aVertex.xyz - light.Position; + out.vColor += ((_21.lights[i].Color * fast::clamp(1.0 - (length(L) / light.Radius), 0.0, 1.0)) * dot(in.aNormal, normalize(L))); + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/dynamic.flatten.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/dynamic.flatten.vert new file mode 100644 index 000000000..c285f3c87 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/dynamic.flatten.vert @@ -0,0 +1,43 @@ +#include +#include + +using namespace metal; + +struct Light +{ + packed_float3 Position; + float Radius; + float4 Color; +}; + +struct UBO +{ + float4x4 uMVP; + Light lights[4]; +}; + +struct main0_out +{ + float4 vColor [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _21 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _21.uMVP * in.aVertex; + out.vColor = float4(0.0); + for (int i = 0; i < 4; i++) + { + float3 L = in.aVertex.xyz - float3(_21.lights[i].Position); + out.vColor += ((_21.lights[i].Color * fast::clamp(1.0 - (length(L) / _21.lights[i].Radius), 0.0, 1.0)) * dot(in.aNormal, normalize(L))); + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/functions.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/functions.vert new file mode 100644 index 000000000..f71022526 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/functions.vert @@ -0,0 +1,119 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; + float3 rotDeg; + float3 rotRad; + int2 bits; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float3 vRotDeg [[user(locn1)]]; + float3 vRotRad [[user(locn2)]]; + int2 vLSB [[user(locn3)]]; + int2 vMSB [[user(locn4)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +// Implementation of the GLSL radians() function +template +T radians(T d) +{ + return d * T(0.01745329251); +} + +// Implementation of the GLSL degrees() function +template +T degrees(T r) +{ + return r * T(57.2957795131); +} + +// Implementation of the GLSL findLSB() function +template +T findLSB(T x) +{ + return select(ctz(x), T(-1), x == T(0)); +} + +// Implementation of the signed GLSL findMSB() function +template +T findSMSB(T x) +{ + T v = select(x, T(-1) - x, x < T(0)); + return select(clz(T(0)) - (clz(v) + T(1)), T(-1), v == T(0)); +} + +// Returns the determinant of a 2x2 matrix. +inline float spvDet2x2(float a1, float a2, float b1, float b2) +{ + return a1 * b2 - b1 * a2; +} + +// Returns the determinant of a 3x3 matrix. 
+inline float spvDet3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, float c2, float c3) +{ + return a1 * spvDet2x2(b2, b3, c2, c3) - b1 * spvDet2x2(a2, a3, c2, c3) + c1 * spvDet2x2(a2, a3, b2, b3); +} + +// Returns the inverse of a matrix, by using the algorithm of calculating the classical +// adjoint and dividing by the determinant. The contents of the matrix are changed. +float4x4 spvInverse4x4(float4x4 m) +{ + float4x4 adj; // The adjoint matrix (inverse after dividing by determinant) + + // Create the transpose of the cofactors, as the classical adjoint of the matrix. + adj[0][0] = spvDet3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][1] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], m[3][3]); + adj[0][2] = spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], m[3][3]); + adj[0][3] = -spvDet3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3]); + + adj[1][0] = -spvDet3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][1] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], m[3][3]); + adj[1][2] = -spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], m[3][3]); + adj[1][3] = spvDet3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3]); + + adj[2][0] = spvDet3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][1] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], m[3][3]); + adj[2][2] = spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], m[3][3]); + adj[2][3] = -spvDet3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3]); + + adj[3][0] = -spvDet3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][1] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], m[3][2]); + adj[3][2] = -spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], m[3][2]); + adj[3][3] = spvDet3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2]); + + // Calculate the determinant as a combination of the cofactors of the first row. + float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] * m[3][0]); + + // Divide the classical adjoint matrix by the determinant. + // If determinant is zero, matrix is not invertable, so leave it unchanged. + return (det != 0.0f) ? 
(adj * (1.0f / det)) : m; +} + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = spvInverse4x4(_18.uMVP) * in.aVertex; + out.vNormal = in.aNormal; + out.vRotDeg = degrees(_18.rotRad); + out.vRotRad = radians(_18.rotDeg); + out.vLSB = findLSB(_18.bits); + out.vMSB = findSMSB(_18.bits); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/in_out_array_mat.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/in_out_array_mat.vert new file mode 100644 index 000000000..95be574a5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/in_out_array_mat.vert @@ -0,0 +1,78 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 projection; + float4x4 model; + float lodBias; +}; + +struct main0_out +{ + float3 outPos [[user(locn0)]]; + float3 outNormal [[user(locn1)]]; + float4 outTransModel_0 [[user(locn2)]]; + float4 outTransModel_1 [[user(locn3)]]; + float4 outTransModel_2 [[user(locn4)]]; + float4 outTransModel_3 [[user(locn5)]]; + float outLodBias [[user(locn6)]]; + float4 color [[user(locn7)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float3 inPos [[attribute(0)]]; + float4 colors_0 [[attribute(1)]]; + float4 colors_1 [[attribute(2)]]; + float4 colors_2 [[attribute(3)]]; + float3 inNormal [[attribute(4)]]; + float4 inViewMat_0 [[attribute(5)]]; + float4 inViewMat_1 [[attribute(6)]]; + float4 inViewMat_2 [[attribute(7)]]; + float4 inViewMat_3 [[attribute(8)]]; +}; + +void write_deeper_in_function(thread float4x4& outTransModel, constant UBO& ubo, thread float4& color, thread float4 (&colors)[3]) +{ + outTransModel[1].y = ubo.lodBias; + color = colors[2]; +} + +void write_in_function(thread float4x4& outTransModel, constant UBO& ubo, thread float4& color, thread float4 (&colors)[3], thread float3& inNormal) +{ + outTransModel[2] = float4(inNormal, 1.0); + write_deeper_in_function(outTransModel, ubo, color, colors); +} + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& ubo [[buffer(0)]]) +{ + main0_out out = {}; + float4x4 outTransModel = {}; + float4 colors[3] = {}; + float4x4 inViewMat = {}; + colors[0] = in.colors_0; + colors[1] = in.colors_1; + colors[2] = in.colors_2; + inViewMat[0] = in.inViewMat_0; + inViewMat[1] = in.inViewMat_1; + inViewMat[2] = in.inViewMat_2; + inViewMat[3] = in.inViewMat_3; + out.gl_Position = (ubo.projection * ubo.model) * float4(in.inPos, 1.0); + out.outPos = float3((ubo.model * float4(in.inPos, 1.0)).xyz); + out.outNormal = float3x3(float3(float3(ubo.model[0].x, ubo.model[0].y, ubo.model[0].z)), float3(float3(ubo.model[1].x, ubo.model[1].y, ubo.model[1].z)), float3(float3(ubo.model[2].x, ubo.model[2].y, ubo.model[2].z))) * in.inNormal; + out.outLodBias = ubo.lodBias; + outTransModel = transpose(ubo.model) * inViewMat; + write_in_function(outTransModel, ubo, out.color, colors, in.inNormal); + out.outTransModel_0 = outTransModel[0]; + out.outTransModel_1 = outTransModel[1]; + out.outTransModel_2 = outTransModel[2]; + out.outTransModel_3 = outTransModel[3]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/interface-block-block-composites.frag b/3rdparty/spirv-cross/reference/shaders-msl/vert/interface-block-block-composites.frag new file mode 100644 index 000000000..c42381d00 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/interface-block-block-composites.frag @@ -0,0 +1,56 @@ +#include 
+#include + +using namespace metal; + +struct Vert +{ + float3x3 wMatrix; + float4 wTmp; + float arr[4]; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float3 vMatrix_0 [[user(locn0)]]; + float3 vMatrix_1 [[user(locn1)]]; + float3 vMatrix_2 [[user(locn2)]]; + float3 Vert_wMatrix_0 [[user(locn4)]]; + float3 Vert_wMatrix_1 [[user(locn5)]]; + float3 Vert_wMatrix_2 [[user(locn6)]]; + float4 Vert_wTmp [[user(locn7)]]; + float Vert_arr_0 [[user(locn8)]]; + float Vert_arr_1 [[user(locn9)]]; + float Vert_arr_2 [[user(locn10)]]; + float Vert_arr_3 [[user(locn11)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Vert _17 = {}; + float3x3 vMatrix = {}; + _17.wMatrix[0] = in.Vert_wMatrix_0; + _17.wMatrix[1] = in.Vert_wMatrix_1; + _17.wMatrix[2] = in.Vert_wMatrix_2; + _17.wTmp = in.Vert_wTmp; + _17.arr[0] = in.Vert_arr_0; + _17.arr[1] = in.Vert_arr_1; + _17.arr[2] = in.Vert_arr_2; + _17.arr[3] = in.Vert_arr_3; + vMatrix[0] = in.vMatrix_0; + vMatrix[1] = in.vMatrix_1; + vMatrix[2] = in.vMatrix_2; + out.FragColor = (_17.wMatrix[0].xxyy + _17.wTmp) + vMatrix[1].yyzz; + for (int i = 0; i < 4; i++) + { + out.FragColor += float4(_17.arr[i]); + } + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/interface-block-block-composites.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/interface-block-block-composites.vert new file mode 100644 index 000000000..3d97ae6dc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/interface-block-block-composites.vert @@ -0,0 +1,64 @@ +#include +#include + +using namespace metal; + +struct Vert +{ + float arr[3]; + float3x3 wMatrix; + float4 wTmp; +}; + +struct main0_out +{ + float3 vMatrix_0 [[user(locn0)]]; + float3 vMatrix_1 [[user(locn1)]]; + float3 vMatrix_2 [[user(locn2)]]; + float Vert_arr_0 [[user(locn4)]]; + float Vert_arr_1 [[user(locn5)]]; + float Vert_arr_2 [[user(locn6)]]; + float3 Vert_wMatrix_0 [[user(locn7)]]; + float3 Vert_wMatrix_1 [[user(locn8)]]; + float3 Vert_wMatrix_2 [[user(locn9)]]; + float4 Vert_wTmp [[user(locn10)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float3 Matrix_0 [[attribute(0)]]; + float3 Matrix_1 [[attribute(1)]]; + float3 Matrix_2 [[attribute(2)]]; + float4 Pos [[attribute(4)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float3x3 vMatrix = {}; + Vert _20 = {}; + float3x3 Matrix = {}; + Matrix[0] = in.Matrix_0; + Matrix[1] = in.Matrix_1; + Matrix[2] = in.Matrix_2; + vMatrix = Matrix; + _20.wMatrix = Matrix; + _20.arr[0] = 1.0; + _20.arr[1] = 2.0; + _20.arr[2] = 3.0; + _20.wTmp = in.Pos; + out.gl_Position = in.Pos; + out.vMatrix_0 = vMatrix[0]; + out.vMatrix_1 = vMatrix[1]; + out.vMatrix_2 = vMatrix[2]; + out.Vert_arr_0 = _20.arr[0]; + out.Vert_arr_1 = _20.arr[1]; + out.Vert_arr_2 = _20.arr[2]; + out.Vert_wMatrix_0 = _20.wMatrix[0]; + out.Vert_wMatrix_1 = _20.wMatrix[1]; + out.Vert_wMatrix_2 = _20.wMatrix[2]; + out.Vert_wTmp = _20.wTmp; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/interpolation-qualifiers-block.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/interpolation-qualifiers-block.vert new file mode 100644 index 000000000..4206623b4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/interpolation-qualifiers-block.vert @@ -0,0 +1,55 @@ +#include +#include + +using namespace metal; + +struct Output +{ + float2 v0; + float2 v1; + float3 v2; + float4 v3; + float v4; + float v5; + float v6; 
+}; + +struct main0_out +{ + float2 Output_v0 [[user(locn0)]]; + float2 Output_v1 [[user(locn1)]]; + float3 Output_v2 [[user(locn2)]]; + float4 Output_v3 [[user(locn3)]]; + float Output_v4 [[user(locn4)]]; + float Output_v5 [[user(locn5)]]; + float Output_v6 [[user(locn6)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 Position [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + Output outp = {}; + outp.v0 = in.Position.xy; + outp.v1 = in.Position.zw; + outp.v2 = float3(in.Position.x, in.Position.z * in.Position.y, in.Position.x); + outp.v3 = in.Position.xxyy; + outp.v4 = in.Position.w; + outp.v5 = in.Position.y; + outp.v6 = in.Position.x * in.Position.w; + out.gl_Position = in.Position; + out.Output_v0 = outp.v0; + out.Output_v1 = outp.v1; + out.Output_v2 = outp.v2; + out.Output_v3 = outp.v3; + out.Output_v4 = outp.v4; + out.Output_v5 = outp.v5; + out.Output_v6 = outp.v6; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/interpolation-qualifiers.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/interpolation-qualifiers.vert new file mode 100644 index 000000000..ba2c4fbd2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/interpolation-qualifiers.vert @@ -0,0 +1,36 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float2 v0 [[user(locn0)]]; + float2 v1 [[user(locn1)]]; + float3 v2 [[user(locn2)]]; + float4 v3 [[user(locn3)]]; + float v4 [[user(locn4)]]; + float v5 [[user(locn5)]]; + float v6 [[user(locn6)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 Position [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.v0 = in.Position.xy; + out.v1 = in.Position.zw; + out.v2 = float3(in.Position.x, in.Position.z * in.Position.y, in.Position.x); + out.v3 = in.Position.xxyy; + out.v4 = in.Position.w; + out.v5 = in.Position.y; + out.v6 = in.Position.x * in.Position.w; + out.gl_Position = in.Position; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/layer.msl11.invalid.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/layer.msl11.invalid.vert new file mode 100644 index 000000000..b6f39dca3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/layer.msl11.invalid.vert @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + uint gl_Layer [[render_target_array_index]]; +}; + +struct main0_in +{ + float4 coord [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.gl_Position = in.coord; + out.gl_Layer = uint(int(in.coord.z)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.vert new file mode 100644 index 000000000..28098ee88 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.vert @@ -0,0 +1,20 @@ +#include +#include + +using namespace metal; + +struct _10 +{ + uint4 _m0[1024]; +}; + +struct main0_in +{ + uint4 m_19 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], device _10& _12 [[buffer(0)]], uint gl_VertexIndex [[vertex_id]]) +{ + _12._m0[gl_VertexIndex] = in.m_19; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_buff.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_buff.vert new file mode 
100644 index 000000000..2ac15cfa6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_buff.vert @@ -0,0 +1,35 @@ +#include +#include + +using namespace metal; + +struct _35 +{ + uint4 _m0[1024]; +}; + +struct _40 +{ + uint4 _m0[1024]; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_17 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], constant _40& _42 [[buffer(0)]], device _35& _37 [[buffer(1)]]) +{ + main0_out out = {}; + out.gl_Position = in.m_17; + for (int _22 = 0; _22 < 1024; _22++) + { + _37._m0[_22] = _42._m0[_22]; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_buff_atomic.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_buff_atomic.vert new file mode 100644 index 000000000..9fe99e29f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_buff_atomic.vert @@ -0,0 +1,31 @@ +#pragma clang diagnostic ignored "-Wunused-variable" + +#include +#include +#include + +using namespace metal; + +struct _23 +{ + uint _m0; +}; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_17 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], device _23& _25 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = in.m_17; + uint _29 = atomic_fetch_add_explicit((volatile device atomic_uint*)&_25._m0, 1u, memory_order_relaxed); + uint _22 = _29; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_tex.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_tex.vert new file mode 100644 index 000000000..533986b95 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/no_stage_out.write_tex.vert @@ -0,0 +1,25 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_17 [[attribute(0)]]; +}; + +vertex void main0(main0_in in [[stage_in]], texture1d _37 [[texture(0)]], texture1d _34 [[texture(1)]]) +{ + main0_out out = {}; + out.gl_Position = in.m_17; + for (int _22 = 0; _22 < 128; _22++) + { + _34.write(_37.read(uint(_22)), uint(_22)); + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/out_block.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/out_block.vert new file mode 100644 index 000000000..45b897013 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/out_block.vert @@ -0,0 +1,41 @@ +#include +#include + +using namespace metal; + +struct Transform +{ + float4x4 transform; +}; + +struct VertexOut +{ + float4 color; + float4 color2; +}; + +struct main0_out +{ + float4 VertexOut_color [[user(locn2)]]; + float4 VertexOut_color2 [[user(locn3)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float3 position [[attribute(0)]]; + float4 color [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant Transform& block [[buffer(0)]]) +{ + main0_out out = {}; + VertexOut outputs = {}; + out.gl_Position = block.transform * float4(in.position, 1.0); + outputs.color = in.color; + outputs.color2 = in.color + float4(1.0); + out.VertexOut_color = outputs.color; + out.VertexOut_color2 = outputs.color2; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/packed_matrix.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/packed_matrix.vert new file mode 100644 index 000000000..89638511d --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-msl/vert/packed_matrix.vert @@ -0,0 +1,57 @@ +#include +#include + +using namespace metal; + +typedef float3x4 packed_float4x3; + +struct _15 +{ + packed_float4x3 _m0; + packed_float4x3 _m1; +}; + +struct _42 +{ + float4x4 _m0; + float4x4 _m1; + float _m2; + char pad3[12]; + packed_float3 _m3; + float _m4; + packed_float3 _m5; + float _m6; + float _m7; + float _m8; + float2 _m9; +}; + +struct main0_out +{ + float3 m_72 [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 m_25 [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant _42& _44 [[buffer(12)]], constant _15& _17 [[buffer(13)]]) +{ + main0_out out = {}; + float3 _91; + float3 _13; + do + { + _13 = normalize(float4(in.m_25.xyz, 0.0) * _17._m1); + break; + } while (false); + float4 _39 = _44._m0 * float4(float3(_44._m3) + (in.m_25.xyz * (_44._m6 + _44._m7)), 1.0); + out.m_72 = _13; + float4 _74 = _39; + _74.y = -_39.y; + out.gl_Position = _74; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/pointsize.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/pointsize.vert new file mode 100644 index 000000000..8e5782bde --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/pointsize.vert @@ -0,0 +1,33 @@ +#include +#include + +using namespace metal; + +struct params +{ + float4x4 mvp; + float psize; +}; + +struct main0_out +{ + float4 color [[user(locn0)]]; + float4 gl_Position [[position]]; + float gl_PointSize [[point_size]]; +}; + +struct main0_in +{ + float4 position [[attribute(0)]]; + float4 color0 [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant params& _19 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _19.mvp * in.position; + out.gl_PointSize = _19.psize; + out.color = in.color0; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..9a633c5fe --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/read-from-row-major-array.vert @@ -0,0 +1,66 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct Block +{ + float2x3 var[3][4]; +}; + +struct main0_out +{ + float v_vtxResult [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 a_position [[attribute(0)]]; +}; + +// Implementation of a conversion of matrix content from RowMajor to ColumnMajor organization. 
+float2x3 spvConvertFromRowMajor2x3(float2x3 m) +{ + return float2x3(float3(m[0][0], m[0][2], m[1][1]), float3(m[0][1], m[1][0], m[1][2])); +} + +float compare_float(thread const float& a, thread const float& b) +{ + return float(abs(a - b) < 0.0500000007450580596923828125); +} + +float compare_vec3(thread const float3& a, thread const float3& b) +{ + float param = a.x; + float param_1 = b.x; + float param_2 = a.y; + float param_3 = b.y; + float param_4 = a.z; + float param_5 = b.z; + return (compare_float(param, param_1) * compare_float(param_2, param_3)) * compare_float(param_4, param_5); +} + +float compare_mat2x3(thread const float2x3& a, thread const float2x3& b) +{ + float3 param = a[0]; + float3 param_1 = b[0]; + float3 param_2 = a[1]; + float3 param_3 = b[1]; + return compare_vec3(param, param_1) * compare_vec3(param_2, param_3); +} + +vertex main0_out main0(main0_in in [[stage_in]], constant Block& _104 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = in.a_position; + float result = 1.0; + float2x3 param = spvConvertFromRowMajor2x3(_104.var[0][0]); + float2x3 param_1 = float2x3(float3(2.0, 6.0, -6.0), float3(0.0, 5.0, 5.0)); + result *= compare_mat2x3(param, param_1); + out.v_vtxResult = result; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/resource-arrays-leaf.ios.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/resource-arrays-leaf.ios.vert new file mode 100644 index 000000000..731a69efc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/resource-arrays-leaf.ios.vert @@ -0,0 +1,49 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 3 +#endif +constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0; + +struct storage_block +{ + uint4 baz; + int2 quux; +}; + +struct constant_block +{ + float4 foo; + int bar; +}; + +void doWork(device storage_block* (&storage)[2], constant constant_block* (&constants)[4], thread const array, 3> images) +{ + storage[0]->baz = uint4(constants[3]->foo); + storage[1]->quux = images[2].read(uint2(int2(constants[1]->bar))).xy; +} + +vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 [[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array, 3> images [[texture(0)]]) +{ + device storage_block* storage[] = + { + storage_0, + storage_1, + }; + + constant constant_block* constants[] = + { + constants_0, + constants_1, + constants_2, + constants_3, + }; + + doWork(storage, constants, images); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/resource-arrays.ios.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/resource-arrays.ios.vert new file mode 100644 index 000000000..22dc480b2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/resource-arrays.ios.vert @@ -0,0 +1,42 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 3 +#endif +constant int arraySize = SPIRV_CROSS_CONSTANT_ID_0; + +struct storage_block +{ + uint4 baz; + int2 quux; +}; + +struct constant_block +{ + float4 foo; + int bar; +}; + +vertex void main0(device storage_block* storage_0 [[buffer(0)]], device storage_block* storage_1 [[buffer(1)]], constant constant_block* constants_0 
[[buffer(2)]], constant constant_block* constants_1 [[buffer(3)]], constant constant_block* constants_2 [[buffer(4)]], constant constant_block* constants_3 [[buffer(5)]], array, 3> images [[texture(0)]]) +{ + device storage_block* storage[] = + { + storage_0, + storage_1, + }; + + constant constant_block* constants[] = + { + constants_0, + constants_1, + constants_2, + constants_3, + }; + + storage[0]->baz = uint4(constants[3]->foo); + storage[1]->quux = images[2].read(uint2(int2(constants[1]->bar))).xy; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/return-array.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/return-array.vert new file mode 100644 index 000000000..cd06fddaa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/return-array.vert @@ -0,0 +1,57 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +constant float4 _20[2] = { float4(10.0), float4(20.0) }; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 vInput0 [[attribute(0)]]; + float4 vInput1 [[attribute(1)]]; +}; + +// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment. +template +void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +template +void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N]) +{ + for (uint i = 0; i < N; dst[i] = src[i], i++); +} + +void test(thread float4 (&SPIRV_Cross_return_value)[2]) +{ + spvArrayCopyFromConstant1(SPIRV_Cross_return_value, _20); +} + +void test2(thread float4 (&SPIRV_Cross_return_value)[2], thread float4& vInput0, thread float4& vInput1) +{ + float4 foobar[2]; + foobar[0] = vInput0; + foobar[1] = vInput1; + spvArrayCopyFromStack1(SPIRV_Cross_return_value, foobar); +} + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + float4 _42[2]; + test(_42); + float4 _44[2]; + test2(_44, in.vInput0, in.vInput1); + out.gl_Position = _42[0] + _44[1]; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/set_builtin_in_func.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/set_builtin_in_func.vert new file mode 100644 index 000000000..2952748dc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/set_builtin_in_func.vert @@ -0,0 +1,26 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + float gl_PointSize [[point_size]]; +}; + +void write_outblock(thread float4& gl_Position, thread float& gl_PointSize) +{ + gl_PointSize = 1.0; + gl_Position = float4(gl_PointSize); +} + +vertex main0_out main0() +{ + main0_out out = {}; + write_outblock(out.gl_Position, out.gl_PointSize); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/sign-int-types.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/sign-int-types.vert new file mode 100644 index 000000000..2f518b129 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/sign-int-types.vert @@ -0,0 +1,60 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 uMVP; + float4 uFloatVec4; + float3 uFloatVec3; + float2 uFloatVec2; + float uFloat; + int4 uIntVec4; + int3 uIntVec3; + int2 uIntVec2; + int uInt; +}; + +struct main0_out +{ + float4 vFloatVec4 [[user(locn0)]]; + float3 
vFloatVec3 [[user(locn1)]]; + float2 vFloatVec2 [[user(locn2)]]; + float vFloat [[user(locn3)]]; + int4 vIntVec4 [[user(locn4)]]; + int3 vIntVec3 [[user(locn5)]]; + int2 vIntVec2 [[user(locn6)]]; + int vInt [[user(locn7)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; +}; + +// Implementation of the GLSL sign() function for integer types +template::value>::type> +T sign(T x) +{ + return select(select(select(x, T(0), x == T(0)), T(1), x > T(0)), T(-1), x < T(0)); +} + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _21 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _21.uMVP * in.aVertex; + out.vFloatVec4 = sign(_21.uFloatVec4); + out.vFloatVec3 = sign(_21.uFloatVec3); + out.vFloatVec2 = sign(_21.uFloatVec2); + out.vFloat = sign(_21.uFloat); + out.vIntVec4 = sign(_21.uIntVec4); + out.vIntVec3 = sign(_21.uIntVec3); + out.vIntVec2 = sign(_21.uIntVec2); + out.vInt = sign(_21.uInt); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/texture_buffer.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/texture_buffer.vert new file mode 100644 index 000000000..c45d29813 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/texture_buffer.vert @@ -0,0 +1,25 @@ +#pragma clang diagnostic ignored "-Wmissing-prototypes" + +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +// Returns 2D texture coords corresponding to 1D texel buffer coords +uint2 spvTexelBufferCoord(uint tc) +{ + return uint2(tc % 4096, tc / 4096); +} + +vertex main0_out main0(texture2d uSamp [[texture(4)]], texture2d uSampo [[texture(5)]]) +{ + main0_out out = {}; + out.gl_Position = uSamp.read(spvTexelBufferCoord(10)) + uSampo.read(spvTexelBufferCoord(100)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/ubo.alignment.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/ubo.alignment.vert new file mode 100644 index 000000000..9a7ea56c6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/ubo.alignment.vert @@ -0,0 +1,38 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 mvp; + float2 targSize; + char pad2[8]; + packed_float3 color; + float opacity; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float3 vColor [[user(locn1)]]; + float2 vSize [[user(locn2)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _18 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _18.mvp * in.aVertex; + out.vNormal = in.aNormal; + out.vColor = float3(_18.color) * _18.opacity; + out.vSize = _18.targSize * _18.opacity; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/ubo.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/ubo.vert new file mode 100644 index 000000000..86ba1e968 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/ubo.vert @@ -0,0 +1,30 @@ +#include +#include + +using namespace metal; + +struct UBO +{ + float4x4 mvp; +}; + +struct main0_out +{ + float3 vNormal [[user(locn0)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + float4 aVertex [[attribute(0)]]; + float3 aNormal [[attribute(1)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant UBO& _16 [[buffer(0)]]) +{ + main0_out out = {}; + out.gl_Position = _16.mvp * in.aVertex; + 
out.vNormal = in.aNormal; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vert/viewport-index.msl2.invalid.vert b/3rdparty/spirv-cross/reference/shaders-msl/vert/viewport-index.msl2.invalid.vert new file mode 100644 index 000000000..e5316c072 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vert/viewport-index.msl2.invalid.vert @@ -0,0 +1,24 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; + uint gl_ViewportIndex [[viewport_array_index]]; +}; + +struct main0_in +{ + float4 coord [[attribute(0)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]]) +{ + main0_out out = {}; + out.gl_Position = in.coord; + out.gl_ViewportIndex = uint(int(in.coord.z)); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/push-constant.vk.frag b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/push-constant.vk.frag new file mode 100644 index 000000000..7b8c502b3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/push-constant.vk.frag @@ -0,0 +1,28 @@ +#include +#include + +using namespace metal; + +struct PushConstants +{ + float4 value0; + float4 value1; +}; + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +struct main0_in +{ + float4 vColor [[user(locn0)]]; +}; + +fragment main0_out main0(main0_in in [[stage_in]], constant PushConstants& push [[buffer(0)]]) +{ + main0_out out = {}; + out.FragColor = (in.vColor + push.value0) + push.value1; + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag new file mode 100644 index 000000000..74fe26eca --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag @@ -0,0 +1,126 @@ +#include +#include + +using namespace metal; + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 1.0 +#endif +constant float a = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 2.0 +#endif +constant float b = SPIRV_CROSS_CONSTANT_ID_2; +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 3 +#endif +constant int c = SPIRV_CROSS_CONSTANT_ID_3; +constant uint _18 = (uint(c) + 0u); +constant int _21 = (-c); +constant int _23 = (~c); +#ifndef SPIRV_CROSS_CONSTANT_ID_4 +#define SPIRV_CROSS_CONSTANT_ID_4 4 +#endif +constant int d = SPIRV_CROSS_CONSTANT_ID_4; +constant int _26 = (c + d); +constant int _28 = (c - d); +constant int _30 = (c * d); +constant int _32 = (c / d); +#ifndef SPIRV_CROSS_CONSTANT_ID_5 +#define SPIRV_CROSS_CONSTANT_ID_5 5u +#endif +constant uint e = SPIRV_CROSS_CONSTANT_ID_5; +#ifndef SPIRV_CROSS_CONSTANT_ID_6 +#define SPIRV_CROSS_CONSTANT_ID_6 6u +#endif +constant uint f = SPIRV_CROSS_CONSTANT_ID_6; +constant uint _36 = (e / f); +constant int _38 = (c % d); +constant uint _40 = (e % f); +constant int _42 = (c >> d); +constant uint _44 = (e >> f); +constant int _46 = (c << d); +constant int _48 = (c | d); +constant int _50 = (c ^ d); +constant int _52 = (c & d); +#ifndef SPIRV_CROSS_CONSTANT_ID_7 +#define SPIRV_CROSS_CONSTANT_ID_7 false +#endif +constant bool g = SPIRV_CROSS_CONSTANT_ID_7; +#ifndef SPIRV_CROSS_CONSTANT_ID_8 +#define SPIRV_CROSS_CONSTANT_ID_8 true +#endif +constant bool h = SPIRV_CROSS_CONSTANT_ID_8; +constant bool _58 = (g || h); +constant bool _60 = (g && h); +constant bool _62 = (!g); +constant bool _64 = (g == h); 
+constant bool _66 = (g != h); +constant bool _68 = (c == d); +constant bool _70 = (c != d); +constant bool _72 = (c < d); +constant bool _74 = (e < f); +constant bool _76 = (c > d); +constant bool _78 = (e > f); +constant bool _80 = (c <= d); +constant bool _82 = (e <= f); +constant bool _84 = (c >= d); +constant bool _86 = (e >= f); +constant int _92 = int(e + 0u); +constant bool _94 = (c != int(0u)); +constant bool _96 = (e != 0u); +constant int _100 = int(g); +constant uint _103 = uint(g); + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + float t0 = a; + float t1 = b; + uint c0 = _18; + int c1 = _21; + int c2 = _23; + int c3 = _26; + int c4 = _28; + int c5 = _30; + int c6 = _32; + uint c7 = _36; + int c8 = _38; + uint c9 = _40; + int c10 = _42; + uint c11 = _44; + int c12 = _46; + int c13 = _48; + int c14 = _50; + int c15 = _52; + bool c16 = _58; + bool c17 = _60; + bool c18 = _62; + bool c19 = _64; + bool c20 = _66; + bool c21 = _68; + bool c22 = _70; + bool c23 = _72; + bool c24 = _74; + bool c25 = _76; + bool c26 = _78; + bool c27 = _80; + bool c28 = _82; + bool c29 = _84; + bool c30 = _86; + int c31 = c8 + c3; + int c32 = _92; + bool c33 = _94; + bool c34 = _96; + int c35 = _100; + uint c36 = _103; + float c37 = float(g); + out.FragColor = float4(t0 + t1); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/spec-constant.vk.frag b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/spec-constant.vk.frag new file mode 100644 index 000000000..aa10a501a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/frag/spec-constant.vk.frag @@ -0,0 +1,110 @@ +#include +#include + +using namespace metal; + +constant float a_tmp [[function_constant(1)]]; +constant float a = is_function_constant_defined(a_tmp) ? a_tmp : 1.0; +constant float b_tmp [[function_constant(2)]]; +constant float b = is_function_constant_defined(b_tmp) ? b_tmp : 2.0; +constant int c_tmp [[function_constant(3)]]; +constant int c = is_function_constant_defined(c_tmp) ? c_tmp : 3; +constant uint _18 = (uint(c) + 0u); +constant int _21 = (-c); +constant int _23 = (~c); +constant int d_tmp [[function_constant(4)]]; +constant int d = is_function_constant_defined(d_tmp) ? d_tmp : 4; +constant int _26 = (c + d); +constant int _28 = (c - d); +constant int _30 = (c * d); +constant int _32 = (c / d); +constant uint e_tmp [[function_constant(5)]]; +constant uint e = is_function_constant_defined(e_tmp) ? e_tmp : 5u; +constant uint f_tmp [[function_constant(6)]]; +constant uint f = is_function_constant_defined(f_tmp) ? f_tmp : 6u; +constant uint _36 = (e / f); +constant int _38 = (c % d); +constant uint _40 = (e % f); +constant int _42 = (c >> d); +constant uint _44 = (e >> f); +constant int _46 = (c << d); +constant int _48 = (c | d); +constant int _50 = (c ^ d); +constant int _52 = (c & d); +constant bool g_tmp [[function_constant(7)]]; +constant bool g = is_function_constant_defined(g_tmp) ? g_tmp : false; +constant bool h_tmp [[function_constant(8)]]; +constant bool h = is_function_constant_defined(h_tmp) ? 
h_tmp : true; +constant bool _58 = (g || h); +constant bool _60 = (g && h); +constant bool _62 = (!g); +constant bool _64 = (g == h); +constant bool _66 = (g != h); +constant bool _68 = (c == d); +constant bool _70 = (c != d); +constant bool _72 = (c < d); +constant bool _74 = (e < f); +constant bool _76 = (c > d); +constant bool _78 = (e > f); +constant bool _80 = (c <= d); +constant bool _82 = (e <= f); +constant bool _84 = (c >= d); +constant bool _86 = (e >= f); +constant int _92 = int(e + 0u); +constant bool _94 = (c != int(0u)); +constant bool _96 = (e != 0u); +constant int _100 = int(g); +constant uint _103 = uint(g); + +struct main0_out +{ + float4 FragColor [[color(0)]]; +}; + +fragment main0_out main0() +{ + main0_out out = {}; + float t0 = a; + float t1 = b; + uint c0 = _18; + int c1 = _21; + int c2 = _23; + int c3 = _26; + int c4 = _28; + int c5 = _30; + int c6 = _32; + uint c7 = _36; + int c8 = _38; + uint c9 = _40; + int c10 = _42; + uint c11 = _44; + int c12 = _46; + int c13 = _48; + int c14 = _50; + int c15 = _52; + bool c16 = _58; + bool c17 = _60; + bool c18 = _62; + bool c19 = _64; + bool c20 = _66; + bool c21 = _68; + bool c22 = _70; + bool c23 = _72; + bool c24 = _74; + bool c25 = _76; + bool c26 = _78; + bool c27 = _80; + bool c28 = _82; + bool c29 = _84; + bool c30 = _86; + int c31 = c8 + c3; + int c32 = _92; + bool c33 = _94; + bool c34 = _96; + int c35 = _100; + uint c36 = _103; + float c37 = float(g); + out.FragColor = float4(t0 + t1); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vulkan/vert/small-storage.vk.vert b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/vert/small-storage.vk.vert new file mode 100644 index 000000000..c9ef91b24 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/vert/small-storage.vk.vert @@ -0,0 +1,48 @@ +#include +#include + +using namespace metal; + +struct block +{ + short2 a; + ushort2 b; + char2 c; + uchar2 d; + half2 e; +}; + +struct storage +{ + short3 f; + ushort3 g; + char3 h; + uchar3 i; + half3 j; +}; + +struct main0_out +{ + short4 p [[user(locn0)]]; + ushort4 q [[user(locn1)]]; + half4 r [[user(locn2)]]; + float4 gl_Position [[position]]; +}; + +struct main0_in +{ + short foo [[attribute(0)]]; + ushort bar [[attribute(1)]]; + half baz [[attribute(2)]]; +}; + +vertex main0_out main0(main0_in in [[stage_in]], constant block& _26 [[buffer(0)]], const device storage& _53 [[buffer(1)]]) +{ + main0_out out = {}; + out.p = short4((int4(int(in.foo)) + int4(int2(_26.a), int2(_26.c))) - int4(int3(_53.f) / int3(_53.h), 1)); + out.q = ushort4((uint4(uint(in.bar)) + uint4(uint2(_26.b), uint2(_26.d))) - uint4(uint3(_53.g) / uint3(_53.i), 1u)); + out.r = half4((float4(float(in.baz)) + float4(float2(_26.e), 0.0, 1.0)) - float4(float3(_53.j), 1.0)); + out.gl_Position = float4(0.0, 0.0, 0.0, 1.0); + return out; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert new file mode 100644 index 000000000..53e26e4a8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert @@ -0,0 +1,17 @@ +#include +#include + +using namespace metal; + +struct main0_out +{ + float4 gl_Position [[position]]; +}; + +vertex main0_out main0(uint gl_VertexIndex [[vertex_id]], uint gl_InstanceIndex [[instance_id]]) +{ + main0_out out = {}; + out.gl_Position = float4(1.0, 2.0, 3.0, 4.0) * float(gl_VertexIndex + gl_InstanceIndex); + return out; +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag b/3rdparty/spirv-cross/reference/shaders-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag new file mode 100644 index 000000000..01797173f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag @@ -0,0 +1,228 @@ +#version 450 + +struct VertexOutput +{ + vec4 HPosition; + vec4 Uv_EdgeDistance1; + vec4 UvStuds_EdgeDistance2; + vec4 Color; + vec4 LightPosition_Fog; + vec4 View_Depth; + vec4 Normal_SpecPower; + vec3 Tangent; + vec4 PosLightSpace_Reflectance; + float studIndex; +}; + +struct Surface +{ + vec3 albedo; + vec3 normal; + float specular; + float gloss; + float reflectance; + float opacity; +}; + +struct SurfaceInput +{ + vec4 Color; + vec2 Uv; + vec2 UvStuds; +}; + +struct Globals +{ + mat4 ViewProjection; + vec4 ViewRight; + vec4 ViewUp; + vec4 ViewDir; + vec3 CameraPosition; + vec3 AmbientColor; + vec3 Lamp0Color; + vec3 Lamp0Dir; + vec3 Lamp1Color; + vec4 FogParams; + vec3 FogColor; + vec4 LightBorder; + vec4 LightConfig0; + vec4 LightConfig1; + vec4 LightConfig2; + vec4 LightConfig3; + vec4 RefractionBias_FadeDistance_GlowFactor; + vec4 OutlineBrightness_ShadowInfo; + vec4 ShadowMatrix0; + vec4 ShadowMatrix1; + vec4 ShadowMatrix2; +}; + +struct Params +{ + vec4 LqmatFarTilingFactor; +}; + +layout(binding = 0, std140) uniform CB0 +{ + Globals CB0; +} _19; + +uniform sampler2D SPIRV_Cross_CombinedDiffuseMapTextureDiffuseMapSampler; +uniform sampler2D SPIRV_Cross_CombinedNormalMapTextureNormalMapSampler; +uniform sampler2D SPIRV_Cross_CombinedNormalDetailMapTextureNormalDetailMapSampler; +uniform sampler2D SPIRV_Cross_CombinedStudsMapTextureStudsMapSampler; +uniform sampler2D SPIRV_Cross_CombinedSpecularMapTextureSpecularMapSampler; +uniform sampler3D SPIRV_Cross_CombinedLightMapTextureLightMapSampler; +uniform sampler2D SPIRV_Cross_CombinedShadowMapTextureShadowMapSampler; +uniform samplerCube SPIRV_Cross_CombinedEnvironmentMapTextureEnvironmentMapSampler; + +layout(location = 0) in vec4 IN_Uv_EdgeDistance1; +layout(location = 1) in vec4 IN_UvStuds_EdgeDistance2; +layout(location = 2) in vec4 IN_Color; +layout(location = 3) in vec4 IN_LightPosition_Fog; +layout(location = 4) in vec4 IN_View_Depth; +layout(location = 5) in vec4 IN_Normal_SpecPower; +layout(location = 6) in vec3 IN_Tangent; +layout(location = 7) in vec4 IN_PosLightSpace_Reflectance; +layout(location = 8) in float IN_studIndex; +layout(location = 0) out vec4 _entryPointOutput; + +VertexOutput _121; +SurfaceInput _122; +vec2 _123; +vec4 _124; +Surface _125; +vec4 _192; +vec4 _219; +vec4 _297; + +void main() +{ + VertexOutput _128 = _121; + _128.HPosition = gl_FragCoord; + VertexOutput _130 = _128; + _130.Uv_EdgeDistance1 = IN_Uv_EdgeDistance1; + VertexOutput _132 = _130; + _132.UvStuds_EdgeDistance2 = IN_UvStuds_EdgeDistance2; + VertexOutput _134 = _132; + _134.Color = IN_Color; + VertexOutput _136 = _134; + _136.LightPosition_Fog = IN_LightPosition_Fog; + VertexOutput _138 = _136; + _138.View_Depth = IN_View_Depth; + VertexOutput _140 = _138; + _140.Normal_SpecPower = IN_Normal_SpecPower; + VertexOutput _142 = _140; + _142.Tangent = IN_Tangent; + VertexOutput _144 = _142; + _144.PosLightSpace_Reflectance = IN_PosLightSpace_Reflectance; + VertexOutput _146 = _144; + _146.studIndex = IN_studIndex; + SurfaceInput _147 = _122; + _147.Color = IN_Color; + SurfaceInput _149 = _147; + _149.Uv = IN_Uv_EdgeDistance1.xy; + SurfaceInput _151 = _149; + _151.UvStuds = 
IN_UvStuds_EdgeDistance2.xy; + SurfaceInput _156 = _151; + _156.UvStuds.y = (fract(_151.UvStuds.y) + IN_studIndex) * 0.25; + float _163 = _146.View_Depth.w * _19.CB0.RefractionBias_FadeDistance_GlowFactor.y; + float _165 = clamp(1.0 - _163, 0.0, 1.0); + vec2 _166 = IN_Uv_EdgeDistance1.xy * 1.0; + bool _173; + vec4 _193; + do + { + _173 = 0.0 == 0.0; + if (_173) + { + _193 = texture(SPIRV_Cross_CombinedDiffuseMapTextureDiffuseMapSampler, _166); + break; + } + else + { + float _180 = 1.0 / (1.0 - 0.0); + _193 = mix(texture(SPIRV_Cross_CombinedDiffuseMapTextureDiffuseMapSampler, _166 * 0.25), texture(SPIRV_Cross_CombinedDiffuseMapTextureDiffuseMapSampler, _166), vec4(clamp((clamp(1.0 - (_146.View_Depth.w * 0.00333332992158830165863037109375), 0.0, 1.0) * _180) - (0.0 * _180), 0.0, 1.0))); + break; + } + _193 = _192; + break; + } while (false); + vec4 _194 = _193 * 1.0; + vec4 _220; + do + { + if (_173) + { + _220 = texture(SPIRV_Cross_CombinedNormalMapTextureNormalMapSampler, _166); + break; + } + else + { + float _207 = 1.0 / (1.0 - 0.0); + _220 = mix(texture(SPIRV_Cross_CombinedNormalMapTextureNormalMapSampler, _166 * 0.25), texture(SPIRV_Cross_CombinedNormalMapTextureNormalMapSampler, _166), vec4(clamp((_165 * _207) - (0.0 * _207), 0.0, 1.0))); + break; + } + _220 = _219; + break; + } while (false); + vec2 _223 = vec2(1.0); + vec2 _224 = (_220.wy * 2.0) - _223; + vec3 _232 = vec3(_224, sqrt(clamp(1.0 + dot(-_224, _224), 0.0, 1.0))); + vec2 _240 = (texture(SPIRV_Cross_CombinedNormalDetailMapTextureNormalDetailMapSampler, _166 * 0.0).wy * 2.0) - _223; + vec2 _252 = _232.xy + (vec3(_240, sqrt(clamp(1.0 + dot(-_240, _240), 0.0, 1.0))).xy * 0.0); + vec3 _253 = vec3(_252.x, _252.y, _232.z); + vec2 _255 = _253.xy * _165; + vec3 _256 = vec3(_255.x, _255.y, _253.z); + vec3 _271 = ((IN_Color.xyz * _194.xyz) * (1.0 + (_256.x * 0.300000011920928955078125))) * (texture(SPIRV_Cross_CombinedStudsMapTextureStudsMapSampler, _156.UvStuds).x * 2.0); + vec4 _298; + do + { + if (0.75 == 0.0) + { + _298 = texture(SPIRV_Cross_CombinedSpecularMapTextureSpecularMapSampler, _166); + break; + } + else + { + float _285 = 1.0 / (1.0 - 0.75); + _298 = mix(texture(SPIRV_Cross_CombinedSpecularMapTextureSpecularMapSampler, _166 * 0.25), texture(SPIRV_Cross_CombinedSpecularMapTextureSpecularMapSampler, _166), vec4(clamp((_165 * _285) - (0.75 * _285), 0.0, 1.0))); + break; + } + _298 = _297; + break; + } while (false); + vec2 _303 = mix(vec2(0.800000011920928955078125, 120.0), (_298.xy * vec2(2.0, 256.0)) + vec2(0.0, 0.00999999977648258209228515625), vec2(_165)); + Surface _304 = _125; + _304.albedo = _271; + Surface _305 = _304; + _305.normal = _256; + float _306 = _303.x; + Surface _307 = _305; + _307.specular = _306; + float _308 = _303.y; + Surface _309 = _307; + _309.gloss = _308; + float _312 = (_298.xy.y * _165) * 0.0; + Surface _313 = _309; + _313.reflectance = _312; + vec4 _318 = vec4(_271, _146.Color.w); + vec3 _329 = normalize(((IN_Tangent * _313.normal.x) + (cross(IN_Normal_SpecPower.xyz, IN_Tangent) * _313.normal.y)) + (IN_Normal_SpecPower.xyz * _313.normal.z)); + vec3 _332 = -_19.CB0.Lamp0Dir; + float _333 = dot(_329, _332); + float _357 = clamp(dot(step(_19.CB0.LightConfig3.xyz, abs(IN_LightPosition_Fog.xyz - _19.CB0.LightConfig2.xyz)), vec3(1.0)), 0.0, 1.0); + vec4 _368 = mix(texture(SPIRV_Cross_CombinedLightMapTextureLightMapSampler, IN_LightPosition_Fog.xyz.yzx - (IN_LightPosition_Fog.xyz.yzx * _357)), _19.CB0.LightBorder, vec4(_357)); + vec2 _376 = 
texture(SPIRV_Cross_CombinedShadowMapTextureShadowMapSampler, IN_PosLightSpace_Reflectance.xyz.xy).xy; + float _392 = (1.0 - (((step(_376.x, IN_PosLightSpace_Reflectance.xyz.z) * clamp(9.0 - (20.0 * abs(IN_PosLightSpace_Reflectance.xyz.z - 0.5)), 0.0, 1.0)) * _376.y) * _19.CB0.OutlineBrightness_ShadowInfo.w)) * _368.w; + vec3 _403 = mix(_318.xyz, texture(SPIRV_Cross_CombinedEnvironmentMapTextureEnvironmentMapSampler, reflect(-IN_View_Depth.xyz, _329)).xyz, vec3(_312)); + vec4 _404 = vec4(_403.x, _403.y, _403.z, _318.w); + vec3 _422 = (((_19.CB0.AmbientColor + (((_19.CB0.Lamp0Color * clamp(_333, 0.0, 1.0)) + (_19.CB0.Lamp1Color * max(-_333, 0.0))) * _392)) + _368.xyz) * _404.xyz) + (_19.CB0.Lamp0Color * (((step(0.0, _333) * _306) * _392) * pow(clamp(dot(_329, normalize(_332 + normalize(IN_View_Depth.xyz))), 0.0, 1.0), _308))); + vec4 _425 = vec4(_422.x, _422.y, _422.z, _124.w); + _425.w = _404.w; + vec2 _435 = min(IN_Uv_EdgeDistance1.wz, IN_UvStuds_EdgeDistance2.wz); + float _439 = min(_435.x, _435.y) / _163; + vec3 _445 = _425.xyz * clamp((clamp((_163 * _19.CB0.OutlineBrightness_ShadowInfo.x) + _19.CB0.OutlineBrightness_ShadowInfo.y, 0.0, 1.0) * (1.5 - _439)) + _439, 0.0, 1.0); + vec4 _446 = vec4(_445.x, _445.y, _445.z, _425.w); + vec3 _453 = mix(_19.CB0.FogColor, _446.xyz, vec3(clamp(_146.LightPosition_Fog.w, 0.0, 1.0))); + _entryPointOutput = vec4(_453.x, _453.y, _453.z, _446.w); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/asm/vert/empty-struct-composite.asm.vert b/3rdparty/spirv-cross/reference/shaders-no-opt/asm/vert/empty-struct-composite.asm.vert new file mode 100644 index 000000000..8f786d49e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/asm/vert/empty-struct-composite.asm.vert @@ -0,0 +1,13 @@ +#version 450 + +struct Test +{ + int empty_struct_member; +}; + +void main() +{ + Test _14 = Test(0); + Test t = _14; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/asm/vert/semantic-decoration.asm.vert b/3rdparty/spirv-cross/reference/shaders-no-opt/asm/vert/semantic-decoration.asm.vert new file mode 100644 index 000000000..9af0e241e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/asm/vert/semantic-decoration.asm.vert @@ -0,0 +1,25 @@ +#version 450 + +struct VOut +{ + vec4 p; + vec4 c; +}; + +layout(location = 0) out vec4 _entryPointOutput_c; + +VOut _main() +{ + VOut v; + v.p = vec4(1.0); + v.c = vec4(2.0); + return v; +} + +void main() +{ + VOut flattenTemp = _main(); + gl_Position = flattenTemp.p; + _entryPointOutput_c = flattenTemp.c; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/comp/bitfield.comp b/3rdparty/spirv-cross/reference/shaders-no-opt/comp/bitfield.comp new file mode 100644 index 000000000..49bbddb0a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/comp/bitfield.comp @@ -0,0 +1,19 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + int signed_value = 0; + uint unsigned_value = 0u; + int s = bitfieldExtract(signed_value, 5, 20); + uint u = bitfieldExtract(unsigned_value, 6, 21); + s = bitfieldInsert(s, 40, 5, 4); + u = bitfieldInsert(u, 60u, 5, 4); + u = bitfieldReverse(u); + s = bitfieldReverse(s); + int v0 = bitCount(u); + int v1 = bitCount(s); + int v2 = findMSB(u); + int v3 = findLSB(s); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/comp/loop.comp b/3rdparty/spirv-cross/reference/shaders-no-opt/comp/loop.comp new file mode 100644 index 000000000..049a30669 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-no-opt/comp/loop.comp @@ -0,0 +1,105 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +} _24; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _177; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idat = _24.in_data[ident]; + int k = 0; + uint i = 0u; + if (idat.y == 20.0) + { + do + { + k *= 2; + i++; + } while (i < ident); + } + switch (k) + { + case 10: + { + for (;;) + { + i++; + if (i > 10u) + { + break; + } + continue; + } + break; + } + default: + { + for (;;) + { + i += 2u; + if (i > 20u) + { + break; + } + continue; + } + break; + } + } + while (k < 10) + { + idat *= 2.0; + k++; + } + for (uint i_1 = 0u; i_1 < 16u; i_1++, k++) + { + for (uint j = 0u; j < 30u; j++) + { + idat = _24.mvp * idat; + } + } + k = 0; + for (;;) + { + k++; + if (k > 10) + { + k += 2; + } + else + { + k += 3; + continue; + } + k += 10; + continue; + } + k = 0; + do + { + k++; + } while (k > 10); + int l = 0; + for (;;) + { + if (l == 5) + { + l++; + continue; + } + idat += vec4(1.0); + l++; + continue; + } + _177.out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/comp/return.comp b/3rdparty/spirv-cross/reference/shaders-no-opt/comp/return.comp new file mode 100644 index 000000000..4be20e93e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/comp/return.comp @@ -0,0 +1,34 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _27; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + if (ident == 2u) + { + _27.out_data[ident] = vec4(20.0); + } + else + { + if (ident == 4u) + { + _27.out_data[ident] = vec4(10.0); + return; + } + } + for (int i = 0; i < 20; i++) + { + if (i == 10) + { + break; + } + return; + } + _27.out_data[ident] = vec4(10.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/vulkan/frag/spec-constant.vk.frag b/3rdparty/spirv-cross/reference/shaders-no-opt/vulkan/frag/spec-constant.vk.frag new file mode 100644 index 000000000..2ae3b6b54 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/vulkan/frag/spec-constant.vk.frag @@ -0,0 +1,131 @@ +#version 310 es +precision mediump float; +precision highp int; + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 1.0 +#endif +const float a = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 2.0 +#endif +const float b = SPIRV_CROSS_CONSTANT_ID_2; +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 3 +#endif +const int c = SPIRV_CROSS_CONSTANT_ID_3; +const uint _18 = (uint(c) + 0u); +const int _21 = (-c); +const int _23 = (~c); +#ifndef SPIRV_CROSS_CONSTANT_ID_4 +#define SPIRV_CROSS_CONSTANT_ID_4 4 +#endif +const int d = SPIRV_CROSS_CONSTANT_ID_4; +const int _26 = (c + d); +const int _28 = (c - d); +const int _30 = (c * d); +const int _32 = (c / d); +#ifndef SPIRV_CROSS_CONSTANT_ID_5 +#define SPIRV_CROSS_CONSTANT_ID_5 5u +#endif +const uint e = SPIRV_CROSS_CONSTANT_ID_5; +#ifndef SPIRV_CROSS_CONSTANT_ID_6 +#define SPIRV_CROSS_CONSTANT_ID_6 6u +#endif +const uint f = SPIRV_CROSS_CONSTANT_ID_6; +const uint _36 = (e / f); +const int _38 = (c % d); +const uint _40 = (e % f); +const int _42 = (c >> d); +const uint _44 = (e >> f); +const int _46 = (c << d); +const int _48 = (c 
| d); +const int _50 = (c ^ d); +const int _52 = (c & d); +#ifndef SPIRV_CROSS_CONSTANT_ID_7 +#define SPIRV_CROSS_CONSTANT_ID_7 false +#endif +const bool g = SPIRV_CROSS_CONSTANT_ID_7; +#ifndef SPIRV_CROSS_CONSTANT_ID_8 +#define SPIRV_CROSS_CONSTANT_ID_8 true +#endif +const bool h = SPIRV_CROSS_CONSTANT_ID_8; +const bool _58 = (g || h); +const bool _60 = (g && h); +const bool _62 = (!g); +const bool _64 = (g == h); +const bool _66 = (g != h); +const bool _68 = (c == d); +const bool _70 = (c != d); +const bool _72 = (c < d); +const bool _74 = (e < f); +const bool _76 = (c > d); +const bool _78 = (e > f); +const bool _80 = (c <= d); +const bool _82 = (e <= f); +const bool _84 = (c >= d); +const bool _86 = (e >= f); +const int _92 = int(e + 0u); +const bool _94 = (c != int(0u)); +const bool _96 = (e != 0u); +const int _100 = int(g); +const uint _103 = uint(g); +const int _118 = (c + 3); +const int _127 = (c + 2); +const int _135 = (d + 2); + +struct Foo +{ + float elems[_135]; +}; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + float t0 = a; + float t1 = b; + mediump uint c0 = _18; + mediump int c1 = _21; + mediump int c2 = _23; + mediump int c3 = _26; + mediump int c4 = _28; + mediump int c5 = _30; + mediump int c6 = _32; + mediump uint c7 = _36; + mediump int c8 = _38; + mediump uint c9 = _40; + mediump int c10 = _42; + mediump uint c11 = _44; + mediump int c12 = _46; + mediump int c13 = _48; + mediump int c14 = _50; + mediump int c15 = _52; + bool c16 = _58; + bool c17 = _60; + bool c18 = _62; + bool c19 = _64; + bool c20 = _66; + bool c21 = _68; + bool c22 = _70; + bool c23 = _72; + bool c24 = _74; + bool c25 = _76; + bool c26 = _78; + bool c27 = _80; + bool c28 = _82; + bool c29 = _84; + bool c30 = _86; + mediump int c31 = c8 + c3; + mediump int c32 = _92; + bool c33 = _94; + bool c34 = _96; + mediump int c35 = _100; + mediump uint c36 = _103; + float c37 = float(g); + float vec0[_118][8]; + float vec1[_127]; + Foo foo; + FragColor = ((vec4(t0 + t1) + vec4(vec0[0][0])) + vec4(vec1[0])) + vec4(foo.elems[c]); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-no-opt/vulkan/frag/spec-constant.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders-no-opt/vulkan/frag/spec-constant.vk.frag.vk new file mode 100644 index 000000000..c5ae60b27 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-no-opt/vulkan/frag/spec-constant.vk.frag.vk @@ -0,0 +1,107 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(constant_id = 1) const float a = 1.0; +layout(constant_id = 2) const float b = 2.0; +layout(constant_id = 3) const int c = 3; +const uint _18 = (uint(c) + 0u); +const int _21 = (-c); +const int _23 = (~c); +layout(constant_id = 4) const int d = 4; +const int _26 = (c + d); +const int _28 = (c - d); +const int _30 = (c * d); +const int _32 = (c / d); +layout(constant_id = 5) const uint e = 5u; +layout(constant_id = 6) const uint f = 6u; +const uint _36 = (e / f); +const int _38 = (c % d); +const uint _40 = (e % f); +const int _42 = (c >> d); +const uint _44 = (e >> f); +const int _46 = (c << d); +const int _48 = (c | d); +const int _50 = (c ^ d); +const int _52 = (c & d); +layout(constant_id = 7) const bool g = false; +layout(constant_id = 8) const bool h = true; +const bool _58 = (g || h); +const bool _60 = (g && h); +const bool _62 = (!g); +const bool _64 = (g == h); +const bool _66 = (g != h); +const bool _68 = (c == d); +const bool _70 = (c != d); +const bool _72 = (c < d); +const bool _74 = (e < f); +const bool _76 = (c > d); +const bool _78 = 
(e > f); +const bool _80 = (c <= d); +const bool _82 = (e <= f); +const bool _84 = (c >= d); +const bool _86 = (e >= f); +const int _92 = int(e + 0u); +const bool _94 = (c != int(0u)); +const bool _96 = (e != 0u); +const int _100 = int(g); +const uint _103 = uint(g); +const int _118 = (c + 3); +const int _127 = (c + 2); +const int _135 = (d + 2); + +struct Foo +{ + float elems[_135]; +}; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + float t0 = a; + float t1 = b; + mediump uint c0 = _18; + mediump int c1 = _21; + mediump int c2 = _23; + mediump int c3 = _26; + mediump int c4 = _28; + mediump int c5 = _30; + mediump int c6 = _32; + mediump uint c7 = _36; + mediump int c8 = _38; + mediump uint c9 = _40; + mediump int c10 = _42; + mediump uint c11 = _44; + mediump int c12 = _46; + mediump int c13 = _48; + mediump int c14 = _50; + mediump int c15 = _52; + bool c16 = _58; + bool c17 = _60; + bool c18 = _62; + bool c19 = _64; + bool c20 = _66; + bool c21 = _68; + bool c22 = _70; + bool c23 = _72; + bool c24 = _74; + bool c25 = _76; + bool c26 = _78; + bool c27 = _80; + bool c28 = _82; + bool c29 = _84; + bool c30 = _86; + mediump int c31 = c8 + c3; + mediump int c32 = _92; + bool c33 = _94; + bool c34 = _96; + mediump int c35 = _100; + mediump uint c36 = _103; + float c37 = float(g); + float vec0[_118][8]; + float vec1[_127]; + Foo foo; + FragColor = ((vec4(t0 + t1) + vec4(vec0[0][0])) + vec4(vec1[0])) + vec4(foo.elems[c]); +} + diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/asm/aliased-entry-point-names.asm.multi.json b/3rdparty/spirv-cross/reference/shaders-reflection/asm/aliased-entry-point-names.asm.multi.json new file mode 100644 index 000000000..45adf50b1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/asm/aliased-entry-point-names.asm.multi.json @@ -0,0 +1,49 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "vert" + }, + { + "name" : "main2", + "mode" : "vert" + }, + { + "name" : "main", + "mode" : "frag" + }, + { + "name" : "main2", + "mode" : "frag" + } + ], + "types" : { + "_8" : { + "name" : "_8", + "members" : [ + { + "name" : "_m0", + "type" : "vec4" + }, + { + "name" : "_m1", + "type" : "float" + }, + { + "name" : "_m2", + "type" : "float", + "array" : [ + 1 + ] + }, + { + "name" : "_m3", + "type" : "float", + "array" : [ + 1 + ] + } + ] + } + } +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/comp/struct-layout.comp.json b/3rdparty/spirv-cross/reference/shaders-reflection/comp/struct-layout.comp.json new file mode 100644 index 000000000..3004454b8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/comp/struct-layout.comp.json @@ -0,0 +1,64 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "comp" + } + ], + "types" : { + "_19" : { + "name" : "Foo", + "members" : [ + { + "name" : "m", + "type" : "mat4", + "offset" : 0 + } + ] + }, + "_21" : { + "name" : "SSBO2", + "members" : [ + { + "name" : "out_data", + "type" : "_19", + "array" : [ + 0 + ], + "offset" : 0 + } + ] + }, + "_28" : { + "name" : "SSBO", + "members" : [ + { + "name" : "in_data", + "type" : "_19", + "array" : [ + 0 + ], + "offset" : 0 + } + ] + } + }, + "ssbos" : [ + { + "type" : "_21", + "name" : "SSBO2", + "writeonly" : true, + "block_size" : 0, + "set" : 0, + "binding" : 1 + }, + { + "type" : "_28", + "name" : "SSBO", + "readonly" : true, + "block_size" : 0, + "set" : 0, + "binding" : 0 + } + ] +} \ No newline at end of file diff --git 
a/3rdparty/spirv-cross/reference/shaders-reflection/comp/struct-packing.comp.json b/3rdparty/spirv-cross/reference/shaders-reflection/comp/struct-packing.comp.json new file mode 100644 index 000000000..22a41584d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/comp/struct-packing.comp.json @@ -0,0 +1,474 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "comp" + } + ], + "types" : { + "_11" : { + "name" : "S0", + "members" : [ + { + "name" : "a", + "type" : "vec2", + "array" : [ + 1 + ], + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 8 + } + ] + }, + "_14" : { + "name" : "S1", + "members" : [ + { + "name" : "a", + "type" : "vec3", + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 12 + } + ] + }, + "_17" : { + "name" : "S2", + "members" : [ + { + "name" : "a", + "type" : "vec3", + "array" : [ + 1 + ], + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 16 + } + ] + }, + "_19" : { + "name" : "S3", + "members" : [ + { + "name" : "a", + "type" : "vec2", + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 8 + } + ] + }, + "_20" : { + "name" : "S4", + "members" : [ + { + "name" : "c", + "type" : "vec2", + "offset" : 0 + } + ] + }, + "_23" : { + "name" : "Content", + "members" : [ + { + "name" : "m0s", + "type" : "_11", + "array" : [ + 1 + ], + "offset" : 0 + }, + { + "name" : "m1s", + "type" : "_14", + "array" : [ + 1 + ], + "offset" : 16 + }, + { + "name" : "m2s", + "type" : "_17", + "array" : [ + 1 + ], + "offset" : 32 + }, + { + "name" : "m0", + "type" : "_11", + "offset" : 64 + }, + { + "name" : "m1", + "type" : "_14", + "offset" : 80 + }, + { + "name" : "m2", + "type" : "_17", + "offset" : 96 + }, + { + "name" : "m3", + "type" : "_19", + "offset" : 128 + }, + { + "name" : "m4", + "type" : "float", + "offset" : 144 + }, + { + "name" : "m3s", + "type" : "_20", + "array" : [ + 8 + ], + "offset" : 152 + } + ] + }, + "_36" : { + "name" : "SSBO1", + "members" : [ + { + "name" : "content", + "type" : "_23", + "offset" : 0 + }, + { + "name" : "content1", + "type" : "_23", + "array" : [ + 2 + ], + "offset" : 224 + }, + { + "name" : "content2", + "type" : "_23", + "offset" : 672 + }, + { + "name" : "m0", + "type" : "mat2", + "offset" : 896 + }, + { + "name" : "m1", + "type" : "mat2", + "offset" : 912 + }, + { + "name" : "m2", + "type" : "mat2x3", + "array" : [ + 4 + ], + "offset" : 928 + }, + { + "name" : "m3", + "type" : "mat3x2", + "offset" : 1056 + }, + { + "name" : "m4", + "type" : "mat2", + "row_major" : true, + "offset" : 1080 + }, + { + "name" : "m5", + "type" : "mat2", + "row_major" : true, + "array" : [ + 9 + ], + "offset" : 1096 + }, + { + "name" : "m6", + "type" : "mat2x3", + "row_major" : true, + "array" : [ + 2, + 4 + ], + "offset" : 1240 + }, + { + "name" : "m7", + "type" : "mat3x2", + "row_major" : true, + "offset" : 1440 + }, + { + "name" : "array", + "type" : "float", + "array" : [ + 0 + ], + "offset" : 1472 + } + ] + }, + "_42" : { + "name" : "S0", + "members" : [ + { + "name" : "a", + "type" : "vec2", + "array" : [ + 1 + ], + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 16 + } + ] + }, + "_44" : { + "name" : "S1", + "members" : [ + { + "name" : "a", + "type" : "vec3", + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 12 + } + ] + }, + "_47" : { + "name" : "S2", + "members" : [ + { + "name" : "a", + "type" : "vec3", + "array" : [ + 1 + ], + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 16 + 
} + ] + }, + "_49" : { + "name" : "S3", + "members" : [ + { + "name" : "a", + "type" : "vec2", + "offset" : 0 + }, + { + "name" : "b", + "type" : "float", + "offset" : 8 + } + ] + }, + "_50" : { + "name" : "S4", + "members" : [ + { + "name" : "c", + "type" : "vec2", + "offset" : 0 + } + ] + }, + "_52" : { + "name" : "Content", + "members" : [ + { + "name" : "m0s", + "type" : "_42", + "array" : [ + 1 + ], + "offset" : 0 + }, + { + "name" : "m1s", + "type" : "_44", + "array" : [ + 1 + ], + "offset" : 32 + }, + { + "name" : "m2s", + "type" : "_47", + "array" : [ + 1 + ], + "offset" : 48 + }, + { + "name" : "m0", + "type" : "_42", + "offset" : 80 + }, + { + "name" : "m1", + "type" : "_44", + "offset" : 112 + }, + { + "name" : "m2", + "type" : "_47", + "offset" : 128 + }, + { + "name" : "m3", + "type" : "_49", + "offset" : 160 + }, + { + "name" : "m4", + "type" : "float", + "offset" : 176 + }, + { + "name" : "m3s", + "type" : "_50", + "array" : [ + 8 + ], + "offset" : 192 + } + ] + }, + "_59" : { + "name" : "SSBO0", + "members" : [ + { + "name" : "content", + "type" : "_52", + "offset" : 0 + }, + { + "name" : "content1", + "type" : "_52", + "array" : [ + 2 + ], + "offset" : 320 + }, + { + "name" : "content2", + "type" : "_52", + "offset" : 960 + }, + { + "name" : "m0", + "type" : "mat2", + "offset" : 1280 + }, + { + "name" : "m1", + "type" : "mat2", + "offset" : 1312 + }, + { + "name" : "m2", + "type" : "mat2x3", + "array" : [ + 4 + ], + "offset" : 1344 + }, + { + "name" : "m3", + "type" : "mat3x2", + "offset" : 1472 + }, + { + "name" : "m4", + "type" : "mat2", + "row_major" : true, + "offset" : 1520 + }, + { + "name" : "m5", + "type" : "mat2", + "row_major" : true, + "array" : [ + 9 + ], + "offset" : 1552 + }, + { + "name" : "m6", + "type" : "mat2x3", + "row_major" : true, + "array" : [ + 2, + 4 + ], + "offset" : 1840 + }, + { + "name" : "m7", + "type" : "mat3x2", + "row_major" : true, + "offset" : 2224 + }, + { + "name" : "array", + "type" : "float", + "array" : [ + 0 + ], + "offset" : 2256 + } + ] + } + }, + "ssbos" : [ + { + "type" : "_36", + "name" : "SSBO1", + "restrict" : true, + "block_size" : 1472, + "set" : 0, + "binding" : 1 + }, + { + "type" : "_59", + "name" : "SSBO0", + "restrict" : true, + "block_size" : 2256, + "set" : 0, + "binding" : 0 + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/combined-texture-sampler-shadow.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/combined-texture-sampler-shadow.vk.frag.json new file mode 100644 index 000000000..5b4d3c6f7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/combined-texture-sampler-shadow.vk.frag.json @@ -0,0 +1,37 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "outputs" : [ + { + "type" : "float", + "name" : "FragColor", + "location" : 0 + } + ], + "separate_images" : [ + { + "type" : "texture2D", + "name" : "uDepth", + "set" : 0, + "binding" : 2 + } + ], + "separate_samplers" : [ + { + "type" : "sampler", + "name" : "uSampler", + "set" : 0, + "binding" : 0 + }, + { + "type" : "sampler", + "name" : "uSampler1", + "set" : 0, + "binding" : 1 + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/combined-texture-sampler.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/combined-texture-sampler.vk.frag.json new file mode 100644 index 000000000..8b6a18429 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders-reflection/frag/combined-texture-sampler.vk.frag.json @@ -0,0 +1,50 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "inputs" : [ + { + "type" : "vec2", + "name" : "vTex", + "location" : 0 + } + ], + "outputs" : [ + { + "type" : "vec4", + "name" : "FragColor", + "location" : 0 + } + ], + "separate_images" : [ + { + "type" : "texture2D", + "name" : "uTexture0", + "set" : 0, + "binding" : 2 + }, + { + "type" : "texture2D", + "name" : "uTexture1", + "set" : 0, + "binding" : 3 + } + ], + "separate_samplers" : [ + { + "type" : "sampler", + "name" : "uSampler0", + "set" : 0, + "binding" : 0 + }, + { + "type" : "sampler", + "name" : "uSampler1", + "set" : 0, + "binding" : 1 + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/image-load-store-uint-coord.asm.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/image-load-store-uint-coord.asm.frag.json new file mode 100644 index 000000000..527ea2bfe --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/image-load-store-uint-coord.asm.frag.json @@ -0,0 +1,47 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "outputs" : [ + { + "type" : "vec4", + "name" : "_entryPointOutput", + "location" : 0 + } + ], + "textures" : [ + { + "type" : "sampler2D", + "name" : "ROIm", + "set" : 0, + "binding" : 1 + } + ], + "separate_images" : [ + { + "type" : "samplerBuffer", + "name" : "ROBuf", + "set" : 0, + "binding" : 0 + } + ], + "images" : [ + { + "type" : "image2D", + "name" : "RWIm", + "set" : 0, + "binding" : 1, + "format" : "rgba32f" + }, + { + "type" : "imageBuffer", + "name" : "RWBuf", + "set" : 0, + "binding" : 0, + "format" : "rgba32f" + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/input-attachment-ms.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/input-attachment-ms.vk.frag.json new file mode 100644 index 000000000..5f381911a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/input-attachment-ms.vk.frag.json @@ -0,0 +1,31 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "subpass_inputs" : [ + { + "type" : "subpassInputMS", + "name" : "uSubpass0", + "set" : 0, + "binding" : 0, + "input_attachment_index" : 0 + }, + { + "type" : "subpassInputMS", + "name" : "uSubpass1", + "set" : 0, + "binding" : 1, + "input_attachment_index" : 1 + } + ], + "outputs" : [ + { + "type" : "vec4", + "name" : "FragColor", + "location" : 0 + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/input-attachment.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/input-attachment.vk.frag.json new file mode 100644 index 000000000..16ae6a468 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/input-attachment.vk.frag.json @@ -0,0 +1,31 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "subpass_inputs" : [ + { + "type" : "subpassInput", + "name" : "uSubpass0", + "set" : 0, + "binding" : 0, + "input_attachment_index" : 0 + }, + { + "type" : "subpassInput", + "name" : "uSubpass1", + "set" : 0, + "binding" : 1, + "input_attachment_index" : 1 + } + ], + "outputs" : [ + { + "type" : "vec4", + "name" : "FragColor", + "location" : 0 + } + ] +} \ No newline at end of file diff --git 
a/3rdparty/spirv-cross/reference/shaders-reflection/frag/push-constant.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/push-constant.vk.frag.json new file mode 100644 index 000000000..f72a8fd65 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/push-constant.vk.frag.json @@ -0,0 +1,46 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "types" : { + "_13" : { + "name" : "PushConstants", + "members" : [ + { + "name" : "value0", + "type" : "vec4", + "offset" : 0 + }, + { + "name" : "value1", + "type" : "vec4", + "offset" : 16 + } + ] + } + }, + "inputs" : [ + { + "type" : "vec4", + "name" : "vColor", + "location" : 0 + } + ], + "outputs" : [ + { + "type" : "vec4", + "name" : "FragColor", + "location" : 0 + } + ], + "push_constants" : [ + { + "type" : "_13", + "name" : "push", + "push_constant" : true + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/separate-sampler-texture-array.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/separate-sampler-texture-array.vk.frag.json new file mode 100644 index 000000000..9216d93e5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/separate-sampler-texture-array.vk.frag.json @@ -0,0 +1,73 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "inputs" : [ + { + "type" : "vec2", + "name" : "vTex", + "location" : 0 + }, + { + "type" : "vec3", + "name" : "vTex3", + "location" : 1 + } + ], + "outputs" : [ + { + "type" : "vec4", + "name" : "FragColor", + "location" : 0 + } + ], + "separate_images" : [ + { + "type" : "texture2D", + "name" : "uTexture", + "array" : [ + 4 + ], + "set" : 0, + "binding" : 1 + }, + { + "type" : "texture2DArray", + "name" : "uTextureArray", + "array" : [ + 4 + ], + "set" : 0, + "binding" : 4 + }, + { + "type" : "textureCube", + "name" : "uTextureCube", + "array" : [ + 4 + ], + "set" : 0, + "binding" : 3 + }, + { + "type" : "texture3D", + "name" : "uTexture3D", + "array" : [ + 4 + ], + "set" : 0, + "binding" : 2 + } + ], + "separate_samplers" : [ + { + "type" : "sampler", + "name" : "uSampler", + "set" : 0, + "binding" : 0 + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/frag/spec-constant.vk.frag.json b/3rdparty/spirv-cross/reference/shaders-reflection/frag/spec-constant.vk.frag.json new file mode 100644 index 000000000..0add29866 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/frag/spec-constant.vk.frag.json @@ -0,0 +1,71 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "frag" + } + ], + "types" : { + "_137" : { + "name" : "Foo", + "members" : [ + { + "name" : "elems", + "type" : "float", + "array" : [ + 135 + ] + } + ] + } + }, + "outputs" : [ + { + "type" : "vec4", + "name" : "FragColor", + "location" : 0 + } + ], + "specialization_constants" : [ + { + "id" : 1, + "type" : "float", + "default_value" : 1.5 + }, + { + "id" : 2, + "type" : "float", + "default_value" : 2.5 + }, + { + "id" : 3, + "type" : "int", + "default_value" : 3 + }, + { + "id" : 4, + "type" : "int", + "default_value" : 4 + }, + { + "id" : 5, + "type" : "uint", + "default_value" : 5 + }, + { + "id" : 6, + "type" : "uint", + "default_value" : 6 + }, + { + "id" : 7, + "type" : "bool", + "default_value" : false + }, + { + "id" : 8, + "type" : "bool", + "default_value" : true + } + ] +} \ No newline at end of file diff --git 
a/3rdparty/spirv-cross/reference/shaders-reflection/vert/read-from-row-major-array.vert.json b/3rdparty/spirv-cross/reference/shaders-reflection/vert/read-from-row-major-array.vert.json new file mode 100644 index 000000000..d92fb67fb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/vert/read-from-row-major-array.vert.json @@ -0,0 +1,61 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "vert" + } + ], + "types" : { + "_89" : { + "name" : "gl_PerVertex", + "members" : [ + { + "name" : "gl_Position", + "type" : "vec4" + }, + { + "name" : "gl_PointSize", + "type" : "float" + } + ] + }, + "_102" : { + "name" : "Block", + "members" : [ + { + "name" : "var", + "type" : "mat2x3", + "row_major" : true, + "array" : [ + 4, + 3 + ], + "offset" : 0 + } + ] + } + }, + "inputs" : [ + { + "type" : "vec4", + "name" : "a_position", + "location" : 0 + } + ], + "outputs" : [ + { + "type" : "float", + "name" : "v_vtxResult", + "location" : 0 + } + ], + "ubos" : [ + { + "type" : "_102", + "name" : "Block", + "block_size" : 576, + "set" : 0, + "binding" : 0 + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders-reflection/vert/texture_buffer.vert.json b/3rdparty/spirv-cross/reference/shaders-reflection/vert/texture_buffer.vert.json new file mode 100644 index 000000000..3c69e24cb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders-reflection/vert/texture_buffer.vert.json @@ -0,0 +1,40 @@ +{ + "entryPoints" : [ + { + "name" : "main", + "mode" : "vert" + } + ], + "types" : { + "_8" : { + "name" : "gl_PerVertex", + "members" : [ + { + "name" : "gl_Position", + "type" : "vec4" + }, + { + "name" : "gl_PointSize", + "type" : "float" + } + ] + } + }, + "textures" : [ + { + "type" : "samplerBuffer", + "name" : "uSamp", + "set" : 0, + "binding" : 4 + } + ], + "images" : [ + { + "type" : "imageBuffer", + "name" : "uSampo", + "set" : 0, + "binding" : 5, + "format" : "rgba32f" + } + ] +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/reference/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag.vk b/3rdparty/spirv-cross/reference/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag.vk new file mode 100644 index 000000000..4aaf397a0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag.vk @@ -0,0 +1,11 @@ +#version 450 +#extension GL_AMD_shader_fragment_mask : require + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS t; + +void main() +{ + vec4 test2 = fragmentFetchAMD(t, 4u); + uint testi2 = fragmentMaskFetchAMD(t); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/amd/fs.invalid.frag b/3rdparty/spirv-cross/reference/shaders/amd/fs.invalid.frag new file mode 100644 index 000000000..aecf69eba --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/amd/fs.invalid.frag @@ -0,0 +1,15 @@ +#version 450 +#extension GL_AMD_shader_fragment_mask : require +#extension GL_AMD_shader_explicit_vertex_parameter : require + +layout(binding = 0) uniform sampler2DMS texture1; + +layout(location = 0) __explicitInterpAMD in vec4 vary; + +void main() +{ + uint testi1 = fragmentMaskFetchAMD(texture1, ivec2(0)); + vec4 test1 = fragmentFetchAMD(texture1, ivec2(1), 2u); + vec4 pos = interpolateAtVertexAMD(vary, 0u); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/amd/gcn_shader.comp b/3rdparty/spirv-cross/reference/shaders/amd/gcn_shader.comp new file mode 100644 index 000000000..1c0c5ae38 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders/amd/gcn_shader.comp @@ -0,0 +1,12 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +#extension GL_AMD_gcn_shader : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + float cubeFace = cubeFaceIndexAMD(vec3(0.0)); + vec2 cubeFaceCoord = cubeFaceCoordAMD(vec3(1.0)); + uint64_t time = timeAMD(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/amd/shader_ballot.comp b/3rdparty/spirv-cross/reference/shaders/amd/shader_ballot.comp new file mode 100644 index 000000000..1fade727c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/amd/shader_ballot.comp @@ -0,0 +1,32 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +#extension GL_ARB_shader_ballot : require +#extension GL_AMD_shader_ballot : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer inputData +{ + float inputDataArray[]; +} _12; + +layout(binding = 1, std430) buffer outputData +{ + float outputDataArray[]; +} _74; + +void main() +{ + float thisLaneData = _12.inputDataArray[gl_LocalInvocationID.x]; + bool laneActive = thisLaneData > 0.0; + uint thisLaneOutputSlot = mbcntAMD(packUint2x32(uvec2(uvec4(unpackUint2x32(ballotARB(laneActive)), 0u, 0u).xy))); + int firstInvocation = readFirstInvocationARB(1); + int invocation = readInvocationARB(1, 0u); + vec3 swizzleInvocations = swizzleInvocationsAMD(vec3(0.0, 2.0, 1.0), uvec4(3u)); + vec3 swizzelInvocationsMasked = swizzleInvocationsMaskedAMD(vec3(0.0, 2.0, 1.0), uvec3(2u)); + vec3 writeInvocation = writeInvocationAMD(swizzleInvocations, swizzelInvocationsMasked, 0u); + if (laneActive) + { + _74.outputDataArray[thisLaneOutputSlot] = thisLaneData; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp b/3rdparty/spirv-cross/reference/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp new file mode 100644 index 000000000..a14343ae1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp @@ -0,0 +1,11 @@ +#version 450 +#extension GL_AMD_shader_ballot : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + float addInvocations = addInvocationsNonUniformAMD(0.0); + int minInvocations = minInvocationsNonUniformAMD(1); + uint maxInvocations = uint(maxInvocationsNonUniformAMD(4)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/amd/shader_group_vote.comp b/3rdparty/spirv-cross/reference/shaders/amd/shader_group_vote.comp new file mode 100644 index 000000000..007d9f984 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/amd/shader_group_vote.comp @@ -0,0 +1,18 @@ +#version 450 +#extension GL_ARB_shader_group_vote : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer inputData +{ + float inputDataArray[]; +} _12; + +void main() +{ + float thisLaneData = _12.inputDataArray[gl_LocalInvocationID.x]; + bool laneActive = thisLaneData > 0.0; + bool allInvocations = allInvocationsARB(laneActive); + bool anyInvocations = anyInvocationARB(laneActive); + bool allInvocationsEqual = allInvocationsEqualARB(laneActive); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/amd/shader_trinary_minmax.comp b/3rdparty/spirv-cross/reference/shaders/amd/shader_trinary_minmax.comp new file mode 100644 index 000000000..ece39b710 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders/amd/shader_trinary_minmax.comp @@ -0,0 +1,11 @@ +#version 450 +#extension GL_AMD_shader_trinary_minmax : require +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + int t11 = min3(0, 3, 2); + int t12 = max3(0, 3, 2); + int t13 = mid3(0, 3, 2); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..5ece8257d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,18 @@ +#version 450 +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer u0_counters +{ + uint c; +} u0_counter; + +layout(binding = 0, r32ui) uniform writeonly uimageBuffer u0; + +void main() +{ + uint _29 = atomicAdd(u0_counter.c, uint(-1)); + vec4 r0; + r0.x = uintBitsToFloat(_29); + imageStore(u0, int((uint(floatBitsToInt(r0.x)) * 1u) + (uint(0) >> 2u)), uvec4(uint(int(gl_GlobalInvocationID.x)))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..2a8f71182 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,18 @@ +#version 450 +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer u0_counters +{ + uint c; +} u0_counter; + +layout(binding = 0, r32ui) uniform writeonly uimageBuffer u0; + +void main() +{ + uint _29 = atomicAdd(u0_counter.c, 1u); + vec4 r0; + r0.x = uintBitsToFloat(_29); + imageStore(u0, int((uint(floatBitsToInt(r0.x)) * 1u) + (uint(0) >> 2u)), uvec4(uint(int(gl_GlobalInvocationID.x)))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_iadd.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_iadd.asm.comp new file mode 100644 index 000000000..bed2dffcc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_iadd.asm.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) restrict buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) restrict buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + _6._m0 = _5._m1 + uvec4(_5._m0); + _6._m0 = uvec4(_5._m0) + _5._m1; + _6._m0 = _5._m1 + _5._m1; + _6._m0 = uvec4(_5._m0 + _5._m0); + _6._m1 = ivec4(_5._m1 + _5._m1); + _6._m1 = _5._m0 + _5._m0; + _6._m1 = ivec4(_5._m1) + _5._m0; + _6._m1 = _5._m0 + ivec4(_5._m1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_iequal.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_iequal.asm.comp new file mode 100644 index 000000000..bdb3eeb9a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_iequal.asm.comp @@ -0,0 +1,33 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _30 = _5._m0; + uvec4 _31 = _5._m1; + bvec4 _34 = equal(ivec4(_31), _30); + bvec4 _35 = equal(_30, ivec4(_31)); + bvec4 _36 = equal(_31, _31); + bvec4 _37 = equal(_30, _30); + _6._m0 = mix(uvec4(0u), uvec4(1u), _34); + _6._m0 = 
mix(uvec4(0u), uvec4(1u), _35); + _6._m0 = mix(uvec4(0u), uvec4(1u), _36); + _6._m0 = mix(uvec4(0u), uvec4(1u), _37); + _6._m1 = mix(ivec4(0), ivec4(1), _34); + _6._m1 = mix(ivec4(0), ivec4(1), _35); + _6._m1 = mix(ivec4(0), ivec4(1), _36); + _6._m1 = mix(ivec4(0), ivec4(1), _37); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_sar.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_sar.asm.comp new file mode 100644 index 000000000..283b444cc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_sar.asm.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _22 = _5._m0; + uvec4 _23 = _5._m1; + _6._m0 = uvec4(ivec4(_23) >> _22); + _6._m0 = uvec4(_22 >> ivec4(_23)); + _6._m0 = uvec4(ivec4(_23) >> ivec4(_23)); + _6._m0 = uvec4(_22 >> _22); + _6._m1 = ivec4(_23) >> ivec4(_23); + _6._m1 = _22 >> _22; + _6._m1 = ivec4(_23) >> _22; + _6._m1 = _22 >> ivec4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_sdiv.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_sdiv.asm.comp new file mode 100644 index 000000000..e28c481d2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_sdiv.asm.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _22 = _5._m0; + uvec4 _23 = _5._m1; + _6._m0 = uvec4(ivec4(_23) / _22); + _6._m0 = uvec4(_22 / ivec4(_23)); + _6._m0 = uvec4(ivec4(_23) / ivec4(_23)); + _6._m0 = uvec4(_22 / _22); + _6._m1 = ivec4(_23) / ivec4(_23); + _6._m1 = _22 / _22; + _6._m1 = ivec4(_23) / _22; + _6._m1 = _22 / ivec4(_23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_slr.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_slr.asm.comp new file mode 100644 index 000000000..78efaf385 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/bitcast_slr.asm.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_5 +{ + ivec4 _m0; + uvec4 _m1; +} _5; + +layout(binding = 1, std430) buffer _4_6 +{ + uvec4 _m0; + ivec4 _m1; +} _6; + +void main() +{ + ivec4 _22 = _5._m0; + uvec4 _23 = _5._m1; + _6._m0 = _23 >> uvec4(_22); + _6._m0 = uvec4(_22) >> _23; + _6._m0 = _23 >> _23; + _6._m0 = uvec4(_22) >> uvec4(_22); + _6._m1 = ivec4(_23 >> _23); + _6._m1 = ivec4(uvec4(_22) >> uvec4(_22)); + _6._m1 = ivec4(_23 >> uvec4(_22)); + _6._m1 = ivec4(uvec4(_22) >> _23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..08fccbcde --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,43 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct A +{ + int a; + int b; +}; + +struct A_1 +{ + int a; + int b; +}; + +layout(binding = 1, std430) buffer C1 +{ + A Data[]; +} C1_1; + +layout(binding = 2, std140) uniform C2 +{ + A_1 
Data[1024]; +} C2_1; + +layout(binding = 0, std430) buffer B +{ + A Data[]; +} C3; + +layout(binding = 3, std140) uniform B +{ + A_1 Data[1024]; +} C4; + +void main() +{ + C1_1.Data[gl_GlobalInvocationID.x].a = C2_1.Data[gl_GlobalInvocationID.x].a; + C1_1.Data[gl_GlobalInvocationID.x].b = C2_1.Data[gl_GlobalInvocationID.x].b; + C3.Data[gl_GlobalInvocationID.x].a = C4.Data[gl_GlobalInvocationID.x].a; + C3.Data[gl_GlobalInvocationID.x].b = C4.Data[gl_GlobalInvocationID.x].b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/builtin-compute-bitcast.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/builtin-compute-bitcast.asm.comp new file mode 100644 index 000000000..abb8a7976 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/builtin-compute-bitcast.asm.comp @@ -0,0 +1,13 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer BUF +{ + int values[]; +} _6; + +void main() +{ + _6.values[int(gl_WorkGroupID.y)] = int(gl_GlobalInvocationID.z); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/decoration-group.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/decoration-group.asm.comp new file mode 100644 index 000000000..28ad4d41f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/decoration-group.asm.comp @@ -0,0 +1,38 @@ +#version 430 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 5, std430) buffer _6_15 +{ + float _m0[]; +} _15; + +layout(binding = 0, std430) buffer _7_16 +{ + float _m0[]; +} _16; + +layout(binding = 1, std430) buffer _8_17 +{ + float _m0[]; +} _17; + +layout(binding = 2, std430) restrict readonly buffer _9_18 +{ + float _m0[]; +} _18; + +layout(binding = 3, std430) restrict readonly buffer _10_19 +{ + float _m0[]; +} _19; + +layout(binding = 4, std430) restrict readonly buffer _11_20 +{ + float _m0[]; +} _20; + +void main() +{ + _15._m0[gl_GlobalInvocationID.x] = (((_16._m0[gl_GlobalInvocationID.x] + _17._m0[gl_GlobalInvocationID.x]) + _18._m0[gl_GlobalInvocationID.x]) + _19._m0[gl_GlobalInvocationID.x]) + _20._m0[gl_GlobalInvocationID.x]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..20db16f5f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,27 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) readonly buffer ssbo +{ + uint _data[]; +} ssbo_1; + +void Load(uint size) +{ + int byteAddrTemp = int(size >> uint(2)); + uvec4 data = uvec4(ssbo_1._data[byteAddrTemp], ssbo_1._data[byteAddrTemp + 1], ssbo_1._data[byteAddrTemp + 2], ssbo_1._data[byteAddrTemp + 3]); +} + +void _main(uvec3 id) +{ + uint param = 4u; + Load(param); +} + +void main() +{ + uvec3 id = gl_GlobalInvocationID; + uvec3 param = id; + _main(param); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/hlsl-functionality.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/hlsl-functionality.asm.comp new file mode 100644 index 000000000..ae3bb1f86 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/hlsl-functionality.asm.comp @@ -0,0 +1,24 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer Buf +{ + vec4 _data[]; +} 
Buf_1; + +layout(std430) buffer Buf_count +{ + int _count; +} Buf_count_1; + +void _main() +{ + int _29 = atomicAdd(Buf_count_1._count, 1); + Buf_1._data[_29] = vec4(1.0); +} + +void main() +{ + _main(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/logical.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/logical.asm.comp new file mode 100644 index 000000000..9ae25f78a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/logical.asm.comp @@ -0,0 +1,56 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + float a; + vec2 b; + vec3 c; + vec4 d; +} s0; + +layout(binding = 1, std430) buffer SSBO1 +{ + float a; + vec2 b; + vec3 c; + vec4 d; +} s1; + +bool and(bool a, bool b) +{ + return !((a && b) || b); +} + +bvec2 and(bvec2 a, bvec2 b) +{ + bvec2 _98 = bvec2(a.x && b.x, a.y && b.y); + return not(bvec2(_98.x || b.x, _98.y || b.y)); +} + +bvec3 and(bvec3 a, bvec3 b) +{ + return bvec3(a.x && b.x, a.y && b.y, a.z && b.z); +} + +bvec4 and(bvec4 a, bvec4 b) +{ + return bvec4(a.x && b.x, a.y && b.y, a.z && b.z, a.w && b.w); +} + +void main() +{ + bool param = isinf(s0.a); + bool param_1 = isnan(s1.a); + bool b0 = and(param, param_1); + bvec2 param_2 = isinf(s0.b); + bvec2 param_3 = isnan(s1.b); + bvec2 b1 = and(param_2, param_3); + bvec3 param_4 = isinf(s0.c); + bvec3 param_5 = isnan(s1.c); + bvec3 b2 = and(param_4, param_5); + bvec4 param_6 = isinf(s0.d); + bvec4 param_7 = isnan(s1.d); + bvec4 b3 = and(param_6, param_7); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/multiple-entry.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/multiple-entry.asm.comp new file mode 100644 index 000000000..6418464f1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/multiple-entry.asm.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) restrict buffer _6_8 +{ + ivec4 _m0; + uvec4 _m1; +} _8; + +layout(binding = 1, std430) restrict buffer _7_9 +{ + uvec4 _m0; + ivec4 _m1; +} _9; + +void main() +{ + _9._m0 = _8._m1 + uvec4(_8._m0); + _9._m0 = uvec4(_8._m0) + _8._m1; + _9._m0 = _8._m1 + _8._m1; + _9._m0 = uvec4(_8._m0 + _8._m0); + _9._m1 = ivec4(_8._m1 + _8._m1); + _9._m1 = _8._m0 + _8._m0; + _9._m1 = ivec4(_8._m1) + _8._m0; + _9._m1 = _8._m0 + ivec4(_8._m1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/op-phi-swap.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/op-phi-swap.asm.comp new file mode 100644 index 000000000..f8c51c045 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/op-phi-swap.asm.comp @@ -0,0 +1,40 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer _3_4 +{ + float _m0[]; +} _4; + +layout(binding = 1, std430) buffer _3_5 +{ + float _m0[]; +} _5; + +void main() +{ + float _26 = 8.5; + bool _34; + float _35; + float _35_copy; + float _36; + _34 = true; + _35 = _4._m0[gl_GlobalInvocationID.x]; + _36 = _26; + for (;;) + { + if (_34) + { + _34 = false; + _35_copy = _35; + _35 = _36; + _36 = _35_copy; + } + else + { + break; + } + } + _5._m0[gl_GlobalInvocationID.x] = _35 - _36; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/quantize.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/quantize.asm.comp new file mode 100644 index 000000000..c08921380 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders/asm/comp/quantize.asm.comp @@ -0,0 +1,19 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + float scalar; + vec2 vec2_val; + vec3 vec3_val; + vec4 vec4_val; +} _4; + +void main() +{ + _4.scalar = unpackHalf2x16(packHalf2x16(vec2(_4.scalar))).x; + _4.vec2_val = unpackHalf2x16(packHalf2x16(_4.vec2_val)); + _4.vec3_val = vec3(unpackHalf2x16(packHalf2x16(_4.vec3_val.xy)), unpackHalf2x16(packHalf2x16(_4.vec3_val.zz)).x); + _4.vec4_val = vec4(unpackHalf2x16(packHalf2x16(_4.vec4_val.xy)), unpackHalf2x16(packHalf2x16(_4.vec4_val.zw))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/specialization-constant-workgroup.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/specialization-constant-workgroup.asm.comp new file mode 100644 index 000000000..8016ebaf9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/specialization-constant-workgroup.asm.comp @@ -0,0 +1,21 @@ +#version 310 es + +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 9u +#endif +#ifndef SPIRV_CROSS_CONSTANT_ID_12 +#define SPIRV_CROSS_CONSTANT_ID_12 4u +#endif + +layout(local_size_x = SPIRV_CROSS_CONSTANT_ID_10, local_size_y = 20, local_size_z = SPIRV_CROSS_CONSTANT_ID_12) in; + +layout(binding = 0, std430) buffer SSBO +{ + float a; +} _4; + +void main() +{ + _4.a += 1.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp new file mode 100644 index 000000000..482cfd8a0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp @@ -0,0 +1,28 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1u +#endif +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 3u +#endif + +layout(local_size_x = SPIRV_CROSS_CONSTANT_ID_0, local_size_y = 2, local_size_z = SPIRV_CROSS_CONSTANT_ID_2) in; + +layout(binding = 0, std430) buffer _6_8 +{ + float _m0[]; +} _8; + +layout(binding = 1, std430) buffer _6_9 +{ + float _m0[]; +} _9; + +uvec3 _22 = gl_WorkGroupSize; + +void main() +{ + _8._m0[gl_WorkGroupID.x] = _9._m0[gl_WorkGroupID.x] + _8._m0[gl_WorkGroupID.x]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/comp/switch-break-ladder.asm.comp b/3rdparty/spirv-cross/reference/shaders/asm/comp/switch-break-ladder.asm.comp new file mode 100644 index 000000000..f326869ce --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/comp/switch-break-ladder.asm.comp @@ -0,0 +1,64 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer BUF +{ + int a; + int b; + int d; +} o; + +void main() +{ + int c = o.a; + int a; + for (;;) + { + bool _22_ladder_break = false; + switch (c) + { + case 5: + { + for (;;) + { + bool _30_ladder_break = false; + switch (o.d) + { + case 10: + case 20: + { + c += c; + _30_ladder_break = true; + break; + } + default: + { + continue; + } + } + if (_30_ladder_break) + { + break; + } + } + break; + } + case 1: + case 2: + case 3: + { + a = c; + _22_ladder_break = true; + break; + } + } + if (_22_ladder_break) + { + break; + } + c++; + continue; + } + o.b = a; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag 
b/3rdparty/spirv-cross/reference/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag new file mode 100644 index 000000000..b5e59f88b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag @@ -0,0 +1,13 @@ +#version 450 + +uniform sampler2D SPIRV_Cross_CombineduTexuSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vUV; + +void main() +{ + FragColor = texture(SPIRV_Cross_CombineduTexuSampler, vUV); + FragColor += textureOffset(SPIRV_Cross_CombineduTexuSampler, vUV, ivec2(1)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag.vk b/3rdparty/spirv-cross/reference/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag.vk new file mode 100644 index 000000000..bce980895 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag.vk @@ -0,0 +1,14 @@ +#version 450 + +layout(set = 0, binding = 1) uniform texture2D uTex; +layout(set = 0, binding = 0) uniform sampler uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vUV; + +void main() +{ + FragColor = texture(sampler2D(uTex, uSampler), vUV); + FragColor += textureOffset(sampler2D(uTex, uSampler), vUV, ivec2(1)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/complex-name-workarounds.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/complex-name-workarounds.asm.frag new file mode 100644 index 000000000..7b120719e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/complex-name-workarounds.asm.frag @@ -0,0 +1,28 @@ +#version 450 + +layout(location = 0) in vec4 _; +layout(location = 1) in vec4 a; +layout(location = 0) out vec4 b; + +vec4 fu_nc_(vec4 a_) +{ + return a_; +} + +vec4 fu_nc_1(vec4 _0_1) +{ + return _0_1; +} + +void main() +{ + vec4 b_1 = _; + vec4 _0_1 = (_ + a) + fu_nc_(b_1); + vec4 b_3 = a; + vec4 b_2 = (_ - a) + fu_nc_1(b_3); + b = _0_1; + b = b_2; + b = _0_1; + b = b_2; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag new file mode 100644 index 000000000..c64818d2b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct SwizzleTest +{ + float a; + float b; +}; + +layout(location = 0) in vec2 foo; +layout(location = 0) out float FooOut; + +void main() +{ + SwizzleTest _22 = SwizzleTest(foo.x, foo.y); + FooOut = _22.a + _22.b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/default-member-names.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/default-member-names.asm.frag new file mode 100644 index 000000000..57d4536c9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/default-member-names.asm.frag @@ -0,0 +1,32 @@ +#version 450 + +struct _9 +{ + float _m0; +}; + +struct _10 +{ + float _m0; + float _m1; + float _m2; + float _m3; + float _m4; + float _m5; + float _m6; + float _m7; + float _m8; + float _m9; + float _m10; + float _m11; + _9 _m12; +}; + +layout(location = 0) out vec4 _3; + +void main() +{ + _10 _21; + _3 = vec4(_21._m0, _21._m1, _21._m2, _21._m3); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/empty-struct.asm.frag new file mode 100644 index 
000000000..7c9d39338 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/empty-struct.asm.frag @@ -0,0 +1,25 @@ +#version 450 + +struct EmptyStructTest +{ + int empty_struct_member; +}; + +float GetValue(EmptyStructTest self) +{ + return 0.0; +} + +float GetValue_1(EmptyStructTest self) +{ + return 0.0; +} + +void main() +{ + EmptyStructTest _23 = EmptyStructTest(0); + EmptyStructTest emptyStruct; + float value = GetValue(emptyStruct); + value = GetValue_1(_23); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/for-loop-phi-only-continue.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/for-loop-phi-only-continue.asm.frag new file mode 100644 index 000000000..feb45db44 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/for-loop-phi-only-continue.asm.frag @@ -0,0 +1,19 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + float _19; + _19 = 0.0; + float _20; + int _23; + for (int _22 = 0; _22 < 16; _19 = _20, _22 = _23) + { + _20 = _19 + 1.0; + _23 = _22 + 1; + continue; + } + FragColor = vec4(_19); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/frem.asm.frag new file mode 100644 index 000000000..1095ab04f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/frem.asm.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + FragColor = vA - vB * trunc(vA / vB); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..676f98680 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,39 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; + +vec4 foo(vec4 foo_1) +{ + return foo_1 + vec4(1.0); +} + +vec4 foo(vec3 foo_1) +{ + return foo_1.xyzz + vec4(1.0); +} + +vec4 foo_1(vec4 foo_2) +{ + return foo_2 + vec4(2.0); +} + +vec4 foo(vec2 foo_2) +{ + return foo_2.xyxy + vec4(2.0); +} + +void main() +{ + highp vec4 foo_3 = vec4(1.0); + vec4 foo_2 = foo(foo_3); + highp vec3 foo_5 = vec3(1.0); + vec4 foo_4 = foo(foo_5); + highp vec4 foo_7 = vec4(1.0); + vec4 foo_6 = foo_1(foo_7); + highp vec2 foo_9 = vec2(1.0); + vec4 foo_8 = foo(foo_9); + FragColor = ((foo_2 + foo_4) + foo_6) + foo_8; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag new file mode 100644 index 000000000..3585285eb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag @@ -0,0 +1,17 @@ +#version 450 + +uniform samplerCubeShadow SPIRV_Cross_CombinedpointLightShadowMapshadowSamplerPCF; + +layout(location = 0) out float _entryPointOutput; + +float _main() +{ + vec4 _33 = vec4(vec3(0.100000001490116119384765625), 0.5); + return textureGrad(SPIRV_Cross_CombinedpointLightShadowMapshadowSamplerPCF, vec4(_33.xyz, _33.w), vec3(0.0), vec3(0.0)); +} + +void main() +{ + _entryPointOutput = _main(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag 
b/3rdparty/spirv-cross/reference/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag new file mode 100644 index 000000000..63856ddd4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag @@ -0,0 +1,27 @@ +#version 450 + +uniform sampler2DArrayShadow SPIRV_Cross_CombinedShadowMapShadowSamplerPCF; + +layout(location = 0) in vec2 texCoords; +layout(location = 1) in float cascadeIndex; +layout(location = 2) in float fragDepth; +layout(location = 0) out vec4 _entryPointOutput; + +vec4 _main(vec2 texCoords_1, float cascadeIndex_1, float fragDepth_1) +{ + vec4 _60 = vec4(vec3(texCoords_1, cascadeIndex_1), fragDepth_1); + float c = textureGrad(SPIRV_Cross_CombinedShadowMapShadowSamplerPCF, vec4(_60.xyz, _60.w), vec2(0.0), vec2(0.0)); + return vec4(c, c, c, c); +} + +void main() +{ + vec2 texCoords_1 = texCoords; + float cascadeIndex_1 = cascadeIndex; + float fragDepth_1 = fragDepth; + vec2 param = texCoords_1; + float param_1 = cascadeIndex_1; + float param_2 = fragDepth_1; + _entryPointOutput = _main(param, param_1, param_2); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..ab2749b4d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uTexture; + +layout(location = 0) out ivec2 Size; + +void main() +{ + Size = textureSize(uTexture, 0) + textureSize(uTexture, 1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag new file mode 100644 index 000000000..60bb78aa5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag @@ -0,0 +1,38 @@ +#version 450 + +uniform sampler2D SPIRV_Cross_CombinedparamSPIRV_Cross_DummySampler; +uniform sampler2D SPIRV_Cross_CombinedSampledImageSPIRV_Cross_DummySampler; +uniform sampler2D SPIRV_Cross_CombinedparamSampler; +uniform sampler2D SPIRV_Cross_CombinedSampledImageSampler; + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 sample_fetch(ivec3 UV, sampler2D SPIRV_Cross_CombinedtexSPIRV_Cross_DummySampler) +{ + return texelFetch(SPIRV_Cross_CombinedtexSPIRV_Cross_DummySampler, UV.xy, UV.z); +} + +vec4 sample_sampler(vec2 UV, sampler2D SPIRV_Cross_CombinedtexSampler) +{ + return texture(SPIRV_Cross_CombinedtexSampler, UV); +} + +vec4 _main(vec4 xIn) +{ + ivec3 coord = ivec3(int(xIn.x * 1280.0), int(xIn.y * 720.0), 0); + ivec3 param = coord; + vec4 value = sample_fetch(param, SPIRV_Cross_CombinedparamSPIRV_Cross_DummySampler); + value += texelFetch(SPIRV_Cross_CombinedSampledImageSPIRV_Cross_DummySampler, coord.xy, coord.z); + vec2 param_1 = xIn.xy; + value += sample_sampler(param_1, SPIRV_Cross_CombinedparamSampler); + value += texture(SPIRV_Cross_CombinedSampledImageSampler, xIn.xy); + return value; +} + +void main() +{ + vec4 xIn = gl_FragCoord; + vec4 param = xIn; + _entryPointOutput = _main(param); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag.vk new file mode 100644 index 000000000..2dab66407 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag.vk @@ -0,0 +1,37 @@ 
+#version 450 +#extension GL_EXT_samplerless_texture_functions : require + +layout(set = 0, binding = 0) uniform sampler Sampler; +layout(set = 0, binding = 0) uniform texture2D SampledImage; + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 sample_fetch(texture2D tex, ivec3 UV) +{ + return texelFetch(tex, UV.xy, UV.z); +} + +vec4 sample_sampler(texture2D tex, vec2 UV) +{ + return texture(sampler2D(tex, Sampler), UV); +} + +vec4 _main(vec4 xIn) +{ + ivec3 coord = ivec3(int(xIn.x * 1280.0), int(xIn.y * 720.0), 0); + ivec3 param = coord; + vec4 value = sample_fetch(SampledImage, param); + value += texelFetch(SampledImage, coord.xy, coord.z); + vec2 param_1 = xIn.xy; + value += sample_sampler(SampledImage, param_1); + value += texture(sampler2D(SampledImage, Sampler), xIn.xy); + return value; +} + +void main() +{ + vec4 xIn = gl_FragCoord; + vec4 param = xIn; + _entryPointOutput = _main(param); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/image-query-no-sampler.vk.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-query-no-sampler.vk.asm.frag new file mode 100644 index 000000000..2040dd1af --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-query-no-sampler.vk.asm.frag @@ -0,0 +1,13 @@ +#version 450 + +uniform sampler2D SPIRV_Cross_CombineduSampler2DSPIRV_Cross_DummySampler; +uniform sampler2DMS SPIRV_Cross_CombineduSampler2DMSSPIRV_Cross_DummySampler; + +void main() +{ + ivec2 b = textureSize(SPIRV_Cross_CombineduSampler2DSPIRV_Cross_DummySampler, 0); + ivec2 c = textureSize(SPIRV_Cross_CombineduSampler2DMSSPIRV_Cross_DummySampler); + int l1 = textureQueryLevels(SPIRV_Cross_CombineduSampler2DSPIRV_Cross_DummySampler); + int s0 = textureSamples(SPIRV_Cross_CombineduSampler2DMSSPIRV_Cross_DummySampler); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/image-query-no-sampler.vk.asm.frag.vk b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-query-no-sampler.vk.asm.frag.vk new file mode 100644 index 000000000..021d3a60d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/image-query-no-sampler.vk.asm.frag.vk @@ -0,0 +1,14 @@ +#version 450 +#extension GL_EXT_samplerless_texture_functions : require + +layout(set = 0, binding = 0) uniform texture2D uSampler2D; +layout(set = 0, binding = 0) uniform texture2DMS uSampler2DMS; + +void main() +{ + ivec2 b = textureSize(uSampler2D, 0); + ivec2 c = textureSize(uSampler2DMS); + int l1 = textureQueryLevels(uSampler2D); + int s0 = textureSamples(uSampler2DMS); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..6bc1be0d0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,39 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uImage; + +layout(location = 0) in vec4 v0; +layout(location = 0) out vec4 FragColor; + +void main() +{ + int i = 0; + float phi; + vec4 _36; + phi = 1.0; + _36 = vec4(1.0, 2.0, 1.0, 2.0); + for (;;) + { + FragColor = _36; + if (i < 4) + { + if (v0[i] > 0.0) + { + vec2 _48 = vec2(phi); + i++; + phi += 2.0; + _36 = textureLod(uImage, _48, 0.0); + continue; + } + else + { + break; + } + } + else + { + break; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/inf-nan-constant-double.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/inf-nan-constant-double.asm.frag new file mode 
100644 index 000000000..d8e29aa40 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/inf-nan-constant-double.asm.frag @@ -0,0 +1,11 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require + +layout(location = 0) out vec3 FragColor; +layout(location = 0) flat in double vTmp; + +void main() +{ + FragColor = vec3(dvec3(uint64BitsToDouble(0x7ff0000000000000ul), uint64BitsToDouble(0xfff0000000000000ul), uint64BitsToDouble(0x7ff8000000000000ul)) + dvec3(vTmp)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..dd4284c9b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out highp vec3 FragColor; + +void main() +{ + FragColor = vec3(uintBitsToFloat(0x7f800000u), uintBitsToFloat(0xff800000u), uintBitsToFloat(0x7fc00000u)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/invalidation.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/invalidation.asm.frag new file mode 100644 index 000000000..db1181804 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/invalidation.asm.frag @@ -0,0 +1,15 @@ +#version 450 + +layout(location = 0) in float v0; +layout(location = 1) in float v1; +layout(location = 0) out float FragColor; + +void main() +{ + float a = v0; + float b = v1; + float _17 = a; + a = v1; + FragColor = (_17 + b) * b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/locations-components.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/locations-components.asm.frag new file mode 100644 index 000000000..95dcd9ceb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/locations-components.asm.frag @@ -0,0 +1,25 @@ +#version 450 + +layout(location = 1) in vec2 _2; +layout(location = 1, component = 2) in float _3; +layout(location = 2) flat in float _4; +layout(location = 2, component = 1) flat in uint _5; +layout(location = 2, component = 2) flat in uint _6; +layout(location = 0) out vec4 o0; +vec4 v1; +vec4 v2; + +void main() +{ + v1 = vec4(_2.x, _2.y, v1.z, v1.w); + v1.z = _3; + v2.x = _4; + v2.y = uintBitsToFloat(_5); + v2.z = uintBitsToFloat(_6); + vec4 r0; + r0.x = intBitsToFloat(floatBitsToInt(v2.y) + floatBitsToInt(v2.z)); + o0.y = float(floatBitsToUint(r0.x)); + o0.x = v1.y + v2.x; + o0 = vec4(o0.x, o0.y, v1.z, v1.x); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag new file mode 100644 index 000000000..e1edccff6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag @@ -0,0 +1,48 @@ +#version 450 + +layout(binding = 0, std140) uniform Foo +{ + layout(row_major) mat4 lightVP[64]; + uint shadowCascadesNum; + int test; +} _11; + +layout(location = 0) in vec3 fragWorld; +layout(location = 0) out int _entryPointOutput; + +mat4 GetClip2TexMatrix() +{ + if (_11.test == 0) + { + return mat4(vec4(0.5, 0.0, 0.0, 0.0), vec4(0.0, 0.5, 0.0, 0.0), vec4(0.0, 0.0, 0.5, 0.0), vec4(0.0, 0.0, 0.0, 1.0)); + } + return mat4(vec4(1.0, 0.0, 0.0, 0.0), vec4(0.0, 1.0, 0.0, 0.0), vec4(0.0, 0.0, 1.0, 0.0), vec4(0.0, 0.0, 0.0, 1.0)); +} + +int GetCascade(vec3 fragWorldPosition) +{ + for (uint 
cascadeIndex = 0u; cascadeIndex < _11.shadowCascadesNum; cascadeIndex++) + { + mat4 worldToShadowMap = GetClip2TexMatrix() * _11.lightVP[cascadeIndex]; + vec4 fragShadowMapPos = worldToShadowMap * vec4(fragWorldPosition, 1.0); + if ((((fragShadowMapPos.z >= 0.0) && (fragShadowMapPos.z <= 1.0)) && (max(fragShadowMapPos.x, fragShadowMapPos.y) <= 1.0)) && (min(fragShadowMapPos.x, fragShadowMapPos.y) >= 0.0)) + { + return int(cascadeIndex); + } + } + return -1; +} + +int _main(vec3 fragWorld_1) +{ + vec3 param = fragWorld_1; + return GetCascade(param); +} + +void main() +{ + vec3 fragWorld_1 = fragWorld; + vec3 param = fragWorld_1; + _entryPointOutput = _main(param); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-header-to-continue.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-header-to-continue.asm.frag new file mode 100644 index 000000000..a99322d67 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-header-to-continue.asm.frag @@ -0,0 +1,43 @@ +#version 450 + +struct Params +{ + vec4 TextureSize; + vec4 Params1; + vec4 Params2; + vec4 Params3; + vec4 Params4; + vec4 Bloom; +}; + +layout(binding = 1, std140) uniform CB1 +{ + Params CB1; +} _8; + +uniform sampler2D SPIRV_Cross_CombinedmapTexturemapSampler; + +layout(location = 0) in vec2 IN_uv; +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + vec4 _49 = texture(SPIRV_Cross_CombinedmapTexturemapSampler, IN_uv); + float _50 = _49.y; + float _55; + float _58; + _55 = 0.0; + _58 = 0.0; + for (int _60 = -3; _60 <= 3; ) + { + float _64 = float(_60); + vec4 _72 = texture(SPIRV_Cross_CombinedmapTexturemapSampler, IN_uv + (vec2(0.0, _8.CB1.TextureSize.w) * _64)); + float _78 = exp(((-_64) * _64) * 0.2222220003604888916015625) * float(abs(_72.y - _50) < clamp((_50 * 80.0) * 0.0007999999797903001308441162109375, 7.999999797903001308441162109375e-05, 0.008000000379979610443115234375)); + _55 += (_72.x * _78); + _58 += _78; + _60++; + continue; + } + _entryPointOutput = vec4(_55 / _58, _50, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-merge-to-continue.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-merge-to-continue.asm.frag new file mode 100644 index 000000000..55db70cf6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/loop-merge-to-continue.asm.frag @@ -0,0 +1,17 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + FragColor = vec4(1.0); + for (int i = 0; i < 4; i++) + { + for (int j = 0; j < 4; j++) + { + FragColor += vec4(v0[(i + j) & 3]); + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..c08bc2c78 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,40 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _46[16] = float[](1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0); +const vec4 _76[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + +layout(location = 0) out float FragColor; +layout(location = 0) flat in mediump int index; + +void main() +{ + vec4 foobar[4] = _76; + vec4 baz[4] = _76; + FragColor = _46[index]; + if (index < 10) + { + FragColor += _46[index ^ 1]; + } + else + { + FragColor += _46[index & 1]; + } + if (index > 
30) + { + FragColor += _76[index & 3].y; + } + else + { + FragColor += _76[index & 1].x; + } + if (index > 30) + { + foobar[1].z = 20.0; + } + FragColor += foobar[index & 3].z; + baz = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + FragColor += baz[index & 3].z; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/multi-for-loop-init.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/multi-for-loop-init.asm.frag new file mode 100644 index 000000000..b5f30a247 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/multi-for-loop-init.asm.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int counter; + +void main() +{ + FragColor = vec4(0.0); + mediump int i = 0; + mediump uint j = 1u; + for (; (i < 10) && (int(j) < int(20u)); i += counter, j += uint(counter)) + { + FragColor += vec4(float(i)); + FragColor += vec4(float(j)); + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/op-constant-null.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/op-constant-null.asm.frag new file mode 100644 index 000000000..970b4c4a6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/op-constant-null.asm.frag @@ -0,0 +1,23 @@ +#version 310 es +precision mediump float; +precision highp int; + +const vec4 _14[4] = vec4[](vec4(0.0), vec4(0.0), vec4(0.0), vec4(0.0)); + +struct D +{ + vec4 a; + float b; +}; + +layout(location = 0) out float FragColor; + +void main() +{ + float a = 0.0; + vec4 b = vec4(0.0); + mat2x3 c = mat2x3(vec3(0.0), vec3(0.0)); + D d = D(vec4(0.0), 0.0); + FragColor = a; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/op-phi-swap-continue-block.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/op-phi-swap-continue-block.asm.frag new file mode 100644 index 000000000..3dae3e161 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/op-phi-swap-continue-block.asm.frag @@ -0,0 +1,25 @@ +#version 450 + +layout(binding = 0, std140) uniform UBO +{ + int uCount; + int uJ; + int uK; +} _5; + +layout(location = 0) out float FragColor; + +void main() +{ + int _23; + int _23_copy; + int _24; + _23 = _5.uK; + _24 = _5.uJ; + for (int _26 = 0; _26 < _5.uCount; _23_copy = _23, _23 = _24, _24 = _23_copy, _26++) + { + continue; + } + FragColor = float(_24 - _23) * float(_5.uJ * _5.uK); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..35ba2e153 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,21 @@ +#version 450 + +struct Registers +{ + float foo; +}; + +uniform Registers registers; + +layout(location = 0) out float FragColor; + +float add_value(float v, float w) +{ + return v + w; +} + +void main() +{ + FragColor = add_value(10.0, registers.foo); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/phi-loop-variable.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/phi-loop-variable.asm.frag new file mode 100644 index 000000000..786ac74de --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/phi-loop-variable.asm.frag @@ -0,0 +1,9 @@ +#version 450 + +void main() +{ + for (int _22 = 35; _22 >= 0; _22--) + { + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag 
b/3rdparty/spirv-cross/reference/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag new file mode 100644 index 000000000..3dc1839d6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag @@ -0,0 +1,28 @@ +#version 450 + +struct Registers +{ + int index; +}; + +uniform Registers registers; + +uniform sampler2D SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[4]; + +layout(location = 0) out vec4 FragColor; + +vec4 sample_from_func(sampler2D SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler_1[4]) +{ + return texelFetch(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler_1[registers.index], ivec2(4), 0); +} + +vec4 sample_one_from_func(sampler2D SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler_1) +{ + return texelFetch(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler_1, ivec2(4), 0); +} + +void main() +{ + FragColor = (texelFetch(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[registers.index], ivec2(10), 0) + sample_from_func(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler)) + sample_one_from_func(SPIRV_Cross_CombineduSamplerSPIRV_Cross_DummySampler[registers.index]); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag new file mode 100644 index 000000000..1ebf8fb96 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag @@ -0,0 +1,20 @@ +#version 450 + +layout(binding = 0, rgba32f) uniform writeonly imageBuffer RWTex; +layout(binding = 1) uniform samplerBuffer Tex; + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 _main() +{ + vec4 storeTemp = vec4(1.0, 2.0, 3.0, 4.0); + imageStore(RWTex, 20, storeTemp); + return texelFetch(Tex, 10); +} + +void main() +{ + vec4 _28 = _main(); + _entryPointOutput = _28; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/selection-merge-to-continue.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/selection-merge-to-continue.asm.frag new file mode 100644 index 000000000..82b5973f8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/selection-merge-to-continue.asm.frag @@ -0,0 +1,23 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + FragColor = vec4(1.0); + for (int i = 0; i < 4; i++) + { + if (v0.x == 20.0) + { + FragColor += vec4(v0[i & 3]); + continue; + } + else + { + FragColor += vec4(v0[i & 1]); + continue; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/srem.asm.frag new file mode 100644 index 000000000..05a3d7554 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/srem.asm.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in ivec4 vA; +layout(location = 1) flat in ivec4 vB; + +void main() +{ + FragColor = vec4(vA - vB * (vA / vB)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag new file mode 100644 index 000000000..b2473f4d0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; 
+precision highp int; + +struct Foo +{ + float var1; + float var2; +}; + +layout(binding = 0) uniform mediump sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; + +Foo _22; + +void main() +{ + FragColor = texture(uSampler, vec2(_22.var1, _22.var2)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/switch-label-shared-block.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/switch-label-shared-block.asm.frag new file mode 100644 index 000000000..ade9044e3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/switch-label-shared-block.asm.frag @@ -0,0 +1,33 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) flat in mediump int vIndex; +layout(location = 0) out float FragColor; + +void main() +{ + highp float _19; + switch (vIndex) + { + case 0: + case 2: + { + _19 = 1.0; + break; + } + case 1: + default: + { + _19 = 3.0; + break; + } + case 8: + { + _19 = 8.0; + break; + } + } + FragColor = _19; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/switch-merge-to-continue.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/switch-merge-to-continue.asm.frag new file mode 100644 index 000000000..88f76cf1d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/switch-merge-to-continue.asm.frag @@ -0,0 +1,30 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(1.0); + for (int i = 0; i < 4; i++) + { + switch (i) + { + case 0: + { + FragColor.x += 1.0; + break; + } + case 1: + { + FragColor.y += 3.0; + break; + } + default: + { + FragColor.z += 3.0; + break; + } + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/temporary-name-alias.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/temporary-name-alias.asm.frag new file mode 100644 index 000000000..927c0434a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/temporary-name-alias.asm.frag @@ -0,0 +1,10 @@ +#version 450 + +void main() +{ + float constituent = float(0); + mat3 _mat3 = mat3(vec3(constituent), vec3(constituent), vec3(constituent)); + float constituent_1 = float(1); + _mat3 = mat3(vec3(constituent_1), vec3(constituent_1), vec3(constituent_1)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/temporary-phi-hoisting.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/temporary-phi-hoisting.asm.frag new file mode 100644 index 000000000..3917594d9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/temporary-phi-hoisting.asm.frag @@ -0,0 +1,26 @@ +#version 450 + +struct MyStruct +{ + vec4 color; +}; + +layout(std140) uniform MyStruct_CB +{ + MyStruct g_MyStruct[4]; +} _6; + +layout(location = 0) out vec4 _entryPointOutput; + +void main() +{ + vec3 _28; + _28 = vec3(0.0); + vec3 _29; + for (int _31 = 0; _31 < 4; _28 = _29, _31++) + { + _29 = _28 + _6.g_MyStruct[_31].color.xyz; + } + _entryPointOutput = vec4(_28, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..6193de0da --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uTexture; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texelFetch(uTexture, ivec2(gl_FragCoord.xy), 0); +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders/asm/frag/undef-variable-store.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/undef-variable-store.asm.frag new file mode 100644 index 000000000..26ad568ad --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/undef-variable-store.asm.frag @@ -0,0 +1,29 @@ +#version 450 + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 _38; +vec4 _47; + +void main() +{ + vec4 _27; + do + { + vec2 _26 = vec2(0.0); + if (_26.x != 0.0) + { + _27 = vec4(1.0, 0.0, 0.0, 1.0); + break; + } + else + { + _27 = vec4(1.0, 1.0, 0.0, 1.0); + break; + } + _27 = _38; + break; + } while (false); + _entryPointOutput = _27; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/unknown-depth-state.asm.vk.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/unknown-depth-state.asm.vk.frag new file mode 100644 index 000000000..7729d30c0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/unknown-depth-state.asm.vk.frag @@ -0,0 +1,23 @@ +#version 450 + +layout(binding = 0) uniform sampler2DShadow uShadow; +uniform sampler2DShadow SPIRV_Cross_CombineduTextureuSampler; + +layout(location = 0) in vec3 vUV; +layout(location = 0) out float FragColor; + +float sample_combined() +{ + return texture(uShadow, vec3(vUV.xy, vUV.z)); +} + +float sample_separate() +{ + return texture(SPIRV_Cross_CombineduTextureuSampler, vec3(vUV.xy, vUV.z)); +} + +void main() +{ + FragColor = sample_combined() + sample_separate(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/unknown-depth-state.asm.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/asm/frag/unknown-depth-state.asm.vk.frag.vk new file mode 100644 index 000000000..711fa2776 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/unknown-depth-state.asm.vk.frag.vk @@ -0,0 +1,24 @@ +#version 450 + +layout(set = 0, binding = 0) uniform sampler2DShadow uShadow; +layout(set = 0, binding = 1) uniform texture2D uTexture; +layout(set = 0, binding = 2) uniform samplerShadow uSampler; + +layout(location = 0) in vec3 vUV; +layout(location = 0) out float FragColor; + +float sample_combined() +{ + return texture(uShadow, vec3(vUV.xy, vUV.z)); +} + +float sample_separate() +{ + return texture(sampler2DShadow(uTexture, uSampler), vec3(vUV.xy, vUV.z)); +} + +void main() +{ + FragColor = sample_combined() + sample_separate(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..8bc88b9f0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/unreachable.asm.frag @@ -0,0 +1,28 @@ +#version 450 + +layout(location = 0) flat in int counter; +layout(location = 0) out vec4 FragColor; + +vec4 _21; + +void main() +{ + vec4 _24; + _24 = _21; + vec4 _33; + for (;;) + { + if (counter == 10) + { + _33 = vec4(10.0); + break; + } + else + { + _33 = vec4(30.0); + break; + } + } + FragColor = _33; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/frag/vector-shuffle-oom.asm.frag b/3rdparty/spirv-cross/reference/shaders/asm/frag/vector-shuffle-oom.asm.frag new file mode 100644 index 000000000..cdaf78727 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/frag/vector-shuffle-oom.asm.frag @@ -0,0 +1,328 @@ +#version 450 + +struct _28 +{ + vec4 _m0; +}; + +layout(binding = 0, std140) uniform _6_7 +{ + vec4 _m0; + float _m1; + vec4 _m2; +} _7; + +layout(binding = 2, std140) uniform _10_11 +{ + vec3 _m0; + vec3 _m1; + 
float _m2; + vec3 _m3; + float _m4; + vec3 _m5; + float _m6; + vec3 _m7; + float _m8; + vec3 _m9; + float _m10; + vec3 _m11; + float _m12; + vec2 _m13; + vec2 _m14; + vec3 _m15; + float _m16; + float _m17; + float _m18; + float _m19; + float _m20; + vec4 _m21; + vec4 _m22; + layout(row_major) mat4 _m23; + vec4 _m24; +} _11; + +layout(binding = 1, std140) uniform _18_19 +{ + layout(row_major) mat4 _m0; + layout(row_major) mat4 _m1; + layout(row_major) mat4 _m2; + layout(row_major) mat4 _m3; + vec4 _m4; + vec4 _m5; + float _m6; + float _m7; + float _m8; + float _m9; + vec3 _m10; + float _m11; + vec3 _m12; + float _m13; + vec3 _m14; + float _m15; + vec3 _m16; + float _m17; + float _m18; + float _m19; + vec2 _m20; + vec2 _m21; + vec2 _m22; + vec4 _m23; + vec2 _m24; + vec2 _m25; + vec2 _m26; + vec3 _m27; + float _m28; + float _m29; + float _m30; + float _m31; + float _m32; + vec2 _m33; + float _m34; + float _m35; + vec3 _m36; + layout(row_major) mat4 _m37[2]; + vec4 _m38[2]; +} _19; + +uniform sampler2D SPIRV_Cross_Combined; +uniform sampler2D SPIRV_Cross_Combined_1; +uniform sampler2D SPIRV_Cross_Combined_2; + +layout(location = 0) out vec4 _5; + +_28 _74; + +void main() +{ + _28 _77 = _74; + _77._m0 = vec4(0.0); + vec2 _82 = gl_FragCoord.xy * _19._m23.xy; + vec4 _88 = _7._m2 * _7._m0.xyxy; + vec2 _97 = clamp(_82 + (vec3(0.0, -2.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _109 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _97, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _113 = textureLod(SPIRV_Cross_Combined_1, _97, 0.0); + vec3 _129; + if (_113.y > 0.0) + { + _129 = _109 + (textureLod(SPIRV_Cross_Combined_2, _97, 0.0).xyz * clamp(_113.y * _113.z, 0.0, 1.0)); + } + else + { + _129 = _109; + } + vec3 _130 = _129 * 0.5; + vec3 _133 = vec4(0.0).xyz + _130; + vec4 _134 = vec4(_133.x, _133.y, _133.z, vec4(0.0).w); + _28 _135 = _77; + _135._m0 = _134; + vec2 _144 = clamp(_82 + (vec3(-1.0, -1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _156 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _144, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _160 = textureLod(SPIRV_Cross_Combined_1, _144, 0.0); + vec3 _176; + if (_160.y > 0.0) + { + _176 = _156 + (textureLod(SPIRV_Cross_Combined_2, _144, 0.0).xyz * clamp(_160.y * _160.z, 0.0, 1.0)); + } + else + { + _176 = _156; + } + vec3 _177 = _176 * 0.5; + vec3 _180 = _134.xyz + _177; + vec4 _181 = vec4(_180.x, _180.y, _180.z, _134.w); + _28 _182 = _135; + _182._m0 = _181; + vec2 _191 = clamp(_82 + (vec3(0.0, -1.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _203 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _191, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _207 = textureLod(SPIRV_Cross_Combined_1, _191, 0.0); + vec3 _223; + if (_207.y > 0.0) + { + _223 = _203 + (textureLod(SPIRV_Cross_Combined_2, _191, 0.0).xyz * clamp(_207.y * _207.z, 0.0, 1.0)); + } + else + { + _223 = _203; + } + vec3 _224 = _223 * 0.75; + vec3 _227 = _181.xyz + _224; + vec4 _228 = vec4(_227.x, _227.y, _227.z, _181.w); + _28 _229 = _182; + _229._m0 = _228; + vec2 _238 = clamp(_82 + (vec3(1.0, -1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _250 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _238, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _254 = textureLod(SPIRV_Cross_Combined_1, _238, 0.0); + vec3 _270; + if (_254.y > 0.0) + { + _270 = _250 + (textureLod(SPIRV_Cross_Combined_2, _238, 0.0).xyz * clamp(_254.y * _254.z, 0.0, 1.0)); + } + else + { + _270 = _250; + } + vec3 _271 = _270 * 0.5; + vec3 _274 = _228.xyz + _271; + vec4 _275 = vec4(_274.x, _274.y, _274.z, _228.w); + _28 _276 = _229; + 
_276._m0 = _275; + vec2 _285 = clamp(_82 + (vec3(-2.0, 0.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _297 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _285, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _301 = textureLod(SPIRV_Cross_Combined_1, _285, 0.0); + vec3 _317; + if (_301.y > 0.0) + { + _317 = _297 + (textureLod(SPIRV_Cross_Combined_2, _285, 0.0).xyz * clamp(_301.y * _301.z, 0.0, 1.0)); + } + else + { + _317 = _297; + } + vec3 _318 = _317 * 0.5; + vec3 _321 = _275.xyz + _318; + vec4 _322 = vec4(_321.x, _321.y, _321.z, _275.w); + _28 _323 = _276; + _323._m0 = _322; + vec2 _332 = clamp(_82 + (vec3(-1.0, 0.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _344 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _332, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _348 = textureLod(SPIRV_Cross_Combined_1, _332, 0.0); + vec3 _364; + if (_348.y > 0.0) + { + _364 = _344 + (textureLod(SPIRV_Cross_Combined_2, _332, 0.0).xyz * clamp(_348.y * _348.z, 0.0, 1.0)); + } + else + { + _364 = _344; + } + vec3 _365 = _364 * 0.75; + vec3 _368 = _322.xyz + _365; + vec4 _369 = vec4(_368.x, _368.y, _368.z, _322.w); + _28 _370 = _323; + _370._m0 = _369; + vec2 _379 = clamp(_82 + (vec3(0.0, 0.0, 1.0).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _391 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _379, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _395 = textureLod(SPIRV_Cross_Combined_1, _379, 0.0); + vec3 _411; + if (_395.y > 0.0) + { + _411 = _391 + (textureLod(SPIRV_Cross_Combined_2, _379, 0.0).xyz * clamp(_395.y * _395.z, 0.0, 1.0)); + } + else + { + _411 = _391; + } + vec3 _412 = _411 * 1.0; + vec3 _415 = _369.xyz + _412; + vec4 _416 = vec4(_415.x, _415.y, _415.z, _369.w); + _28 _417 = _370; + _417._m0 = _416; + vec2 _426 = clamp(_82 + (vec3(1.0, 0.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _438 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _426, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _442 = textureLod(SPIRV_Cross_Combined_1, _426, 0.0); + vec3 _458; + if (_442.y > 0.0) + { + _458 = _438 + (textureLod(SPIRV_Cross_Combined_2, _426, 0.0).xyz * clamp(_442.y * _442.z, 0.0, 1.0)); + } + else + { + _458 = _438; + } + vec3 _459 = _458 * 0.75; + vec3 _462 = _416.xyz + _459; + vec4 _463 = vec4(_462.x, _462.y, _462.z, _416.w); + _28 _464 = _417; + _464._m0 = _463; + vec2 _473 = clamp(_82 + (vec3(2.0, 0.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _485 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _473, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _489 = textureLod(SPIRV_Cross_Combined_1, _473, 0.0); + vec3 _505; + if (_489.y > 0.0) + { + _505 = _485 + (textureLod(SPIRV_Cross_Combined_2, _473, 0.0).xyz * clamp(_489.y * _489.z, 0.0, 1.0)); + } + else + { + _505 = _485; + } + vec3 _506 = _505 * 0.5; + vec3 _509 = _463.xyz + _506; + vec4 _510 = vec4(_509.x, _509.y, _509.z, _463.w); + _28 _511 = _464; + _511._m0 = _510; + vec2 _520 = clamp(_82 + (vec3(-1.0, 1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _532 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _520, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _536 = textureLod(SPIRV_Cross_Combined_1, _520, 0.0); + vec3 _552; + if (_536.y > 0.0) + { + _552 = _532 + (textureLod(SPIRV_Cross_Combined_2, _520, 0.0).xyz * clamp(_536.y * _536.z, 0.0, 1.0)); + } + else + { + _552 = _532; + } + vec3 _553 = _552 * 0.5; + vec3 _556 = _510.xyz + _553; + vec4 _557 = vec4(_556.x, _556.y, _556.z, _510.w); + _28 _558 = _511; + _558._m0 = _557; + vec2 _567 = clamp(_82 + (vec3(0.0, 1.0, 0.75).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _579 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _567, 0.0).w * 
_7._m1, 0.0, 1.0); + vec4 _583 = textureLod(SPIRV_Cross_Combined_1, _567, 0.0); + vec3 _599; + if (_583.y > 0.0) + { + _599 = _579 + (textureLod(SPIRV_Cross_Combined_2, _567, 0.0).xyz * clamp(_583.y * _583.z, 0.0, 1.0)); + } + else + { + _599 = _579; + } + vec3 _600 = _599 * 0.75; + vec3 _603 = _557.xyz + _600; + vec4 _604 = vec4(_603.x, _603.y, _603.z, _557.w); + _28 _605 = _558; + _605._m0 = _604; + vec2 _614 = clamp(_82 + (vec3(1.0, 1.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _626 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _614, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _630 = textureLod(SPIRV_Cross_Combined_1, _614, 0.0); + vec3 _646; + if (_630.y > 0.0) + { + _646 = _626 + (textureLod(SPIRV_Cross_Combined_2, _614, 0.0).xyz * clamp(_630.y * _630.z, 0.0, 1.0)); + } + else + { + _646 = _626; + } + vec3 _647 = _646 * 0.5; + vec3 _650 = _604.xyz + _647; + vec4 _651 = vec4(_650.x, _650.y, _650.z, _604.w); + _28 _652 = _605; + _652._m0 = _651; + vec2 _661 = clamp(_82 + (vec3(0.0, 2.0, 0.5).xy * _7._m0.xy), _88.xy, _88.zw); + vec3 _673 = _11._m5 * clamp(textureLod(SPIRV_Cross_Combined, _661, 0.0).w * _7._m1, 0.0, 1.0); + vec4 _677 = textureLod(SPIRV_Cross_Combined_1, _661, 0.0); + vec3 _693; + if (_677.y > 0.0) + { + _693 = _673 + (textureLod(SPIRV_Cross_Combined_2, _661, 0.0).xyz * clamp(_677.y * _677.z, 0.0, 1.0)); + } + else + { + _693 = _673; + } + vec3 _697 = _651.xyz + (_693 * 0.5); + vec4 _698 = vec4(_697.x, _697.y, _697.z, _651.w); + _28 _699 = _652; + _699._m0 = _698; + vec3 _702 = _698.xyz / vec3(((((((((((((0.0 + 0.5) + 0.5) + 0.75) + 0.5) + 0.5) + 0.75) + 1.0) + 0.75) + 0.5) + 0.5) + 0.75) + 0.5) + 0.5); + _28 _704 = _699; + _704._m0 = vec4(_702.x, _702.y, _702.z, _698.w); + _28 _705 = _704; + _705._m0.w = 1.0; + _5 = _705._m0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/geom/block-name-namespace.asm.geom b/3rdparty/spirv-cross/reference/shaders/asm/geom/block-name-namespace.asm.geom new file mode 100644 index 000000000..417cf3421 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/geom/block-name-namespace.asm.geom @@ -0,0 +1,33 @@ +#version 450 +layout(triangles) in; +layout(max_vertices = 4, triangle_strip) out; + +layout(binding = 0, std140) uniform VertexInput +{ + vec4 a; +} VertexInput_1; + +layout(binding = 0, std430) buffer VertexInput +{ + vec4 b; +} VertexInput_2; + +layout(location = 0) out VertexInput +{ + vec4 vColor; +} VertexInput_3; + +layout(location = 0) in VertexInput +{ + vec4 vColor; +} vin[3]; + + +void main() +{ + vec4 VertexInput_4 = vec4(1.0); + gl_Position = (VertexInput_4 + VertexInput_1.a) + VertexInput_2.b; + VertexInput_3.vColor = vin[0].vColor; + EmitVertex(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/geom/inout-split-access-chain-handle.asm.geom b/3rdparty/spirv-cross/reference/shaders/asm/geom/inout-split-access-chain-handle.asm.geom new file mode 100644 index 000000000..71082099e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/geom/inout-split-access-chain-handle.asm.geom @@ -0,0 +1,23 @@ +#version 440 +layout(triangles) in; +layout(max_vertices = 5, triangle_strip) out; + +struct Data +{ + vec4 ApiPerspectivePosition; +}; + +void Copy(inout Data inputStream[3]) +{ + inputStream[0].ApiPerspectivePosition = gl_in[0].gl_Position; +} + +void main() +{ + Data inputStream[3]; + Data param[3] = inputStream; + Copy(param); + inputStream = param; + gl_Position = inputStream[0].ApiPerspectivePosition; +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders/asm/geom/split-access-chain-input.asm.geom b/3rdparty/spirv-cross/reference/shaders/asm/geom/split-access-chain-input.asm.geom new file mode 100644 index 000000000..511d87fcb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/geom/split-access-chain-input.asm.geom @@ -0,0 +1,9 @@ +#version 440 +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +void main() +{ + gl_Position = gl_in[0].gl_Position; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/geom/store-uint-layer.invalid.asm.geom b/3rdparty/spirv-cross/reference/shaders/asm/geom/store-uint-layer.invalid.asm.geom new file mode 100644 index 000000000..c768d5da8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/geom/store-uint-layer.invalid.asm.geom @@ -0,0 +1,41 @@ +#version 450 +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +struct VertexOutput +{ + vec4 pos; +}; + +struct GeometryOutput +{ + vec4 pos; + uint layer; +}; + +void _main(VertexOutput _input[3], GeometryOutput stream) +{ + GeometryOutput _output; + _output.layer = 1u; + for (int v = 0; v < 3; v++) + { + _output.pos = _input[v].pos; + gl_Position = _output.pos; + gl_Layer = int(_output.layer); + EmitVertex(); + } + EndPrimitive(); +} + +void main() +{ + VertexOutput _input[3]; + _input[0].pos = gl_in[0].gl_Position; + _input[1].pos = gl_in[1].gl_Position; + _input[2].pos = gl_in[2].gl_Position; + VertexOutput param[3] = _input; + GeometryOutput param_1; + _main(param, param_1); + GeometryOutput stream = param_1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc b/3rdparty/spirv-cross/reference/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc new file mode 100644 index 000000000..8cb7a4e64 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc @@ -0,0 +1,79 @@ +#version 450 +layout(vertices = 3) out; + +struct VertexOutput +{ + vec4 pos; + vec2 uv; +}; + +struct HSOut +{ + vec4 pos; + vec2 uv; +}; + +struct HSConstantOut +{ + float EdgeTess[3]; + float InsideTess; +}; + +struct VertexOutput_1 +{ + vec2 uv; +}; + +struct HSOut_1 +{ + vec2 uv; +}; + +layout(location = 0) in VertexOutput_1 p[]; +layout(location = 0) out HSOut_1 _entryPointOutput[3]; + +HSOut _hs_main(VertexOutput p_1[3], uint i) +{ + HSOut _output; + _output.pos = p_1[i].pos; + _output.uv = p_1[i].uv; + return _output; +} + +HSConstantOut PatchHS(VertexOutput _patch[3]) +{ + HSConstantOut _output; + _output.EdgeTess[0] = (vec2(1.0) + _patch[0].uv).x; + _output.EdgeTess[1] = (vec2(1.0) + _patch[0].uv).x; + _output.EdgeTess[2] = (vec2(1.0) + _patch[0].uv).x; + _output.InsideTess = (vec2(1.0) + _patch[0].uv).x; + return _output; +} + +void main() +{ + VertexOutput p_1[3]; + p_1[0].pos = gl_in[0].gl_Position; + p_1[0].uv = p[0].uv; + p_1[1].pos = gl_in[1].gl_Position; + p_1[1].uv = p[1].uv; + p_1[2].pos = gl_in[2].gl_Position; + p_1[2].uv = p[2].uv; + uint i = gl_InvocationID; + VertexOutput param[3] = p_1; + uint param_1 = i; + HSOut flattenTemp = _hs_main(param, param_1); + gl_out[gl_InvocationID].gl_Position = flattenTemp.pos; + _entryPointOutput[gl_InvocationID].uv = flattenTemp.uv; + barrier(); + if (int(gl_InvocationID) == 0) + { + VertexOutput param_2[3] = p_1; + HSConstantOut _patchConstantResult = PatchHS(param_2); + gl_TessLevelOuter[0] = _patchConstantResult.EdgeTess[0]; + gl_TessLevelOuter[1] = _patchConstantResult.EdgeTess[1]; 
+ gl_TessLevelOuter[2] = _patchConstantResult.EdgeTess[2]; + gl_TessLevelInner[0] = _patchConstantResult.InsideTess; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/empty-io.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/empty-io.asm.vert new file mode 100644 index 000000000..cc432cb89 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/empty-io.asm.vert @@ -0,0 +1,34 @@ +#version 450 + +struct VSInput +{ + vec4 position; +}; + +struct VSOutput +{ + vec4 position; +}; + +struct VSOutput_1 +{ + int empty_struct_member; +}; + +layout(location = 0) in vec4 position; + +VSOutput _main(VSInput _input) +{ + VSOutput _out; + _out.position = _input.position; + return _out; +} + +void main() +{ + VSInput _input; + _input.position = position; + VSInput param = _input; + gl_Position = _main(param).position; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/global-builtin.sso.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/global-builtin.sso.asm.vert new file mode 100644 index 000000000..757882701 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/global-builtin.sso.asm.vert @@ -0,0 +1,35 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +struct VSOut +{ + float a; + vec4 pos; +}; + +struct VSOut_1 +{ + float a; +}; + +layout(location = 0) out VSOut_1 _entryPointOutput; + +VSOut _main() +{ + VSOut vout; + vout.a = 40.0; + vout.pos = vec4(1.0); + return vout; +} + +void main() +{ + VSOut flattenTemp = _main(); + _entryPointOutput.a = flattenTemp.a; + gl_Position = flattenTemp.pos; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant-block.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant-block.asm.vert new file mode 100644 index 000000000..9b2f05a8b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant-block.asm.vert @@ -0,0 +1,9 @@ +#version 450 + +invariant gl_Position; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant-block.sso.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant-block.sso.asm.vert new file mode 100644 index 000000000..eb8869419 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant-block.sso.asm.vert @@ -0,0 +1,17 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; + float gl_PointSize; + float gl_ClipDistance[1]; + float gl_CullDistance[1]; +}; + +invariant gl_Position; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant.asm.vert new file mode 100644 index 000000000..c4c6ad6ba --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant.asm.vert @@ -0,0 +1,15 @@ +#version 450 + +invariant gl_Position; + +vec4 _main() +{ + return vec4(1.0); +} + +void main() +{ + vec4 _14 = _main(); + gl_Position = _14; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant.sso.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant.sso.asm.vert new file mode 100644 index 000000000..c486be184 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/invariant.sso.asm.vert @@ -0,0 +1,20 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +invariant gl_Position; + +vec4 _main() +{ + return vec4(1.0); +} + +void main() +{ + vec4 _14 = _main(); + gl_Position = _14; +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert new file mode 100644 index 000000000..46e998d7f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert @@ -0,0 +1,34 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_201 +#define SPIRV_CROSS_CONSTANT_ID_201 -10 +#endif +const int _7 = SPIRV_CROSS_CONSTANT_ID_201; +#ifndef SPIRV_CROSS_CONSTANT_ID_202 +#define SPIRV_CROSS_CONSTANT_ID_202 100u +#endif +const uint _8 = SPIRV_CROSS_CONSTANT_ID_202; +#ifndef SPIRV_CROSS_CONSTANT_ID_200 +#define SPIRV_CROSS_CONSTANT_ID_200 3.141590118408203125 +#endif +const float _9 = SPIRV_CROSS_CONSTANT_ID_200; +const int _20 = (_7 + 2); +const uint _25 = (_8 % 5u); +const ivec4 _30 = ivec4(20, 30, _20, _20); +const ivec2 _32 = ivec2(_30.y, _30.x); +const int _33 = _30.y; + +layout(location = 0) flat out int _4; + +void main() +{ + vec4 pos = vec4(0.0); + pos.y += float(_20); + pos.z += float(_25); + pos += vec4(_30); + vec2 _56 = pos.xy + vec2(_32); + pos = vec4(_56.x, _56.y, pos.z, pos.w); + gl_Position = pos; + _4 = _33; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert.vk b/3rdparty/spirv-cross/reference/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert.vk new file mode 100644 index 000000000..d308693aa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert.vk @@ -0,0 +1,25 @@ +#version 450 + +layout(constant_id = 201) const int _7 = -10; +layout(constant_id = 202) const uint _8 = 100u; +layout(constant_id = 200) const float _9 = 3.141590118408203125; +const int _20 = (_7 + 2); +const uint _25 = (_8 % 5u); +const ivec4 _30 = ivec4(20, 30, _20, _20); +const ivec2 _32 = ivec2(_30.y, _30.x); +const int _33 = _30.y; + +layout(location = 0) flat out int _4; + +void main() +{ + vec4 pos = vec4(0.0); + pos.y += float(_20); + pos.z += float(_25); + pos += vec4(_30); + vec2 _56 = pos.xy + vec2(_32); + pos = vec4(_56.x, _56.y, pos.z, pos.w); + gl_Position = pos; + _4 = _33; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/reference/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..31f13bd77 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,18 @@ +#version 450 + +uniform int SPIRV_Cross_BaseInstance; + +vec4 _main(uint vid, uint iid) +{ + return vec4(float(vid + iid)); +} + +void main() +{ + uint vid = uint(gl_VertexID); + uint iid = uint((gl_InstanceID + SPIRV_Cross_BaseInstance)); + uint param = vid; + uint param_1 = iid; + gl_Position = _main(param, param_1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/atomic.comp b/3rdparty/spirv-cross/reference/shaders/comp/atomic.comp new file mode 100644 index 000000000..89b1351c0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/atomic.comp @@ -0,0 +1,49 @@ +#version 310 es +#extension GL_OES_shader_image_atomic : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 2, std430) buffer SSBO +{ + uint u32; + int i32; +} ssbo; + +layout(binding = 0, r32ui) uniform highp uimage2D uImage; +layout(binding = 1, r32i) uniform highp iimage2D iImage; + +void main() +{ + uint _19 = imageAtomicAdd(uImage, ivec2(1, 5), 1u); + uint _27 = 
imageAtomicAdd(uImage, ivec2(1, 5), 1u); + imageStore(iImage, ivec2(1, 6), ivec4(int(_27))); + uint _32 = imageAtomicOr(uImage, ivec2(1, 5), 1u); + uint _34 = imageAtomicXor(uImage, ivec2(1, 5), 1u); + uint _36 = imageAtomicAnd(uImage, ivec2(1, 5), 1u); + uint _38 = imageAtomicMin(uImage, ivec2(1, 5), 1u); + uint _40 = imageAtomicMax(uImage, ivec2(1, 5), 1u); + uint _44 = imageAtomicCompSwap(uImage, ivec2(1, 5), 10u, 2u); + int _47 = imageAtomicAdd(iImage, ivec2(1, 6), 1); + int _49 = imageAtomicOr(iImage, ivec2(1, 6), 1); + int _51 = imageAtomicXor(iImage, ivec2(1, 6), 1); + int _53 = imageAtomicAnd(iImage, ivec2(1, 6), 1); + int _55 = imageAtomicMin(iImage, ivec2(1, 6), 1); + int _57 = imageAtomicMax(iImage, ivec2(1, 6), 1); + int _61 = imageAtomicCompSwap(iImage, ivec2(1, 5), 10, 2); + uint _68 = atomicAdd(ssbo.u32, 1u); + uint _70 = atomicOr(ssbo.u32, 1u); + uint _72 = atomicXor(ssbo.u32, 1u); + uint _74 = atomicAnd(ssbo.u32, 1u); + uint _76 = atomicMin(ssbo.u32, 1u); + uint _78 = atomicMax(ssbo.u32, 1u); + uint _80 = atomicExchange(ssbo.u32, 1u); + uint _82 = atomicCompSwap(ssbo.u32, 10u, 2u); + int _85 = atomicAdd(ssbo.i32, 1); + int _87 = atomicOr(ssbo.i32, 1); + int _89 = atomicXor(ssbo.i32, 1); + int _91 = atomicAnd(ssbo.i32, 1); + int _93 = atomicMin(ssbo.i32, 1); + int _95 = atomicMax(ssbo.i32, 1); + int _97 = atomicExchange(ssbo.i32, 1); + int _99 = atomicCompSwap(ssbo.i32, 10, 2); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/bake_gradient.comp b/3rdparty/spirv-cross/reference/shaders/comp/bake_gradient.comp new file mode 100644 index 000000000..7b0bb34c6 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/bake_gradient.comp @@ -0,0 +1,39 @@ +#version 310 es +layout(local_size_x = 8, local_size_y = 8, local_size_z = 1) in; + +layout(binding = 4, std140) uniform UBO +{ + vec4 uInvSize; + vec4 uScale; +} _46; + +layout(binding = 0) uniform mediump sampler2D uHeight; +layout(binding = 1) uniform mediump sampler2D uDisplacement; +layout(binding = 2, rgba16f) uniform writeonly mediump image2D iHeightDisplacement; +layout(binding = 3, rgba16f) uniform writeonly mediump image2D iGradJacobian; + +mediump float jacobian(mediump vec2 dDdx, mediump vec2 dDdy) +{ + return ((1.0 + dDdx.x) * (1.0 + dDdy.y)) - (dDdx.y * dDdy.x); +} + +void main() +{ + vec4 uv = (vec2(gl_GlobalInvocationID.xy) * _46.uInvSize.xy).xyxy + (_46.uInvSize * 0.5); + float h = textureLod(uHeight, uv.xy, 0.0).x; + float x0 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(-1, 0)).x; + float x1 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(1, 0)).x; + float y0 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(0, -1)).x; + float y1 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(0, 1)).x; + vec2 grad = (_46.uScale.xy * 0.5) * vec2(x1 - x0, y1 - y0); + vec2 displacement = textureLod(uDisplacement, uv.zw, 0.0).xy * 1.2000000476837158203125; + vec2 dDdx = (textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(1, 0)).xy - textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(-1, 0)).xy) * 0.60000002384185791015625; + vec2 dDdy = (textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(0, 1)).xy - textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(0, -1)).xy) * 0.60000002384185791015625; + vec2 param = dDdx * _46.uScale.z; + vec2 param_1 = dDdy * _46.uScale.z; + float j = jacobian(param, param_1); + displacement = vec2(0.0); + imageStore(iHeightDisplacement, ivec2(gl_GlobalInvocationID.xy), vec4(h, displacement, 0.0)); + imageStore(iGradJacobian, ivec2(gl_GlobalInvocationID.xy), vec4(grad, j, 0.0)); +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders/comp/barriers.comp b/3rdparty/spirv-cross/reference/shaders/comp/barriers.comp new file mode 100644 index 000000000..a1b975de8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/barriers.comp @@ -0,0 +1,83 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +void barrier_shared() +{ + memoryBarrierShared(); +} + +void full_barrier() +{ + memoryBarrier(); +} + +void image_barrier() +{ + memoryBarrierImage(); +} + +void buffer_barrier() +{ + memoryBarrierBuffer(); +} + +void group_barrier() +{ + groupMemoryBarrier(); +} + +void barrier_shared_exec() +{ + memoryBarrierShared(); + barrier(); +} + +void full_barrier_exec() +{ + memoryBarrier(); + memoryBarrierShared(); + barrier(); +} + +void image_barrier_exec() +{ + memoryBarrierImage(); + memoryBarrierShared(); + barrier(); +} + +void buffer_barrier_exec() +{ + memoryBarrierBuffer(); + memoryBarrierShared(); + barrier(); +} + +void group_barrier_exec() +{ + groupMemoryBarrier(); + memoryBarrierShared(); + barrier(); +} + +void exec_barrier() +{ + memoryBarrierShared(); + barrier(); +} + +void main() +{ + barrier_shared(); + full_barrier(); + image_barrier(); + buffer_barrier(); + group_barrier(); + barrier_shared_exec(); + full_barrier_exec(); + image_barrier_exec(); + buffer_barrier_exec(); + group_barrier_exec(); + exec_barrier(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/basic.comp b/3rdparty/spirv-cross/reference/shaders/comp/basic.comp new file mode 100644 index 000000000..148508995 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/basic.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 in_data[]; +} _23; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _45; + +layout(binding = 2, std430) buffer SSBO3 +{ + uint counter; +} _48; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idata = _23.in_data[ident]; + if (dot(idata, vec4(1.0, 5.0, 6.0, 2.0)) > 8.19999980926513671875) + { + uint _52 = atomicAdd(_48.counter, 1u); + _45.out_data[_52] = idata; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/bitcast-16bit-1.invalid.comp b/3rdparty/spirv-cross/reference/shaders/comp/bitcast-16bit-1.invalid.comp new file mode 100644 index 000000000..7420586f0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/bitcast-16bit-1.invalid.comp @@ -0,0 +1,38 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. 
+#endif +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + i16vec4 inputs[]; +} _25; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +} _39; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + f16vec2 a = int16BitsToFloat16(_25.inputs[ident].xy); + _39.outputs[ident].x = int(packFloat2x16(a + f16vec2(float16_t(1), float16_t(1)))); + _39.outputs[ident].y = packInt2x16(_25.inputs[ident].zw); + _39.outputs[ident].z = int(packUint2x16(u16vec2(_25.inputs[ident].xy))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/bitcast-16bit-2.invalid.comp b/3rdparty/spirv-cross/reference/shaders/comp/bitcast-16bit-2.invalid.comp new file mode 100644 index 000000000..416bc4a2e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/bitcast-16bit-2.invalid.comp @@ -0,0 +1,43 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) buffer SSBO1 +{ + i16vec4 outputs[]; +} _21; + +layout(binding = 0, std430) buffer SSBO0 +{ + ivec4 inputs[]; +} _29; + +layout(binding = 2, std140) uniform UBO +{ + f16vec4 const0; +} _40; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + i16vec2 _47 = unpackInt2x16(_29.inputs[ident].x) + float16BitsToInt16(_40.const0.xy); + _21.outputs[ident] = i16vec4(_47.x, _47.y, _21.outputs[ident].z, _21.outputs[ident].w); + i16vec2 _66 = i16vec2(unpackUint2x16(uint(_29.inputs[ident].y)) - float16BitsToUint16(_40.const0.zw)); + _21.outputs[ident] = i16vec4(_21.outputs[ident].x, _21.outputs[ident].y, _66.x, _66.y); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/casts.comp b/3rdparty/spirv-cross/reference/shaders/comp/casts.comp new file mode 100644 index 000000000..973668676 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/casts.comp @@ -0,0 +1,19 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +} _21; + +layout(binding = 0, std430) buffer SSBO0 +{ + ivec4 inputs[]; +} _27; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + _21.outputs[ident] = mix(ivec4(0), ivec4(1), notEqual((_27.inputs[ident] & ivec4(3)), ivec4(uvec4(0u)))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/cfg-preserve-parameter.comp b/3rdparty/spirv-cross/reference/shaders/comp/cfg-preserve-parameter.comp new file mode 100644 index 000000000..72bde44a9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/cfg-preserve-parameter.comp @@ -0,0 +1,74 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +void out_test_0(int cond, out int i) +{ + if (cond == 0) + { + i = 40; + } + else + { + i = 60; + } +} + +void out_test_1(int cond, out int i) +{ + switch (cond) + { + case 40: + { + i = 40; + break; + } + default: + { + i = 70; + break; + } + } +} + +void inout_test_0(int cond, inout int i) +{ + if (cond == 0) + 
{ + i = 40; + } +} + +void inout_test_1(int cond, inout int i) +{ + switch (cond) + { + case 40: + { + i = 40; + break; + } + } +} + +void main() +{ + int cond = 40; + int i = 50; + int param = cond; + int param_1 = i; + out_test_0(param, param_1); + i = param_1; + int param_2 = cond; + int param_3 = i; + out_test_1(param_2, param_3); + i = param_3; + int param_4 = cond; + int param_5 = i; + inout_test_0(param_4, param_5); + i = param_5; + int param_6 = cond; + int param_7 = i; + inout_test_1(param_6, param_7); + i = param_7; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/cfg.comp b/3rdparty/spirv-cross/reference/shaders/comp/cfg.comp new file mode 100644 index 000000000..77ad312cd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/cfg.comp @@ -0,0 +1,81 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + float data; +} _11; + +void test() +{ + if (_11.data != 0.0) + { + float tmp = 10.0; + _11.data = tmp; + } + else + { + float tmp_1 = 15.0; + _11.data = tmp_1; + } + if (_11.data != 0.0) + { + float e; + if (_11.data != 5.0) + { + if (_11.data != 6.0) + { + e = 10.0; + } + } + else + { + e = 20.0; + } + } + switch (int(_11.data)) + { + case 0: + { + float tmp_2 = 20.0; + _11.data = tmp_2; + break; + } + case 1: + { + float tmp_3 = 30.0; + _11.data = tmp_3; + break; + } + } + float f; + switch (int(_11.data)) + { + case 0: + { + f = 30.0; + break; + } + case 1: + { + f = 40.0; + break; + } + } + float h; + for (int i = 0; i < 20; i++, h += 10.0) + { + } + _11.data = h; + float m; + do + { + } while (m != 20.0); + _11.data = m; +} + +void main() +{ + test(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/coherent-block.comp b/3rdparty/spirv-cross/reference/shaders/comp/coherent-block.comp new file mode 100644 index 000000000..bfab6bbea --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/coherent-block.comp @@ -0,0 +1,13 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) coherent restrict writeonly buffer SSBO +{ + vec4 value; +} _10; + +void main() +{ + _10.value = vec4(20.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/coherent-image.comp b/3rdparty/spirv-cross/reference/shaders/comp/coherent-image.comp new file mode 100644 index 000000000..b3992f242 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/coherent-image.comp @@ -0,0 +1,15 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) coherent restrict writeonly buffer SSBO +{ + ivec4 value; +} _10; + +layout(binding = 3, r32i) uniform coherent restrict readonly mediump iimage2D uImage; + +void main() +{ + _10.value = imageLoad(uImage, ivec2(10)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/reference/shaders/comp/composite-array-initialization.comp new file mode 100644 index 000000000..1269831f3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/composite-array-initialization.comp @@ -0,0 +1,38 @@ +#version 310 es +layout(local_size_x = 2, local_size_y = 1, local_size_z = 1) in; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 4.0 +#endif +const float X = SPIRV_CROSS_CONSTANT_ID_0; + +struct Data +{ + float a; + float b; +}; + +layout(binding = 0, std430) buffer SSBO +{ + Data outdata[]; +} _53; + +Data data[2]; +Data data2[2]; + +Data 
combine(Data a, Data b) +{ + return Data(a.a + b.a, a.b + b.b); +} + +void main() +{ + data = Data[](Data(1.0, 2.0), Data(3.0, 4.0)); + data2 = Data[](Data(X, 2.0), Data(3.0, 5.0)); + Data param = data[gl_LocalInvocationID.x]; + Data param_1 = data2[gl_LocalInvocationID.x]; + Data _73 = combine(param, param_1); + _53.outdata[gl_WorkGroupID.x].a = _73.a; + _53.outdata[gl_WorkGroupID.x].b = _73.b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/composite-construct.comp b/3rdparty/spirv-cross/reference/shaders/comp/composite-construct.comp new file mode 100644 index 000000000..3018be8f1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/composite-construct.comp @@ -0,0 +1,39 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +const vec4 _66[2] = vec4[](vec4(10.0), vec4(30.0)); +const float _94[2][3] = float[][](float[](1.0, 1.0, 1.0), float[](2.0, 2.0, 2.0)); + +struct Composite +{ + vec4 a[2]; + vec4 b[2]; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + vec4 as[]; +} _41; + +layout(binding = 1, std430) buffer SSBO1 +{ + vec4 bs[]; +} _55; + +vec4 summe(vec4 values[3][2]) +{ + return ((values[0][0] + values[2][1]) + values[0][1]) + values[1][0]; +} + +void main() +{ + vec4 values[2] = vec4[](_41.as[gl_GlobalInvocationID.x], _55.bs[gl_GlobalInvocationID.x]); + vec4 copy_values[2] = _66; + vec4 copy_values2[2] = values; + vec4 param[3][2] = vec4[][](values, copy_values, copy_values2); + _41.as[gl_GlobalInvocationID.x] = summe(param); + Composite c = Composite(values, copy_values); + float b = 10.0; + float values_scalar[4] = float[](b, b, b, b); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/culling.comp b/3rdparty/spirv-cross/reference/shaders/comp/culling.comp new file mode 100644 index 000000000..fd83bfcb5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/culling.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + float in_data[]; +} _22; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + float out_data[]; +} _38; + +layout(binding = 2, std430) buffer SSBO3 +{ + uint count; +} _41; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = _22.in_data[ident]; + if (idata > 12.0) + { + uint _45 = atomicAdd(_41.count, 1u); + _38.out_data[_45] = idata; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/defer-parens.comp b/3rdparty/spirv-cross/reference/shaders/comp/defer-parens.comp new file mode 100644 index 000000000..cf9852931 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/defer-parens.comp @@ -0,0 +1,21 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + vec4 data; + int index; +} _13; + +void main() +{ + vec4 d = _13.data; + _13.data = vec4(d.x, d.yz + vec2(10.0), d.w); + _13.data = (d + d) + d; + _13.data = (d.yz + vec2(10.0)).xxyy; + float t = (d.yz + vec2(10.0)).y; + _13.data = vec4(t); + t = (d.zw + vec2(10.0))[_13.index]; + _13.data = vec4(t); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/dowhile.comp b/3rdparty/spirv-cross/reference/shaders/comp/dowhile.comp new file mode 100644 index 000000000..e717961ab --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/dowhile.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO 
+{ + mat4 mvp; + vec4 in_data[]; +} _28; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _52; + +int i; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + i = 0; + vec4 idat = _28.in_data[ident]; + do + { + idat = _28.mvp * idat; + i++; + } while (i < 16); + _52.out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/generate_height.comp b/3rdparty/spirv-cross/reference/shaders/comp/generate_height.comp new file mode 100644 index 000000000..fe733e289 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/generate_height.comp @@ -0,0 +1,96 @@ +#version 310 es +layout(local_size_x = 64, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer Distribution +{ + vec2 distribution[]; +} _137; + +layout(binding = 2, std140) uniform UBO +{ + vec4 uModTime; +} _166; + +layout(binding = 1, std430) writeonly buffer HeightmapFFT +{ + uint heights[]; +} _225; + +uvec2 workaround_mix(uvec2 a, uvec2 b, bvec2 sel) +{ + uint _86; + if (sel.x) + { + _86 = b.x; + } + else + { + _86 = a.x; + } + uint _94 = _86; + uint _97; + if (sel.y) + { + _97 = b.y; + } + else + { + _97 = a.y; + } + return uvec2(_94, _97); +} + +vec2 alias(vec2 i, vec2 N) +{ + return mix(i, i - N, greaterThan(i, N * 0.5)); +} + +vec2 cmul(vec2 a, vec2 b) +{ + vec2 r3 = a.yx; + vec2 r1 = b.xx; + vec2 R0 = a * r1; + vec2 r2 = b.yy; + vec2 R1 = r2 * r3; + return R0 + vec2(-R1.x, R1.y); +} + +uint pack2(vec2 v) +{ + return packHalf2x16(v); +} + +void generate_heightmap() +{ + uvec2 N = uvec2(64u, 1u) * gl_NumWorkGroups.xy; + uvec2 i = gl_GlobalInvocationID.xy; + uvec2 param = N - i; + uvec2 param_1 = uvec2(0u); + bvec2 param_2 = equal(i, uvec2(0u)); + uvec2 wi = workaround_mix(param, param_1, param_2); + vec2 a = _137.distribution[(i.y * N.x) + i.x]; + vec2 b = _137.distribution[(wi.y * N.x) + wi.x]; + vec2 param_3 = vec2(i); + vec2 param_4 = vec2(N); + vec2 k = _166.uModTime.xy * alias(param_3, param_4); + float k_len = length(k); + float w = sqrt(9.81000041961669921875 * k_len) * _166.uModTime.z; + float cw = cos(w); + float sw = sin(w); + vec2 param_5 = a; + vec2 param_6 = vec2(cw, sw); + a = cmul(param_5, param_6); + vec2 param_7 = b; + vec2 param_8 = vec2(cw, sw); + b = cmul(param_7, param_8); + b = vec2(b.x, -b.y); + vec2 res = a + b; + vec2 param_9 = res; + _225.heights[(i.y * N.x) + i.x] = pack2(param_9); +} + +void main() +{ + generate_heightmap(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/image.comp b/3rdparty/spirv-cross/reference/shaders/comp/image.comp new file mode 100644 index 000000000..b2bf0d65b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/image.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, rgba8) uniform readonly mediump image2D uImageIn; +layout(binding = 1, rgba8) uniform writeonly mediump image2D uImageOut; + +void main() +{ + vec4 v = imageLoad(uImageIn, ivec2(gl_GlobalInvocationID.xy) + imageSize(uImageIn)); + imageStore(uImageOut, ivec2(gl_GlobalInvocationID.xy), v); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/inout-struct.invalid.comp b/3rdparty/spirv-cross/reference/shaders/comp/inout-struct.invalid.comp new file mode 100644 index 000000000..640e25bb9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/inout-struct.invalid.comp @@ -0,0 +1,65 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct Foo +{ + vec4 a; + vec4 b; 
+ vec4 c; + vec4 d; +}; + +layout(binding = 1, std430) readonly buffer SSBO2 +{ + vec4 data[]; +} indata; + +layout(binding = 0, std430) writeonly buffer SSBO +{ + vec4 data[]; +} outdata; + +layout(binding = 2, std430) readonly buffer SSBO3 +{ + Foo foos[]; +} foobar; + +void baz(inout Foo foo) +{ + uint ident = gl_GlobalInvocationID.x; + foo.a = indata.data[(4u * ident) + 0u]; + foo.b = indata.data[(4u * ident) + 1u]; + foo.c = indata.data[(4u * ident) + 2u]; + foo.d = indata.data[(4u * ident) + 3u]; +} + +void meow(inout Foo foo) +{ + foo.a += vec4(10.0); + foo.b += vec4(20.0); + foo.c += vec4(30.0); + foo.d += vec4(40.0); +} + +vec4 bar(Foo foo) +{ + return ((foo.a + foo.b) + foo.c) + foo.d; +} + +void main() +{ + Foo param; + baz(param); + Foo foo = param; + Foo param_1 = foo; + meow(param_1); + foo = param_1; + Foo param_2 = foo; + Foo param_3; + param_3.a = foobar.foos[gl_GlobalInvocationID.x].a; + param_3.b = foobar.foos[gl_GlobalInvocationID.x].b; + param_3.c = foobar.foos[gl_GlobalInvocationID.x].c; + param_3.d = foobar.foos[gl_GlobalInvocationID.x].d; + outdata.data[gl_GlobalInvocationID.x] = bar(param_2) + bar(param_3); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/insert.comp b/3rdparty/spirv-cross/reference/shaders/comp/insert.comp new file mode 100644 index 000000000..cbe1e27f4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/insert.comp @@ -0,0 +1,19 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) writeonly buffer SSBO +{ + vec4 out_data[]; +} _27; + +void main() +{ + vec4 v; + v.x = 10.0; + v.y = 30.0; + v.z = 70.0; + v.w = 90.0; + _27.out_data[gl_GlobalInvocationID.x] = v; + _27.out_data[gl_GlobalInvocationID.x].y = 20.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/mat3.comp b/3rdparty/spirv-cross/reference/shaders/comp/mat3.comp new file mode 100644 index 000000000..2b050f5d0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/mat3.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + mat3 out_data[]; +} _22; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + _22.out_data[ident] = mat3(vec3(10.0), vec3(20.0), vec3(40.0)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/mod.comp b/3rdparty/spirv-cross/reference/shaders/comp/mod.comp new file mode 100644 index 000000000..4be0c5f7f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/mod.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 in_data[]; +} _23; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _33; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 v = mod(_23.in_data[ident], _33.out_data[ident]); + _33.out_data[ident] = v; + uvec4 vu = floatBitsToUint(_23.in_data[ident]) % floatBitsToUint(_33.out_data[ident]); + _33.out_data[ident] = uintBitsToFloat(vu); + ivec4 vi = floatBitsToInt(_23.in_data[ident]) % floatBitsToInt(_33.out_data[ident]); + _33.out_data[ident] = intBitsToFloat(vi); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/modf.comp b/3rdparty/spirv-cross/reference/shaders/comp/modf.comp new file mode 100644 index 000000000..c92149bf9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/modf.comp @@ -0,0 +1,22 @@ +#version 310 es +layout(local_size_x = 1, 
local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 in_data[]; +} _23; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _35; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 i; + vec4 _31 = modf(_23.in_data[ident], i); + vec4 v = _31; + _35.out_data[ident] = v; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/read-write-only.comp b/3rdparty/spirv-cross/reference/shaders/comp/read-write-only.comp new file mode 100644 index 000000000..06227ee2c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/read-write-only.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 2, std430) restrict writeonly buffer SSBO2 +{ + vec4 data4; + vec4 data5; +} _10; + +layout(binding = 0, std430) readonly buffer SSBO0 +{ + vec4 data0; + vec4 data1; +} _15; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + vec4 data2; + vec4 data3; +} _21; + +void main() +{ + _10.data4 = _15.data0 + _21.data2; + _10.data5 = _15.data1 + _21.data3; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/rmw-matrix.comp b/3rdparty/spirv-cross/reference/shaders/comp/rmw-matrix.comp new file mode 100644 index 000000000..5c4ac94bc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + float a; + vec4 b; + mat4 c; + float a1; + vec4 b1; + mat4 c1; +} _11; + +void main() +{ + _11.a *= _11.a1; + _11.b *= _11.b1; + _11.c = _11.c * _11.c1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/rmw-opt.comp b/3rdparty/spirv-cross/reference/shaders/comp/rmw-opt.comp new file mode 100644 index 000000000..e3fba7810 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/rmw-opt.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + int a; +} _9; + +void main() +{ + _9.a += 10; + _9.a -= 10; + _9.a *= 10; + _9.a /= 10; + _9.a = _9.a << 2; + _9.a = _9.a >> 3; + _9.a &= 40; + _9.a ^= 10; + _9.a %= 40; + _9.a |= 1; + bool c = false; + bool d = true; + c = c && d; + d = d || c; + _9.a = int(c && d); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/shared.comp b/3rdparty/spirv-cross/reference/shaders/comp/shared.comp new file mode 100644 index 000000000..d0987a652 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/shared.comp @@ -0,0 +1,25 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + float in_data[]; +} _22; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + float out_data[]; +} _44; + +shared float sShared[4]; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = _22.in_data[ident]; + sShared[gl_LocalInvocationIndex] = idata; + memoryBarrierShared(); + barrier(); + _44.out_data[ident] = sShared[(4u - gl_LocalInvocationIndex) - 1u]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/ssbo-array.comp b/3rdparty/spirv-cross/reference/shaders/comp/ssbo-array.comp new file mode 100644 index 000000000..e773bd093 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/ssbo-array.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, 
std430) buffer SSBO +{ + vec4 data[]; +} ssbos[2]; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + ssbos[1].data[ident] = ssbos[0].data[ident]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/struct-layout.comp b/3rdparty/spirv-cross/reference/shaders/comp/struct-layout.comp new file mode 100644 index 000000000..4feea8be5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/struct-layout.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct Foo +{ + mat4 m; +}; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + Foo out_data[]; +} _23; + +layout(binding = 0, std430) readonly buffer SSBO +{ + Foo in_data[]; +} _30; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + _23.out_data[ident].m = _30.in_data[ident].m * _30.in_data[ident].m; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/struct-packing.comp b/3rdparty/spirv-cross/reference/shaders/comp/struct-packing.comp new file mode 100644 index 000000000..cd1eda1b3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/struct-packing.comp @@ -0,0 +1,146 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct S0 +{ + vec2 a[1]; + float b; +}; + +struct S1 +{ + vec3 a; + float b; +}; + +struct S2 +{ + vec3 a[1]; + float b; +}; + +struct S3 +{ + vec2 a; + float b; +}; + +struct S4 +{ + vec2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + float m4; + S4 m3s[8]; +}; + +struct S0_1 +{ + vec2 a[1]; + float b; +}; + +struct S1_1 +{ + vec3 a; + float b; +}; + +struct S2_1 +{ + vec3 a[1]; + float b; +}; + +struct S3_1 +{ + vec2 a; + float b; +}; + +struct S4_1 +{ + vec2 c; +}; + +struct Content_1 +{ + S0_1 m0s[1]; + S1_1 m1s[1]; + S2_1 m2s[1]; + S0_1 m0; + S1_1 m1; + S2_1 m2; + S3_1 m3; + float m4; + S4_1 m3s[8]; +}; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + mat2 m0; + mat2 m1; + mat2x3 m2[4]; + mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_430; + +layout(binding = 0, std140) restrict buffer SSBO0 +{ + Content_1 content; + Content_1 content1[2]; + Content_1 content2; + mat2 m0; + mat2 m1; + mat2x3 m2[4]; + mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_140; + +void main() +{ + ssbo_430.content.m0s[0].a[0] = ssbo_140.content.m0s[0].a[0]; + ssbo_430.content.m0s[0].b = ssbo_140.content.m0s[0].b; + ssbo_430.content.m1s[0].a = ssbo_140.content.m1s[0].a; + ssbo_430.content.m1s[0].b = ssbo_140.content.m1s[0].b; + ssbo_430.content.m2s[0].a[0] = ssbo_140.content.m2s[0].a[0]; + ssbo_430.content.m2s[0].b = ssbo_140.content.m2s[0].b; + ssbo_430.content.m0.a[0] = ssbo_140.content.m0.a[0]; + ssbo_430.content.m0.b = ssbo_140.content.m0.b; + ssbo_430.content.m1.a = ssbo_140.content.m1.a; + ssbo_430.content.m1.b = ssbo_140.content.m1.b; + ssbo_430.content.m2.a[0] = ssbo_140.content.m2.a[0]; + ssbo_430.content.m2.b = ssbo_140.content.m2.b; + ssbo_430.content.m3.a = ssbo_140.content.m3.a; + ssbo_430.content.m3.b = ssbo_140.content.m3.b; + ssbo_430.content.m4 = ssbo_140.content.m4; + ssbo_430.content.m3s[0].c = ssbo_140.content.m3s[0].c; + ssbo_430.content.m3s[1].c = ssbo_140.content.m3s[1].c; + ssbo_430.content.m3s[2].c = 
ssbo_140.content.m3s[2].c; + ssbo_430.content.m3s[3].c = ssbo_140.content.m3s[3].c; + ssbo_430.content.m3s[4].c = ssbo_140.content.m3s[4].c; + ssbo_430.content.m3s[5].c = ssbo_140.content.m3s[5].c; + ssbo_430.content.m3s[6].c = ssbo_140.content.m3s[6].c; + ssbo_430.content.m3s[7].c = ssbo_140.content.m3s[7].c; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/torture-loop.comp b/3rdparty/spirv-cross/reference/shaders/comp/torture-loop.comp new file mode 100644 index 000000000..645af5c37 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/torture-loop.comp @@ -0,0 +1,49 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +} _24; + +layout(binding = 1, std430) writeonly buffer SSBO2 +{ + vec4 out_data[]; +} _89; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idat = _24.in_data[ident]; + int k = 0; + for (;;) + { + int _39 = k; + int _40 = _39 + 1; + k = _40; + if (_40 < 10) + { + idat *= 2.0; + k++; + continue; + } + else + { + break; + } + } + for (uint i = 0u; i < 16u; i++, k++) + { + for (uint j = 0u; j < 30u; j++) + { + idat = _24.mvp * idat; + } + } + do + { + k++; + } while (k > 10); + _89.out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/type-alias.comp b/3rdparty/spirv-cross/reference/shaders/comp/type-alias.comp new file mode 100644 index 000000000..51f3792e1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/type-alias.comp @@ -0,0 +1,49 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct S0 +{ + vec4 a; +}; + +struct S1 +{ + vec4 a; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + S0 s0s[]; +} _36; + +layout(binding = 1, std430) buffer SSBO1 +{ + S1 s1s[]; +} _55; + +layout(binding = 2, std430) buffer SSBO2 +{ + vec4 outputs[]; +} _66; + +vec4 overload(S0 s0) +{ + return s0.a; +} + +vec4 overload(S1 s1) +{ + return s1.a; +} + +void main() +{ + S0 s0; + s0.a = _36.s0s[gl_GlobalInvocationID.x].a; + S1 s1; + s1.a = _55.s1s[gl_GlobalInvocationID.x].a; + S0 param = s0; + S1 param_1 = s1; + _66.outputs[gl_GlobalInvocationID.x] = overload(param) + overload(param_1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/comp/udiv.comp b/3rdparty/spirv-cross/reference/shaders/comp/udiv.comp new file mode 100644 index 000000000..0c1f926ad --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/comp/udiv.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, std430) buffer SSBO2 +{ + uint outputs[]; +} _10; + +layout(binding = 0, std430) buffer SSBO +{ + uint inputs[]; +} _23; + +void main() +{ + _10.outputs[gl_GlobalInvocationID.x] = _23.inputs[gl_GlobalInvocationID.x] / 29u; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/enhanced-layouts.comp b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/enhanced-layouts.comp new file mode 100644 index 000000000..45b25064b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/enhanced-layouts.comp @@ -0,0 +1,47 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct Foo +{ + int a; + int b; + int c; +}; + +struct Foo_1 +{ + int a; + int b; + int c; +}; + +layout(binding = 1, std140) buffer SSBO1 +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + layout(offset = 48) int c[8]; +} ssbo1; + 
+layout(binding = 2, std430) buffer SSBO2 +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo_1 foo; + layout(offset = 48) int c[8]; +} ssbo2; + +layout(binding = 0, std140) uniform UBO +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + layout(offset = 48) int c[8]; +} ubo; + +void main() +{ + ssbo1.a = ssbo2.a; + ssbo1.b = ubo.b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/extended-arithmetic.desktop.comp b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/extended-arithmetic.desktop.comp new file mode 100644 index 000000000..9c55c74db --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/extended-arithmetic.desktop.comp @@ -0,0 +1,159 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct ResType +{ + uint _m0; + uint _m1; +}; + +struct ResType_1 +{ + uvec2 _m0; + uvec2 _m1; +}; + +struct ResType_2 +{ + uvec3 _m0; + uvec3 _m1; +}; + +struct ResType_3 +{ + uvec4 _m0; + uvec4 _m1; +}; + +struct ResType_4 +{ + int _m0; + int _m1; +}; + +struct ResType_5 +{ + ivec2 _m0; + ivec2 _m1; +}; + +struct ResType_6 +{ + ivec3 _m0; + ivec3 _m1; +}; + +struct ResType_7 +{ + ivec4 _m0; + ivec4 _m1; +}; + +layout(binding = 0, std430) buffer SSBOUint +{ + uint a; + uint b; + uint c; + uint d; + uvec2 a2; + uvec2 b2; + uvec2 c2; + uvec2 d2; + uvec3 a3; + uvec3 b3; + uvec3 c3; + uvec3 d3; + uvec4 a4; + uvec4 b4; + uvec4 c4; + uvec4 d4; +} u; + +layout(binding = 1, std430) buffer SSBOInt +{ + int a; + int b; + int c; + int d; + ivec2 a2; + ivec2 b2; + ivec2 c2; + ivec2 d2; + ivec3 a3; + ivec3 b3; + ivec3 c3; + ivec3 d3; + ivec4 a4; + ivec4 b4; + ivec4 c4; + ivec4 d4; +} i; + +void main() +{ + ResType _25; + _25._m0 = uaddCarry(u.a, u.b, _25._m1); + u.d = _25._m1; + u.c = _25._m0; + ResType_1 _40; + _40._m0 = uaddCarry(u.a2, u.b2, _40._m1); + u.d2 = _40._m1; + u.c2 = _40._m0; + ResType_2 _55; + _55._m0 = uaddCarry(u.a3, u.b3, _55._m1); + u.d3 = _55._m1; + u.c3 = _55._m0; + ResType_3 _70; + _70._m0 = uaddCarry(u.a4, u.b4, _70._m1); + u.d4 = _70._m1; + u.c4 = _70._m0; + ResType _79; + _79._m0 = usubBorrow(u.a, u.b, _79._m1); + u.d = _79._m1; + u.c = _79._m0; + ResType_1 _88; + _88._m0 = usubBorrow(u.a2, u.b2, _88._m1); + u.d2 = _88._m1; + u.c2 = _88._m0; + ResType_2 _97; + _97._m0 = usubBorrow(u.a3, u.b3, _97._m1); + u.d3 = _97._m1; + u.c3 = _97._m0; + ResType_3 _106; + _106._m0 = usubBorrow(u.a4, u.b4, _106._m1); + u.d4 = _106._m1; + u.c4 = _106._m0; + ResType _116; + umulExtended(u.a, u.b, _116._m1, _116._m0); + u.d = _116._m0; + u.c = _116._m1; + ResType_1 _125; + umulExtended(u.a2, u.b2, _125._m1, _125._m0); + u.d2 = _125._m0; + u.c2 = _125._m1; + ResType_2 _134; + umulExtended(u.a3, u.b3, _134._m1, _134._m0); + u.d3 = _134._m0; + u.c3 = _134._m1; + ResType_3 _143; + umulExtended(u.a4, u.b4, _143._m1, _143._m0); + u.d4 = _143._m0; + u.c4 = _143._m1; + ResType_4 _160; + imulExtended(i.a, i.b, _160._m1, _160._m0); + i.d = _160._m0; + i.c = _160._m1; + ResType_5 _171; + imulExtended(i.a2, i.b2, _171._m1, _171._m0); + i.d2 = _171._m0; + i.c2 = _171._m1; + ResType_6 _182; + imulExtended(i.a3, i.b3, _182._m1, _182._m0); + i.d3 = _182._m0; + i.c3 = _182._m1; + ResType_7 _193; + imulExtended(i.a4, i.b4, _193._m1, _193._m0); + i.d4 = _193._m0; + i.c4 = _193._m1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/fp64.desktop.comp b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/fp64.desktop.comp new file mode 
100644 index 000000000..c9e5e8496 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/fp64.desktop.comp @@ -0,0 +1,83 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct M0 +{ + double v; + dvec2 b[2]; + dmat2x3 c; + dmat3x2 d; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + dvec4 a; + M0 m0; + dmat4 b; +} ssbo_0; + +layout(binding = 1, std430) buffer SSBO1 +{ + dmat4 a; + dvec4 b; + M0 m0; +} ssbo_1; + +layout(binding = 2, std430) buffer SSBO2 +{ + double a[4]; + dvec2 b[4]; +} ssbo_2; + +layout(binding = 3, std140) buffer SSBO3 +{ + double a[4]; + dvec2 b[4]; +} ssbo_3; + +void main() +{ + ssbo_0.a += dvec4(10.0lf, 20.0lf, 30.0lf, 40.0lf); + ssbo_0.a += dvec4(20.0lf); + dvec4 a = ssbo_0.a; + dmat4 amat = ssbo_0.b; + ssbo_0.a = abs(a); + ssbo_0.a = sign(a); + ssbo_0.a = floor(a); + ssbo_0.a = trunc(a); + ssbo_0.a = round(a); + ssbo_0.a = roundEven(a); + ssbo_0.a = ceil(a); + ssbo_0.a = fract(a); + ssbo_0.a = mod(a, dvec4(20.0lf)); + ssbo_0.a = mod(a, a); + ssbo_0.a = min(a, a); + ssbo_0.a = max(a, a); + ssbo_0.a = clamp(a, a, a); + ssbo_0.a = mix(a, a, a); + ssbo_0.a = step(a, a); + ssbo_0.a = smoothstep(a, a, a); + bvec4 b = isnan(a); + bvec4 c = isinf(a); + double f = packDouble2x32(uvec2(10u, 40u)); + uvec2 g = unpackDouble2x32(f); + double d = length(a); + d = distance(a, a); + d = dot(a, a); + dvec3 e = cross(a.xyz, a.yzw); + a = faceforward(a, a, a); + a = reflect(a, a); + dmat4 l = dmat4(amat[0] * amat[0], amat[1] * amat[1], amat[2] * amat[2], amat[3] * amat[3]); + l = outerProduct(a, a); + l = transpose(l); + double m = determinant(l); + l = inverse(l); + bvec4 k = lessThan(a, a); + k = lessThanEqual(a, a); + k = greaterThan(a, a); + k = greaterThanEqual(a, a); + ssbo_1.b.x += 1.0lf; + ssbo_2.b[0].x += 1.0lf; + ssbo_3.b[0].x += 1.0lf; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp new file mode 100644 index 000000000..7a0797578 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp @@ -0,0 +1,47 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(binding = 0, rgba32f) uniform readonly writeonly image2D uImg00; +layout(binding = 1, rgba16f) uniform readonly writeonly image2D uImg01; +layout(binding = 2, rg32f) uniform readonly writeonly image2D uImg02; +layout(binding = 3, rg16f) uniform readonly writeonly image2D uImg03; +layout(binding = 4, r11f_g11f_b10f) uniform readonly writeonly image2D uImg04; +layout(binding = 5, r32f) uniform readonly writeonly image2D uImg05; +layout(binding = 6, r16f) uniform readonly writeonly image2D uImg06; +layout(binding = 7, rgba16) uniform readonly writeonly image2D uImg07; +layout(binding = 8, rgb10_a2) uniform readonly writeonly image2D uImg08; +layout(binding = 9, rgba8) uniform readonly writeonly image2D uImg09; +layout(binding = 10, rg16) uniform readonly writeonly image2D uImg10; +layout(binding = 11, rg8) uniform readonly writeonly image2D uImg11; +layout(binding = 12, r16) uniform readonly writeonly image2D uImg12; +layout(binding = 13, r8) uniform readonly writeonly image2D uImg13; +layout(binding = 14, rgba16_snorm) uniform readonly writeonly image2D uImg14; +layout(binding = 15, rgba8_snorm) uniform readonly writeonly image2D uImg15; +layout(binding = 16, rg16_snorm) uniform readonly writeonly image2D 
uImg16; +layout(binding = 17, rg8_snorm) uniform readonly writeonly image2D uImg17; +layout(binding = 18, r16_snorm) uniform readonly writeonly image2D uImg18; +layout(binding = 19, r8_snorm) uniform readonly writeonly image2D uImg19; +layout(binding = 20, rgba32i) uniform readonly writeonly iimage2D uImage20; +layout(binding = 21, rgba16i) uniform readonly writeonly iimage2D uImage21; +layout(binding = 22, rgba8i) uniform readonly writeonly iimage2D uImage22; +layout(binding = 23, rg32i) uniform readonly writeonly iimage2D uImage23; +layout(binding = 24, rg16i) uniform readonly writeonly iimage2D uImage24; +layout(binding = 25, rg8i) uniform readonly writeonly iimage2D uImage25; +layout(binding = 26, r32i) uniform readonly writeonly iimage2D uImage26; +layout(binding = 27, r16i) uniform readonly writeonly iimage2D uImage27; +layout(binding = 28, r8i) uniform readonly writeonly iimage2D uImage28; +layout(binding = 29, rgba32ui) uniform readonly writeonly uimage2D uImage29; +layout(binding = 30, rgba16ui) uniform readonly writeonly uimage2D uImage30; +layout(binding = 31, rgb10_a2ui) uniform readonly writeonly uimage2D uImage31; +layout(binding = 32, rgba8ui) uniform readonly writeonly uimage2D uImage32; +layout(binding = 33, rg32ui) uniform readonly writeonly uimage2D uImage33; +layout(binding = 34, rg16ui) uniform readonly writeonly uimage2D uImage34; +layout(binding = 35, rg8ui) uniform readonly writeonly uimage2D uImage35; +layout(binding = 36, r32ui) uniform readonly writeonly uimage2D uImage36; +layout(binding = 37, r16ui) uniform readonly writeonly uimage2D uImage37; +layout(binding = 38, r8ui) uniform readonly writeonly uimage2D uImage38; + +void main() +{ +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/int64.desktop.comp b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/int64.desktop.comp new file mode 100644 index 000000000..702456b30 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/comp/int64.desktop.comp @@ -0,0 +1,52 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +struct M0 +{ + int64_t v; + i64vec2 b[2]; + uint64_t c; + uint64_t d[5]; +}; + +layout(binding = 0, std430) buffer SSBO0 +{ + i64vec4 a; + M0 m0; +} ssbo_0; + +layout(binding = 1, std430) buffer SSBO1 +{ + u64vec4 b; + M0 m0; +} ssbo_1; + +layout(binding = 2, std430) buffer SSBO2 +{ + int64_t a[4]; + i64vec2 b[4]; +} ssbo_2; + +layout(binding = 3, std140) buffer SSBO3 +{ + int64_t a[4]; + i64vec2 b[4]; +} ssbo_3; + +void main() +{ + ssbo_0.a += i64vec4(10l, 20l, 30l, 40l); + ssbo_1.b += u64vec4(999999999999999999ul, 8888888888888888ul, 77777777777777777ul, 6666666666666666ul); + ssbo_0.a += i64vec4(20l); + ssbo_0.a = abs(ssbo_0.a + i64vec4(ssbo_1.b)); + ssbo_0.a += i64vec4(1l); + ssbo_1.b += u64vec4(i64vec4(1l)); + ssbo_0.a -= i64vec4(1l); + ssbo_1.b -= u64vec4(i64vec4(1l)); + ssbo_1.b = doubleBitsToUint64(int64BitsToDouble(ssbo_0.a)); + ssbo_0.a = doubleBitsToInt64(uint64BitsToDouble(ssbo_1.b)); + ssbo_2.a[0] += 1l; + ssbo_3.a[0] += 2l; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/clip-cull-distance.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/clip-cull-distance.desktop.frag new file mode 100644 index 000000000..3cc320550 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/clip-cull-distance.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +in float gl_ClipDistance[4]; +in float gl_CullDistance[3]; 
+ +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = gl_ClipDistance[0] + gl_CullDistance[0]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..391b4de1c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,37 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vInput; + +void main() +{ + FragColor = vInput; + vec4 t = texture(uSampler, vInput.xy); + vec4 d0 = dFdx(vInput); + vec4 d1 = dFdy(vInput); + vec4 d2 = fwidth(vInput); + vec4 d3 = dFdxCoarse(vInput); + vec4 d4 = dFdyCoarse(vInput); + vec4 d5 = fwidthCoarse(vInput); + vec4 d6 = dFdxFine(vInput); + vec4 d7 = dFdyFine(vInput); + vec4 d8 = fwidthFine(vInput); + vec2 lod = textureQueryLod(uSampler, vInput.zw); + if (vInput.y > 10.0) + { + FragColor += t; + FragColor += d0; + FragColor += d1; + FragColor += d2; + FragColor += d3; + FragColor += d4; + FragColor += d5; + FragColor += d6; + FragColor += d7; + FragColor += d8; + FragColor += lod.xyxy; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/depth-greater-than.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/depth-greater-than.desktop.frag new file mode 100644 index 000000000..8b7c29644 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/depth-greater-than.desktop.frag @@ -0,0 +1,9 @@ +#version 450 +layout(depth_greater) out float gl_FragDepth; +layout(early_fragment_tests) in; + +void main() +{ + gl_FragDepth = 0.5; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/depth-less-than.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/depth-less-than.desktop.frag new file mode 100644 index 000000000..44752eb8f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/depth-less-than.desktop.frag @@ -0,0 +1,9 @@ +#version 450 +layout(depth_less) out float gl_FragDepth; +layout(early_fragment_tests) in; + +void main() +{ + gl_FragDepth = 0.5; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/dual-source-blending.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/dual-source-blending.desktop.frag new file mode 100644 index 000000000..3d946b04a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/dual-source-blending.desktop.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0, index = 0) out vec4 FragColor0; +layout(location = 0, index = 1) out vec4 FragColor1; + +void main() +{ + FragColor0 = vec4(1.0); + FragColor1 = vec4(2.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/fp16.invalid.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/fp16.invalid.desktop.frag new file mode 100644 index 000000000..af8882c43 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/fp16.invalid.desktop.frag @@ -0,0 +1,161 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available 
for FP16. +#endif + +struct ResType +{ + f16vec4 _m0; + ivec4 _m1; +}; + +layout(location = 3) in f16vec4 v4; +layout(location = 2) in f16vec3 v3; +layout(location = 0) in float16_t v1; +layout(location = 1) in f16vec2 v2; + +f16mat2 test_mat2(f16vec2 a, f16vec2 b, f16vec2 c, f16vec2 d) +{ + return f16mat2(f16vec2(a), f16vec2(b)) * f16mat2(f16vec2(c), f16vec2(d)); +} + +f16mat3 test_mat3(f16vec3 a, f16vec3 b, f16vec3 c, f16vec3 d, f16vec3 e, f16vec3 f) +{ + return f16mat3(f16vec3(a), f16vec3(b), f16vec3(c)) * f16mat3(f16vec3(d), f16vec3(e), f16vec3(f)); +} + +void test_constants() +{ + float16_t a = 1.0hf; + float16_t b = 1.5hf; + float16_t c = -1.5hf; + float16_t d = (0.0hf / 0.0hf); + float16_t e = (1.0hf / 0.0hf); + float16_t f = (-1.0hf / 0.0hf); + float16_t g = 1014.0hf; + float16_t h = 9.5367431640625e-07hf; +} + +float16_t test_result() +{ + return 1.0hf; +} + +void test_conversions() +{ + float16_t one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = one != 0.0hf; + float d = float(one); + double e = double(one); + float16_t a2 = float16_t(a); + float16_t b2 = float16_t(b); + float16_t c2 = float16_t(c); + float16_t d2 = float16_t(d); + float16_t e2 = float16_t(e); +} + +void test_builtins() +{ + f16vec4 res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + res = asinh(v4); + res = acosh(v4); + res = atanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = inversesqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + res = roundEven(v4); + res = ceil(v4); + res = fract(v4); + res = mod(v4, v4); + f16vec4 tmp; + f16vec4 _231 = modf(v4, tmp); + res = _231; + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = mix(v4, v4, v4); + res = mix(v4, v4, lessThan(v4, v4)); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + bvec4 btmp = isnan(v4); + btmp = isinf(v4); + res = fma(v4, v4, v4); + ResType _275; + _275._m0 = frexp(v4, _275._m1); + ivec4 itmp = _275._m1; + res = _275._m0; + res = ldexp(res, itmp); + uint pack0 = packFloat2x16(v4.xy); + uint pack1 = packFloat2x16(v4.zw); + res = f16vec4(unpackFloat2x16(pack0), unpackFloat2x16(pack1)); + float16_t t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + f16vec3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + btmp = lessThan(v4, v4); + btmp = lessThanEqual(v4, v4); + btmp = greaterThan(v4, v4); + btmp = greaterThanEqual(v4, v4); + btmp = equal(v4, v4); + btmp = notEqual(v4, v4); + res = dFdx(v4); + res = dFdy(v4); + res = dFdxFine(v4); + res = dFdyFine(v4); + res = dFdxCoarse(v4); + res = dFdyCoarse(v4); + res = fwidth(v4); + res = fwidthFine(v4); + res = fwidthCoarse(v4); +} + +void main() +{ + f16vec2 param = v2; + f16vec2 param_1 = v2; + f16vec2 param_2 = v3.xy; + f16vec2 param_3 = v3.xy; + f16mat2 m0 = test_mat2(param, param_1, param_2, param_3); + f16vec3 param_4 = v3; + f16vec3 param_5 = v3; + f16vec3 param_6 = v3; + f16vec3 param_7 = v4.xyz; + f16vec3 param_8 = v4.xyz; + f16vec3 param_9 = v4.yzw; + f16mat3 m1 = test_mat3(param_4, param_5, param_6, param_7, param_8, param_9); + test_constants(); + test_conversions(); + test_builtins(); +} + diff --git 
a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag new file mode 100644 index 000000000..70843a856 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag @@ -0,0 +1,24 @@ +#version 450 + +layout(binding = 0, std430) buffer Foobar +{ + vec4 _data[]; +} Foobar_1; + +layout(binding = 1, std430) buffer Foobaz +{ + vec4 _data[]; +} Foobaz_1; + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 _main() +{ + return Foobar_1._data[0] + Foobaz_1._data[0]; +} + +void main() +{ + _entryPointOutput = _main(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/image-ms.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/image-ms.desktop.frag new file mode 100644 index 000000000..24644be17 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/image-ms.desktop.frag @@ -0,0 +1,13 @@ +#version 450 + +layout(binding = 0, rgba8) uniform image2DMS uImage; +layout(binding = 1, rgba8) uniform image2DMSArray uImageArray; + +void main() +{ + vec4 a = imageLoad(uImage, ivec2(1, 2), 2); + vec4 b = imageLoad(uImageArray, ivec3(1, 2, 4), 3); + imageStore(uImage, ivec2(2, 3), 1, a); + imageStore(uImageArray, ivec3(2, 3, 7), 1, b); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/image-query.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/image-query.desktop.frag new file mode 100644 index 000000000..6f36d5db3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/image-query.desktop.frag @@ -0,0 +1,53 @@ +#version 450 + +layout(binding = 0) uniform sampler1D uSampler1D; +layout(binding = 1) uniform sampler2D uSampler2D; +layout(binding = 2) uniform sampler2DArray uSampler2DArray; +layout(binding = 3) uniform sampler3D uSampler3D; +layout(binding = 4) uniform samplerCube uSamplerCube; +layout(binding = 5) uniform samplerCubeArray uSamplerCubeArray; +layout(binding = 6) uniform samplerBuffer uSamplerBuffer; +layout(binding = 7) uniform sampler2DMS uSamplerMS; +layout(binding = 8) uniform sampler2DMSArray uSamplerMSArray; +layout(binding = 9, r32f) uniform readonly writeonly image1D uImage1D; +layout(binding = 10, r32f) uniform readonly writeonly image2D uImage2D; +layout(binding = 11, r32f) uniform readonly writeonly image2DArray uImage2DArray; +layout(binding = 12, r32f) uniform readonly writeonly image3D uImage3D; +layout(binding = 13, r32f) uniform readonly writeonly imageCube uImageCube; +layout(binding = 14, r32f) uniform readonly writeonly imageCubeArray uImageCubeArray; +layout(binding = 15, r32f) uniform readonly writeonly imageBuffer uImageBuffer; +layout(binding = 16, r32f) uniform readonly writeonly image2DMS uImageMS; +layout(binding = 17, r32f) uniform readonly writeonly image2DMSArray uImageMSArray; + +void main() +{ + int a = textureSize(uSampler1D, 0); + ivec2 b = textureSize(uSampler2D, 0); + ivec3 c = textureSize(uSampler2DArray, 0); + ivec3 d = textureSize(uSampler3D, 0); + ivec2 e = textureSize(uSamplerCube, 0); + ivec3 f = textureSize(uSamplerCubeArray, 0); + int g = textureSize(uSamplerBuffer); + ivec2 h = textureSize(uSamplerMS); + ivec3 i = textureSize(uSamplerMSArray); + int l0 = textureQueryLevels(uSampler1D); + int l1 = textureQueryLevels(uSampler2D); + int l2 = textureQueryLevels(uSampler2DArray); + int l3 = textureQueryLevels(uSampler3D); + int l4 = 
textureQueryLevels(uSamplerCube); + int l5 = textureQueryLevels(uSamplerCubeArray); + a = imageSize(uImage1D); + b = imageSize(uImage2D); + c = imageSize(uImage2DArray); + d = imageSize(uImage3D); + e = imageSize(uImageCube); + f = imageSize(uImageCubeArray); + g = imageSize(uImageBuffer); + h = imageSize(uImageMS); + i = imageSize(uImageMSArray); + int s0 = textureSamples(uSamplerMS); + int s1 = textureSamples(uSamplerMSArray); + int s2 = imageSamples(uImageMS); + int s3 = imageSamples(uImageMSArray); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/in-block-qualifiers.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/in-block-qualifiers.frag new file mode 100644 index 000000000..d4622801d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/in-block-qualifiers.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in VertexData +{ + flat float f; + centroid vec4 g; + flat int h; + float i; +} vin; + +layout(location = 4) flat in float f; +layout(location = 5) centroid in vec4 g; +layout(location = 6) flat in int h; +layout(location = 7) sample in float i; + +void main() +{ + FragColor = ((((((vec4(vin.f) + vin.g) + vec4(float(vin.h))) + vec4(vin.i)) + vec4(f)) + g) + vec4(float(h))) + vec4(i); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/layout-component.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/layout-component.desktop.frag new file mode 100644 index 000000000..13f17feee --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/layout-component.desktop.frag @@ -0,0 +1,16 @@ +#version 450 + +layout(location = 0) out vec2 FragColor; +layout(location = 0, component = 0) in vec2 v0; +layout(location = 0, component = 2) in float v1; +in Vertex +{ + layout(location = 1, component = 2) float v3; +} _20; + + +void main() +{ + FragColor = (v0 + vec2(v1)) + vec2(_20.v3); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/query-levels.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/query-levels.desktop.frag new file mode 100644 index 000000000..4a80cbf81 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/query-levels.desktop.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(float(textureQueryLevels(uSampler))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/query-lod.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/query-lod.desktop.frag new file mode 100644 index 000000000..f43543b8c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/query-lod.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTexCoord; + +void main() +{ + FragColor = textureQueryLod(uSampler, vTexCoord).xyxy; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/sampler-ms-query.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/sampler-ms-query.desktop.frag new file mode 100644 index 000000000..4c30ed152 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/sampler-ms-query.desktop.frag @@ -0,0 +1,14 @@ +#version 450 + +layout(binding = 0) uniform sampler2DMS uSampler; +layout(binding = 1) uniform 
sampler2DMSArray uSamplerArray; +layout(binding = 2, rgba8) uniform readonly writeonly image2DMS uImage; +layout(binding = 3, rgba8) uniform readonly writeonly image2DMSArray uImageArray; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(float(((textureSamples(uSampler) + textureSamples(uSamplerArray)) + imageSamples(uImage)) + imageSamples(uImageArray))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag new file mode 100644 index 000000000..d5e45bda4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag @@ -0,0 +1,26 @@ +#version 450 + +layout(binding = 0) uniform sampler1DShadow uShadow1D; +layout(binding = 1) uniform sampler2DShadow uShadow2D; +layout(binding = 2) uniform sampler1D uSampler1D; +layout(binding = 3) uniform sampler2D uSampler2D; +layout(binding = 4) uniform sampler3D uSampler3D; + +layout(location = 0) out float FragColor; +layout(location = 1) in vec4 vClip4; +layout(location = 2) in vec2 vClip2; +layout(location = 0) in vec3 vClip3; + +void main() +{ + vec4 _20 = vClip4; + _20.y = vClip4.w; + FragColor = textureProj(uShadow1D, vec4(_20.x, 0.0, vClip4.z, _20.y)); + vec4 _30 = vClip4; + _30.z = vClip4.w; + FragColor = textureProj(uShadow2D, vec4(_30.xy, vClip4.z, _30.z)); + FragColor = textureProj(uSampler1D, vClip2).x; + FragColor = textureProj(uSampler2D, vClip3).x; + FragColor = textureProj(uSampler3D, vClip4).x; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/geom/basic.desktop.sso.geom b/3rdparty/spirv-cross/reference/shaders/desktop-only/geom/basic.desktop.sso.geom new file mode 100644 index 000000000..f1afee69e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/geom/basic.desktop.sso.geom @@ -0,0 +1,35 @@ +#version 450 +layout(invocations = 4, triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[]; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal + vec3(float(gl_InvocationID)); + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal + vec3(4.0 * float(gl_InvocationID)); + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal + vec3(2.0 * float(gl_InvocationID)); + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/geom/viewport-index.desktop.geom b/3rdparty/spirv-cross/reference/shaders/desktop-only/geom/viewport-index.desktop.geom new file mode 100644 index 000000000..773aeb8bf --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/geom/viewport-index.desktop.geom @@ -0,0 +1,9 @@ +#version 450 +layout(triangles) in; +layout(max_vertices = 4, triangle_strip) out; + +void main() +{ + gl_ViewportIndex = 1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/tesc/basic.desktop.sso.tesc b/3rdparty/spirv-cross/reference/shaders/desktop-only/tesc/basic.desktop.sso.tesc new file mode 100644 index 000000000..5e958256a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/tesc/basic.desktop.sso.tesc @@ -0,0 +1,27 @@ +#version 450 +layout(vertices = 1) out; + +in gl_PerVertex +{ + vec4 gl_Position; +} 
gl_in[gl_MaxPatchVertices]; + +out gl_PerVertex +{ + vec4 gl_Position; +} gl_out[1]; + +layout(location = 0) patch out vec3 vFoo; + +void main() +{ + gl_TessLevelInner[0] = 8.8999996185302734375; + gl_TessLevelInner[1] = 6.900000095367431640625; + gl_TessLevelOuter[0] = 8.8999996185302734375; + gl_TessLevelOuter[1] = 6.900000095367431640625; + gl_TessLevelOuter[2] = 3.900000095367431640625; + gl_TessLevelOuter[3] = 4.900000095367431640625; + vFoo = vec3(1.0); + gl_out[gl_InvocationID].gl_Position = gl_in[0].gl_Position + gl_in[1].gl_Position; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/tese/triangle.desktop.sso.tese b/3rdparty/spirv-cross/reference/shaders/desktop-only/tese/triangle.desktop.sso.tese new file mode 100644 index 000000000..31027dae8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/tese/triangle.desktop.sso.tese @@ -0,0 +1,18 @@ +#version 450 +layout(triangles, cw, fractional_even_spacing) in; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[gl_MaxPatchVertices]; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = ((gl_in[0].gl_Position * gl_TessCoord.x) + (gl_in[1].gl_Position * gl_TessCoord.y)) + (gl_in[2].gl_Position * gl_TessCoord.z); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/basic.desktop.sso.vert b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/basic.desktop.sso.vert new file mode 100644 index 000000000..5f527e08c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/basic.desktop.sso.vert @@ -0,0 +1,22 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; +} _16; + +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = _16.uMVP * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert new file mode 100644 index 000000000..a7c5d761c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert @@ -0,0 +1,20 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; + float gl_PointSize; + float gl_ClipDistance[4]; + float gl_CullDistance[3]; +}; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_ClipDistance[2] = 0.0; + gl_ClipDistance[3] = 0.0; + gl_CullDistance[1] = 4.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/clip-cull-distance.desktop.vert b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/clip-cull-distance.desktop.vert new file mode 100644 index 000000000..2f3d49f55 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/clip-cull-distance.desktop.vert @@ -0,0 +1,15 @@ +#version 450 + +out float gl_ClipDistance[4]; +out float gl_CullDistance[3]; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_ClipDistance[2] = 0.0; + gl_ClipDistance[3] = 0.0; + gl_CullDistance[1] = 4.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/out-block-qualifiers.vert b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/out-block-qualifiers.vert new file mode 100644 index 000000000..7c731684b --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/out-block-qualifiers.vert @@ -0,0 +1,27 @@ +#version 450 + +layout(location = 0) out VertexData +{ + flat float f; + centroid vec4 g; + flat int h; + float i; +} vout; + +layout(location = 4) flat out float f; +layout(location = 5) centroid out vec4 g; +layout(location = 6) flat out int h; +layout(location = 7) out float i; + +void main() +{ + vout.f = 10.0; + vout.g = vec4(20.0); + vout.h = 20; + vout.i = 30.0; + f = 10.0; + g = vec4(20.0); + h = 20; + i = 30.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert new file mode 100644 index 000000000..6121dd8f1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert @@ -0,0 +1,8 @@ +#version 450 +#extension GL_ARB_shader_draw_parameters : require + +void main() +{ + gl_Position = vec4(float(gl_BaseVertexARB), float(gl_BaseInstanceARB), float(gl_DrawIDARB), 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert new file mode 100644 index 000000000..b6948fbc4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert @@ -0,0 +1,7 @@ +#version 460 + +void main() +{ + gl_Position = vec4(float(gl_BaseVertex), float(gl_BaseInstance), float(gl_DrawID), 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/array.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/array.flatten.vert new file mode 100644 index 000000000..5afde34c5 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/array.flatten.vert @@ -0,0 +1,12 @@ +#version 310 es + +uniform vec4 UBO[56]; +layout(location = 0) in vec4 aVertex; + +void main() +{ + vec4 a4 = UBO[23]; + vec4 offset = (UBO[50] + UBO[45]) + vec4(UBO[54].x); + gl_Position = ((mat4(UBO[40], UBO[41], UBO[42], UBO[43]) * aVertex) + UBO[55]) + offset; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/basic.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/basic.flatten.vert new file mode 100644 index 000000000..f7eb758f2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/basic.flatten.vert @@ -0,0 +1,13 @@ +#version 310 es + +uniform vec4 UBO[4]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/copy.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/copy.flatten.vert new file mode 100644 index 000000000..0ca7de347 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/copy.flatten.vert @@ -0,0 +1,30 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + vec4 Color; +}; + +uniform vec4 UBO[12]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec4 vColor; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vColor = vec4(0.0); + for (int i = 0; i < 4; i++) + { + Light _52 = Light(UBO[i * 2 + 4].xyz, UBO[i * 2 + 4].w, UBO[i * 2 + 5]); + Light light; + light.Position = _52.Position; + light.Radius = 
_52.Radius; + light.Color = _52.Color; + vec3 L = aVertex.xyz - light.Position; + vColor += ((UBO[i * 2 + 5] * clamp(1.0 - (length(L) / light.Radius), 0.0, 1.0)) * dot(aNormal, normalize(L))); + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/dynamic.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/dynamic.flatten.vert new file mode 100644 index 000000000..8be397ea3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/dynamic.flatten.vert @@ -0,0 +1,25 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + vec4 Color; +}; + +uniform vec4 UBO[12]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec4 vColor; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vColor = vec4(0.0); + for (int i = 0; i < 4; i++) + { + vec3 L = aVertex.xyz - UBO[i * 2 + 4].xyz; + vColor += ((UBO[i * 2 + 5] * clamp(1.0 - (length(L) / UBO[i * 2 + 4].w), 0.0, 1.0)) * dot(aNormal, normalize(L))); + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/matrix-conversion.flatten.frag b/3rdparty/spirv-cross/reference/shaders/flatten/matrix-conversion.flatten.frag new file mode 100644 index 000000000..ee79bf5b8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/matrix-conversion.flatten.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform vec4 UBO[4]; +layout(location = 0) out vec3 FragColor; +layout(location = 0) flat in vec3 vNormal; + +void main() +{ + mat4 _19 = mat4(UBO[0], UBO[1], UBO[2], UBO[3]); + FragColor = mat3(_19[0].xyz, _19[1].xyz, _19[2].xyz) * vNormal; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/matrixindex.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/matrixindex.flatten.vert new file mode 100644 index 000000000..f6d0fa486 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/matrixindex.flatten.vert @@ -0,0 +1,19 @@ +#version 310 es + +uniform vec4 UBO[14]; +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; + +void main() +{ + gl_Position = vec4(0.0); + oA = UBO[1]; + oB = vec4(UBO[4].y, UBO[5].y, UBO[6].y, UBO[7].y); + oC = UBO[9]; + oD = vec4(UBO[10].x, UBO[11].x, UBO[12].x, UBO[13].x); + oE = vec4(UBO[1].z, UBO[6].y, UBO[9].z, UBO[12].y); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag b/3rdparty/spirv-cross/reference/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag new file mode 100644 index 000000000..ef6bb526a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag @@ -0,0 +1,24 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uTextures[2 * 3 * 1]; + +layout(location = 1) in vec2 vUV; +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in int vIndex; + +void main() +{ + vec4 values3[2 * 3 * 1]; + for (int z = 0; z < 2; z++) + { + for (int y = 0; y < 3; y++) + { + for (int x = 0; x < 1; x++) + { + values3[z * 3 * 1 + y * 1 + x] = texture(uTextures[z * 3 * 1 + y * 1 + x], vUV); + } + } + } + FragColor = (values3[1 * 3 * 1 + 2 * 1 + 0] + values3[0 * 3 * 1 + 2 * 1 + 0]) + values3[(vIndex + 1) * 3 * 1 + 2 * 1 + vIndex]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/multiindex.flatten.vert 
b/3rdparty/spirv-cross/reference/shaders/flatten/multiindex.flatten.vert new file mode 100644 index 000000000..3850bf6c7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/multiindex.flatten.vert @@ -0,0 +1,10 @@ +#version 310 es + +uniform vec4 UBO[15]; +layout(location = 0) in ivec2 aIndex; + +void main() +{ + gl_Position = UBO[aIndex.x * 5 + aIndex.y * 1 + 0]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/push-constant.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/push-constant.flatten.vert new file mode 100644 index 000000000..216c1f9d1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/push-constant.flatten.vert @@ -0,0 +1,13 @@ +#version 310 es + +uniform vec4 PushMe[6]; +layout(location = 1) in vec4 Pos; +layout(location = 0) out vec2 vRot; +layout(location = 0) in vec2 Rot; + +void main() +{ + gl_Position = mat4(PushMe[0], PushMe[1], PushMe[2], PushMe[3]) * Pos; + vRot = (mat2(PushMe[4].xy, PushMe[4].zw) * Rot) + vec2(PushMe[5].z); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/rowmajor.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/rowmajor.flatten.vert new file mode 100644 index 000000000..721c4905c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/rowmajor.flatten.vert @@ -0,0 +1,11 @@ +#version 310 es + +uniform vec4 UBO[12]; +layout(location = 0) in vec4 aVertex; + +void main() +{ + vec2 v = mat4x2(UBO[8].xy, UBO[9].xy, UBO[10].xy, UBO[11].xy) * aVertex; + gl_Position = (mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex) + (aVertex * mat4(UBO[4], UBO[5], UBO[6], UBO[7])); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/struct.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/struct.flatten.vert new file mode 100644 index 000000000..3468d5292 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/struct.flatten.vert @@ -0,0 +1,22 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + vec4 Color; +}; + +uniform vec4 UBO[6]; +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec4 vColor; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = mat4(UBO[0], UBO[1], UBO[2], UBO[3]) * aVertex; + vColor = vec4(0.0); + vec3 L = aVertex.xyz - UBO[4].xyz; + vColor += ((UBO[5] * clamp(1.0 - (length(L) / UBO[4].w), 0.0, 1.0)) * dot(aNormal, normalize(L))); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/struct.rowmajor.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/struct.rowmajor.flatten.vert new file mode 100644 index 000000000..37438498d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/struct.rowmajor.flatten.vert @@ -0,0 +1,26 @@ +#version 310 es + +struct Foo +{ + mat3x4 MVP0; + mat3x4 MVP1; +}; + +uniform vec4 UBO[8]; +layout(location = 0) in vec4 v0; +layout(location = 1) in vec4 v1; +layout(location = 0) out vec3 V0; +layout(location = 1) out vec3 V1; + +void main() +{ + Foo _20 = Foo(transpose(mat4x3(UBO[0].xyz, UBO[1].xyz, UBO[2].xyz, UBO[3].xyz)), transpose(mat4x3(UBO[4].xyz, UBO[5].xyz, UBO[6].xyz, UBO[7].xyz))); + Foo f; + f.MVP0 = _20.MVP0; + f.MVP1 = _20.MVP1; + vec3 a = v0 * f.MVP0; + vec3 b = v1 * f.MVP1; + V0 = a; + V1 = b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/swizzle.flatten.vert b/3rdparty/spirv-cross/reference/shaders/flatten/swizzle.flatten.vert new file mode 100644 index 000000000..92afb475e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/swizzle.flatten.vert @@ -0,0 
+1,21 @@ +#version 310 es + +uniform vec4 UBO[8]; +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; +layout(location = 5) out vec4 oF; + +void main() +{ + gl_Position = vec4(0.0); + oA = UBO[0]; + oB = vec4(UBO[1].xy, UBO[1].zw); + oC = vec4(UBO[2].x, UBO[3].xyz); + oD = vec4(UBO[4].xyz, UBO[4].w); + oE = vec4(UBO[5].x, UBO[5].y, UBO[5].z, UBO[5].w); + oF = vec4(UBO[6].x, UBO[6].zw, UBO[7].x); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/flatten/types.flatten.frag b/3rdparty/spirv-cross/reference/shaders/flatten/types.flatten.frag new file mode 100644 index 000000000..a74327d97 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/flatten/types.flatten.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump ivec4 UBO1[2]; +uniform mediump uvec4 UBO2[2]; +uniform vec4 UBO0[2]; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = ((((vec4(UBO1[0]) + vec4(UBO1[1])) + vec4(UBO2[0])) + vec4(UBO2[1])) + UBO0[0]) + UBO0[1]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/16bit-constants.frag b/3rdparty/spirv-cross/reference/shaders/frag/16bit-constants.frag new file mode 100644 index 000000000..48fbad97a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/16bit-constants.frag @@ -0,0 +1,29 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. 
+#endif + +layout(location = 0) out float16_t foo; +layout(location = 1) out int16_t bar; +layout(location = 2) out uint16_t baz; + +void main() +{ + foo = 1.0hf; + bar = 2s; + baz = 3us; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/reference/shaders/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..baf230251 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _17[5] = float[](1.0, 2.0, 3.0, 4.0, 5.0); + +layout(location = 0) out vec4 FragColor; + +void main() +{ + for (mediump int i = 0; i < 4; i++, FragColor += vec4(_17[i])) + { + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/basic.frag b/3rdparty/spirv-cross/reference/shaders/frag/basic.frag new file mode 100644 index 000000000..2a4e44042 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/basic.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTex; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; + +void main() +{ + FragColor = vColor * texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/reference/shaders/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..738ee6e44 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0, std430) buffer UBO +{ + vec4 results[1024]; +} _34; + +layout(binding = 1) uniform highp isampler2D Buf; + +layout(location = 0) flat in mediump int vIn; +layout(location = 1) flat in mediump int vIn2; +layout(location = 0) out vec4 FragColor; + +void main() +{ + mediump ivec4 coords = texelFetch(Buf, ivec2(gl_FragCoord.xy), 0); + vec4 foo = _34.results[coords.x % 16]; + mediump int c = vIn * vIn; + mediump int d = vIn2 * vIn2; + FragColor = (foo + foo) + _34.results[c + d]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/composite-extract-forced-temporary.frag b/3rdparty/spirv-cross/reference/shaders/frag/composite-extract-forced-temporary.frag new file mode 100644 index 000000000..e4384f559 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/composite-extract-forced-temporary.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D Texture; + +layout(location = 0) in vec2 vTexCoord; +layout(location = 0) out vec4 FragColor; + +void main() +{ + float f = texture(Texture, vTexCoord).x; + FragColor = vec4(f * f); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/constant-array.frag b/3rdparty/spirv-cross/reference/shaders/frag/constant-array.frag new file mode 100644 index 000000000..be033f387 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/constant-array.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; +precision highp int; + +const vec4 _37[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0)); +const vec4 _55[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0))); + +struct Foobar +{ + float a; + float b; +}; + +layout(location = 0) out vec4 FragColor; +layout(location 
= 0) flat in mediump int index; + +vec4 resolve(Foobar f) +{ + return vec4(f.a + f.b); +} + +void main() +{ + Foobar param = Foobar(10.0, 20.0); + Foobar indexable[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0)); + Foobar param_1 = indexable[index]; + FragColor = ((_37[index] + _55[index][index + 1]) + resolve(param)) + resolve(param_1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/constant-composites.frag b/3rdparty/spirv-cross/reference/shaders/frag/constant-composites.frag new file mode 100644 index 000000000..c65c60613 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/constant-composites.frag @@ -0,0 +1,23 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct Foo +{ + float a; + float b; +}; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int line; +float lut[4]; +Foo foos[2]; + +void main() +{ + lut = float[](1.0, 4.0, 3.0, 2.0); + foos = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0)); + FragColor = vec4(lut[line]); + FragColor += vec4(foos[line].a * foos[1 - line].a); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/false-loop-init.frag b/3rdparty/spirv-cross/reference/shaders/frag/false-loop-init.frag new file mode 100644 index 000000000..b0ed5577d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/false-loop-init.frag @@ -0,0 +1,25 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 result; +layout(location = 0) in vec4 accum; + +void main() +{ + result = vec4(0.0); + mediump uint j; + for (mediump int i = 0; i < 4; i += int(j)) + { + if (accum.y > 10.0) + { + j = 40u; + } + else + { + j = 30u; + } + result += accum; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/flush_params.frag b/3rdparty/spirv-cross/reference/shaders/frag/flush_params.frag new file mode 100644 index 000000000..b4b36ff90 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/flush_params.frag @@ -0,0 +1,30 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct Structy +{ + vec4 c; +}; + +layout(location = 0) out vec4 FragColor; + +void foo2(inout Structy f) +{ + f.c = vec4(10.0); +} + +Structy foo() +{ + Structy param; + foo2(param); + Structy f = param; + return f; +} + +void main() +{ + Structy s = foo(); + FragColor = s.c; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/for-loop-init.frag b/3rdparty/spirv-cross/reference/shaders/frag/for-loop-init.frag new file mode 100644 index 000000000..7c22e5c78 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/for-loop-init.frag @@ -0,0 +1,52 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out mediump int FragColor; + +void main() +{ + FragColor = 16; + for (mediump int i = 0; i < 25; i++) + { + FragColor += 10; + } + for (mediump int i_1 = 1, j = 4; i_1 < 30; i_1++, j += 4) + { + FragColor += 11; + } + mediump int k = 0; + for (; k < 20; k++) + { + FragColor += 12; + } + k += 3; + FragColor += k; + mediump int l; + if (k == 40) + { + l = 0; + for (; l < 40; l++) + { + FragColor += 13; + } + return; + } + else + { + l = k; + FragColor += l; + } + mediump ivec2 i_2 = ivec2(0); + for (; i_2.x < 10; i_2.x += 4) + { + FragColor += i_2.y; + } + mediump int o = k; + for (mediump int m = k; m < 40; m++) + { + FragColor += m; + } + FragColor += o; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/frexp-modf.frag b/3rdparty/spirv-cross/reference/shaders/frag/frexp-modf.frag new file mode 
100644 index 000000000..e495bb316 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/frexp-modf.frag @@ -0,0 +1,43 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct ResType +{ + highp float _m0; + int _m1; +}; + +struct ResType_1 +{ + highp vec2 _m0; + ivec2 _m1; +}; + +layout(location = 0) in float v0; +layout(location = 1) in vec2 v1; +layout(location = 0) out float FragColor; + +void main() +{ + ResType _16; + _16._m0 = frexp(v0, _16._m1); + mediump int e0 = _16._m1; + float f0 = _16._m0; + ResType _22; + _22._m0 = frexp(v0 + 1.0, _22._m1); + e0 = _22._m1; + f0 = _22._m0; + ResType_1 _35; + _35._m0 = frexp(v1, _35._m1); + mediump ivec2 e1 = _35._m1; + vec2 f1 = _35._m0; + float r0; + float _41 = modf(v0, r0); + float m0 = _41; + vec2 r1; + vec2 _45 = modf(v1, r1); + vec2 m1 = _45; + FragColor = ((((f0 + f1.x) + f1.y) + m0) + m1.x) + m1.y; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/front-facing.frag b/3rdparty/spirv-cross/reference/shaders/frag/front-facing.frag new file mode 100644 index 000000000..cc9aecc8b --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/front-facing.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + if (gl_FrontFacing) + { + FragColor = vA; + } + else + { + FragColor = vB; + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/gather-dref.frag b/3rdparty/spirv-cross/reference/shaders/frag/gather-dref.frag new file mode 100644 index 000000000..5416f79cb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/gather-dref.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2DShadow uT; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec3 vUV; + +void main() +{ + FragColor = textureGather(uT, vUV.xy, vUV.z); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/ground.frag b/3rdparty/spirv-cross/reference/shaders/frag/ground.frag new file mode 100644 index 000000000..4d998d568 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/ground.frag @@ -0,0 +1,62 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 4, std140) uniform GlobalPSData +{ + vec4 g_CamPos; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_ResolutionParams; + vec4 g_TimeParams; + vec4 g_FogColor_Distance; +} _101; + +layout(binding = 2) uniform mediump sampler2D TexNormalmap; + +layout(location = 3) out vec4 LightingOut; +layout(location = 2) out vec4 NormalOut; +layout(location = 1) out vec4 SpecularOut; +layout(location = 0) out vec4 AlbedoOut; +layout(location = 0) in vec2 TexCoord; +layout(location = 1) in vec3 EyeVec; + +float saturate(float x) +{ + return clamp(x, 0.0, 1.0); +} + +void Resolve(vec3 Albedo, vec3 Normal, float Roughness, float Metallic) +{ + LightingOut = vec4(0.0); + NormalOut = vec4((Normal * 0.5) + vec3(0.5), 0.0); + SpecularOut = vec4(Roughness, Metallic, 0.0, 0.0); + AlbedoOut = vec4(Albedo, 1.0); +} + +void main() +{ + vec3 Normal = (texture(TexNormalmap, TexCoord).xyz * 2.0) - vec3(1.0); + Normal = normalize(Normal); + highp float param = length(EyeVec) / 1000.0; + vec2 scatter_uv; + scatter_uv.x = saturate(param); + vec3 nEye = normalize(EyeVec); + scatter_uv.y = 0.0; + vec3 Color = vec3(0.100000001490116119384765625, 0.300000011920928955078125, 
0.100000001490116119384765625); + vec3 grass = vec3(0.100000001490116119384765625, 0.300000011920928955078125, 0.100000001490116119384765625); + vec3 dirt = vec3(0.100000001490116119384765625); + vec3 snow = vec3(0.800000011920928955078125); + float grass_snow = smoothstep(0.0, 0.1500000059604644775390625, (_101.g_CamPos.y + EyeVec.y) / 200.0); + vec3 base = mix(grass, snow, vec3(grass_snow)); + float edge = smoothstep(0.699999988079071044921875, 0.75, Normal.y); + Color = mix(dirt, base, vec3(edge)); + Color *= Color; + float Roughness = 1.0 - (edge * grass_snow); + highp vec3 param_1 = Color; + highp vec3 param_2 = Normal; + highp float param_3 = Roughness; + highp float param_4 = 0.0; + Resolve(param_1, param_2, param_3, param_4); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/helper-invocation.frag b/3rdparty/spirv-cross/reference/shaders/frag/helper-invocation.frag new file mode 100644 index 000000000..e888ad84d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/helper-invocation.frag @@ -0,0 +1,28 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uSampler; + +layout(location = 0) in vec2 vUV; +layout(location = 0) out vec4 FragColor; + +vec4 foo() +{ + vec4 color; + if (!gl_HelperInvocation) + { + color = textureLod(uSampler, vUV, 0.0); + } + else + { + color = vec4(1.0); + } + return color; +} + +void main() +{ + FragColor = foo(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag b/3rdparty/spirv-cross/reference/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag new file mode 100644 index 000000000..cd4f7d4d2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag @@ -0,0 +1,31 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int vA; +layout(location = 1) flat in mediump int vB; + +void main() +{ + FragColor = vec4(0.0); + mediump int k = 0; + mediump int j; + for (mediump int i = 0; i < vA; i += j) + { + if ((vA + i) == 20) + { + k = 50; + } + else + { + if ((vB + i) == 40) + { + k = 60; + } + } + j = k + 10; + FragColor += vec4(1.0); + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/image-load-store-uint-coord.asm.frag b/3rdparty/spirv-cross/reference/shaders/frag/image-load-store-uint-coord.asm.frag new file mode 100644 index 000000000..414dd956a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/image-load-store-uint-coord.asm.frag @@ -0,0 +1,26 @@ +#version 450 + +layout(binding = 1, rgba32f) uniform image2D RWIm; +layout(binding = 0, rgba32f) uniform writeonly imageBuffer RWBuf; +layout(binding = 1) uniform sampler2D ROIm; +layout(binding = 0) uniform samplerBuffer ROBuf; + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 _main() +{ + vec4 storeTemp = vec4(10.0, 0.5, 8.0, 2.0); + imageStore(RWIm, ivec2(uvec2(10u)), storeTemp); + vec4 v = imageLoad(RWIm, ivec2(uvec2(30u))); + imageStore(RWBuf, int(80u), v); + v += texelFetch(ROIm, ivec2(uvec2(50u, 60u)), 0); + v += texelFetch(ROBuf, int(80u)); + return v; +} + +void main() +{ + vec4 _45 = _main(); + _entryPointOutput = _45; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/loop-dominator-and-switch-default.frag b/3rdparty/spirv-cross/reference/shaders/frag/loop-dominator-and-switch-default.frag new file mode 100644 index 000000000..df968f1a2 --- 
/dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/loop-dominator-and-switch-default.frag @@ -0,0 +1,50 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 fragColor; + +void main() +{ + vec4 f4; + mediump int c = int(f4.x); + for (mediump int j = 0; j < c; j++) + { + switch (c) + { + case 0: + { + f4.y = 0.0; + break; + } + case 1: + { + f4.y = 1.0; + break; + } + default: + { + mediump int i = 0; + for (;;) + { + mediump int _48 = i; + mediump int _50 = _48 + 1; + i = _50; + if (_48 < c) + { + f4.y += 0.5; + continue; + } + else + { + break; + } + } + continue; + } + } + f4.y += 0.5; + } + fragColor = f4; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/lut-promotion.frag b/3rdparty/spirv-cross/reference/shaders/frag/lut-promotion.frag new file mode 100644 index 000000000..019393f9f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/lut-promotion.frag @@ -0,0 +1,40 @@ +#version 310 es +precision mediump float; +precision highp int; + +const float _16[16] = float[](1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0, 1.0, 2.0, 3.0, 4.0); +const vec4 _60[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + +layout(location = 0) out float FragColor; +layout(location = 0) flat in mediump int index; + +void main() +{ + FragColor = _16[index]; + if (index < 10) + { + FragColor += _16[index ^ 1]; + } + else + { + FragColor += _16[index & 1]; + } + if (index > 30) + { + FragColor += _60[index & 3].y; + } + else + { + FragColor += _60[index & 1].x; + } + vec4 foobar[4] = _60; + if (index > 30) + { + foobar[1].z = 20.0; + } + FragColor += foobar[index & 3].z; + vec4 baz[4] = _60; + baz = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + FragColor += baz[index & 3].z; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/mix.frag b/3rdparty/spirv-cross/reference/shaders/frag/mix.frag new file mode 100644 index 000000000..2b288dff0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/mix.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn0; +layout(location = 1) in vec4 vIn1; +layout(location = 2) in float vIn2; +layout(location = 3) in float vIn3; + +void main() +{ + bvec4 l = bvec4(false, true, false, false); + FragColor = mix(vIn0, vIn1, l); + bool f = true; + FragColor = vec4(f ? vIn3 : vIn2); + FragColor = mix(vIn1, vIn0, bvec4(f)); + FragColor = vec4(f ? 
vIn2 : vIn3); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/partial-write-preserve.frag b/3rdparty/spirv-cross/reference/shaders/frag/partial-write-preserve.frag new file mode 100644 index 000000000..cf8a83cf0 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/partial-write-preserve.frag @@ -0,0 +1,109 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct B +{ + float a; + float b; +}; + +layout(binding = 0, std140) uniform UBO +{ + mediump int some_value; +} _51; + +void partial_inout(inout vec4 x) +{ + x.x = 10.0; +} + +void complete_inout(out vec4 x) +{ + x = vec4(50.0); +} + +void branchy_inout(inout vec4 v) +{ + v.y = 20.0; + if (_51.some_value == 20) + { + v = vec4(50.0); + } +} + +void branchy_inout_2(out vec4 v) +{ + if (_51.some_value == 20) + { + v = vec4(50.0); + } + else + { + v = vec4(70.0); + } + v.y = 20.0; +} + +void partial_inout(inout B b) +{ + b.b = 40.0; +} + +void complete_inout(out B b) +{ + b = B(100.0, 200.0); +} + +void branchy_inout(inout B b) +{ + b.b = 20.0; + if (_51.some_value == 20) + { + b = B(10.0, 40.0); + } +} + +void branchy_inout_2(out B b) +{ + if (_51.some_value == 20) + { + b = B(10.0, 40.0); + } + else + { + b = B(70.0, 70.0); + } + b.b = 20.0; +} + +void main() +{ + vec4 a = vec4(10.0); + highp vec4 param = a; + partial_inout(param); + a = param; + highp vec4 param_1; + complete_inout(param_1); + a = param_1; + highp vec4 param_2 = a; + branchy_inout(param_2); + a = param_2; + highp vec4 param_3; + branchy_inout_2(param_3); + a = param_3; + B b = B(10.0, 20.0); + B param_4 = b; + partial_inout(param_4); + b = param_4; + B param_5; + complete_inout(param_5); + b = param_5; + B param_6 = b; + branchy_inout(param_6); + b = param_6; + B param_7; + branchy_inout_2(param_7); + b = param_7; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/pls.frag b/3rdparty/spirv-cross/reference/shaders/frag/pls.frag new file mode 100644 index 000000000..1cafdbd36 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/pls.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 PLSOut0; +layout(location = 0) in vec4 PLSIn0; +layout(location = 1) out vec4 PLSOut1; +layout(location = 1) in vec4 PLSIn1; +layout(location = 2) out vec4 PLSOut2; +layout(location = 2) in vec4 PLSIn2; +layout(location = 3) out vec4 PLSOut3; +layout(location = 3) in vec4 PLSIn3; + +void main() +{ + PLSOut0 = PLSIn0 * 2.0; + PLSOut1 = PLSIn1 * 6.0; + PLSOut2 = PLSIn2 * 7.0; + PLSOut3 = PLSIn3 * 4.0; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/sample-parameter.frag b/3rdparty/spirv-cross/reference/shaders/frag/sample-parameter.frag new file mode 100644 index 000000000..3c130e68d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/sample-parameter.frag @@ -0,0 +1,13 @@ +#version 310 es +#extension GL_OES_sample_variables : require +precision mediump float; +precision highp int; + +layout(location = 0) out vec2 FragColor; + +void main() +{ + FragColor = (gl_SamplePosition + vec2(float(gl_SampleMaskIn[0]))) + vec2(float(gl_SampleID)); + gl_SampleMask[0] = 1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/sampler-ms.frag b/3rdparty/spirv-cross/reference/shaders/frag/sampler-ms.frag new file mode 100644 index 000000000..dbab3fb81 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/sampler-ms.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform 
mediump sampler2DMS uSampler; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec2 coord = ivec2(gl_FragCoord.xy); + FragColor = ((texelFetch(uSampler, coord, 0) + texelFetch(uSampler, coord, 1)) + texelFetch(uSampler, coord, 2)) + texelFetch(uSampler, coord, 3); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/sampler-proj.frag b/3rdparty/spirv-cross/reference/shaders/frag/sampler-proj.frag new file mode 100644 index 000000000..865dec6c8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/sampler-proj.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTex; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vTex; + +void main() +{ + highp vec4 _19 = vTex; + _19.z = vTex.w; + FragColor = textureProj(uTex, _19.xyz); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/sampler.frag b/3rdparty/spirv-cross/reference/shaders/frag/sampler.frag new file mode 100644 index 000000000..0ec200c71 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/sampler.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTex; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; + +vec4 sample_texture(mediump sampler2D tex, vec2 uv) +{ + return texture(tex, uv); +} + +void main() +{ + highp vec2 param = vTex; + FragColor = vColor * sample_texture(uTex, param); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/reference/shaders/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..4177f9e99 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/switch-unsigned-case.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0, std140) uniform Buff +{ + mediump uint TestVal; +} _15; + +layout(location = 0) out vec4 fsout_Color; + +void main() +{ + fsout_Color = vec4(1.0); + switch (_15.TestVal) + { + case 0u: + { + fsout_Color = vec4(0.100000001490116119384765625); + break; + } + case 1u: + { + fsout_Color = vec4(0.20000000298023223876953125); + break; + } + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/swizzle.frag b/3rdparty/spirv-cross/reference/shaders/frag/swizzle.frag new file mode 100644 index 000000000..a229e5b0d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/swizzle.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D samp; + +layout(location = 0) out vec4 FragColor; +layout(location = 2) in vec2 vUV; +layout(location = 1) in vec3 vNormal; + +void main() +{ + FragColor = vec4(texture(samp, vUV).xyz, 1.0); + FragColor = vec4(texture(samp, vUV).xz, 1.0, 4.0); + FragColor = vec4(texture(samp, vUV).xx, texture(samp, vUV + vec2(0.100000001490116119384765625)).yy); + FragColor = vec4(vNormal, 1.0); + FragColor = vec4(vNormal + vec3(1.7999999523162841796875), 1.0); + FragColor = vec4(vUV, vUV + vec2(1.7999999523162841796875)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/reference/shaders/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..658468aa4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/texel-fetch-offset.frag @@ -0,0 +1,14 @@ +#version 310 es +precision 
mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uTexture; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(1)); + FragColor += texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(-1, 1)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/ubo_layout.frag b/3rdparty/spirv-cross/reference/shaders/frag/ubo_layout.frag new file mode 100644 index 000000000..4b66e1396 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/ubo_layout.frag @@ -0,0 +1,31 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct Str +{ + mat4 foo; +}; + +struct Str_1 +{ + mat4 foo; +}; + +layout(binding = 0, std140) uniform UBO1 +{ + layout(row_major) Str foo; +} ubo1; + +layout(binding = 1, std140) uniform UBO2 +{ + Str_1 foo; +} ubo0; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = ubo1.foo.foo[0] + ubo0.foo.foo[0]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/frag/unary-enclose.frag b/3rdparty/spirv-cross/reference/shaders/frag/unary-enclose.frag new file mode 100644 index 000000000..3006e86cb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/frag/unary-enclose.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn; +layout(location = 1) flat in mediump ivec4 vIn1; + +void main() +{ + FragColor = -(-vIn); + mediump ivec4 a = ~(~vIn1); + bool b = false; + b = !(!b); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/basic.geom b/3rdparty/spirv-cross/reference/shaders/geom/basic.geom new file mode 100644 index 000000000..296ce5792 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/geom/basic.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(invocations = 4, triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal + vec3(float(gl_InvocationID)); + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal + vec3(4.0 * float(gl_InvocationID)); + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal + vec3(2.0 * float(gl_InvocationID)); + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/lines-adjacency.geom b/3rdparty/spirv-cross/reference/shaders/geom/lines-adjacency.geom new file mode 100644 index 000000000..46a21e9fb --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/geom/lines-adjacency.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(lines_adjacency) in; +layout(max_vertices = 3, line_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[4]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/lines.geom b/3rdparty/spirv-cross/reference/shaders/geom/lines.geom new file mode 100644 index 000000000..c5aaa53d3 --- /dev/null +++ 
b/3rdparty/spirv-cross/reference/shaders/geom/lines.geom @@ -0,0 +1,23 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(lines) in; +layout(max_vertices = 2, line_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[2]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/points.geom b/3rdparty/spirv-cross/reference/shaders/geom/points.geom new file mode 100644 index 000000000..4d59137c3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/geom/points.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(points) in; +layout(max_vertices = 3, points) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[1]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/single-invocation.geom b/3rdparty/spirv-cross/reference/shaders/geom/single-invocation.geom new file mode 100644 index 000000000..fdccacc04 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/geom/single-invocation.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/triangles-adjacency.geom b/3rdparty/spirv-cross/reference/shaders/geom/triangles-adjacency.geom new file mode 100644 index 000000000..e9e6857a1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/geom/triangles-adjacency.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(triangles_adjacency) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[6]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/geom/triangles.geom b/3rdparty/spirv-cross/reference/shaders/geom/triangles.geom new file mode 100644 index 000000000..fdccacc04 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/geom/triangles.geom @@ -0,0 +1,26 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require +layout(triangles) in; +layout(max_vertices = 3, triangle_strip) out; + +layout(location = 0) out vec3 vNormal; +layout(location = 0) in VertexData +{ + vec3 normal; +} vin[3]; + + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = 
vin[0].normal; + EmitVertex(); + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + EndPrimitive(); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/fragment/explicit-lod.legacy.frag b/3rdparty/spirv-cross/reference/shaders/legacy/fragment/explicit-lod.legacy.frag new file mode 100644 index 000000000..6e8dbf1a9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/fragment/explicit-lod.legacy.frag @@ -0,0 +1,12 @@ +#version 100 +#extension GL_EXT_shader_texture_lod : require +precision mediump float; +precision highp int; + +uniform mediump sampler2D tex; + +void main() +{ + gl_FragData[0] = texture2DLodEXT(tex, vec2(0.4000000059604644775390625, 0.60000002384185791015625), 0.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/fragment/io-blocks.legacy.frag b/3rdparty/spirv-cross/reference/shaders/legacy/fragment/io-blocks.legacy.frag new file mode 100644 index 000000000..d5a60d53e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/fragment/io-blocks.legacy.frag @@ -0,0 +1,12 @@ +#version 100 +precision mediump float; +precision highp int; + +varying vec4 vin_color; +varying highp vec3 vin_normal; + +void main() +{ + gl_FragData[0] = vin_color + vin_normal.xyzz; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/fragment/struct-varying.legacy.frag b/3rdparty/spirv-cross/reference/shaders/legacy/fragment/struct-varying.legacy.frag new file mode 100644 index 000000000..81b95dd81 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/fragment/struct-varying.legacy.frag @@ -0,0 +1,22 @@ +#version 100 +precision mediump float; +precision highp int; + +struct Inputs +{ + highp vec4 a; + highp vec2 b; +}; + +varying highp vec4 vin_a; +varying highp vec2 vin_b; + +void main() +{ + Inputs v0 = Inputs(vin_a, vin_b); + Inputs v1 = Inputs(vin_a, vin_b); + highp vec4 a = vin_a; + highp vec4 b = vin_b.xxyy; + gl_FragData[0] = ((((v0.a + v0.b.xxyy) + v1.a) + v1.b.yyxx) + a) + b; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/vert/implicit-lod.legacy.vert b/3rdparty/spirv-cross/reference/shaders/legacy/vert/implicit-lod.legacy.vert new file mode 100644 index 000000000..6e4410744 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/vert/implicit-lod.legacy.vert @@ -0,0 +1,9 @@ +#version 100 + +uniform mediump sampler2D tex; + +void main() +{ + gl_Position = texture2D(tex, vec2(0.4000000059604644775390625, 0.60000002384185791015625)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/vert/io-block.legacy.vert b/3rdparty/spirv-cross/reference/shaders/legacy/vert/io-block.legacy.vert new file mode 100644 index 000000000..3c518dc79 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/vert/io-block.legacy.vert @@ -0,0 +1,13 @@ +#version 100 + +attribute vec4 Position; +varying vec4 vout_color; +varying vec3 vout_normal; + +void main() +{ + gl_Position = Position; + vout_color = vec4(1.0); + vout_normal = vec3(0.5); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/vert/struct-varying.legacy.vert b/3rdparty/spirv-cross/reference/shaders/legacy/vert/struct-varying.legacy.vert new file mode 100644 index 000000000..261e98603 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/vert/struct-varying.legacy.vert @@ -0,0 +1,32 @@ +#version 100 + +struct Output +{ + vec4 a; + vec2 b; +}; + +varying vec4 vout_a; +varying vec2 vout_b; + +void main() +{ 
+ Output s = Output(vec4(0.5), vec2(0.25)); + { + Output vout = s; + vout_a = vout.a; + vout_b = vout.b; + } + { + Output vout = s; + vout_a = vout.a; + vout_b = vout.b; + } + Output tmp = Output(vout_a, vout_b); + vout_a = tmp.a; + vout_b = tmp.b; + vout_a.x = 1.0; + vout_b.y = 1.0; + float c = vout_a.x; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/legacy/vert/transpose.legacy.vert b/3rdparty/spirv-cross/reference/shaders/legacy/vert/transpose.legacy.vert new file mode 100644 index 000000000..c73d1a11d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/legacy/vert/transpose.legacy.vert @@ -0,0 +1,22 @@ +#version 100 + +struct Buffer +{ + mat4 MVPRowMajor; + mat4 MVPColMajor; + mat4 M; +}; + +uniform Buffer _13; + +attribute vec4 Position; + +void main() +{ + vec4 c0 = _13.M * (Position * _13.MVPRowMajor); + vec4 c1 = _13.M * (_13.MVPColMajor * Position); + vec4 c2 = _13.M * (_13.MVPRowMajor * Position); + vec4 c3 = _13.M * (Position * _13.MVPColMajor); + gl_Position = ((c0 + c1) + c2) + c3; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tesc/basic.tesc b/3rdparty/spirv-cross/reference/shaders/tesc/basic.tesc new file mode 100644 index 000000000..6019151ad --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tesc/basic.tesc @@ -0,0 +1,17 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(vertices = 1) out; + +layout(location = 0) patch out vec3 vFoo; + +void main() +{ + gl_TessLevelInner[0] = 8.8999996185302734375; + gl_TessLevelInner[1] = 6.900000095367431640625; + gl_TessLevelOuter[0] = 8.8999996185302734375; + gl_TessLevelOuter[1] = 6.900000095367431640625; + gl_TessLevelOuter[2] = 3.900000095367431640625; + gl_TessLevelOuter[3] = 4.900000095367431640625; + vFoo = vec3(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tesc/water_tess.tesc b/3rdparty/spirv-cross/reference/shaders/tesc/water_tess.tesc new file mode 100644 index 000000000..4daaa456e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tesc/water_tess.tesc @@ -0,0 +1,118 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(vertices = 1) out; + +layout(std140) uniform UBO +{ + vec4 uScale; + vec3 uCamPos; + vec2 uPatchSize; + vec2 uMaxTessLevel; + float uDistanceMod; + vec4 uFrustum[6]; +} _41; + +layout(location = 1) patch out vec2 vOutPatchPosBase; +layout(location = 2) patch out vec4 vPatchLods; +layout(location = 0) in vec2 vPatchPosBase[]; + +bool frustum_cull(vec2 p0) +{ + vec2 min_xz = (p0 - vec2(10.0)) * _41.uScale.xy; + vec2 max_xz = ((p0 + _41.uPatchSize) + vec2(10.0)) * _41.uScale.xy; + vec3 bb_min = vec3(min_xz.x, -10.0, min_xz.y); + vec3 bb_max = vec3(max_xz.x, 10.0, max_xz.y); + vec3 center = (bb_min + bb_max) * 0.5; + float radius = 0.5 * length(bb_max - bb_min); + vec3 f0 = vec3(dot(_41.uFrustum[0], vec4(center, 1.0)), dot(_41.uFrustum[1], vec4(center, 1.0)), dot(_41.uFrustum[2], vec4(center, 1.0))); + vec3 f1 = vec3(dot(_41.uFrustum[3], vec4(center, 1.0)), dot(_41.uFrustum[4], vec4(center, 1.0)), dot(_41.uFrustum[5], vec4(center, 1.0))); + vec3 _199 = f0; + float _200 = radius; + bool _205 = any(lessThanEqual(_199, vec3(-_200))); + bool _215; + if (!_205) + { + _215 = any(lessThanEqual(f1, vec3(-radius))); + } + else + { + _215 = _205; + } + return !_215; +} + +float lod_factor(vec2 pos_) +{ + vec2 pos = pos_ * _41.uScale.xy; + vec3 dist_to_cam = _41.uCamPos - vec3(pos.x, 0.0, pos.y); + float level = log2((length(dist_to_cam) + 9.9999997473787516355514526367188e-05) * _41.uDistanceMod); + return 
clamp(level, 0.0, _41.uMaxTessLevel.x); +} + +vec4 tess_level(vec4 lod) +{ + return exp2(-lod) * _41.uMaxTessLevel.y; +} + +float tess_level(float lod) +{ + return _41.uMaxTessLevel.y * exp2(-lod); +} + +void compute_tess_levels(vec2 p0) +{ + vOutPatchPosBase = p0; + vec2 param = p0 + (vec2(-0.5) * _41.uPatchSize); + float l00 = lod_factor(param); + vec2 param_1 = p0 + (vec2(0.5, -0.5) * _41.uPatchSize); + float l10 = lod_factor(param_1); + vec2 param_2 = p0 + (vec2(1.5, -0.5) * _41.uPatchSize); + float l20 = lod_factor(param_2); + vec2 param_3 = p0 + (vec2(-0.5, 0.5) * _41.uPatchSize); + float l01 = lod_factor(param_3); + vec2 param_4 = p0 + (vec2(0.5) * _41.uPatchSize); + float l11 = lod_factor(param_4); + vec2 param_5 = p0 + (vec2(1.5, 0.5) * _41.uPatchSize); + float l21 = lod_factor(param_5); + vec2 param_6 = p0 + (vec2(-0.5, 1.5) * _41.uPatchSize); + float l02 = lod_factor(param_6); + vec2 param_7 = p0 + (vec2(0.5, 1.5) * _41.uPatchSize); + float l12 = lod_factor(param_7); + vec2 param_8 = p0 + (vec2(1.5) * _41.uPatchSize); + float l22 = lod_factor(param_8); + vec4 lods = vec4(dot(vec4(l01, l11, l02, l12), vec4(0.25)), dot(vec4(l00, l10, l01, l11), vec4(0.25)), dot(vec4(l10, l20, l11, l21), vec4(0.25)), dot(vec4(l11, l21, l12, l22), vec4(0.25))); + vPatchLods = lods; + vec4 outer_lods = min(lods, lods.yzwx); + vec4 param_9 = outer_lods; + vec4 levels = tess_level(param_9); + gl_TessLevelOuter[0] = levels.x; + gl_TessLevelOuter[1] = levels.y; + gl_TessLevelOuter[2] = levels.z; + gl_TessLevelOuter[3] = levels.w; + float min_lod = min(min(lods.x, lods.y), min(lods.z, lods.w)); + float param_10 = min(min_lod, l11); + float inner = tess_level(param_10); + gl_TessLevelInner[0] = inner; + gl_TessLevelInner[1] = inner; +} + +void main() +{ + vec2 p0 = vPatchPosBase[0]; + vec2 param = p0; + if (!frustum_cull(param)) + { + gl_TessLevelOuter[0] = -1.0; + gl_TessLevelOuter[1] = -1.0; + gl_TessLevelOuter[2] = -1.0; + gl_TessLevelOuter[3] = -1.0; + gl_TessLevelInner[0] = -1.0; + gl_TessLevelInner[1] = -1.0; + } + else + { + vec2 param_1 = p0; + compute_tess_levels(param_1); + } +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/ccw.tese b/3rdparty/spirv-cross/reference/shaders/tese/ccw.tese new file mode 100644 index 000000000..a2a4508ac --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/ccw.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, ccw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/cw.tese b/3rdparty/spirv-cross/reference/shaders/tese/cw.tese new file mode 100644 index 000000000..95781493d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/cw.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/equal.tese b/3rdparty/spirv-cross/reference/shaders/tese/equal.tese new file mode 100644 index 000000000..6d30518a3 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/equal.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, equal_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/fractional_even.tese b/3rdparty/spirv-cross/reference/shaders/tese/fractional_even.tese new file mode 100644 index 
000000000..95781493d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/fractional_even.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/fractional_odd.tese b/3rdparty/spirv-cross/reference/shaders/tese/fractional_odd.tese new file mode 100644 index 000000000..608c19aba --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/fractional_odd.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_odd_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/input-array.tese b/3rdparty/spirv-cross/reference/shaders/tese/input-array.tese new file mode 100644 index 000000000..8a1aaf9fd --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/input-array.tese @@ -0,0 +1,11 @@ +#version 450 +layout(quads, ccw, fractional_odd_spacing) in; + +layout(location = 0) in vec4 Floats[]; +layout(location = 2) in vec4 Floats2[]; + +void main() +{ + gl_Position = (Floats[0] * gl_TessCoord.x) + (Floats2[1] * gl_TessCoord.y); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/line.tese b/3rdparty/spirv-cross/reference/shaders/tese/line.tese new file mode 100644 index 000000000..8b6ad8da2 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/line.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(isolines, point_mode, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/triangle.tese b/3rdparty/spirv-cross/reference/shaders/tese/triangle.tese new file mode 100644 index 000000000..95781493d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/triangle.tese @@ -0,0 +1,9 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(triangles, cw, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/tese/water_tess.tese b/3rdparty/spirv-cross/reference/shaders/tese/water_tess.tese new file mode 100644 index 000000000..e743ed3e9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/tese/water_tess.tese @@ -0,0 +1,61 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +layout(quads, cw, fractional_even_spacing) in; + +layout(binding = 1, std140) uniform UBO +{ + mat4 uMVP; + vec4 uScale; + vec2 uInvScale; + vec3 uCamPos; + vec2 uPatchSize; + vec2 uInvHeightmapSize; +} _31; + +layout(binding = 0) uniform mediump sampler2D uHeightmapDisplacement; + +layout(location = 0) patch in vec2 vOutPatchPosBase; +layout(location = 1) patch in vec4 vPatchLods; +layout(location = 1) out vec4 vGradNormalTex; +layout(location = 0) out vec3 vWorld; + +vec2 lerp_vertex(vec2 tess_coord) +{ + return vOutPatchPosBase + (tess_coord * _31.uPatchSize); +} + +mediump vec2 lod_factor(vec2 tess_coord) +{ + mediump vec2 x = mix(vPatchLods.yx, vPatchLods.zw, vec2(tess_coord.x)); + mediump float level = mix(x.x, x.y, tess_coord.y); + mediump float floor_level = floor(level); + mediump float fract_level = level - floor_level; + return vec2(floor_level, fract_level); +} + +mediump vec3 sample_height_displacement(vec2 uv, vec2 off, mediump vec2 lod) +{ + return mix(textureLod(uHeightmapDisplacement, uv + (off * 0.5), lod.x).xyz, 
textureLod(uHeightmapDisplacement, uv + (off * 1.0), lod.x + 1.0).xyz, vec3(lod.y)); +} + +void main() +{ + vec2 tess_coord = gl_TessCoord.xy; + vec2 param = tess_coord; + vec2 pos = lerp_vertex(param); + vec2 param_1 = tess_coord; + mediump vec2 lod = lod_factor(param_1); + vec2 tex = pos * _31.uInvHeightmapSize; + pos *= _31.uScale.xy; + mediump float delta_mod = exp2(lod.x); + vec2 off = _31.uInvHeightmapSize * delta_mod; + vGradNormalTex = vec4(tex + (_31.uInvHeightmapSize * 0.5), tex * _31.uScale.zw); + vec2 param_2 = tex; + vec2 param_3 = off; + vec2 param_4 = lod; + vec3 height_displacement = sample_height_displacement(param_2, param_3, param_4); + pos += height_displacement.yz; + vWorld = vec3(pos.x, height_displacement.x, pos.y); + gl_Position = _31.uMVP * vec4(vWorld, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/basic.vert b/3rdparty/spirv-cross/reference/shaders/vert/basic.vert new file mode 100644 index 000000000..05504eb2f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/basic.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(std140) uniform UBO +{ + mat4 uMVP; +} _16; + +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = _16.uMVP * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/ground.vert b/3rdparty/spirv-cross/reference/shaders/vert/ground.vert new file mode 100644 index 000000000..69f92534c --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/ground.vert @@ -0,0 +1,110 @@ +#version 310 es + +struct PatchData +{ + vec4 Position; + vec4 LODs; +}; + +layout(binding = 0, std140) uniform PerPatch +{ + PatchData Patches[256]; +} _53; + +layout(binding = 2, std140) uniform GlobalGround +{ + vec4 GroundScale; + vec4 GroundPosition; + vec4 InvGroundSize_PatchScale; +} _156; + +layout(binding = 0, std140) uniform GlobalVSData +{ + vec4 g_ViewProj_Row0; + vec4 g_ViewProj_Row1; + vec4 g_ViewProj_Row2; + vec4 g_ViewProj_Row3; + vec4 g_CamPos; + vec4 g_CamRight; + vec4 g_CamUp; + vec4 g_CamFront; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_TimeParams; + vec4 g_ResolutionParams; + vec4 g_CamAxisRight; + vec4 g_FogColor_Distance; + vec4 g_ShadowVP_Row0; + vec4 g_ShadowVP_Row1; + vec4 g_ShadowVP_Row2; + vec4 g_ShadowVP_Row3; +} _236; + +layout(binding = 1) uniform mediump sampler2D TexLOD; +layout(binding = 0) uniform mediump sampler2D TexHeightmap; + +layout(location = 1) in vec4 LODWeights; +uniform int SPIRV_Cross_BaseInstance; +layout(location = 0) in vec2 Position; +layout(location = 1) out vec3 EyeVec; +layout(location = 0) out vec2 TexCoord; + +vec2 warp_position() +{ + float vlod = dot(LODWeights, _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].LODs); + vlod = all(equal(LODWeights, vec4(0.0))) ? 
_53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.w : vlod; + float floor_lod = floor(vlod); + float fract_lod = vlod - floor_lod; + uint ufloor_lod = uint(floor_lod); + uvec2 uPosition = uvec2(Position); + uvec2 mask = (uvec2(1u) << uvec2(ufloor_lod, ufloor_lod + 1u)) - uvec2(1u); + uint _110; + if (uPosition.x < 32u) + { + _110 = mask.x; + } + else + { + _110 = 0u; + } + uint _116 = _110; + uint _120; + if (uPosition.y < 32u) + { + _120 = mask.y; + } + else + { + _120 = 0u; + } + uvec2 rounding = uvec2(_116, _120); + vec4 lower_upper_snapped = vec4((uPosition + rounding).xyxy & (~mask).xxyy); + return mix(lower_upper_snapped.xy, lower_upper_snapped.zw, vec2(fract_lod)); +} + +vec2 lod_factor(vec2 uv) +{ + float level = textureLod(TexLOD, uv, 0.0).x * 7.96875; + float floor_level = floor(level); + float fract_level = level - floor_level; + return vec2(floor_level, fract_level); +} + +void main() +{ + vec2 PatchPos = _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.xz * _156.InvGroundSize_PatchScale.zw; + vec2 WarpedPos = warp_position(); + vec2 VertexPos = PatchPos + WarpedPos; + vec2 NormalizedPos = VertexPos * _156.InvGroundSize_PatchScale.xy; + vec2 param = NormalizedPos; + vec2 lod = lod_factor(param); + vec2 Offset = _156.InvGroundSize_PatchScale.xy * exp2(lod.x); + float Elevation = mix(textureLod(TexHeightmap, NormalizedPos + (Offset * 0.5), lod.x).x, textureLod(TexHeightmap, NormalizedPos + (Offset * 1.0), lod.x + 1.0).x, lod.y); + vec3 WorldPos = vec3(NormalizedPos.x, Elevation, NormalizedPos.y); + WorldPos *= _156.GroundScale.xyz; + WorldPos += _156.GroundPosition.xyz; + EyeVec = WorldPos - _236.g_CamPos.xyz; + TexCoord = NormalizedPos + (_156.InvGroundSize_PatchScale.xy * 0.5); + gl_Position = (((_236.g_ViewProj_Row0 * WorldPos.x) + (_236.g_ViewProj_Row1 * WorldPos.y)) + (_236.g_ViewProj_Row2 * WorldPos.z)) + _236.g_ViewProj_Row3; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/invariant.vert b/3rdparty/spirv-cross/reference/shaders/vert/invariant.vert new file mode 100644 index 000000000..648ea2947 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/invariant.vert @@ -0,0 +1,19 @@ +#version 310 es + +invariant gl_Position; + +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; +layout(location = 2) in vec4 vInput2; +layout(location = 0) invariant out vec4 vColor; + +void main() +{ + vec4 _20 = vInput1 * vInput2; + vec4 _21 = vInput0 + _20; + gl_Position = _21; + vec4 _27 = vInput0 - vInput1; + vec4 _29 = _27 * vInput2; + vColor = _29; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/ocean.vert b/3rdparty/spirv-cross/reference/shaders/vert/ocean.vert new file mode 100644 index 000000000..720bd7d0d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/ocean.vert @@ -0,0 +1,133 @@ +#version 310 es + +struct PatchData +{ + vec4 Position; + vec4 LODs; +}; + +layout(binding = 0, std140) uniform Offsets +{ + PatchData Patches[256]; +} _53; + +layout(binding = 4, std140) uniform GlobalOcean +{ + vec4 OceanScale; + vec4 OceanPosition; + vec4 InvOceanSize_PatchScale; + vec4 NormalTexCoordScale; +} _180; + +layout(binding = 0, std140) uniform GlobalVSData +{ + vec4 g_ViewProj_Row0; + vec4 g_ViewProj_Row1; + vec4 g_ViewProj_Row2; + vec4 g_ViewProj_Row3; + vec4 g_CamPos; + vec4 g_CamRight; + vec4 g_CamUp; + vec4 g_CamFront; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_TimeParams; + vec4 g_ResolutionParams; + vec4 g_CamAxisRight; + vec4 g_FogColor_Distance; + vec4 g_ShadowVP_Row0; + vec4 
g_ShadowVP_Row1; + vec4 g_ShadowVP_Row2; + vec4 g_ShadowVP_Row3; +} _273; + +layout(binding = 1) uniform mediump sampler2D TexLOD; +layout(binding = 0) uniform mediump sampler2D TexDisplacement; + +layout(location = 1) in vec4 LODWeights; +uniform int SPIRV_Cross_BaseInstance; +layout(location = 0) in vec4 Position; +layout(location = 0) out vec3 EyeVec; +layout(location = 1) out vec4 TexCoord; + +vec2 warp_position() +{ + float vlod = dot(LODWeights, _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].LODs); + vlod = all(equal(LODWeights, vec4(0.0))) ? _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.w : vlod; + float floor_lod = floor(vlod); + float fract_lod = vlod - floor_lod; + uint ufloor_lod = uint(floor_lod); + uvec4 uPosition = uvec4(Position); + uvec2 mask = (uvec2(1u) << uvec2(ufloor_lod, ufloor_lod + 1u)) - uvec2(1u); + uint _111; + if (uPosition.x < 32u) + { + _111 = mask.x; + } + else + { + _111 = 0u; + } + uvec4 rounding; + rounding.x = _111; + uint _122; + if (uPosition.y < 32u) + { + _122 = mask.x; + } + else + { + _122 = 0u; + } + rounding.y = _122; + uint _133; + if (uPosition.x < 32u) + { + _133 = mask.y; + } + else + { + _133 = 0u; + } + rounding.z = _133; + uint _145; + if (uPosition.y < 32u) + { + _145 = mask.y; + } + else + { + _145 = 0u; + } + rounding.w = _145; + vec4 lower_upper_snapped = vec4((uPosition.xyxy + rounding) & (~mask).xxyy); + return mix(lower_upper_snapped.xy, lower_upper_snapped.zw, vec2(fract_lod)); +} + +vec2 lod_factor(vec2 uv) +{ + float level = textureLod(TexLOD, uv, 0.0).x * 7.96875; + float floor_level = floor(level); + float fract_level = level - floor_level; + return vec2(floor_level, fract_level); +} + +void main() +{ + vec2 PatchPos = _53.Patches[(gl_InstanceID + SPIRV_Cross_BaseInstance)].Position.xz * _180.InvOceanSize_PatchScale.zw; + vec2 WarpedPos = warp_position(); + vec2 VertexPos = PatchPos + WarpedPos; + vec2 NormalizedPos = VertexPos * _180.InvOceanSize_PatchScale.xy; + vec2 NormalizedTex = NormalizedPos * _180.NormalTexCoordScale.zw; + vec2 param = NormalizedPos; + vec2 lod = lod_factor(param); + vec2 Offset = (_180.InvOceanSize_PatchScale.xy * exp2(lod.x)) * _180.NormalTexCoordScale.zw; + vec3 Displacement = mix(textureLod(TexDisplacement, NormalizedTex + (Offset * 0.5), lod.x).yxz, textureLod(TexDisplacement, NormalizedTex + (Offset * 1.0), lod.x + 1.0).yxz, vec3(lod.y)); + vec3 WorldPos = vec3(NormalizedPos.x, 0.0, NormalizedPos.y) + Displacement; + WorldPos *= _180.OceanScale.xyz; + WorldPos += _180.OceanPosition.xyz; + EyeVec = WorldPos - _273.g_CamPos.xyz; + TexCoord = vec4(NormalizedTex, NormalizedTex * _180.NormalTexCoordScale.xy) + ((_180.InvOceanSize_PatchScale.xyxy * 0.5) * _180.NormalTexCoordScale.zwzw); + gl_Position = (((_273.g_ViewProj_Row0 * WorldPos.x) + (_273.g_ViewProj_Row1 * WorldPos.y)) + (_273.g_ViewProj_Row2 * WorldPos.z)) + _273.g_ViewProj_Row3; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/reference/shaders/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..1c950f3fa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/read-from-row-major-array.vert @@ -0,0 +1,45 @@ +#version 310 es + +layout(binding = 0, std140) uniform Block +{ + layout(row_major) mat2x3 var[3][4]; +} _104; + +layout(location = 0) in vec4 a_position; +layout(location = 0) out mediump float v_vtxResult; + +mediump float compare_float(float a, float b) +{ + return float(abs(a - b) < 
0.0500000007450580596923828125); +} + +mediump float compare_vec3(vec3 a, vec3 b) +{ + float param = a.x; + float param_1 = b.x; + float param_2 = a.y; + float param_3 = b.y; + float param_4 = a.z; + float param_5 = b.z; + return (compare_float(param, param_1) * compare_float(param_2, param_3)) * compare_float(param_4, param_5); +} + +mediump float compare_mat2x3(mat2x3 a, mat2x3 b) +{ + vec3 param = a[0]; + vec3 param_1 = b[0]; + vec3 param_2 = a[1]; + vec3 param_3 = b[1]; + return compare_vec3(param, param_1) * compare_vec3(param_2, param_3); +} + +void main() +{ + gl_Position = a_position; + mediump float result = 1.0; + mat2x3 param = _104.var[0][0]; + mat2x3 param_1 = mat2x3(vec3(2.0, 6.0, -6.0), vec3(0.0, 5.0, 5.0)); + result *= compare_mat2x3(param, param_1); + v_vtxResult = result; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/return-array.vert b/3rdparty/spirv-cross/reference/shaders/vert/return-array.vert new file mode 100644 index 000000000..20bb440ec --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/return-array.vert @@ -0,0 +1,23 @@ +#version 310 es + +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; + +vec4[2] test() +{ + return vec4[](vec4(10.0), vec4(20.0)); +} + +vec4[2] test2() +{ + vec4 foobar[2]; + foobar[0] = vInput0; + foobar[1] = vInput1; + return foobar; +} + +void main() +{ + gl_Position = test()[0] + test2()[1]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/texture_buffer.vert b/3rdparty/spirv-cross/reference/shaders/vert/texture_buffer.vert new file mode 100644 index 000000000..e9442ce11 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/texture_buffer.vert @@ -0,0 +1,11 @@ +#version 310 es +#extension GL_OES_texture_buffer : require + +layout(binding = 4) uniform highp samplerBuffer uSamp; +layout(binding = 5, rgba32f) uniform readonly highp imageBuffer uSampo; + +void main() +{ + gl_Position = texelFetch(uSamp, 10) + imageLoad(uSampo, 100); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vert/ubo.vert b/3rdparty/spirv-cross/reference/shaders/vert/ubo.vert new file mode 100644 index 000000000..4e7236b29 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vert/ubo.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(binding = 0, std140) uniform UBO +{ + mat4 mvp; +} _16; + +layout(location = 0) in vec4 aVertex; +layout(location = 0) out vec3 vNormal; +layout(location = 1) in vec3 aNormal; + +void main() +{ + gl_Position = _16.mvp * aVertex; + vNormal = aNormal; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp new file mode 100644 index 000000000..9344a7a24 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp @@ -0,0 +1,46 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 100 +#endif +const int a = SPIRV_CROSS_CONSTANT_ID_0; +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 200 +#endif +const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_2 +#define SPIRV_CROSS_CONSTANT_ID_2 300 +#endif +const int c = SPIRV_CROSS_CONSTANT_ID_2; +const int _18 = (c + 50); +#ifndef SPIRV_CROSS_CONSTANT_ID_3 +#define SPIRV_CROSS_CONSTANT_ID_3 400 +#endif +const int e = SPIRV_CROSS_CONSTANT_ID_3; + +struct A +{ + int member0[a]; + int 
member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(binding = 0, std430) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[_18]; +} _22; + +void main() +{ + _22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp.vk new file mode 100644 index 000000000..d9a2822f7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp.vk @@ -0,0 +1,34 @@ +#version 450 +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(constant_id = 0) const int a = 100; +layout(constant_id = 1) const int b = 200; +layout(constant_id = 2) const int c = 300; +const int _18 = (c + 50); +layout(constant_id = 3) const int e = 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(set = 1, binding = 0, std430) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[_18]; +} _22; + +void main() +{ + _22.w[gl_GlobalInvocationID.x] += (_22.v[gl_GlobalInvocationID.x] + e); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp new file mode 100644 index 000000000..888f4b164 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp @@ -0,0 +1,34 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_1 +#define SPIRV_CROSS_CONSTANT_ID_1 2 +#endif +const int b = SPIRV_CROSS_CONSTANT_ID_1; +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 1 +#endif +const int a = SPIRV_CROSS_CONSTANT_ID_0; +const uint _21 = (uint(a) + 0u); +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 1u +#endif +const uint _27 = gl_WorkGroupSize.x; +const uint _28 = (_21 + _27); +const uint _29 = gl_WorkGroupSize.y; +const uint _30 = (_28 + _29); +const int _32 = (1 - a); + +layout(local_size_x = SPIRV_CROSS_CONSTANT_ID_10, local_size_y = 20, local_size_z = 1) in; + +layout(binding = 0, std430) writeonly buffer SSBO +{ + int v[]; +} _17; + +void main() +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + _17.v[_30] = b + spec_const_array_size[_32]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp.vk new file mode 100644 index 000000000..bdf72dff4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp.vk @@ -0,0 +1,24 @@ +#version 450 +layout(local_size_x_id = 10, local_size_y = 20, local_size_z = 1) in; + +layout(constant_id = 1) const int b = 2; +layout(constant_id = 0) const int a = 1; +const uint _21 = (uint(a) + 0u); +const uint _27 = gl_WorkGroupSize.x; +const uint _28 = (_21 + _27); +const uint _29 = gl_WorkGroupSize.y; +const uint _30 = (_28 + _29); +const int _32 = (1 - a); + +layout(set = 1, binding = 0, std430) writeonly buffer SSBO +{ + int v[]; +} _17; + +void main() +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + _17.v[_30] = b + spec_const_array_size[_32]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp.vk 
b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp.vk new file mode 100644 index 000000000..6d288574f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp.vk @@ -0,0 +1,110 @@ +#version 450 +#extension GL_KHR_shader_subgroup_basic : require +#extension GL_KHR_shader_subgroup_ballot : require +#extension GL_KHR_shader_subgroup_shuffle : require +#extension GL_KHR_shader_subgroup_shuffle_relative : require +#extension GL_KHR_shader_subgroup_vote : require +#extension GL_KHR_shader_subgroup_arithmetic : require +#extension GL_KHR_shader_subgroup_clustered : require +#extension GL_KHR_shader_subgroup_quad : require +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; + +layout(set = 0, binding = 0, std430) buffer SSBO +{ + float FragColor; +} _9; + +void main() +{ + _9.FragColor = float(gl_NumSubgroups); + _9.FragColor = float(gl_SubgroupID); + _9.FragColor = float(gl_SubgroupSize); + _9.FragColor = float(gl_SubgroupInvocationID); + subgroupMemoryBarrier(); + subgroupBarrier(); + subgroupMemoryBarrier(); + subgroupMemoryBarrierBuffer(); + subgroupMemoryBarrierShared(); + subgroupMemoryBarrierImage(); + bool elected = subgroupElect(); + _9.FragColor = vec4(gl_SubgroupEqMask).x; + _9.FragColor = vec4(gl_SubgroupGeMask).x; + _9.FragColor = vec4(gl_SubgroupGtMask).x; + _9.FragColor = vec4(gl_SubgroupLeMask).x; + _9.FragColor = vec4(gl_SubgroupLtMask).x; + vec4 broadcasted = subgroupBroadcast(vec4(10.0), 8u); + vec3 first = subgroupBroadcastFirst(vec3(20.0)); + uvec4 ballot_value = subgroupBallot(true); + bool inverse_ballot_value = subgroupInverseBallot(ballot_value); + bool bit_extracted = subgroupBallotBitExtract(uvec4(10u), 8u); + uint bit_count = subgroupBallotBitCount(ballot_value); + uint inclusive_bit_count = subgroupBallotInclusiveBitCount(ballot_value); + uint exclusive_bit_count = subgroupBallotExclusiveBitCount(ballot_value); + uint lsb = subgroupBallotFindLSB(ballot_value); + uint msb = subgroupBallotFindMSB(ballot_value); + uint shuffled = subgroupShuffle(10u, 8u); + uint shuffled_xor = subgroupShuffleXor(30u, 8u); + uint shuffled_up = subgroupShuffleUp(20u, 4u); + uint shuffled_down = subgroupShuffleDown(20u, 4u); + bool has_all = subgroupAll(true); + bool has_any = subgroupAny(true); + bool has_equal = subgroupAllEqual(true); + vec4 added = subgroupAdd(vec4(20.0)); + ivec4 iadded = subgroupAdd(ivec4(20)); + vec4 multiplied = subgroupMul(vec4(20.0)); + ivec4 imultiplied = subgroupMul(ivec4(20)); + vec4 lo = subgroupMin(vec4(20.0)); + vec4 hi = subgroupMax(vec4(20.0)); + ivec4 slo = subgroupMin(ivec4(20)); + ivec4 shi = subgroupMax(ivec4(20)); + uvec4 ulo = subgroupMin(uvec4(20u)); + uvec4 uhi = subgroupMax(uvec4(20u)); + uvec4 anded = subgroupAnd(ballot_value); + uvec4 ored = subgroupOr(ballot_value); + uvec4 xored = subgroupXor(ballot_value); + added = subgroupInclusiveAdd(added); + iadded = subgroupInclusiveAdd(iadded); + multiplied = subgroupInclusiveMul(multiplied); + imultiplied = subgroupInclusiveMul(imultiplied); + lo = subgroupInclusiveMin(lo); + hi = subgroupInclusiveMax(hi); + slo = subgroupInclusiveMin(slo); + shi = subgroupInclusiveMax(shi); + ulo = subgroupInclusiveMin(ulo); + uhi = subgroupInclusiveMax(uhi); + anded = subgroupInclusiveAnd(anded); + ored = subgroupInclusiveOr(ored); + xored = subgroupInclusiveXor(ored); + added = subgroupExclusiveAdd(lo); + added = subgroupExclusiveAdd(multiplied); + multiplied = subgroupExclusiveMul(multiplied); + iadded = 
subgroupExclusiveAdd(imultiplied); + imultiplied = subgroupExclusiveMul(imultiplied); + lo = subgroupExclusiveMin(lo); + hi = subgroupExclusiveMax(hi); + ulo = subgroupExclusiveMin(ulo); + uhi = subgroupExclusiveMax(uhi); + slo = subgroupExclusiveMin(slo); + shi = subgroupExclusiveMax(shi); + anded = subgroupExclusiveAnd(anded); + ored = subgroupExclusiveOr(ored); + xored = subgroupExclusiveXor(ored); + added = subgroupClusteredAdd(added, 4u); + multiplied = subgroupClusteredMul(multiplied, 4u); + iadded = subgroupClusteredAdd(iadded, 4u); + imultiplied = subgroupClusteredMul(imultiplied, 4u); + lo = subgroupClusteredMin(lo, 4u); + hi = subgroupClusteredMax(hi, 4u); + ulo = subgroupClusteredMin(ulo, 4u); + uhi = subgroupClusteredMax(uhi, 4u); + slo = subgroupClusteredMin(slo, 4u); + shi = subgroupClusteredMax(shi, 4u); + anded = subgroupClusteredAnd(anded, 4u); + ored = subgroupClusteredOr(ored, 4u); + xored = subgroupClusteredXor(xored, 4u); + vec4 swap_horiz = subgroupQuadSwapHorizontal(vec4(20.0)); + vec4 swap_vertical = subgroupQuadSwapVertical(vec4(20.0)); + vec4 swap_diagonal = subgroupQuadSwapDiagonal(vec4(20.0)); + vec4 quad_broadcast = subgroupQuadBroadcast(vec4(20.0), 3u); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag new file mode 100644 index 000000000..af64fb87a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag @@ -0,0 +1,31 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2DShadow SPIRV_Cross_CombineduDepthuSampler; +uniform mediump sampler2D SPIRV_Cross_CombineduDepthuSampler1; + +layout(location = 0) out float FragColor; + +float samp2(mediump sampler2DShadow SPIRV_Cross_Combinedts) +{ + return texture(SPIRV_Cross_Combinedts, vec3(vec3(1.0).xy, vec3(1.0).z)); +} + +float samp3(mediump sampler2D SPIRV_Cross_Combinedts) +{ + return texture(SPIRV_Cross_Combinedts, vec2(1.0)).x; +} + +float samp(mediump sampler2DShadow SPIRV_Cross_Combinedts, mediump sampler2D SPIRV_Cross_Combinedts1) +{ + float r0 = samp2(SPIRV_Cross_Combinedts); + float r1 = samp3(SPIRV_Cross_Combinedts1); + return r0 + r1; +} + +void main() +{ + FragColor = samp(SPIRV_Cross_CombineduDepthuSampler, SPIRV_Cross_CombineduDepthuSampler1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag.vk new file mode 100644 index 000000000..f475ae53a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag.vk @@ -0,0 +1,32 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 2) uniform mediump texture2D uDepth; +layout(set = 0, binding = 0) uniform mediump samplerShadow uSampler; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; + +layout(location = 0) out float FragColor; + +float samp2(mediump texture2D t, mediump samplerShadow s) +{ + return texture(sampler2DShadow(t, s), vec3(vec3(1.0).xy, vec3(1.0).z)); +} + +float samp3(mediump texture2D t, mediump sampler s) +{ + return texture(sampler2D(t, s), vec2(1.0)).x; +} + +float samp(mediump texture2D t, mediump samplerShadow s, mediump sampler s1) +{ + float r0 = samp2(t, s); + float r1 = samp3(t, s1); + return r0 + r1; +} + +void main() +{ + FragColor = 
samp(uDepth, uSampler, uSampler1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler.vk.frag new file mode 100644 index 000000000..5b9c0ddad --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler.vk.frag @@ -0,0 +1,48 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2D SPIRV_Cross_CombineduTexture0uSampler0; +uniform mediump sampler2D SPIRV_Cross_CombineduTexture1uSampler1; +uniform mediump sampler2D SPIRV_Cross_CombineduTexture1uSampler0; +uniform mediump sampler2D SPIRV_Cross_CombineduTexture0uSampler1; + +layout(location = 0) in vec2 vTex; +layout(location = 0) out vec4 FragColor; + +vec4 sample_dual(mediump sampler2D SPIRV_Cross_Combinedtexsamp) +{ + return texture(SPIRV_Cross_Combinedtexsamp, vTex); +} + +vec4 sample_duals() +{ + vec4 a = sample_dual(SPIRV_Cross_CombineduTexture0uSampler0); + vec4 b = sample_dual(SPIRV_Cross_CombineduTexture1uSampler1); + return a + b; +} + +vec4 sample_global_tex(mediump sampler2D SPIRV_Cross_CombineduTexture0samp, mediump sampler2D SPIRV_Cross_CombineduTexture1samp) +{ + vec4 a = texture(SPIRV_Cross_CombineduTexture0samp, vTex); + vec4 b = sample_dual(SPIRV_Cross_CombineduTexture1samp); + return a + b; +} + +vec4 sample_global_sampler(mediump sampler2D SPIRV_Cross_CombinedtexuSampler0, mediump sampler2D SPIRV_Cross_CombinedtexuSampler1) +{ + vec4 a = texture(SPIRV_Cross_CombinedtexuSampler0, vTex); + vec4 b = sample_dual(SPIRV_Cross_CombinedtexuSampler1); + return a + b; +} + +void main() +{ + vec4 c0 = sample_duals(); + vec4 c1 = sample_global_tex(SPIRV_Cross_CombineduTexture0uSampler0, SPIRV_Cross_CombineduTexture1uSampler0); + vec4 c2 = sample_global_tex(SPIRV_Cross_CombineduTexture0uSampler1, SPIRV_Cross_CombineduTexture1uSampler1); + vec4 c3 = sample_global_sampler(SPIRV_Cross_CombineduTexture0uSampler0, SPIRV_Cross_CombineduTexture0uSampler1); + vec4 c4 = sample_global_sampler(SPIRV_Cross_CombineduTexture1uSampler0, SPIRV_Cross_CombineduTexture1uSampler1); + FragColor = (((c0 + c1) + c2) + c3) + c4; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler.vk.frag.vk new file mode 100644 index 000000000..ae8df4c92 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/combined-texture-sampler.vk.frag.vk @@ -0,0 +1,48 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 2) uniform mediump texture2D uTexture0; +layout(set = 0, binding = 3) uniform mediump texture2D uTexture1; +layout(set = 0, binding = 0) uniform mediump sampler uSampler0; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; + +layout(location = 0) in vec2 vTex; +layout(location = 0) out vec4 FragColor; + +vec4 sample_dual(mediump sampler samp, mediump texture2D tex) +{ + return texture(sampler2D(tex, samp), vTex); +} + +vec4 sample_duals() +{ + vec4 a = sample_dual(uSampler0, uTexture0); + vec4 b = sample_dual(uSampler1, uTexture1); + return a + b; +} + +vec4 sample_global_tex(mediump sampler samp) +{ + vec4 a = texture(sampler2D(uTexture0, samp), vTex); + vec4 b = sample_dual(samp, uTexture1); + return a + b; +} + +vec4 sample_global_sampler(mediump texture2D tex) +{ + vec4 a = texture(sampler2D(tex, uSampler0), vTex); + vec4 b = sample_dual(uSampler1, tex); + return a + 
b; +} + +void main() +{ + vec4 c0 = sample_duals(); + vec4 c1 = sample_global_tex(uSampler0); + vec4 c2 = sample_global_tex(uSampler1); + vec4 c3 = sample_global_sampler(uTexture0); + vec4 c4 = sample_global_sampler(uTexture1); + FragColor = (((c0 + c1) + c2) + c3) + c4; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/desktop-mediump.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/desktop-mediump.vk.frag new file mode 100644 index 000000000..8f7508ee8 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/desktop-mediump.vk.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 F; +layout(location = 1) flat in ivec4 I; +layout(location = 2) flat in uvec4 U; + +void main() +{ + FragColor = (F + vec4(I)) + vec4(U); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/desktop-mediump.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/desktop-mediump.vk.frag.vk new file mode 100644 index 000000000..4c0506b11 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/desktop-mediump.vk.frag.vk @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) out mediump vec4 FragColor; +layout(location = 0) in mediump vec4 F; +layout(location = 1) flat in mediump ivec4 I; +layout(location = 2) flat in mediump uvec4 U; + +void main() +{ + FragColor = (F + vec4(I)) + vec4(U); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment-ms.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment-ms.vk.frag new file mode 100644 index 000000000..ea460c1fa --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment-ms.vk.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0) uniform sampler2DMS uSubpass0; +layout(binding = 1) uniform sampler2DMS uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = (texelFetch(uSubpass0, ivec2(gl_FragCoord.xy), 1) + texelFetch(uSubpass1, ivec2(gl_FragCoord.xy), 2)) + texelFetch(uSubpass0, ivec2(gl_FragCoord.xy), gl_SampleID); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment-ms.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment-ms.vk.frag.vk new file mode 100644 index 000000000..462df22a1 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment-ms.vk.frag.vk @@ -0,0 +1,12 @@ +#version 450 + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform subpassInputMS uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = (subpassLoad(uSubpass0, 1) + subpassLoad(uSubpass1, 2)) + subpassLoad(uSubpass0, gl_SampleID); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment.vk.frag new file mode 100644 index 000000000..8d216b2c4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment.vk.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2D uSubpass0; +layout(binding = 1) uniform mediump sampler2D uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = texelFetch(uSubpass0, ivec2(gl_FragCoord.xy), 0) + texelFetch(uSubpass1, ivec2(gl_FragCoord.xy), 0); +} + diff 
--git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment.vk.frag.vk new file mode 100644 index 000000000..c8b5d9a70 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/input-attachment.vk.frag.vk @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform mediump subpassInput uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform mediump subpassInput uSubpass1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = subpassLoad(uSubpass0) + subpassLoad(uSubpass1); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/push-constant.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/push-constant.vk.frag new file mode 100644 index 000000000..c04a7ca48 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/push-constant.vk.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; +precision highp int; + +struct PushConstants +{ + vec4 value0; + vec4 value1; +}; + +uniform PushConstants push; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; + +void main() +{ + FragColor = (vColor + push.value0) + push.value1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/push-constant.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/push-constant.vk.frag.vk new file mode 100644 index 000000000..6cec90f19 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/push-constant.vk.frag.vk @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(push_constant, std430) uniform PushConstants +{ + vec4 value0; + vec4 value1; +} push; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; + +void main() +{ + FragColor = (vColor + push.value0) + push.value1; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag new file mode 100644 index 000000000..575c4187e --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag @@ -0,0 +1,22 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSamp; +uniform sampler2D SPIRV_Cross_CombineduTuS; + +layout(location = 0) out vec4 FragColor; + +vec4 samp(sampler2D uSamp_1) +{ + return texture(uSamp_1, vec2(0.5)); +} + +vec4 samp_1(sampler2D SPIRV_Cross_CombinedTS) +{ + return texture(SPIRV_Cross_CombinedTS, vec2(0.5)); +} + +void main() +{ + FragColor = samp(uSamp) + samp_1(SPIRV_Cross_CombineduTuS); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag.vk new file mode 100644 index 000000000..222b659e4 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag.vk @@ -0,0 +1,23 @@ +#version 450 + +layout(set = 0, binding = 0) uniform sampler2D uSamp; +layout(set = 0, binding = 1) uniform texture2D uT; +layout(set = 0, binding = 2) uniform sampler uS; + +layout(location = 0) out vec4 FragColor; + +vec4 samp(sampler2D uSamp_1) +{ + return texture(uSamp_1, vec2(0.5)); +} + +vec4 samp(texture2D T, sampler S) +{ + return texture(sampler2D(T, S), 
vec2(0.5)); +} + +void main() +{ + FragColor = samp(uSamp) + samp(uT, uS); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag new file mode 100644 index 000000000..43393f4e7 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag @@ -0,0 +1,44 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2D SPIRV_Cross_CombineduTextureuSampler[4]; +uniform mediump sampler2DArray SPIRV_Cross_CombineduTextureArrayuSampler[4]; +uniform mediump samplerCube SPIRV_Cross_CombineduTextureCubeuSampler[4]; +uniform mediump sampler3D SPIRV_Cross_CombineduTexture3DuSampler[4]; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +vec4 sample_func(vec2 uv, mediump sampler2D SPIRV_Cross_CombineduTexturesamp[4]) +{ + return texture(SPIRV_Cross_CombineduTexturesamp[2], uv); +} + +vec4 sample_func_dual(vec2 uv, mediump sampler2D SPIRV_Cross_Combinedtexsamp) +{ + return texture(SPIRV_Cross_Combinedtexsamp, uv); +} + +vec4 sample_func_dual_array(vec2 uv, mediump sampler2D SPIRV_Cross_Combinedtexsamp[4]) +{ + return texture(SPIRV_Cross_Combinedtexsamp[1], uv); +} + +void main() +{ + vec2 off = vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler[1], 0)); + vec2 off2 = vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler[2], 1)); + highp vec2 param = (vTex + off) + off2; + vec4 c0 = sample_func(param, SPIRV_Cross_CombineduTextureuSampler); + highp vec2 param_1 = (vTex + off) + off2; + vec4 c1 = sample_func_dual(param_1, SPIRV_Cross_CombineduTextureuSampler[1]); + highp vec2 param_2 = (vTex + off) + off2; + vec4 c2 = sample_func_dual_array(param_2, SPIRV_Cross_CombineduTextureuSampler); + vec4 c3 = texture(SPIRV_Cross_CombineduTextureArrayuSampler[3], vTex3); + vec4 c4 = texture(SPIRV_Cross_CombineduTextureCubeuSampler[1], vTex3); + vec4 c5 = texture(SPIRV_Cross_CombineduTexture3DuSampler[2], vTex3); + FragColor = ((((c0 + c1) + c2) + c3) + c4) + c5; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag.vk new file mode 100644 index 000000000..495874ecc --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag.vk @@ -0,0 +1,45 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 1) uniform mediump texture2D uTexture[4]; +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray[4]; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube[4]; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D[4]; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +vec4 sample_func(mediump sampler samp, vec2 uv) +{ + return texture(sampler2D(uTexture[2], samp), uv); +} + +vec4 sample_func_dual(mediump sampler samp, mediump texture2D tex, vec2 uv) +{ + return texture(sampler2D(tex, samp), uv); +} + +vec4 sample_func_dual_array(mediump sampler samp, mediump texture2D tex[4], vec2 uv) +{ + return texture(sampler2D(tex[1], samp), uv); +} + +void main() +{ + vec2 off = vec2(1.0) / 
vec2(textureSize(sampler2D(uTexture[1], uSampler), 0)); + vec2 off2 = vec2(1.0) / vec2(textureSize(sampler2D(uTexture[2], uSampler), 1)); + highp vec2 param = (vTex + off) + off2; + vec4 c0 = sample_func(uSampler, param); + highp vec2 param_1 = (vTex + off) + off2; + vec4 c1 = sample_func_dual(uSampler, uTexture[1], param_1); + highp vec2 param_2 = (vTex + off) + off2; + vec4 c2 = sample_func_dual_array(uSampler, uTexture, param_2); + vec4 c3 = texture(sampler2DArray(uTextureArray[3], uSampler), vTex3); + vec4 c4 = texture(samplerCube(uTextureCube[1], uSampler), vTex3); + vec4 c5 = texture(sampler3D(uTexture3D[2], uSampler), vTex3); + FragColor = ((((c0 + c1) + c2) + c3) + c4) + c5; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture.vk.frag new file mode 100644 index 000000000..78477cfba --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture.vk.frag @@ -0,0 +1,37 @@ +#version 310 es +precision mediump float; +precision highp int; + +uniform mediump sampler2D SPIRV_Cross_CombineduTextureuSampler; +uniform mediump sampler2DArray SPIRV_Cross_CombineduTextureArrayuSampler; +uniform mediump samplerCube SPIRV_Cross_CombineduTextureCubeuSampler; +uniform mediump sampler3D SPIRV_Cross_CombineduTexture3DuSampler; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +vec4 sample_func(vec2 uv, mediump sampler2D SPIRV_Cross_CombineduTexturesamp) +{ + return texture(SPIRV_Cross_CombineduTexturesamp, uv); +} + +vec4 sample_func_dual(vec2 uv, mediump sampler2D SPIRV_Cross_Combinedtexsamp) +{ + return texture(SPIRV_Cross_Combinedtexsamp, uv); +} + +void main() +{ + vec2 off = vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler, 0)); + vec2 off2 = vec2(1.0) / vec2(textureSize(SPIRV_Cross_CombineduTextureuSampler, 1)); + highp vec2 param = (vTex + off) + off2; + vec4 c0 = sample_func(param, SPIRV_Cross_CombineduTextureuSampler); + highp vec2 param_1 = (vTex + off) + off2; + vec4 c1 = sample_func_dual(param_1, SPIRV_Cross_CombineduTextureuSampler); + vec4 c2 = texture(SPIRV_Cross_CombineduTextureArrayuSampler, vTex3); + vec4 c3 = texture(SPIRV_Cross_CombineduTextureCubeuSampler, vTex3); + vec4 c4 = texture(SPIRV_Cross_CombineduTexture3DuSampler, vTex3); + FragColor = (((c0 + c1) + c2) + c3) + c4; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture.vk.frag.vk new file mode 100644 index 000000000..cfa2f3961 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/separate-sampler-texture.vk.frag.vk @@ -0,0 +1,38 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(set = 0, binding = 1) uniform mediump texture2D uTexture; +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D; + +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; +layout(location = 0) out vec4 FragColor; + +vec4 sample_func(mediump sampler samp, vec2 uv) +{ + return texture(sampler2D(uTexture, samp), uv); +} + +vec4 sample_func_dual(mediump sampler samp, mediump texture2D tex, vec2 uv) +{ + 
return texture(sampler2D(tex, samp), uv); +} + +void main() +{ + vec2 off = vec2(1.0) / vec2(textureSize(sampler2D(uTexture, uSampler), 0)); + vec2 off2 = vec2(1.0) / vec2(textureSize(sampler2D(uTexture, uSampler), 1)); + highp vec2 param = (vTex + off) + off2; + vec4 c0 = sample_func(uSampler, param); + highp vec2 param_1 = (vTex + off) + off2; + vec4 c1 = sample_func_dual(uSampler, uTexture, param_1); + vec4 c2 = texture(sampler2DArray(uTextureArray, uSampler), vTex3); + vec4 c3 = texture(samplerCube(uTextureCube, uSampler), vTex3); + vec4 c4 = texture(sampler3D(uTexture3D, uSampler), vTex3); + FragColor = (((c0 + c1) + c2) + c3) + c4; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-block-size.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-block-size.vk.frag new file mode 100644 index 000000000..19ea6ae06 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-block-size.vk.frag @@ -0,0 +1,22 @@ +#version 310 es +precision mediump float; +precision highp int; + +#ifndef SPIRV_CROSS_CONSTANT_ID_10 +#define SPIRV_CROSS_CONSTANT_ID_10 2 +#endif +const int Value = SPIRV_CROSS_CONSTANT_ID_10; + +layout(binding = 0, std140) uniform SpecConstArray +{ + vec4 samples[Value]; +} _15; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int Index; + +void main() +{ + FragColor = _15.samples[Index]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-block-size.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-block-size.vk.frag.vk new file mode 100644 index 000000000..133761a83 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-block-size.vk.frag.vk @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(constant_id = 10) const int Value = 2; + +layout(set = 0, binding = 0, std140) uniform SpecConstArray +{ + vec4 samples[Value]; +} _15; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in mediump int Index; + +void main() +{ + FragColor = _15.samples[Index]; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-ternary.vk.frag b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-ternary.vk.frag new file mode 100644 index 000000000..e03dfcb9a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-ternary.vk.frag @@ -0,0 +1,16 @@ +#version 450 + +#ifndef SPIRV_CROSS_CONSTANT_ID_0 +#define SPIRV_CROSS_CONSTANT_ID_0 10u +#endif +const uint s = SPIRV_CROSS_CONSTANT_ID_0; +const bool _13 = (s > 20u); +const uint _16 = _13 ? 30u : 50u; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = float(_16); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-ternary.vk.frag.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-ternary.vk.frag.vk new file mode 100644 index 000000000..59d3b99b9 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/frag/spec-constant-ternary.vk.frag.vk @@ -0,0 +1,13 @@ +#version 450 + +layout(constant_id = 0) const uint s = 10u; +const bool _13 = (s > 20u); +const uint _16 = _13 ? 
30u : 50u; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = float(_16); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/vert/multiview.nocompat.vk.vert.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/multiview.nocompat.vk.vert.vk new file mode 100644 index 000000000..90055473d --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/multiview.nocompat.vk.vert.vk @@ -0,0 +1,15 @@ +#version 310 es +#extension GL_EXT_multiview : require + +layout(set = 0, binding = 0, std140) uniform MVPs +{ + mat4 MVP[2]; +} _19; + +layout(location = 0) in vec4 Position; + +void main() +{ + gl_Position = _19.MVP[gl_ViewIndex] * Position; +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/vert/small-storage.vk.vert b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/small-storage.vk.vert new file mode 100644 index 000000000..2ade81104 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/small-storage.vk.vert @@ -0,0 +1,63 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. +#endif +#extension GL_EXT_shader_8bit_storage : require +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_NV_gpu_shader5) +#extension GL_NV_gpu_shader5 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif + +layout(binding = 0, std140) uniform block +{ + i16vec2 a; + u16vec2 b; + i8vec2 c; + u8vec2 d; + f16vec2 e; +} _26; + +layout(binding = 1, std430) readonly buffer storage +{ + i16vec3 f; + u16vec3 g; + i8vec3 h; + u8vec3 i; + f16vec3 j; +} _53; + +struct pushconst +{ + i16vec4 k; + u16vec4 l; + i8vec4 m; + u8vec4 n; + f16vec4 o; +}; + +uniform pushconst _76; + +layout(location = 0) out i16vec4 p; +layout(location = 0, component = 0) in int16_t foo; +layout(location = 1) out u16vec4 q; +layout(location = 0, component = 1) in uint16_t bar; +layout(location = 2) out f16vec4 r; +layout(location = 1) in float16_t baz; + +void main() +{ + p = i16vec4((((ivec4(int(foo)) + ivec4(ivec2(_26.a), ivec2(_26.c))) - ivec4(ivec3(_53.f) / ivec3(_53.h), 1)) + ivec4(_76.k)) + ivec4(_76.m)); + q = u16vec4((((uvec4(uint(bar)) + uvec4(uvec2(_26.b), uvec2(_26.d))) - uvec4(uvec3(_53.g) / uvec3(_53.i), 1u)) + uvec4(_76.l)) + uvec4(_76.n)); + r = f16vec4(((vec4(float(baz)) + vec4(vec2(_26.e), 0.0, 1.0)) - vec4(vec3(_53.j), 1.0)) + vec4(_76.o)); + gl_Position = vec4(0.0, 0.0, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/vert/small-storage.vk.vert.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/small-storage.vk.vert.vk new file mode 100644 index 000000000..c283d6064 --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/small-storage.vk.vert.vk @@ -0,0 +1,59 @@ +#version 450 +#if defined(GL_AMD_gpu_shader_int16) +#extension GL_AMD_gpu_shader_int16 : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for Int16. 
+#endif +#extension GL_EXT_shader_8bit_storage : require +#if defined(GL_AMD_gpu_shader_half_float) +#extension GL_AMD_gpu_shader_half_float : require +#elif defined(GL_EXT_shader_16bit_storage) +#extension GL_EXT_shader_16bit_storage : require +#else +#error No extension available for FP16. +#endif + +layout(set = 0, binding = 0, std140) uniform block +{ + i16vec2 a; + u16vec2 b; + i8vec2 c; + u8vec2 d; + f16vec2 e; +} _26; + +layout(set = 0, binding = 1, std430) readonly buffer storage +{ + i16vec3 f; + u16vec3 g; + i8vec3 h; + u8vec3 i; + f16vec3 j; +} _53; + +layout(push_constant, std430) uniform pushconst +{ + i16vec4 k; + u16vec4 l; + i8vec4 m; + u8vec4 n; + f16vec4 o; +} _76; + +layout(location = 0) out i16vec4 p; +layout(location = 0, component = 0) in int16_t foo; +layout(location = 1) out u16vec4 q; +layout(location = 0, component = 1) in uint16_t bar; +layout(location = 2) out f16vec4 r; +layout(location = 1) in float16_t baz; + +void main() +{ + p = i16vec4((((ivec4(int(foo)) + ivec4(ivec2(_26.a), ivec2(_26.c))) - ivec4(ivec3(_53.f) / ivec3(_53.h), 1)) + ivec4(_76.k)) + ivec4(_76.m)); + q = u16vec4((((uvec4(uint(bar)) + uvec4(uvec2(_26.b), uvec2(_26.d))) - uvec4(uvec3(_53.g) / uvec3(_53.i), 1u)) + uvec4(_76.l)) + uvec4(_76.n)); + r = f16vec4(((vec4(float(baz)) + vec4(vec2(_26.e), 0.0, 1.0)) - vec4(vec3(_53.j), 1.0)) + vec4(_76.o)); + gl_Position = vec4(0.0, 0.0, 0.0, 1.0); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/vert/vulkan-vertex.vk.vert b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/vulkan-vertex.vk.vert new file mode 100644 index 000000000..60ba1882f --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/vulkan-vertex.vk.vert @@ -0,0 +1,9 @@ +#version 310 es + +uniform int SPIRV_Cross_BaseInstance; + +void main() +{ + gl_Position = vec4(1.0, 2.0, 3.0, 4.0) * float(gl_VertexID + (gl_InstanceID + SPIRV_Cross_BaseInstance)); +} + diff --git a/3rdparty/spirv-cross/reference/shaders/vulkan/vert/vulkan-vertex.vk.vert.vk b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/vulkan-vertex.vk.vert.vk new file mode 100644 index 000000000..8c4930d7a --- /dev/null +++ b/3rdparty/spirv-cross/reference/shaders/vulkan/vert/vulkan-vertex.vk.vert.vk @@ -0,0 +1,7 @@ +#version 310 es + +void main() +{ + gl_Position = vec4(1.0, 2.0, 3.0, 4.0) * float(gl_VertexIndex + gl_InstanceIndex); +} + diff --git a/3rdparty/spirv-cross/samples/cpp/Makefile b/3rdparty/spirv-cross/samples/cpp/Makefile new file mode 100644 index 000000000..225bb3d57 --- /dev/null +++ b/3rdparty/spirv-cross/samples/cpp/Makefile @@ -0,0 +1,28 @@ +SOURCES := $(wildcard *.comp) +SPIRV := $(SOURCES:.comp=.spv) +CPP_INTERFACE := $(SOURCES:.comp=.spv.cpp) +CPP_DRIVER := $(SOURCES:.comp=.cpp) +EXECUTABLES := $(SOURCES:.comp=.shader) +OBJECTS := $(CPP_DRIVER:.cpp=.o) $(CPP_INTERFACE:.cpp=.o) + +CXXFLAGS += -std=c++11 -I../../include -I. 
+LDFLAGS += -pthread -lm + +all: $(EXECUTABLES) + +%.spv: %.comp + glslangValidator -V -o $@ $< + +%.spv.cpp: %.spv + ../../spirv-cross --cpp --output $@ $< + +%.o: %.cpp + $(CXX) -c -o $@ $< $(CXXFLAGS) + +%.shader: %.o %.spv.o + $(CXX) -o $@ $^ $(LDFLAGS) + +clean: + $(RM) -f $(EXECUTABLES) $(SPIRV) $(CPP_INTERFACE) $(OBJECTS) + +.PHONY: clean diff --git a/3rdparty/spirv-cross/samples/cpp/atomics.comp b/3rdparty/spirv-cross/samples/cpp/atomics.comp new file mode 100644 index 000000000..0bf6d2ad0 --- /dev/null +++ b/3rdparty/spirv-cross/samples/cpp/atomics.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 64) in; + +layout(set = 0, binding = 0, std430) readonly buffer SSBO0 +{ + float inputs[]; +}; + +layout(set = 0, binding = 1, std430) writeonly buffer SSBO1 +{ + float outputs[]; +}; + +layout(set = 0, binding = 2, std430) buffer SSBO2 +{ + uint counter; +}; + +void main() +{ + // Builds a tightly packed list of all values less than 10.0. + // The output order is random. + float value = inputs[gl_GlobalInvocationID.x]; + if (value < 10.0) + { + uint output_index = atomicAdd(counter, 1u); + outputs[output_index] = value; + } +} diff --git a/3rdparty/spirv-cross/samples/cpp/atomics.cpp b/3rdparty/spirv-cross/samples/cpp/atomics.cpp new file mode 100644 index 000000000..89351a5ae --- /dev/null +++ b/3rdparty/spirv-cross/samples/cpp/atomics.cpp @@ -0,0 +1,90 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "spirv_cross/external_interface.h" +#include <stdio.h> + +#ifndef GLM_SWIZZLE +#define GLM_SWIZZLE +#endif + +#ifndef GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS +#endif + +#include <glm/glm.hpp> +using namespace glm; + +int main() +{ + // First, we get the C interface to the shader. + // This can be loaded from a dynamic library, or as here, + // linked in as a static library. + auto *iface = spirv_cross_get_interface(); + + // Create an instance of the shader interface. + auto *shader = iface->construct(); + +// Build some input data for our compute shader. +#define NUM_WORKGROUPS 4 + float a[64 * NUM_WORKGROUPS]; + float b[64 * NUM_WORKGROUPS] = {}; + uint32_t counter = 0; + + for (int i = 0; i < 64 * NUM_WORKGROUPS; i++) + { + a[i] = i * 0.46f; + } + + void *aptr = a; + void *bptr = b; + void *cptr = &counter; + + // Bind resources to the shader. + // For resources like samplers and buffers, we provide a list of pointers, + // since UBOs, SSBOs and samplers can be arrays, and can point to different types, + // which is especially true for samplers. + spirv_cross_set_resource(shader, 0, 0, &aptr, sizeof(aptr)); + spirv_cross_set_resource(shader, 0, 1, &bptr, sizeof(bptr)); + spirv_cross_set_resource(shader, 0, 2, &cptr, sizeof(cptr)); + + // We also have to set builtins. + // The relevant builtins will depend on the shader, + // but for compute, there are few builtins, which are gl_NumWorkGroups and gl_WorkGroupID. + // LocalInvocationID and GlobalInvocationID are inferred when executing the invocation. 
+ uvec3 num_workgroups(NUM_WORKGROUPS, 1, 1); + uvec3 work_group_id(0, 0, 0); + spirv_cross_set_builtin(shader, SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS, &num_workgroups, sizeof(num_workgroups)); + spirv_cross_set_builtin(shader, SPIRV_CROSS_BUILTIN_WORK_GROUP_ID, &work_group_id, sizeof(work_group_id)); + + // Execute 4 work groups. + for (unsigned i = 0; i < NUM_WORKGROUPS; i++) + { + work_group_id.x = i; + iface->invoke(shader); + } + + // Call destructor. + iface->destruct(shader); + + // Verify our output. + // TODO: Implement a test framework that asserts results computed. + fprintf(stderr, "Counter = %u\n", counter); + for (unsigned i = 0; i < counter; i++) + { + fprintf(stderr, "[%3u] = %.1f\n", i, b[i]); + } +} diff --git a/3rdparty/spirv-cross/samples/cpp/multiply.comp b/3rdparty/spirv-cross/samples/cpp/multiply.comp new file mode 100644 index 000000000..1ac7869ad --- /dev/null +++ b/3rdparty/spirv-cross/samples/cpp/multiply.comp @@ -0,0 +1,22 @@ +#version 310 es +layout(local_size_x = 64) in; + +layout(set = 0, binding = 0, std430) readonly buffer SSBO0 +{ + vec4 a[]; +}; + +layout(set = 0, binding = 1, std430) readonly buffer SSBO1 +{ + vec4 b[]; +}; + +layout(set = 0, binding = 2, std430) buffer SSBO2 +{ + vec4 c[]; +}; + +void main() +{ + c[gl_GlobalInvocationID.x] = a[gl_GlobalInvocationID.x] * b[gl_GlobalInvocationID.x]; +} diff --git a/3rdparty/spirv-cross/samples/cpp/multiply.cpp b/3rdparty/spirv-cross/samples/cpp/multiply.cpp new file mode 100644 index 000000000..daa1fc647 --- /dev/null +++ b/3rdparty/spirv-cross/samples/cpp/multiply.cpp @@ -0,0 +1,91 @@ +/* + * Copyright 2015-2017 ARM Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "spirv_cross/external_interface.h" +#include <stdio.h> + +#ifndef GLM_SWIZZLE +#define GLM_SWIZZLE +#endif + +#ifndef GLM_FORCE_RADIANS +#define GLM_FORCE_RADIANS +#endif + +#include <glm/glm.hpp> +using namespace glm; + +int main() +{ + // First, we get the C interface to the shader. + // This can be loaded from a dynamic library, or as here, + // linked in as a static library. + auto *iface = spirv_cross_get_interface(); + + // Create an instance of the shader interface. + auto *shader = iface->construct(); + +// Build some input data for our compute shader. +#define NUM_WORKGROUPS 4 + vec4 a[64 * NUM_WORKGROUPS]; + vec4 b[64 * NUM_WORKGROUPS]; + vec4 c[64 * NUM_WORKGROUPS] = {}; + + for (int i = 0; i < 64 * NUM_WORKGROUPS; i++) + { + a[i] = vec4(100 + i, 101 + i, 102 + i, 103 + i); + b[i] = vec4(100 - i, 99 - i, 98 - i, 97 - i); + } + + void *aptr = a; + void *bptr = b; + void *cptr = c; + + // Bind resources to the shader. + // For resources like samplers and buffers, we provide a list of pointers, + // since UBOs, SSBOs and samplers can be arrays, and can point to different types, + // which is especially true for samplers. 
+	spirv_cross_set_resource(shader, 0, 0, &aptr, sizeof(aptr));
+	spirv_cross_set_resource(shader, 0, 1, &bptr, sizeof(bptr));
+	spirv_cross_set_resource(shader, 0, 2, &cptr, sizeof(cptr));
+
+	// We also have to set builtins.
+	// The relevant builtins will depend on the shader,
+	// but for compute, there are few builtins, which are gl_NumWorkGroups and gl_WorkGroupID.
+	// LocalInvocationID and GlobalInvocationID are inferred when executing the invocation.
+	uvec3 num_workgroups(NUM_WORKGROUPS, 1, 1);
+	uvec3 work_group_id(0, 0, 0);
+	spirv_cross_set_builtin(shader, SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS, &num_workgroups, sizeof(num_workgroups));
+	spirv_cross_set_builtin(shader, SPIRV_CROSS_BUILTIN_WORK_GROUP_ID, &work_group_id, sizeof(work_group_id));
+
+	// Execute 4 work groups.
+	for (unsigned i = 0; i < NUM_WORKGROUPS; i++)
+	{
+		work_group_id.x = i;
+		iface->invoke(shader);
+	}
+
+	// Call destructor.
+	iface->destruct(shader);
+
+	// Verify our output.
+	// TODO: Implement a test framework that asserts results computed.
+	for (unsigned i = 0; i < 64 * NUM_WORKGROUPS; i++)
+	{
+		fprintf(stderr, "(%.1f, %.1f, %.1f, %.1f) * (%.1f, %.1f, %.1f, %.1f) => (%.1f, %.1f, %.1f, %.1f)\n", a[i].x,
+		        a[i].y, a[i].z, a[i].w, b[i].x, b[i].y, b[i].z, b[i].w, c[i].x, c[i].y, c[i].z, c[i].w);
+	}
+}
diff --git a/3rdparty/spirv-cross/samples/cpp/shared.comp b/3rdparty/spirv-cross/samples/cpp/shared.comp
new file mode 100644
index 000000000..7d59060aa
--- /dev/null
+++ b/3rdparty/spirv-cross/samples/cpp/shared.comp
@@ -0,0 +1,36 @@
+#version 310 es
+layout(local_size_x = 64) in;
+
+layout(set = 0, binding = 0, std430) readonly buffer SSBO0
+{
+	float inputs[];
+};
+
+layout(set = 0, binding = 1, std430) writeonly buffer SSBO1
+{
+	float outputs[];
+};
+
+shared float tmp[gl_WorkGroupSize.x];
+
+void main()
+{
+	uint local = gl_LocalInvocationIndex;
+	uint work_group = gl_WorkGroupID.x;
+
+	// Does a trivial parallel reduction through shared memory.
+	tmp[local] = inputs[work_group * gl_WorkGroupSize.x * 2u + local] + inputs[work_group * gl_WorkGroupSize.x * 2u + local + gl_WorkGroupSize.x];
+	memoryBarrierShared();
+	barrier();
+
+	for (uint limit = 32u; limit > 1u; limit >>= 1u)
+	{
+		if (local < limit)
+			tmp[local] = tmp[local] + tmp[local + limit];
+		memoryBarrierShared();
+		barrier();
+	}
+
+	if (local == 0u)
+		outputs[work_group] = tmp[0] + tmp[1];
+}
diff --git a/3rdparty/spirv-cross/samples/cpp/shared.cpp b/3rdparty/spirv-cross/samples/cpp/shared.cpp
new file mode 100644
index 000000000..5be62d681
--- /dev/null
+++ b/3rdparty/spirv-cross/samples/cpp/shared.cpp
@@ -0,0 +1,89 @@
+/*
+ * Copyright 2015-2017 ARM Limited
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "spirv_cross/external_interface.h"
+#include <stdio.h>
+
+#ifndef GLM_SWIZZLE
+#define GLM_SWIZZLE
+#endif
+
+#ifndef GLM_FORCE_RADIANS
+#define GLM_FORCE_RADIANS
+#endif
+
+#include <glm/glm.hpp>
+using namespace glm;
+
+int main()
+{
+	// First, we get the C interface to the shader.
+	// This can be loaded from a dynamic library, or as here,
+	// linked in as a static library.
+	auto *iface = spirv_cross_get_interface();
+
+	// Create an instance of the shader interface.
+	auto *shader = iface->construct();
+
+// Build some input data for our compute shader.
+#define NUM_WORKGROUPS 4
+	float a[128 * NUM_WORKGROUPS];
+	float b[NUM_WORKGROUPS] = {};
+
+	for (int i = 0; i < 128 * NUM_WORKGROUPS; i++)
+	{
+		a[i] = float(i);
+	}
+
+	void *aptr = a;
+	void *bptr = b;
+
+	// Bind resources to the shader.
+	// For resources like samplers and buffers, we provide a list of pointers,
+	// since UBOs, SSBOs and samplers can be arrays, and can point to different types,
+	// which is especially true for samplers.
+	spirv_cross_set_resource(shader, 0, 0, &aptr, sizeof(aptr));
+	spirv_cross_set_resource(shader, 0, 1, &bptr, sizeof(bptr));
+
+	// We also have to set builtins.
+	// The relevant builtins will depend on the shader,
+	// but for compute, there are few builtins, which are gl_NumWorkGroups and gl_WorkGroupID.
+	// LocalInvocationID and GlobalInvocationID are inferred when executing the invocation.
+	uvec3 num_workgroups(NUM_WORKGROUPS, 1, 1);
+	uvec3 work_group_id(0, 0, 0);
+	spirv_cross_set_builtin(shader, SPIRV_CROSS_BUILTIN_NUM_WORK_GROUPS, &num_workgroups, sizeof(num_workgroups));
+	spirv_cross_set_builtin(shader, SPIRV_CROSS_BUILTIN_WORK_GROUP_ID, &work_group_id, sizeof(work_group_id));
+
+	// Execute 4 work groups.
+	for (unsigned i = 0; i < NUM_WORKGROUPS; i++)
+	{
+		work_group_id.x = i;
+		iface->invoke(shader);
+	}
+
+	// Call destructor.
+	iface->destruct(shader);
+
+	// Verify our output.
+	// TODO: Implement a test framework that asserts results computed.
+	for (unsigned i = 0; i < NUM_WORKGROUPS; i++)
+	{
+		float expected_sum = 0.0f;
+		for (unsigned j = i * 128; j < (i + 1) * 128; j++)
+			expected_sum += a[j];
+		fprintf(stderr, "Sum in workgroup #%u = %.1f, expected %.1f\n", i, b[i], expected_sum);
+	}
+}
diff --git a/3rdparty/spirv-cross/shaders-hlsl-no-opt/asm/comp/specialization-constant-workgroup.nofxc.asm.comp b/3rdparty/spirv-cross/shaders-hlsl-no-opt/asm/comp/specialization-constant-workgroup.nofxc.asm.comp
new file mode 100644
index 000000000..188e3fec3
--- /dev/null
+++ b/3rdparty/spirv-cross/shaders-hlsl-no-opt/asm/comp/specialization-constant-workgroup.nofxc.asm.comp
@@ -0,0 +1,47 @@
+; SPIR-V
+; Version: 1.0
+; Generator: Khronos Glslang Reference Front End; 1
+; Bound: 24
+; Schema: 0
+          OpCapability Shader
+     %1 = OpExtInstImport "GLSL.std.450"
+          OpMemoryModel Logical GLSL450
+          OpEntryPoint GLCompute %main "main"
+          OpExecutionMode %main LocalSize 1 20 1
+          OpSource ESSL 310
+          OpName %main "main"
+          OpName %SSBO "SSBO"
+          OpMemberName %SSBO 0 "a"
+          OpName %_ ""
+          OpMemberDecorate %SSBO 0 Offset 0
+          OpDecorate %SSBO BufferBlock
+          OpDecorate %_ DescriptorSet 0
+          OpDecorate %_ Binding 0
+          OpDecorate %19 SpecId 10
+          OpDecorate %21 SpecId 12
+          OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize
+  %void = OpTypeVoid
+     %3 = OpTypeFunction %void
+ %float = OpTypeFloat 32
+  %SSBO = OpTypeStruct %float
+%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO
+     %_ = OpVariable %_ptr_Uniform_SSBO Uniform
+   %int = OpTypeInt 32 1
+ %int_0 = OpConstant %int 0
+%float_1 = OpConstant %float 1
+%_ptr_Uniform_float = OpTypePointer Uniform %float
+  %uint = OpTypeInt 32 0
+    %19 = OpSpecConstant %uint 9
+%uint_20 = OpConstant %uint 20
+    %21 = OpSpecConstant %uint 4
+%v3uint = OpTypeVector %uint 3
+%gl_WorkGroupSize = OpSpecConstantComposite %v3uint %19 %uint_20 %21
+ %main =
OpFunction %void None %3 + %5 = OpLabel + %14 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + %15 = OpLoad %float %14 + %16 = OpFAdd %float %15 %float_1 + %17 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + OpStore %17 %16 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl-no-opt/asm/vert/empty-struct-composite.asm.vert b/3rdparty/spirv-cross/shaders-hlsl-no-opt/asm/vert/empty-struct-composite.asm.vert new file mode 100644 index 000000000..37a2d8793 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl-no-opt/asm/vert/empty-struct-composite.asm.vert @@ -0,0 +1,37 @@ +; SPIR-V +; Version: 1.1 +; Generator: Google rspirv; 0 +; Bound: 17 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %2 "main" + OpExecutionMode %2 OriginUpperLeft + OpName %Test "Test" + OpName %t "t" + OpName %retvar "retvar" + OpName %main "main" + OpName %retvar_0 "retvar" + %void = OpTypeVoid + %6 = OpTypeFunction %void + %Test = OpTypeStruct +%_ptr_Function_Test = OpTypePointer Function %Test +%_ptr_Function_void = OpTypePointer Function %void + %2 = OpFunction %void None %6 + %7 = OpLabel + %t = OpVariable %_ptr_Function_Test Function + %retvar = OpVariable %_ptr_Function_void Function + OpBranch %4 + %4 = OpLabel + %13 = OpCompositeConstruct %Test + OpStore %t %13 + OpReturn + OpFunctionEnd + %main = OpFunction %void None %6 + %15 = OpLabel + %retvar_0 = OpVariable %_ptr_Function_void Function + OpBranch %14 + %14 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl-no-opt/comp/bitfield.comp b/3rdparty/spirv-cross/shaders-hlsl-no-opt/comp/bitfield.comp new file mode 100644 index 000000000..a2ef9aa0b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl-no-opt/comp/bitfield.comp @@ -0,0 +1,44 @@ +#version 310 es + +void main() +{ + int signed_value = 0; + uint unsigned_value = 0u; + + ivec3 signed_values = ivec3(0); + uvec3 unsigned_values = uvec3(0u); + + { + int s = bitfieldExtract(signed_value, 5, 20); + uint u = bitfieldExtract(unsigned_value, 6, 21); + + s = bitfieldInsert(s, 40, 5, 4); + u = bitfieldInsert(u, 60u, 5, 4); + + u = bitfieldReverse(u); + s = bitfieldReverse(s); + + int v0 = bitCount(u); + int v1 = bitCount(s); + + int v2 = findMSB(u); + int v3 = findLSB(s); + } + + { + ivec3 s = bitfieldExtract(signed_values, 5, 20); + uvec3 u = bitfieldExtract(unsigned_values, 6, 21); + + s = bitfieldInsert(s, ivec3(40), 5, 4); + u = bitfieldInsert(u, uvec3(60u), 5, 4); + + u = bitfieldReverse(u); + s = bitfieldReverse(s); + + ivec3 v0 = bitCount(u); + ivec3 v1 = bitCount(s); + + ivec3 v2 = findMSB(u); + ivec3 v3 = findLSB(s); + } +} diff --git a/3rdparty/spirv-cross/shaders-hlsl-no-opt/frag/spec-constant.frag b/3rdparty/spirv-cross/shaders-hlsl-no-opt/frag/spec-constant.frag new file mode 100644 index 000000000..a6c8d94e7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl-no-opt/frag/spec-constant.frag @@ -0,0 +1,80 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(constant_id = 1) const float a = 1.0; +layout(constant_id = 2) const float b = 2.0; +layout(constant_id = 3) const int c = 3; +layout(constant_id = 4) const int d = 4; +layout(constant_id = 5) const uint e = 5u; +layout(constant_id = 6) const uint f = 6u; +layout(constant_id = 7) const bool g = false; +layout(constant_id = 8) const bool h = true; +// glslang doesn't seem to support partial spec constants or composites yet, so only test the basics. 
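+// Note: d also feeds spec-constant expressions below (d + 2 for Foo::elems, and
+// c + 3 / c + 2 for vec0 and vec1 in main), so those array sizes are only fixed once
+// the constants are specialized at pipeline-creation time. In SPIR-V these expressions
+// become OpSpecConstantOp results, which is what this test exercises for the HLSL backend.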
+ +struct Foo +{ + float elems[d + 2]; +}; + +void main() +{ + float t0 = a; + float t1 = b; + + uint c0 = uint(c); // OpIAdd with different types. + // FConvert, float-to-double. + int c1 = -c; // SNegate + int c2 = ~c; // OpNot + int c3 = c + d; // OpIAdd + int c4 = c - d; // OpISub + int c5 = c * d; // OpIMul + int c6 = c / d; // OpSDiv + uint c7 = e / f; // OpUDiv + int c8 = c % d; // OpSMod + uint c9 = e % f; // OpUMod + // TODO: OpSRem, any way to access this in GLSL? + int c10 = c >> d; // OpShiftRightArithmetic + uint c11 = e >> f; // OpShiftRightLogical + int c12 = c << d; // OpShiftLeftLogical + int c13 = c | d; // OpBitwiseOr + int c14 = c ^ d; // OpBitwiseXor + int c15 = c & d; // OpBitwiseAnd + // VectorShuffle, CompositeExtract, CompositeInsert, not testable atm. + bool c16 = g || h; // OpLogicalOr + bool c17 = g && h; // OpLogicalAnd + bool c18 = !g; // OpLogicalNot + bool c19 = g == h; // OpLogicalEqual + bool c20 = g != h; // OpLogicalNotEqual + // OpSelect not testable atm. + bool c21 = c == d; // OpIEqual + bool c22 = c != d; // OpINotEqual + bool c23 = c < d; // OpSLessThan + bool c24 = e < f; // OpULessThan + bool c25 = c > d; // OpSGreaterThan + bool c26 = e > f; // OpUGreaterThan + bool c27 = c <= d; // OpSLessThanEqual + bool c28 = e <= f; // OpULessThanEqual + bool c29 = c >= d; // OpSGreaterThanEqual + bool c30 = e >= f; // OpUGreaterThanEqual + // OpQuantizeToF16 not testable atm. + + int c31 = c8 + c3; + + int c32 = int(e); // OpIAdd with different types. + bool c33 = bool(c); // int -> bool + bool c34 = bool(e); // uint -> bool + int c35 = int(g); // bool -> int + uint c36 = uint(g); // bool -> uint + float c37 = float(g); // bool -> float + + // Flexible sized arrays with spec constants and spec constant ops. + float vec0[c + 3][8]; + float vec1[c + 2]; + vec0[0][0] = 10.0; + vec1[0] = 20.0; + + Foo foo; + foo.elems[c] = 10.0; + FragColor = vec4(t0 + t1) + vec0[0][0] + vec1[0] + foo.elems[c]; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp new file mode 100644 index 000000000..6e6ced122 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/access-chain-invalidate.asm.comp @@ -0,0 +1,61 @@ +; SPIR-V +; Version: 1.0 +; Generator: Google Shaderc over Glslang; 7 +; Bound: 41 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpName %main "main" + OpName %SSBO "SSBO" + OpMemberName %SSBO 0 "index" + OpMemberName %SSBO 1 "array" + OpName %_ "" + OpDecorate %_arr_uint_uint_64 ArrayStride 4 + OpMemberDecorate %SSBO 0 Offset 0 + OpMemberDecorate %SSBO 1 Offset 4 + OpDecorate %SSBO BufferBlock + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 + %uint_64 = OpConstant %uint 64 +%_arr_uint_uint_64 = OpTypeArray %uint %uint_64 + %SSBO = OpTypeStruct %uint %_arr_uint_uint_64 +%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO + %_ = OpVariable %_ptr_Uniform_SSBO Uniform + %int = OpTypeInt 32 1 + %int_1 = OpConstant %int 1 + %int_0 = OpConstant %int 0 +%_ptr_Uniform_uint = OpTypePointer Uniform %uint + %uint_0 = OpConstant %uint 0 + %bool = OpTypeBool + %main = OpFunction %void None %3 + %5 = OpLabel + %18 = 
OpAccessChain %_ptr_Uniform_uint %_ %int_0 + %19 = OpLoad %uint %18 + %20 = OpAccessChain %_ptr_Uniform_uint %_ %int_1 %19 + %21 = OpLoad %uint %20 + OpBranch %24 + %24 = OpLabel + %40 = OpPhi %uint %uint_0 %5 %35 %25 + %31 = OpULessThan %bool %40 %uint_64 + OpLoopMerge %26 %25 None + OpBranchConditional %31 %25 %26 + %25 = OpLabel + %33 = OpAccessChain %_ptr_Uniform_uint %_ %int_1 %40 + OpStore %33 %uint_0 + %35 = OpIAdd %uint %40 %int_1 + OpBranch %24 + %26 = OpLabel + %37 = OpLoad %uint %18 + %39 = OpAccessChain %_ptr_Uniform_uint %_ %int_1 %37 + OpStore %39 %21 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..f716cbb94 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %3 "main" %15 + OpExecutionMode %3 LocalSize 4 1 1 + OpName %3 "main" + OpName %8 "u0" + OpName %9 "u0_counters" + OpMemberName %9 0 "c" + OpName %11 "u0_counter" + OpName %15 "vThreadID" + OpName %19 "r0" + OpDecorate %8 DescriptorSet 0 + OpDecorate %8 Binding 0 + OpMemberDecorate %9 0 Offset 0 + OpDecorate %9 BufferBlock + OpDecorate %11 DescriptorSet 1 + OpDecorate %11 Binding 1 + OpDecorate %15 BuiltIn GlobalInvocationId + %1 = OpTypeVoid + %2 = OpTypeFunction %1 + %5 = OpTypeInt 32 0 + %6 = OpTypeImage %5 Buffer 0 0 0 2 R32ui + %7 = OpTypePointer UniformConstant %6 + %8 = OpVariable %7 UniformConstant + %9 = OpTypeStruct %5 + %10 = OpTypePointer Uniform %9 + %11 = OpVariable %10 Uniform + %12 = OpTypeInt 32 1 + %13 = OpTypeVector %12 3 + %14 = OpTypePointer Input %13 + %15 = OpVariable %14 Input + %16 = OpTypeFloat 32 + %17 = OpTypeVector %16 4 + %18 = OpTypePointer Function %17 + %20 = OpTypePointer Uniform %5 + %21 = OpConstant %5 0 + %23 = OpConstant %5 1 + %26 = OpTypePointer Function %16 + %33 = OpConstant %12 0 + %34 = OpConstant %5 2 + %37 = OpTypePointer Input %12 + %41 = OpTypeVector %5 4 + %3 = OpFunction %1 None %2 + %4 = OpLabel + %19 = OpVariable %18 Function + %22 = OpAccessChain %20 %11 %21 + %24 = OpAtomicIDecrement %5 %22 %23 %21 + %25 = OpBitcast %16 %24 + %27 = OpInBoundsAccessChain %26 %19 %21 + OpStore %27 %25 + %28 = OpLoad %6 %8 + %29 = OpInBoundsAccessChain %26 %19 %21 + %30 = OpLoad %16 %29 + %31 = OpBitcast %12 %30 + %32 = OpIMul %5 %31 %23 + %35 = OpShiftRightLogical %5 %33 %34 + %36 = OpIAdd %5 %32 %35 + %38 = OpInBoundsAccessChain %37 %15 %21 + %39 = OpLoad %12 %38 + %40 = OpBitcast %5 %39 + %42 = OpCompositeConstruct %41 %40 %40 %40 %40 + OpImageWrite %28 %36 %42 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..40c1de3b9 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %3 "main" %15 + OpExecutionMode %3 LocalSize 4 1 1 + OpName %3 "main" + OpName %8 "u0" + OpName %9 "u0_counters" + 
OpMemberName %9 0 "c" + OpName %11 "u0_counter" + OpName %15 "vThreadID" + OpName %19 "r0" + OpDecorate %8 DescriptorSet 0 + OpDecorate %8 Binding 0 + OpMemberDecorate %9 0 Offset 0 + OpDecorate %9 BufferBlock + OpDecorate %11 DescriptorSet 1 + OpDecorate %11 Binding 1 + OpDecorate %15 BuiltIn GlobalInvocationId + %1 = OpTypeVoid + %2 = OpTypeFunction %1 + %5 = OpTypeInt 32 0 + %6 = OpTypeImage %5 Buffer 0 0 0 2 R32ui + %7 = OpTypePointer UniformConstant %6 + %8 = OpVariable %7 UniformConstant + %9 = OpTypeStruct %5 + %10 = OpTypePointer Uniform %9 + %11 = OpVariable %10 Uniform + %12 = OpTypeInt 32 1 + %13 = OpTypeVector %12 3 + %14 = OpTypePointer Input %13 + %15 = OpVariable %14 Input + %16 = OpTypeFloat 32 + %17 = OpTypeVector %16 4 + %18 = OpTypePointer Function %17 + %20 = OpTypePointer Uniform %5 + %21 = OpConstant %5 0 + %23 = OpConstant %5 1 + %26 = OpTypePointer Function %16 + %33 = OpConstant %12 0 + %34 = OpConstant %5 2 + %37 = OpTypePointer Input %12 + %41 = OpTypeVector %5 4 + %3 = OpFunction %1 None %2 + %4 = OpLabel + %19 = OpVariable %18 Function + %22 = OpAccessChain %20 %11 %21 + %24 = OpAtomicIIncrement %5 %22 %23 %21 + %25 = OpBitcast %16 %24 + %27 = OpInBoundsAccessChain %26 %19 %21 + OpStore %27 %25 + %28 = OpLoad %6 %8 + %29 = OpInBoundsAccessChain %26 %19 %21 + %30 = OpLoad %16 %29 + %31 = OpBitcast %12 %30 + %32 = OpIMul %5 %31 %23 + %35 = OpShiftRightLogical %5 %33 %34 + %36 = OpIAdd %5 %32 %35 + %38 = OpInBoundsAccessChain %37 %15 %21 + %39 = OpLoad %12 %38 + %40 = OpBitcast %5 %39 + %42 = OpCompositeConstruct %41 %40 %40 %40 %40 + OpImageWrite %28 %36 %42 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..85f6cc041 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,119 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 59 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %Foo "A" + OpMemberName %Foo 0 "a" + OpMemberName %Foo 1 "b" + OpName %A "A" + OpMemberName %A 0 "Data" + OpName %C1 "C1" + OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" + OpName %Foo_0 "A" + OpMemberName %Foo_0 0 "a" + OpMemberName %Foo_0 1 "b" + OpName %A_0 "A" + OpMemberName %A_0 0 "Data" + OpName %C2 "C2" + OpName %B "B" + OpMemberName %B 0 "Data" + OpName %C3 "C3" + OpName %B_0 "B" + OpMemberName %B_0 0 "Data" + OpName %C4 "C4" + OpMemberDecorate %Foo 0 Offset 0 + OpMemberDecorate %Foo 1 Offset 4 + OpDecorate %_runtimearr_Foo ArrayStride 8 + OpMemberDecorate %A 0 Offset 0 + OpDecorate %A BufferBlock + OpDecorate %C1 DescriptorSet 0 + OpDecorate %C1 Binding 1 + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + OpMemberDecorate %Foo_0 0 Offset 0 + OpMemberDecorate %Foo_0 1 Offset 4 + OpDecorate %_arr_Foo_0_uint_1024 ArrayStride 16 + OpMemberDecorate %A_0 0 Offset 0 + OpDecorate %A_0 Block + OpDecorate %C2 DescriptorSet 0 + OpDecorate %C2 Binding 2 + OpDecorate %_runtimearr_Foo_0 ArrayStride 8 + OpMemberDecorate %B 0 Offset 0 + OpDecorate %B BufferBlock + OpDecorate %C3 DescriptorSet 0 + OpDecorate %C3 Binding 0 + OpDecorate %_arr_Foo_0_uint_1024_0 ArrayStride 16 + OpMemberDecorate %B_0 0 Offset 
0 + OpDecorate %B_0 Block + OpDecorate %C4 DescriptorSet 0 + OpDecorate %C4 Binding 3 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %Foo = OpTypeStruct %int %int +%_runtimearr_Foo = OpTypeRuntimeArray %Foo + %A = OpTypeStruct %_runtimearr_Foo +%_ptr_Uniform_A = OpTypePointer Uniform %A + %C1 = OpVariable %_ptr_Uniform_A Uniform + %int_0 = OpConstant %int 0 + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input + %uint_0 = OpConstant %uint 0 +%_ptr_Input_uint = OpTypePointer Input %uint + %Foo_0 = OpTypeStruct %int %int + %uint_1024 = OpConstant %uint 1024 +%_arr_Foo_0_uint_1024 = OpTypeArray %Foo_0 %uint_1024 + %A_0 = OpTypeStruct %_arr_Foo_0_uint_1024 +%_ptr_Uniform_A_0 = OpTypePointer Uniform %A_0 + %C2 = OpVariable %_ptr_Uniform_A_0 Uniform +%_ptr_Uniform_Foo_0 = OpTypePointer Uniform %Foo_0 +%_ptr_Uniform_Foo = OpTypePointer Uniform %Foo +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_1 = OpConstant %int 1 +%_runtimearr_Foo_0 = OpTypeRuntimeArray %Foo + %B = OpTypeStruct %_runtimearr_Foo_0 +%_ptr_Uniform_B = OpTypePointer Uniform %B + %C3 = OpVariable %_ptr_Uniform_B Uniform +%_arr_Foo_0_uint_1024_0 = OpTypeArray %Foo_0 %uint_1024 + %B_0 = OpTypeStruct %_arr_Foo_0_uint_1024_0 +%_ptr_Uniform_B_0 = OpTypePointer Uniform %B_0 + %C4 = OpVariable %_ptr_Uniform_B_0 Uniform + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %20 = OpLoad %uint %19 + %27 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %28 = OpLoad %uint %27 + %30 = OpAccessChain %_ptr_Uniform_Foo_0 %C2 %int_0 %28 + %31 = OpLoad %Foo_0 %30 + %33 = OpAccessChain %_ptr_Uniform_Foo %C1 %int_0 %20 + %34 = OpCompositeExtract %int %31 0 + %36 = OpAccessChain %_ptr_Uniform_int %33 %int_0 + OpStore %36 %34 + %37 = OpCompositeExtract %int %31 1 + %39 = OpAccessChain %_ptr_Uniform_int %33 %int_1 + OpStore %39 %37 + %44 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %45 = OpLoad %uint %44 + %50 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %51 = OpLoad %uint %50 + %52 = OpAccessChain %_ptr_Uniform_Foo_0 %C4 %int_0 %51 + %53 = OpLoad %Foo_0 %52 + %54 = OpAccessChain %_ptr_Uniform_Foo %C3 %int_0 %45 + %55 = OpCompositeExtract %int %53 0 + %56 = OpAccessChain %_ptr_Uniform_int %54 %int_0 + OpStore %56 %55 + %57 = OpCompositeExtract %int %53 1 + %58 = OpAccessChain %_ptr_Uniform_int %54 %int_1 + OpStore %58 %57 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/control-flow-hints.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/control-flow-hints.asm.comp new file mode 100644 index 000000000..74a15955c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/control-flow-hints.asm.comp @@ -0,0 +1,146 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 85 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 1 1 + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %i "i" + OpName %bar "bar" + OpMemberName %bar 0 "@data" + OpName %bar_0 "bar" + OpName %foo "foo" + OpName %i_0 "i" + OpName %v "v" + OpName %w "w" + OpName %value "value" + OpDecorate %_runtimearr_v4float ArrayStride 16 + OpMemberDecorate %bar 0 Offset 0 + OpDecorate %bar 
BufferBlock + OpDecorate %bar_0 DescriptorSet 0 + OpDecorate %bar_0 Binding 0 + OpDecorate %foo DescriptorSet 0 + OpDecorate %foo Binding 1 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_16 = OpConstant %int 16 + %bool = OpTypeBool + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_runtimearr_v4float = OpTypeRuntimeArray %v4float + %bar = OpTypeStruct %_runtimearr_v4float +%_ptr_Uniform_bar = OpTypePointer Uniform %bar + %bar_0 = OpVariable %_ptr_Uniform_bar Uniform + %foo = OpVariable %_ptr_Uniform_bar Uniform +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_1 = OpConstant %int 1 + %int_15 = OpConstant %int 15 +%_ptr_Function_float = OpTypePointer Function %float + %int_10 = OpConstant %int 10 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %float_10 = OpConstant %float 10 + %int_20 = OpConstant %int 20 + %float_5 = OpConstant %float 5 + %72 = OpConstantComposite %v4float %float_5 %float_5 %float_5 %float_5 + %float_20 = OpConstant %float 20 + %float_40 = OpConstant %float 40 + %main = OpFunction %void None %3 + %5 = OpLabel + %84 = OpFunctionCall %void %_main_ + OpReturn + OpFunctionEnd + %_main_ = OpFunction %void None %3 + %7 = OpLabel + %i = OpVariable %_ptr_Function_int Function + %i_0 = OpVariable %_ptr_Function_int Function + %v = OpVariable %_ptr_Function_float Function + %w = OpVariable %_ptr_Function_float Function + %value = OpVariable %_ptr_Function_float Function + OpStore %i %int_0 + OpBranch %12 + %12 = OpLabel + OpLoopMerge %14 %15 Unroll + OpBranch %16 + %16 = OpLabel + %17 = OpLoad %int %i + %20 = OpSLessThan %bool %17 %int_16 + OpBranchConditional %20 %13 %14 + %13 = OpLabel + %27 = OpLoad %int %i + %29 = OpLoad %int %i + %31 = OpAccessChain %_ptr_Uniform_v4float %foo %int_0 %29 + %32 = OpLoad %v4float %31 + %33 = OpAccessChain %_ptr_Uniform_v4float %bar_0 %int_0 %27 + OpStore %33 %32 + OpBranch %15 + %15 = OpLabel + %34 = OpLoad %int %i + %36 = OpIAdd %int %34 %int_1 + OpStore %i %36 + OpBranch %12 + %14 = OpLabel + OpStore %i_0 %int_0 + OpBranch %38 + %38 = OpLabel + OpLoopMerge %40 %41 DontUnroll + OpBranch %42 + %42 = OpLabel + %43 = OpLoad %int %i_0 + %44 = OpSLessThan %bool %43 %int_16 + OpBranchConditional %44 %39 %40 + %39 = OpLabel + %46 = OpLoad %int %i_0 + %47 = OpISub %int %int_15 %46 + %48 = OpLoad %int %i_0 + %49 = OpAccessChain %_ptr_Uniform_v4float %foo %int_0 %48 + %50 = OpLoad %v4float %49 + %51 = OpAccessChain %_ptr_Uniform_v4float %bar_0 %int_0 %47 + OpStore %51 %50 + OpBranch %41 + %41 = OpLabel + %52 = OpLoad %int %i_0 + %53 = OpIAdd %int %52 %int_1 + OpStore %i_0 %53 + OpBranch %38 + %40 = OpLabel + %60 = OpAccessChain %_ptr_Uniform_float %bar_0 %int_0 %int_10 %uint_0 + %61 = OpLoad %float %60 + OpStore %v %61 + %63 = OpAccessChain %_ptr_Uniform_float %foo %int_0 %int_10 %uint_0 + %64 = OpLoad %float %63 + OpStore %w %64 + %65 = OpLoad %float %v + %67 = OpFOrdGreaterThan %bool %65 %float_10 + OpSelectionMerge %69 DontFlatten + OpBranchConditional %67 %68 %69 + %68 = OpLabel + %73 = OpAccessChain %_ptr_Uniform_v4float %foo %int_0 %int_20 + OpStore %73 %72 + OpBranch %69 + %69 = OpLabel + OpStore %value %float_20 + %76 = OpLoad %float %w + %78 = OpFOrdGreaterThan %bool %76 %float_40 + OpSelectionMerge %80 Flatten + OpBranchConditional %78 %79 %80 + %79 = OpLabel + OpStore %value %float_20 + OpBranch %80 + %80 = OpLabel + %81 = OpLoad %float %value + 
%82 = OpCompositeConstruct %v4float %81 %81 %81 %81 + %83 = OpAccessChain %_ptr_Uniform_v4float %foo %int_0 %int_20 + OpStore %83 %82 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..78b1dc74e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,102 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 61 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %id_1 + OpExecutionMode %main LocalSize 1 1 1 + OpSource HLSL 500 + OpName %main "main" + OpName %Load_u1_ "Load(u1;" + OpName %size "size" + OpName %_main_vu3_ "@main(vu3;" + OpName %id "id" + OpName %data "data" + OpName %byteAddrTemp "byteAddrTemp" + OpName %ssbo "ssbo" + OpMemberName %ssbo 0 "@data" + OpName %ssbo_0 "ssbo" + OpName %param "param" + OpName %id_0 "id" + OpName %id_1 "id" + OpName %param_0 "param" + OpDecorate %_runtimearr_uint ArrayStride 4 + OpMemberDecorate %ssbo 0 NonWritable + OpMemberDecorate %ssbo 0 Offset 0 + OpDecorate %ssbo BufferBlock + OpDecorate %ssbo_0 DescriptorSet 0 + OpDecorate %ssbo_0 Binding 1 + OpDecorate %id_1 BuiltIn GlobalInvocationId + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %8 = OpTypeFunction %void %_ptr_Function_uint + %v3uint = OpTypeVector %uint 3 +%_ptr_Function_v3uint = OpTypePointer Function %v3uint + %14 = OpTypeFunction %void %_ptr_Function_v3uint + %v4uint = OpTypeVector %uint 4 +%_ptr_Function_v4uint = OpTypePointer Function %v4uint + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_2 = OpConstant %int 2 +%_runtimearr_uint = OpTypeRuntimeArray %uint + %ssbo = OpTypeStruct %_runtimearr_uint +%_ptr_Uniform_ssbo = OpTypePointer Uniform %ssbo + %ssbo_0 = OpVariable %_ptr_Uniform_ssbo Uniform + %int_0 = OpConstant %int 0 +%_ptr_Uniform_uint = OpTypePointer Uniform %uint + %int_1 = OpConstant %int 1 + %int_3 = OpConstant %int 3 + %uint_4 = OpConstant %uint 4 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint + %id_1 = OpVariable %_ptr_Input_v3uint Input + %main = OpFunction %void None %3 + %5 = OpLabel + %id_0 = OpVariable %_ptr_Function_v3uint Function + %param_0 = OpVariable %_ptr_Function_v3uint Function + %57 = OpLoad %v3uint %id_1 + OpStore %id_0 %57 + %59 = OpLoad %v3uint %id_0 + OpStore %param_0 %59 + %60 = OpFunctionCall %void %_main_vu3_ %param_0 + OpReturn + OpFunctionEnd + %Load_u1_ = OpFunction %void None %8 + %size = OpFunctionParameter %_ptr_Function_uint + %11 = OpLabel + %data = OpVariable %_ptr_Function_v4uint Function +%byteAddrTemp = OpVariable %_ptr_Function_int Function + %24 = OpLoad %uint %size + %26 = OpShiftRightLogical %int %24 %int_2 + OpStore %byteAddrTemp %26 + %32 = OpLoad %int %byteAddrTemp + %34 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %32 + %35 = OpLoad %uint %34 + %36 = OpLoad %int %byteAddrTemp + %38 = OpIAdd %int %36 %int_1 + %39 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %38 + %40 = OpLoad %uint %39 + %41 = OpLoad %int %byteAddrTemp + %42 = OpIAdd %int %41 %int_2 + %43 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %42 + %44 = OpLoad %uint %43 + %45 = OpLoad %int %byteAddrTemp + %47 = OpIAdd %int %45 %int_3 + %48 = OpAccessChain %_ptr_Uniform_uint 
%ssbo_0 %int_0 %47 + %49 = OpLoad %uint %48 + %50 = OpCompositeConstruct %v4uint %35 %40 %44 %49 + OpStore %data %50 + OpReturn + OpFunctionEnd + %_main_vu3_ = OpFunction %void None %14 + %id = OpFunctionParameter %_ptr_Function_v3uint + %17 = OpLabel + %param = OpVariable %_ptr_Function_uint Function + OpStore %param %uint_4 + %53 = OpFunctionCall %void %Load_u1_ %param + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp new file mode 100644 index 000000000..edb1a05e5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/comp/storage-buffer-basic.invalid.nofxc.asm.comp @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Codeplay; 0 +; Bound: 31 +; Schema: 0 + OpCapability Shader + OpCapability VariablePointers + OpExtension "SPV_KHR_storage_buffer_storage_class" + OpExtension "SPV_KHR_variable_pointers" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %22 "main" %gl_WorkGroupID + OpSource OpenCL_C 120 + OpDecorate %15 SpecId 0 + ;OpDecorate %16 SpecId 1 + OpDecorate %17 SpecId 2 + OpDecorate %_runtimearr_float ArrayStride 4 + OpMemberDecorate %_struct_4 0 Offset 0 + OpDecorate %_struct_4 Block + OpDecorate %gl_WorkGroupID BuiltIn WorkgroupId + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + OpDecorate %20 DescriptorSet 0 + OpDecorate %20 Binding 0 + OpDecorate %21 DescriptorSet 0 + OpDecorate %21 Binding 1 + %float = OpTypeFloat 32 +%_ptr_StorageBuffer_float = OpTypePointer StorageBuffer %float +%_runtimearr_float = OpTypeRuntimeArray %float + %_struct_4 = OpTypeStruct %_runtimearr_float +%_ptr_StorageBuffer__struct_4 = OpTypePointer StorageBuffer %_struct_4 + %uint = OpTypeInt 32 0 + %void = OpTypeVoid + %8 = OpTypeFunction %void + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%_ptr_Input_uint = OpTypePointer Input %uint +%_ptr_Private_v3uint = OpTypePointer Private %v3uint + %uint_0 = OpConstant %uint 0 +%gl_WorkGroupID = OpVariable %_ptr_Input_v3uint Input + %15 = OpSpecConstant %uint 1 + %16 = OpConstant %uint 2 + %17 = OpSpecConstant %uint 3 +%gl_WorkGroupSize = OpSpecConstantComposite %v3uint %15 %16 %17 + %19 = OpVariable %_ptr_Private_v3uint Private %gl_WorkGroupSize + %20 = OpVariable %_ptr_StorageBuffer__struct_4 StorageBuffer + %21 = OpVariable %_ptr_StorageBuffer__struct_4 StorageBuffer + %22 = OpFunction %void None %8 + %23 = OpLabel + %24 = OpAccessChain %_ptr_Input_uint %gl_WorkGroupID %uint_0 + %25 = OpLoad %uint %24 + %26 = OpAccessChain %_ptr_StorageBuffer_float %21 %uint_0 %25 + %27 = OpLoad %float %26 + %28 = OpAccessChain %_ptr_StorageBuffer_float %20 %uint_0 %25 + %29 = OpLoad %float %28 + %30 = OpFAdd %float %27 %29 + OpStore %28 %30 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag new file mode 100644 index 000000000..d778034b5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/cbuffer-stripped.asm.frag @@ -0,0 +1,55 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 34 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpMemberDecorate %UBO 0 RowMajor + OpMemberDecorate %UBO 0 Offset 0 + 
OpMemberDecorate %UBO 0 MatrixStride 16 + OpMemberDecorate %UBO 1 Offset 64 + OpDecorate %UBO Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %8 = OpTypeFunction %v2float +%_ptr_Function_v2float = OpTypePointer Function %v2float + %v4float = OpTypeVector %float 4 +%mat2v4float = OpTypeMatrix %v4float 2 + %UBO = OpTypeStruct %mat2v4float %v4float +%_ptr_Uniform_UBO = OpTypePointer Uniform %UBO + %_ = OpVariable %_ptr_Uniform_UBO Uniform + %int = OpTypeInt 32 1 + %int_1 = OpConstant %int 1 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_0 = OpConstant %int 0 +%_ptr_Uniform_mat2v4float = OpTypePointer Uniform %mat2v4float +%_ptr_Output_v2float = OpTypePointer Output %v2float +%_entryPointOutput = OpVariable %_ptr_Output_v2float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %33 = OpFunctionCall %v2float %_main_ + OpStore %_entryPointOutput %33 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v2float None %8 + %10 = OpLabel + %a0 = OpVariable %_ptr_Function_v2float Function + %21 = OpAccessChain %_ptr_Uniform_v4float %_ %int_1 + %22 = OpLoad %v4float %21 + %25 = OpAccessChain %_ptr_Uniform_mat2v4float %_ %int_0 + %26 = OpLoad %mat2v4float %25 + %27 = OpVectorTimesMatrix %v2float %22 %26 + OpStore %a0 %27 + %28 = OpLoad %v2float %a0 + OpReturnValue %28 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag new file mode 100644 index 000000000..ba2f95b23 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/combined-sampler-reuse.asm.frag @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vUV + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uTex "uTex" + OpName %uSampler "uSampler" + OpName %vUV "vUV" + OpDecorate %FragColor Location 0 + OpDecorate %uTex DescriptorSet 0 + OpDecorate %uTex Binding 1 + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 0 + OpDecorate %vUV Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_10 = OpTypePointer UniformConstant %10 + %uTex = OpVariable %_ptr_UniformConstant_10 UniformConstant + %14 = OpTypeSampler +%_ptr_UniformConstant_14 = OpTypePointer UniformConstant %14 + %uSampler = OpVariable %_ptr_UniformConstant_14 UniformConstant + %18 = OpTypeSampledImage %10 + %v2float = OpTypeVector %float 2 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %vUV = OpVariable %_ptr_Input_v2float Input + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 + %int_1 = OpConstant %int 1 + %32 = OpConstantComposite %v2int %int_1 %int_1 + %main = OpFunction %void None %3 + %5 = OpLabel + %13 = OpLoad %10 %uTex + %17 = OpLoad %14 %uSampler + %19 = OpSampledImage %18 %13 %17 + %23 = OpLoad %v2float %vUV + %24 = OpImageSampleImplicitLod %v4float %19 %23 + OpStore %FragColor %24 + %28 = OpLoad %v2float %vUV + 
%33 = OpImageSampleImplicitLod %v4float %19 %28 ConstOffset %32 + %34 = OpLoad %v4float %FragColor + %35 = OpFAdd %v4float %34 %33 + OpStore %FragColor %35 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..0efd3158c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/empty-struct.asm.frag @@ -0,0 +1,55 @@ +; SPIR-V +; Version: 1.2 +; Generator: Khronos; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %EntryPoint_Main "main" + OpExecutionMode %EntryPoint_Main OriginUpperLeft + OpSource Unknown 100 + OpName %EmptyStructTest "EmptyStructTest" + OpName %GetValue "GetValue" + OpName %GetValue2 "GetValue" + OpName %self "self" + OpName %self2 "self" + OpName %emptyStruct "emptyStruct" + OpName %value "value" + OpName %EntryPoint_Main "EntryPoint_Main" + +%EmptyStructTest = OpTypeStruct +%_ptr_Function_EmptyStructTest = OpTypePointer Function %EmptyStructTest + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %5 = OpTypeFunction %float %_ptr_Function_EmptyStructTest + %6 = OpTypeFunction %float %EmptyStructTest + %void = OpTypeVoid +%_ptr_Function_void = OpTypePointer Function %void + %8 = OpTypeFunction %void %_ptr_Function_EmptyStructTest + %9 = OpTypeFunction %void + %float_0 = OpConstant %float 0 + + %GetValue = OpFunction %float None %5 + %self = OpFunctionParameter %_ptr_Function_EmptyStructTest + %13 = OpLabel + OpReturnValue %float_0 + OpFunctionEnd + + %GetValue2 = OpFunction %float None %6 + %self2 = OpFunctionParameter %EmptyStructTest + %14 = OpLabel + OpReturnValue %float_0 + OpFunctionEnd + +%EntryPoint_Main = OpFunction %void None %9 + %37 = OpLabel + %emptyStruct = OpVariable %_ptr_Function_EmptyStructTest Function + %18 = OpVariable %_ptr_Function_EmptyStructTest Function + %value = OpVariable %_ptr_Function_float Function + %value2 = OpCompositeConstruct %EmptyStructTest + %22 = OpFunctionCall %float %GetValue %emptyStruct + %23 = OpFunctionCall %float %GetValue2 %value2 + OpStore %value %22 + OpStore %value %23 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/frem.asm.frag new file mode 100644 index 000000000..8350c75c0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/frem.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 16 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vA %vB + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vA "vA" + OpName %vB "vB" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %vA RelaxedPrecision + OpDecorate %vA Location 0 + OpDecorate %12 RelaxedPrecision + OpDecorate %vB RelaxedPrecision + OpDecorate %vB Location 1 + OpDecorate %14 RelaxedPrecision + OpDecorate %15 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output +%_ptr_Input_v4float = OpTypePointer Input %v4float + %vA = OpVariable %_ptr_Input_v4float Input + %vB = OpVariable 
%_ptr_Input_v4float Input + %main = OpFunction %void None %3 + %5 = OpLabel + %12 = OpLoad %v4float %vA + %14 = OpLoad %v4float %vB + %15 = OpFRem %v4float %12 %14 + OpStore %FragColor %15 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..397aa98ce --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,153 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 76 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %foobar_vf4_ "foo" + OpName %a "foo" + OpName %foobar_vf3_ "foo" + OpName %a_0 "foo" + OpName %foobaz_vf4_ "foo" + OpName %a_1 "foo" + OpName %foobaz_vf2_ "foo" + OpName %a_2 "foo" + OpName %a_3 "foo" + OpName %param "foo" + OpName %b "foo" + OpName %param_0 "foo" + OpName %c "foo" + OpName %param_1 "foo" + OpName %d "foo" + OpName %param_2 "foo" + OpName %FragColor "FragColor" + OpDecorate %foobar_vf4_ RelaxedPrecision + OpDecorate %a RelaxedPrecision + OpDecorate %foobar_vf3_ RelaxedPrecision + OpDecorate %a_0 RelaxedPrecision + OpDecorate %foobaz_vf4_ RelaxedPrecision + OpDecorate %a_1 RelaxedPrecision + OpDecorate %foobaz_vf2_ RelaxedPrecision + OpDecorate %a_2 RelaxedPrecision + OpDecorate %28 RelaxedPrecision + OpDecorate %30 RelaxedPrecision + OpDecorate %31 RelaxedPrecision + OpDecorate %34 RelaxedPrecision + OpDecorate %35 RelaxedPrecision + OpDecorate %36 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %40 RelaxedPrecision + OpDecorate %42 RelaxedPrecision + OpDecorate %43 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %48 RelaxedPrecision + OpDecorate %49 RelaxedPrecision + OpDecorate %a_3 RelaxedPrecision + OpDecorate %55 RelaxedPrecision + OpDecorate %b RelaxedPrecision + OpDecorate %59 RelaxedPrecision + OpDecorate %c RelaxedPrecision + OpDecorate %62 RelaxedPrecision + OpDecorate %d RelaxedPrecision + OpDecorate %66 RelaxedPrecision + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %69 RelaxedPrecision + OpDecorate %70 RelaxedPrecision + OpDecorate %71 RelaxedPrecision + OpDecorate %72 RelaxedPrecision + OpDecorate %73 RelaxedPrecision + OpDecorate %74 RelaxedPrecision + OpDecorate %75 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %9 = OpTypeFunction %v4float %_ptr_Function_v4float + %v3float = OpTypeVector %float 3 +%_ptr_Function_v3float = OpTypePointer Function %v3float + %15 = OpTypeFunction %v4float %_ptr_Function_v3float + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %24 = OpTypeFunction %v4float %_ptr_Function_v2float + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 + %53 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %57 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %64 = OpConstantComposite %v2float %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + 
%5 = OpLabel + %a_3 = OpVariable %_ptr_Function_v4float Function + %param = OpVariable %_ptr_Function_v4float Function + %b = OpVariable %_ptr_Function_v4float Function + %param_0 = OpVariable %_ptr_Function_v3float Function + %c = OpVariable %_ptr_Function_v4float Function + %param_1 = OpVariable %_ptr_Function_v4float Function + %d = OpVariable %_ptr_Function_v4float Function + %param_2 = OpVariable %_ptr_Function_v2float Function + OpStore %param %53 + %55 = OpFunctionCall %v4float %foobar_vf4_ %param + OpStore %a_3 %55 + OpStore %param_0 %57 + %59 = OpFunctionCall %v4float %foobar_vf3_ %param_0 + OpStore %b %59 + OpStore %param_1 %53 + %62 = OpFunctionCall %v4float %foobaz_vf4_ %param_1 + OpStore %c %62 + OpStore %param_2 %64 + %66 = OpFunctionCall %v4float %foobaz_vf2_ %param_2 + OpStore %d %66 + %69 = OpLoad %v4float %a_3 + %70 = OpLoad %v4float %b + %71 = OpFAdd %v4float %69 %70 + %72 = OpLoad %v4float %c + %73 = OpFAdd %v4float %71 %72 + %74 = OpLoad %v4float %d + %75 = OpFAdd %v4float %73 %74 + OpStore %FragColor %75 + OpReturn + OpFunctionEnd +%foobar_vf4_ = OpFunction %v4float None %9 + %a = OpFunctionParameter %_ptr_Function_v4float + %12 = OpLabel + %28 = OpLoad %v4float %a + %30 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1 + %31 = OpFAdd %v4float %28 %30 + OpReturnValue %31 + OpFunctionEnd +%foobar_vf3_ = OpFunction %v4float None %15 + %a_0 = OpFunctionParameter %_ptr_Function_v3float + %18 = OpLabel + %34 = OpLoad %v3float %a_0 + %35 = OpVectorShuffle %v4float %34 %34 0 1 2 2 + %36 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1 + %37 = OpFAdd %v4float %35 %36 + OpReturnValue %37 + OpFunctionEnd +%foobaz_vf4_ = OpFunction %v4float None %9 + %a_1 = OpFunctionParameter %_ptr_Function_v4float + %21 = OpLabel + %40 = OpLoad %v4float %a_1 + %42 = OpCompositeConstruct %v4float %float_2 %float_2 %float_2 %float_2 + %43 = OpFAdd %v4float %40 %42 + OpReturnValue %43 + OpFunctionEnd +%foobaz_vf2_ = OpFunction %v4float None %24 + %a_2 = OpFunctionParameter %_ptr_Function_v2float + %27 = OpLabel + %46 = OpLoad %v2float %a_2 + %47 = OpVectorShuffle %v4float %46 %46 0 1 0 1 + %48 = OpCompositeConstruct %v4float %float_2 %float_2 %float_2 %float_2 + %49 = OpFAdd %v4float %47 %48 + OpReturnValue %49 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..63c8ab57a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 19 +; Schema: 0 + OpCapability Shader + OpCapability ImageQuery + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %Size + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %Size "Size" + OpName %uTexture "uTexture" + OpDecorate %Size Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 +%_ptr_Output_v2int = OpTypePointer Output %v2int + %Size = OpVariable %_ptr_Output_v2int Output + %float = OpTypeFloat 32 + %11 = OpTypeImage %float 2D 0 0 0 1 Unknown + %12 = OpTypeSampledImage %11 +%_ptr_UniformConstant_12 = OpTypePointer UniformConstant %12 + %uTexture = OpVariable %_ptr_UniformConstant_12 UniformConstant + 
%int_0 = OpConstant %int 0 + %int_1 = OpConstant %int 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %15 = OpLoad %12 %uTexture + %17 = OpImage %11 %15 + %18 = OpImageQuerySizeLod %v2int %17 %int_0 + %19 = OpImageQuerySizeLod %v2int %17 %int_1 + %20 = OpIAdd %v2int %18 %19 + OpStore %Size %20 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..ccdfeef58 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,81 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 60 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %v0 %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %phi "phi" + OpName %i "i" + OpName %v0 "v0" + OpName %FragColor "FragColor" + OpName %uImage "uImage" + OpDecorate %v0 Location 0 + OpDecorate %FragColor Location 0 + OpDecorate %uImage DescriptorSet 0 + OpDecorate %uImage Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %float_1 = OpConstant %float 1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool + %v4float = OpTypeVector %float 4 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %v0 = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_float = OpTypePointer Input %float + %float_0 = OpConstant %float 0 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %36 = OpTypeImage %float 2D 0 0 0 1 Unknown + %37 = OpTypeSampledImage %36 +%_ptr_UniformConstant_37 = OpTypePointer UniformConstant %37 + %uImage = OpVariable %_ptr_UniformConstant_37 UniformConstant + %v2float = OpTypeVector %float 2 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 + %float_2 = OpConstant %float 2 + %int_1 = OpConstant %int 1 + %float_1_vec = OpConstantComposite %v4float %float_1 %float_2 %float_1 %float_2 + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + OpStore %i %int_0 + OpBranch %loop_header + %loop_header = OpLabel + %phi = OpPhi %float %float_1 %5 %phi_plus_2 %continue_block + %tex_phi = OpPhi %v4float %float_1_vec %5 %texture_load_result %continue_block + OpLoopMerge %merge_block %continue_block None + OpBranch %loop_body + %loop_body = OpLabel + OpStore %FragColor %tex_phi + %19 = OpLoad %int %i + %22 = OpSLessThan %bool %19 %int_4 + OpBranchConditional %22 %15 %merge_block + %15 = OpLabel + %26 = OpLoad %int %i + %28 = OpAccessChain %_ptr_Input_float %v0 %26 + %29 = OpLoad %float %28 + %31 = OpFOrdGreaterThan %bool %29 %float_0 + OpBranchConditional %31 %continue_block %merge_block + %continue_block = OpLabel + %40 = OpLoad %37 %uImage + %43 = OpCompositeConstruct %v2float %phi %phi + %texture_load_result = OpImageSampleExplicitLod %v4float %40 %43 Lod %float_0 + %phi_plus_2 = OpFAdd %float %phi %float_2 + %54 = OpLoad %int %i + %56 = OpIAdd %int %54 %int_1 + OpStore %i %56 + OpBranch %loop_header + %merge_block = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag 
b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..40e5d3a89 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,29 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 14 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v3float = OpTypeVector %float 3 +%_ptr_Output_v3float = OpTypePointer Output %v3float + %FragColor = OpVariable %_ptr_Output_v3float Output +%float_0x1p_128 = OpConstant %float 0x1p+128 +%float_n0x1p_128 = OpConstant %float -0x1p+128 +%float_0x1_8p_128 = OpConstant %float 0x1.8p+128 + %13 = OpConstantComposite %v3float %float_0x1p_128 %float_n0x1p_128 %float_0x1_8p_128 + %main = OpFunction %void None %3 + %5 = OpLabel + OpStore %FragColor %13 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..320e5ebfb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,195 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 111 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %index + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %index "index" + OpName %indexable "indexable" + OpName %indexable_0 "indexable" + OpName %indexable_1 "indexable" + OpName %foo "foo" + OpName %foobar "foobar" + OpName %baz "baz" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %index RelaxedPrecision + OpDecorate %index Flat + OpDecorate %index Location 0 + OpDecorate %20 RelaxedPrecision + OpDecorate %25 RelaxedPrecision + OpDecorate %26 RelaxedPrecision + OpDecorate %32 RelaxedPrecision + OpDecorate %34 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %38 RelaxedPrecision + OpDecorate %39 RelaxedPrecision + OpDecorate %41 RelaxedPrecision + OpDecorate %42 RelaxedPrecision + OpDecorate %45 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %foo RelaxedPrecision + OpDecorate %61 RelaxedPrecision + OpDecorate %66 RelaxedPrecision + OpDecorate %68 RelaxedPrecision + OpDecorate %71 RelaxedPrecision + OpDecorate %72 RelaxedPrecision + OpDecorate %73 RelaxedPrecision + OpDecorate %75 RelaxedPrecision + OpDecorate %76 RelaxedPrecision + OpDecorate %79 RelaxedPrecision + OpDecorate %80 RelaxedPrecision + OpDecorate %81 RelaxedPrecision + OpDecorate %foobar RelaxedPrecision + OpDecorate %83 RelaxedPrecision + OpDecorate %90 RelaxedPrecision + OpDecorate %91 RelaxedPrecision + OpDecorate %93 RelaxedPrecision + OpDecorate %94 RelaxedPrecision + OpDecorate %95 RelaxedPrecision + OpDecorate %baz RelaxedPrecision + OpDecorate %105 RelaxedPrecision + OpDecorate %106 RelaxedPrecision + OpDecorate %108 RelaxedPrecision + OpDecorate %109 RelaxedPrecision + OpDecorate %110 RelaxedPrecision + OpDecorate %16 
RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %uint = OpTypeInt 32 0 + %uint_16 = OpConstant %uint 16 +%_arr_float_uint_16 = OpTypeArray %float %uint_16 + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 + %float_3 = OpConstant %float 3 + %float_4 = OpConstant %float 4 + %16 = OpConstantComposite %_arr_float_uint_16 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 + %int = OpTypeInt 32 1 +%_ptr_Input_int = OpTypePointer Input %int + %index = OpVariable %_ptr_Input_int Input +%_ptr_Function__arr_float_uint_16 = OpTypePointer Function %_arr_float_uint_16 +%_ptr_Function_float = OpTypePointer Function %float + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %int_1 = OpConstant %int 1 + %v4float = OpTypeVector %float 4 + %uint_4 = OpConstant %uint 4 +%_arr_v4float_uint_4 = OpTypeArray %v4float %uint_4 +%_ptr_Function__arr_v4float_uint_4 = OpTypePointer Function %_arr_v4float_uint_4 + %float_0 = OpConstant %float 0 + %54 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 + %55 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %float_8 = OpConstant %float 8 + %57 = OpConstantComposite %v4float %float_8 %float_8 %float_8 %float_8 + %float_5 = OpConstant %float 5 + %59 = OpConstantComposite %v4float %float_5 %float_5 %float_5 %float_5 + %60 = OpConstantComposite %_arr_v4float_uint_4 %54 %55 %57 %59 + %int_30 = OpConstant %int 30 + %int_3 = OpConstant %int 3 + %uint_1 = OpConstant %uint 1 + %uint_0 = OpConstant %uint 0 + %float_20 = OpConstant %float 20 + %uint_2 = OpConstant %uint 2 + %97 = OpConstantComposite %v4float %float_20 %float_20 %float_20 %float_20 + %float_30 = OpConstant %float 30 + %99 = OpConstantComposite %v4float %float_30 %float_30 %float_30 %float_30 + %float_50 = OpConstant %float 50 + %101 = OpConstantComposite %v4float %float_50 %float_50 %float_50 %float_50 + %float_60 = OpConstant %float 60 + %103 = OpConstantComposite %v4float %float_60 %float_60 %float_60 %float_60 + %104 = OpConstantComposite %_arr_v4float_uint_4 %97 %99 %101 %103 + %main = OpFunction %void None %3 + %5 = OpLabel + %indexable = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 +%indexable_0 = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 +%indexable_1 = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 + %foo = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %foobar = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %baz = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %20 = OpLoad %int %index + %24 = OpAccessChain %_ptr_Function_float %indexable %20 + %25 = OpLoad %float %24 + OpStore %FragColor %25 + %26 = OpLoad %int %index + %29 = OpSLessThan %bool %26 %int_10 + OpSelectionMerge %31 None + OpBranchConditional %29 %30 %40 + %30 = OpLabel + %32 = OpLoad %int %index + %34 = OpBitwiseXor %int %32 %int_1 + %36 = OpAccessChain %_ptr_Function_float %indexable_0 %34 + %37 = OpLoad %float %36 + %38 = OpLoad %float %FragColor + %39 = OpFAdd %float %38 %37 + OpStore %FragColor %39 + OpBranch %31 + %40 = OpLabel + %41 = OpLoad %int %index + %42 = OpBitwiseAnd %int %41 %int_1 + %44 = OpAccessChain %_ptr_Function_float %indexable_1 %42 + %45 = OpLoad %float %44 + %46 = OpLoad %float %FragColor + %47 = OpFAdd %float %46 %45 + OpStore %FragColor %47 + 
OpBranch %31 + %31 = OpLabel + %61 = OpLoad %int %index + %63 = OpSGreaterThan %bool %61 %int_30 + OpSelectionMerge %65 None + OpBranchConditional %63 %64 %74 + %64 = OpLabel + %66 = OpLoad %int %index + %68 = OpBitwiseAnd %int %66 %int_3 + %70 = OpAccessChain %_ptr_Function_float %foo %68 %uint_1 + %71 = OpLoad %float %70 + %72 = OpLoad %float %FragColor + %73 = OpFAdd %float %72 %71 + OpStore %FragColor %73 + OpBranch %65 + %74 = OpLabel + %75 = OpLoad %int %index + %76 = OpBitwiseAnd %int %75 %int_1 + %78 = OpAccessChain %_ptr_Function_float %foo %76 %uint_0 + %79 = OpLoad %float %78 + %80 = OpLoad %float %FragColor + %81 = OpFAdd %float %80 %79 + OpStore %FragColor %81 + OpBranch %65 + %65 = OpLabel + %83 = OpLoad %int %index + %84 = OpSGreaterThan %bool %83 %int_30 + OpSelectionMerge %86 None + OpBranchConditional %84 %85 %86 + %85 = OpLabel + %89 = OpAccessChain %_ptr_Function_float %foobar %int_1 %uint_2 + OpStore %89 %float_20 + OpBranch %86 + %86 = OpLabel + %90 = OpLoad %int %index + %91 = OpBitwiseAnd %int %90 %int_3 + %92 = OpAccessChain %_ptr_Function_float %foobar %91 %uint_2 + %93 = OpLoad %float %92 + %94 = OpLoad %float %FragColor + %95 = OpFAdd %float %94 %93 + OpStore %FragColor %95 + OpStore %baz %104 + %105 = OpLoad %int %index + %106 = OpBitwiseAnd %int %105 %int_3 + %107 = OpAccessChain %_ptr_Function_float %baz %106 %uint_2 + %108 = OpLoad %float %107 + %109 = OpLoad %float %FragColor + %110 = OpFAdd %float %109 %108 + OpStore %FragColor %110 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..083c85d9b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,51 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 32 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %add_value_f1_f1_ "add_value(f1;f1;" + OpName %v "v" + OpName %w "w" + OpName %FragColor "FragColor" + OpName %Registers "Registers" + OpMemberName %Registers 0 "foo" + OpName %registers "registers" + OpDecorate %FragColor Location 0 + OpMemberDecorate %Registers 0 Offset 0 + OpDecorate %Registers Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %8 = OpTypeFunction %float %float %float +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %float_10 = OpConstant %float 10 + %Registers = OpTypeStruct %float +%_ptr_PushConstant_Registers = OpTypePointer PushConstant %Registers + %registers = OpVariable %_ptr_PushConstant_Registers PushConstant + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_PushConstant_float = OpTypePointer PushConstant %float + %main = OpFunction %void None %3 + %5 = OpLabel + %29 = OpAccessChain %_ptr_PushConstant_float %registers %int_0 + %30 = OpLoad %float %29 + %31 = OpFunctionCall %float %add_value_f1_f1_ %float_10 %30 + OpStore %FragColor %31 + OpReturn + OpFunctionEnd +%add_value_f1_f1_ = OpFunction %float None %8 + %v = OpFunctionParameter %float + %w = OpFunctionParameter %float + %12 = OpLabel + %15 = OpFAdd %float %v %w + OpReturnValue %15 + OpFunctionEnd diff --git 
a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/srem.asm.frag new file mode 100644 index 000000000..c6f8e27cb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/srem.asm.frag @@ -0,0 +1,43 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 23 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vA %vB + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vA "vA" + OpName %vB "vB" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %vA Flat + OpDecorate %vA Location 0 + OpDecorate %vB Flat + OpDecorate %vB Location 1 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %int = OpTypeInt 32 1 + %v4int = OpTypeVector %int 4 +%_ptr_Input_v4int = OpTypePointer Input %v4int + %vA = OpVariable %_ptr_Input_v4int Input + %vB = OpVariable %_ptr_Input_v4int Input + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpLoad %v4int %vA + %16 = OpLoad %v4int %vB + %17 = OpLoad %v4int %vA + %18 = OpLoad %v4int %vB + %19 = OpSRem %v4int %17 %18 + %20 = OpConvertSToF %v4float %19 + OpStore %FragColor %20 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..53dc63809 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,46 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 26 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %gl_FragCoord + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uTexture "uTexture" + OpName %gl_FragCoord "gl_FragCoord" + OpDecorate %FragColor Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 0 + OpDecorate %gl_FragCoord BuiltIn FragCoord + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown + %11 = OpTypeSampledImage %10 +%_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 + %uTexture = OpVariable %_ptr_UniformConstant_11 UniformConstant +%_ptr_Input_v4float = OpTypePointer Input %v4float +%gl_FragCoord = OpVariable %_ptr_Input_v4float Input + %v2float = OpTypeVector %float 2 + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 + %int_0 = OpConstant %int 0 + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpLoad %11 %uTexture + %18 = OpLoad %v4float %gl_FragCoord + %19 = OpVectorShuffle %v2float %18 %18 0 1 + %22 = OpConvertFToS %v2int %19 + %24 = OpImage %10 %14 + %25 = OpImageFetch %v4float %24 %22 + OpStore %FragColor %25 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag new file mode 
100644 index 000000000..89036f0eb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/unknown-depth-state.asm.frag @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 44 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %vUV %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %sample_combined_ "sample_combined(" + OpName %sample_separate_ "sample_separate(" + OpName %uShadow "uShadow" + OpName %vUV "vUV" + OpName %uTexture "uTexture" + OpName %uSampler "uSampler" + OpName %FragColor "FragColor" + OpDecorate %uShadow DescriptorSet 0 + OpDecorate %uShadow Binding 0 + OpDecorate %vUV Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 1 + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 2 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %7 = OpTypeFunction %float + %12 = OpTypeImage %float 2D 2 0 0 1 Unknown + %13 = OpTypeSampledImage %12 +%_ptr_UniformConstant_13 = OpTypePointer UniformConstant %13 + %uShadow = OpVariable %_ptr_UniformConstant_13 UniformConstant + %v3float = OpTypeVector %float 3 +%_ptr_Input_v3float = OpTypePointer Input %v3float + %vUV = OpVariable %_ptr_Input_v3float Input +%_ptr_UniformConstant_25 = OpTypePointer UniformConstant %12 + %uTexture = OpVariable %_ptr_UniformConstant_25 UniformConstant + %29 = OpTypeSampler +%_ptr_UniformConstant_29 = OpTypePointer UniformConstant %29 + %uSampler = OpVariable %_ptr_UniformConstant_29 UniformConstant +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %41 = OpFunctionCall %float %sample_combined_ + %42 = OpFunctionCall %float %sample_separate_ + %43 = OpFAdd %float %41 %42 + OpStore %FragColor %43 + OpReturn + OpFunctionEnd +%sample_combined_ = OpFunction %float None %7 + %9 = OpLabel + %16 = OpLoad %13 %uShadow + %20 = OpLoad %v3float %vUV + %21 = OpCompositeExtract %float %20 2 + %22 = OpImageSampleDrefImplicitLod %float %16 %20 %21 + OpReturnValue %22 + OpFunctionEnd +%sample_separate_ = OpFunction %float None %7 + %11 = OpLabel + %28 = OpLoad %12 %uTexture + %32 = OpLoad %29 %uSampler + %33 = OpSampledImage %13 %28 %32 + %34 = OpLoad %v3float %vUV + %35 = OpCompositeExtract %float %34 2 + %36 = OpImageSampleDrefImplicitLod %float %33 %34 %35 + OpReturnValue %36 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..e2ce2eb56 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/frag/unreachable.asm.frag @@ -0,0 +1,61 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 47 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %counter %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %counter "counter" + OpName %FragColor "FragColor" + OpDecorate %counter Flat + OpDecorate %counter Location 0 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %int = OpTypeInt 32 1 
+%_ptr_Input_int = OpTypePointer Input %int + %counter = OpVariable %_ptr_Input_int Input + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %float_10 = OpConstant %float 10 + %21 = OpConstantComposite %v4float %float_10 %float_10 %float_10 %float_10 + %float_30 = OpConstant %float 30 + %25 = OpConstantComposite %v4float %float_30 %float_30 %float_30 %float_30 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %false = OpConstantFalse %bool + %44 = OpUndef %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + OpBranch %33 + %33 = OpLabel + %45 = OpPhi %v4float %44 %5 %44 %35 + OpLoopMerge %34 %35 None + OpBranch %36 + %36 = OpLabel + %37 = OpLoad %int %counter + %38 = OpIEqual %bool %37 %int_10 + OpSelectionMerge %39 None + OpBranchConditional %38 %40 %41 + %40 = OpLabel + OpBranch %34 + %41 = OpLabel + OpBranch %34 + %39 = OpLabel + OpUnreachable + %35 = OpLabel + OpBranchConditional %false %33 %34 + %34 = OpLabel + %46 = OpPhi %v4float %21 %40 %25 %41 %44 %35 + OpStore %FragColor %46 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert b/3rdparty/spirv-cross/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert new file mode 100644 index 000000000..b566a3d1a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/vert/spec-constant-op-composite.asm.vert @@ -0,0 +1,98 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 58 +; Schema: 0 + OpCapability Shader + OpCapability ClipDistance + OpCapability CullDistance + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %4 "main" %52 %output + OpSource GLSL 450 + OpName %4 "main" + OpName %9 "pos" + OpName %50 "gl_PerVertex" + OpMemberName %50 0 "gl_Position" + OpMemberName %50 1 "gl_PointSize" + OpMemberName %50 2 "gl_ClipDistance" + OpMemberName %50 3 "gl_CullDistance" + OpName %52 "" + OpDecorate %13 SpecId 201 + OpDecorate %24 SpecId 202 + OpMemberDecorate %50 0 BuiltIn Position + OpMemberDecorate %50 1 BuiltIn PointSize + OpMemberDecorate %50 2 BuiltIn ClipDistance + OpMemberDecorate %50 3 BuiltIn CullDistance + OpDecorate %50 Block + OpDecorate %57 SpecId 200 + OpDecorate %output Flat + OpDecorate %output Location 0 + %2 = OpTypeVoid + %3 = OpTypeFunction %2 + %6 = OpTypeFloat 32 + %7 = OpTypeVector %6 4 + %8 = OpTypePointer Function %7 + %10 = OpConstant %6 0 + %11 = OpConstantComposite %7 %10 %10 %10 %10 + %12 = OpTypeInt 32 1 + %int_ptr = OpTypePointer Output %12 + %13 = OpSpecConstant %12 -10 + %14 = OpConstant %12 2 + %15 = OpSpecConstantOp %12 IAdd %13 %14 + %17 = OpTypeInt 32 0 + %18 = OpConstant %17 1 + %19 = OpTypePointer Function %6 + %24 = OpSpecConstant %17 100 + %25 = OpConstant %17 5 + %26 = OpSpecConstantOp %17 UMod %24 %25 + %28 = OpConstant %17 2 + %33 = OpConstant %12 20 + %34 = OpConstant %12 30 + %35 = OpTypeVector %12 4 + %36 = OpSpecConstantComposite %35 %33 %34 %15 %15 + %40 = OpTypeVector %12 2 + %41 = OpSpecConstantOp %40 VectorShuffle %36 %36 1 0 + %foo = OpSpecConstantOp %12 CompositeExtract %36 1 + %42 = OpTypeVector %6 2 + %49 = OpTypeArray %6 %18 + %50 = OpTypeStruct %7 %6 %49 %49 + %51 = OpTypePointer Output %50 + %52 = OpVariable %51 Output + %output = OpVariable %int_ptr Output + %53 = OpConstant %12 0 + %55 = OpTypePointer Output %7 + %57 = OpSpecConstant %6 3.14159 + %4 = OpFunction %2 None %3 + %5 = OpLabel + %9 = OpVariable %8 
Function + OpStore %9 %11 + %16 = OpConvertSToF %6 %15 + %20 = OpAccessChain %19 %9 %18 + %21 = OpLoad %6 %20 + %22 = OpFAdd %6 %21 %16 + %23 = OpAccessChain %19 %9 %18 + OpStore %23 %22 + %27 = OpConvertUToF %6 %26 + %29 = OpAccessChain %19 %9 %28 + %30 = OpLoad %6 %29 + %31 = OpFAdd %6 %30 %27 + %32 = OpAccessChain %19 %9 %28 + OpStore %32 %31 + %37 = OpConvertSToF %7 %36 + %38 = OpLoad %7 %9 + %39 = OpFAdd %7 %38 %37 + OpStore %9 %39 + %43 = OpConvertSToF %42 %41 + %44 = OpLoad %7 %9 + %45 = OpVectorShuffle %42 %44 %44 0 1 + %46 = OpFAdd %42 %45 %43 + %47 = OpLoad %7 %9 + %48 = OpVectorShuffle %7 %47 %46 4 5 2 3 + OpStore %9 %48 + %54 = OpLoad %7 %9 + %56 = OpAccessChain %55 %52 %53 + OpStore %56 %54 + OpStore %output %foo + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..29b0076a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,65 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %vid_1 %iid_1 %_entryPointOutput + OpSource HLSL 500 + OpName %main "main" + OpName %_main_u1_u1_ "@main(u1;u1;" + OpName %vid "vid" + OpName %iid "iid" + OpName %vid_0 "vid" + OpName %vid_1 "vid" + OpName %iid_0 "iid" + OpName %iid_1 "iid" + OpName %_entryPointOutput "@entryPointOutput" + OpName %param "param" + OpName %param_0 "param" + OpDecorate %vid_1 BuiltIn VertexIndex + OpDecorate %iid_1 BuiltIn InstanceIndex + OpDecorate %_entryPointOutput BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %10 = OpTypeFunction %v4float %_ptr_Function_uint %_ptr_Function_uint +%_ptr_Input_uint = OpTypePointer Input %uint + %vid_1 = OpVariable %_ptr_Input_uint Input + %iid_1 = OpVariable %_ptr_Input_uint Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %vid_0 = OpVariable %_ptr_Function_uint Function + %iid_0 = OpVariable %_ptr_Function_uint Function + %param = OpVariable %_ptr_Function_uint Function + %param_0 = OpVariable %_ptr_Function_uint Function + %25 = OpLoad %uint %vid_1 + OpStore %vid_0 %25 + %28 = OpLoad %uint %iid_1 + OpStore %iid_0 %28 + %32 = OpLoad %uint %vid_0 + OpStore %param %32 + %34 = OpLoad %uint %iid_0 + OpStore %param_0 %34 + %35 = OpFunctionCall %v4float %_main_u1_u1_ %param %param_0 + OpStore %_entryPointOutput %35 + OpReturn + OpFunctionEnd +%_main_u1_u1_ = OpFunction %v4float None %10 + %vid = OpFunctionParameter %_ptr_Function_uint + %iid = OpFunctionParameter %_ptr_Function_uint + %14 = OpLabel + %15 = OpLoad %uint %vid + %16 = OpLoad %uint %iid + %17 = OpIAdd %uint %15 %16 + %18 = OpConvertUToF %float %17 + %19 = OpCompositeConstruct %v4float %18 %18 %18 %18 + OpReturnValue %19 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..85acc47ff --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-hlsl/asm/vert/vertex-id-instance-id.asm.vert @@ -0,0 +1,53 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 26 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_ %gl_VertexID %gl_InstanceID + OpSource GLSL 450 + OpName %main "main" + OpName %gl_PerVertex "gl_PerVertex" + OpMemberName %gl_PerVertex 0 "gl_Position" + OpMemberName %gl_PerVertex 1 "gl_PointSize" + OpMemberName %gl_PerVertex 2 "gl_ClipDistance" + OpMemberName %gl_PerVertex 3 "gl_CullDistance" + OpName %_ "" + OpName %gl_VertexID "gl_VertexID" + OpName %gl_InstanceID "gl_InstanceID" + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance + OpDecorate %gl_PerVertex Block + OpDecorate %gl_VertexID BuiltIn VertexId + OpDecorate %gl_InstanceID BuiltIn InstanceId + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex + %_ = OpVariable %_ptr_Output_gl_PerVertex Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Input_int = OpTypePointer Input %int +%gl_VertexID = OpVariable %_ptr_Input_int Input +%gl_InstanceID = OpVariable %_ptr_Input_int Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %18 = OpLoad %int %gl_VertexID + %20 = OpLoad %int %gl_InstanceID + %21 = OpIAdd %int %18 %20 + %22 = OpConvertSToF %float %21 + %23 = OpCompositeConstruct %v4float %22 %22 %22 %22 + %25 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %25 %23 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/access-chains.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/access-chains.comp new file mode 100644 index 000000000..639f3cac1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/access-chains.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1) in; + +// TODO: Read structs, matrices and arrays. 
+ +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 a[3][2][4]; + float b[3][2][4]; + vec4 unsized[]; +} ro; + +layout(std430, binding = 1) writeonly buffer SSBO1 +{ + vec4 c[3][2][4]; + float d[3][2][4]; + vec4 unsized[]; +} wo; + +void main() +{ + wo.c[2][gl_GlobalInvocationID.x][1] = ro.a[1][gl_GlobalInvocationID.x][2]; + wo.unsized[gl_GlobalInvocationID.x] = ro.unsized[gl_GlobalInvocationID.x]; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/address-buffers.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/address-buffers.comp new file mode 100644 index 000000000..3ba582ccd --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/address-buffers.comp @@ -0,0 +1,23 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 0, std430) readonly buffer ReadOnlyBuffer +{ + vec4 ro; +} ReadOnly; + +layout(binding = 1, std430) buffer ReadWriteBuffer +{ + vec4 rw; +} ReadWrite; + +layout(binding = 2, std430) buffer WriteOnlyBuffer +{ + vec4 wo; +} WriteOnly; + +void main() +{ + WriteOnly.wo = ReadOnly.ro; + ReadWrite.rw += 10.0; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/atomic.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/atomic.comp new file mode 100644 index 000000000..6f69ec725 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/atomic.comp @@ -0,0 +1,66 @@ +#version 310 es +#extension GL_OES_shader_image_atomic : require +layout(local_size_x = 1) in; + +layout(r32ui, binding = 0) uniform highp uimage2D uImage; +layout(r32i, binding = 1) uniform highp iimage2D iImage; +layout(binding = 2, std430) buffer SSBO +{ + uint u32; + int i32; +} ssbo; + +shared int int_atomic; +shared uint uint_atomic; +shared int int_atomic_array[1]; +shared uint uint_atomic_array[1]; + +void main() +{ + imageAtomicAdd(uImage, ivec2(1, 5), 1u); + + // Test that we do not invalidate OpImage variables which are loaded from UniformConstant + // address space. 
+ imageStore(iImage, ivec2(1, 6), ivec4(imageAtomicAdd(uImage, ivec2(1, 5), 1u))); + + imageAtomicOr(uImage, ivec2(1, 5), 1u); + imageAtomicXor(uImage, ivec2(1, 5), 1u); + imageAtomicAnd(uImage, ivec2(1, 5), 1u); + imageAtomicMin(uImage, ivec2(1, 5), 1u); + imageAtomicMax(uImage, ivec2(1, 5), 1u); + //imageAtomicExchange(uImage, ivec2(1, 5), 1u); + imageAtomicCompSwap(uImage, ivec2(1, 5), 10u, 2u); + + imageAtomicAdd(iImage, ivec2(1, 6), 1); + imageAtomicOr(iImage, ivec2(1, 6), 1); + imageAtomicXor(iImage, ivec2(1, 6), 1); + imageAtomicAnd(iImage, ivec2(1, 6), 1); + imageAtomicMin(iImage, ivec2(1, 6), 1); + imageAtomicMax(iImage, ivec2(1, 6), 1); + //imageAtomicExchange(iImage, ivec2(1, 5), 1u); + imageAtomicCompSwap(iImage, ivec2(1, 5), 10, 2); + + atomicAdd(ssbo.u32, 1u); + atomicOr(ssbo.u32, 1u); + atomicXor(ssbo.u32, 1u); + atomicAnd(ssbo.u32, 1u); + atomicMin(ssbo.u32, 1u); + atomicMax(ssbo.u32, 1u); + atomicExchange(ssbo.u32, 1u); + atomicCompSwap(ssbo.u32, 10u, 2u); + + atomicAdd(ssbo.i32, 1); + atomicOr(ssbo.i32, 1); + atomicXor(ssbo.i32, 1); + atomicAnd(ssbo.i32, 1); + atomicMin(ssbo.i32, 1); + atomicMax(ssbo.i32, 1); + atomicExchange(ssbo.i32, 1); + atomicCompSwap(ssbo.i32, 10, 2); + + atomicAdd(int_atomic, 10); + atomicAdd(uint_atomic, 10u); + atomicAdd(int_atomic_array[0], 10); + atomicAdd(uint_atomic_array[0], 10u); +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/barriers.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/barriers.comp new file mode 100644 index 000000000..7e0ea42d4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/barriers.comp @@ -0,0 +1,79 @@ +#version 310 es +layout(local_size_x = 4) in; + +void barrier_shared() +{ + memoryBarrierShared(); +} + +void full_barrier() +{ + memoryBarrier(); +} + +void image_barrier() +{ + memoryBarrierImage(); +} + +void buffer_barrier() +{ + memoryBarrierBuffer(); +} + +void group_barrier() +{ + groupMemoryBarrier(); +} + +void barrier_shared_exec() +{ + memoryBarrierShared(); + barrier(); +} + +void full_barrier_exec() +{ + memoryBarrier(); + barrier(); +} + +void image_barrier_exec() +{ + memoryBarrierImage(); + barrier(); +} + +void buffer_barrier_exec() +{ + memoryBarrierBuffer(); + barrier(); +} + +void group_barrier_exec() +{ + groupMemoryBarrier(); + barrier(); +} + +void exec_barrier() +{ + barrier(); +} + +void main() +{ + barrier_shared(); + full_barrier(); + image_barrier(); + buffer_barrier(); + group_barrier(); + + barrier_shared_exec(); + full_barrier_exec(); + image_barrier_exec(); + buffer_barrier_exec(); + group_barrier_exec(); + + exec_barrier(); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/builtins.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/builtins.comp new file mode 100644 index 000000000..b41cb5391 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/builtins.comp @@ -0,0 +1,11 @@ +#version 310 es +layout(local_size_x = 8, local_size_y = 4, local_size_z = 2) in; + +void main() +{ + uvec3 local_id = gl_LocalInvocationID; + uvec3 global_id = gl_GlobalInvocationID; + uint local_index = gl_LocalInvocationIndex; + uvec3 work_group_size = gl_WorkGroupSize; + uvec3 work_group_id = gl_WorkGroupID; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/composite-array-initialization.comp new file mode 100644 index 000000000..319f46630 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/composite-array-initialization.comp @@ -0,0 +1,29 @@ +#version 450 +layout(local_size_x = 2) in; + +struct Data +{ 
+ float a; + float b; +}; + +layout(std430, binding = 0) buffer SSBO +{ + Data outdata[]; +}; + +layout(constant_id = 0) const float X = 4.0; + +Data data[2] = Data[](Data(1.0, 2.0), Data(3.0, 4.0)); +Data data2[2] = Data[](Data(X, 2.0), Data(3.0, 5.0)); + +Data combine(Data a, Data b) +{ + return Data(a.a + b.a, a.b + b.b); +} + +void main() +{ + if (gl_LocalInvocationIndex == 0u) + outdata[gl_WorkGroupID.x] = combine(data[gl_LocalInvocationID.x], data2[gl_LocalInvocationID.x]); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/globallycoherent.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/globallycoherent.comp new file mode 100644 index 000000000..168b9404e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/globallycoherent.comp @@ -0,0 +1,25 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(r32f, binding = 0) uniform readonly image2D uImageIn; +layout(r32f, binding = 1) uniform coherent writeonly image2D uImageOut; + +layout(set = 0, binding = 2) readonly buffer Foo +{ + float foo; +}; + +layout(set = 0, binding = 3) coherent writeonly buffer Bar +{ + float bar; +}; + +void main() +{ + ivec2 coord = ivec2(9, 7); + vec4 indata = imageLoad(uImageIn, coord); + imageStore(uImageOut, coord, indata); + + bar = foo; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/image.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/image.comp new file mode 100644 index 000000000..1d3c8b4c6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/image.comp @@ -0,0 +1,77 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(r32f, binding = 0) uniform readonly image2D uImageInF; +layout(r32f, binding = 1) uniform writeonly image2D uImageOutF; +layout(r32i, binding = 2) uniform readonly iimage2D uImageInI; +layout(r32i, binding = 3) uniform writeonly iimage2D uImageOutI; +layout(r32ui, binding = 4) uniform readonly uimage2D uImageInU; +layout(r32ui, binding = 5) uniform writeonly uimage2D uImageOutU; +layout(r32f, binding = 6) uniform readonly imageBuffer uImageInBuffer; +layout(r32f, binding = 7) uniform writeonly imageBuffer uImageOutBuffer; + +layout(rg32f, binding = 8) uniform readonly image2D uImageInF2; +layout(rg32f, binding = 9) uniform writeonly image2D uImageOutF2; +layout(rg32i, binding = 10) uniform readonly iimage2D uImageInI2; +layout(rg32i, binding = 11) uniform writeonly iimage2D uImageOutI2; +layout(rg32ui, binding = 12) uniform readonly uimage2D uImageInU2; +layout(rg32ui, binding = 13) uniform writeonly uimage2D uImageOutU2; +layout(rg32f, binding = 14) uniform readonly imageBuffer uImageInBuffer2; +layout(rg32f, binding = 15) uniform writeonly imageBuffer uImageOutBuffer2; + +layout(rgba32f, binding = 16) uniform readonly image2D uImageInF4; +layout(rgba32f, binding = 17) uniform writeonly image2D uImageOutF4; +layout(rgba32i, binding = 18) uniform readonly iimage2D uImageInI4; +layout(rgba32i, binding = 19) uniform writeonly iimage2D uImageOutI4; +layout(rgba32ui, binding = 20) uniform readonly uimage2D uImageInU4; +layout(rgba32ui, binding = 21) uniform writeonly uimage2D uImageOutU4; +layout(rgba32f, binding = 22) uniform readonly imageBuffer uImageInBuffer4; +layout(rgba32f, binding = 23) uniform writeonly imageBuffer uImageOutBuffer4; + +layout(binding = 24) uniform writeonly image2D uImageNoFmtF; +layout(binding = 25) uniform writeonly uimage2D uImageNoFmtU; +layout(binding = 26) uniform writeonly iimage2D uImageNoFmtI; + +void main() +{ + vec4 f = imageLoad(uImageInF, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutF, 
ivec2(gl_GlobalInvocationID.xy), f); + + ivec4 i = imageLoad(uImageInI, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutI, ivec2(gl_GlobalInvocationID.xy), i); + + uvec4 u = imageLoad(uImageInU, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutU, ivec2(gl_GlobalInvocationID.xy), u); + + vec4 b = imageLoad(uImageInBuffer, int(gl_GlobalInvocationID.x)); + imageStore(uImageOutBuffer, int(gl_GlobalInvocationID.x), b); + + vec4 f2 = imageLoad(uImageInF2, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutF2, ivec2(gl_GlobalInvocationID.xy), f2); + + ivec4 i2 = imageLoad(uImageInI2, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutI2, ivec2(gl_GlobalInvocationID.xy), i2); + + uvec4 u2 = imageLoad(uImageInU2, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutU2, ivec2(gl_GlobalInvocationID.xy), u2); + + vec4 b2 = imageLoad(uImageInBuffer2, int(gl_GlobalInvocationID.x)); + imageStore(uImageOutBuffer2, int(gl_GlobalInvocationID.x), b2); + + vec4 f4 = imageLoad(uImageInF4, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutF4, ivec2(gl_GlobalInvocationID.xy), f4); + + ivec4 i4 = imageLoad(uImageInI4, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutI4, ivec2(gl_GlobalInvocationID.xy), i4); + + uvec4 u4 = imageLoad(uImageInU4, ivec2(gl_GlobalInvocationID.xy)); + imageStore(uImageOutU4, ivec2(gl_GlobalInvocationID.xy), u4); + + vec4 b4 = imageLoad(uImageInBuffer4, int(gl_GlobalInvocationID.x)); + imageStore(uImageOutBuffer4, int(gl_GlobalInvocationID.x), b4); + + imageStore(uImageNoFmtF, ivec2(gl_GlobalInvocationID.xy), b2); + imageStore(uImageNoFmtU, ivec2(gl_GlobalInvocationID.xy), u4); + imageStore(uImageNoFmtI, ivec2(gl_GlobalInvocationID.xy), i4); +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/inverse.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/inverse.comp new file mode 100644 index 000000000..03b06d646 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/inverse.comp @@ -0,0 +1,23 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(std430, binding = 0) writeonly buffer MatrixOut +{ + mat2 m2out; + mat3 m3out; + mat4 m4out; +}; + +layout(std430, binding = 1) readonly buffer MatrixIn +{ + mat2 m2in; + mat3 m3in; + mat4 m4in; +}; + +void main() +{ + m2out = inverse(m2in); + m3out = inverse(m3in); + m4out = inverse(m4in); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/num-workgroups-alone.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/num-workgroups-alone.comp new file mode 100644 index 000000000..10b5817ce --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/num-workgroups-alone.comp @@ -0,0 +1,13 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + uvec3 outdata; +}; + +void main() +{ + outdata = gl_NumWorkGroups; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/num-workgroups-with-builtins.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/num-workgroups-with-builtins.comp new file mode 100644 index 000000000..d19a06c10 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/num-workgroups-with-builtins.comp @@ -0,0 +1,13 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + uvec3 outdata; +}; + +void main() +{ + outdata = gl_NumWorkGroups + gl_WorkGroupID; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/rmw-matrix.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/rmw-matrix.comp new file mode 100644 index 000000000..c158ab4dd --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ 
+#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + float a; + vec4 b; + mat4 c; + + float a1; + vec4 b1; + mat4 c1; +}; + +void main() +{ + a *= a1; + b *= b1; + c *= c1; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/rwbuffer-matrix.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/rwbuffer-matrix.comp new file mode 100644 index 000000000..0e722e0a5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/rwbuffer-matrix.comp @@ -0,0 +1,104 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std140, binding = 1) uniform UBO +{ + int index0; + int index1; +}; + +layout(binding = 0, std430) buffer SSBO +{ + layout(column_major) mat4 mcol; + layout(row_major) mat4 mrow; + + layout(column_major) mat2 mcol2x2; + layout(row_major) mat2 mrow2x2; + + layout(column_major) mat2x3 mcol2x3; + layout(row_major) mat2x3 mrow2x3; + + layout(column_major) mat3x2 mcol3x2; + layout(row_major) mat3x2 mrow3x2; +}; + +void col_to_row() +{ + // Load column-major, store row-major. + mrow = mcol; + mrow2x2 = mcol2x2; + mrow2x3 = mcol2x3; + mrow3x2 = mcol3x2; +} + +void row_to_col() +{ + // Load row-major, store column-major. + mcol = mrow; + mcol2x2 = mrow2x2; + mcol2x3 = mrow2x3; + mcol3x2 = mrow3x2; +} + +void write_dynamic_index_row() +{ + mrow[index0][index1] = 1.0; + mrow2x2[index0][index1] = 2.0; + mrow2x3[index0][index1] = 3.0; + mrow3x2[index0][index1] = 4.0; + + mrow[index0] = vec4(1.0); + mrow2x2[index0] = vec2(2.0); + mrow2x3[index0] = vec3(3.0); + mrow3x2[index0] = vec2(4.0); +} + +void write_dynamic_index_col() +{ + mcol[index0][index1] = 1.0; + mcol2x2[index0][index1] = 2.0; + mcol2x3[index0][index1] = 3.0; + mcol3x2[index0][index1] = 4.0; + + mcol[index0] = vec4(1.0); + mcol2x2[index0] = vec2(2.0); + mcol2x3[index0] = vec3(3.0); + mcol3x2[index0] = vec2(4.0); +} + +void read_dynamic_index_row() +{ + float a0 = mrow[index0][index1]; + float a1 = mrow2x2[index0][index1]; + float a2 = mrow2x3[index0][index1]; + float a3 = mrow3x2[index0][index1]; + + vec4 v0 = mrow[index0]; + vec2 v1 = mrow2x2[index0]; + vec3 v2 = mrow2x3[index0]; + vec2 v3 = mrow3x2[index0]; +} + +void read_dynamic_index_col() +{ + float a0 = mcol[index0][index1]; + float a1 = mcol2x2[index0][index1]; + float a2 = mcol2x3[index0][index1]; + float a3 = mcol3x2[index0][index1]; + + vec4 v0 = mcol[index0]; + vec2 v1 = mcol2x2[index0]; + vec3 v2 = mcol2x3[index0]; + vec2 v3 = mcol3x2[index0]; +} + +void main() +{ + row_to_col(); + col_to_row(); + write_dynamic_index_row(); + write_dynamic_index_col(); + read_dynamic_index_row(); + read_dynamic_index_col(); +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/shared.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/shared.comp new file mode 100644 index 000000000..4deff9359 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/shared.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 4) in; + +shared float sShared[gl_WorkGroupSize.x]; + +layout(std430, binding = 0) readonly buffer SSBO +{ + float in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + float out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = in_data[ident]; + + sShared[gl_LocalInvocationIndex] = idata; + memoryBarrierShared(); + barrier(); + + out_data[ident] = sShared[gl_WorkGroupSize.x - gl_LocalInvocationIndex - 1u]; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/spec-constant-op-member-array.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/spec-constant-op-member-array.comp new file 
mode 100644 index 000000000..0b428eb0c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/spec-constant-op-member-array.comp @@ -0,0 +1,33 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(constant_id = 0) const int a = 100; +layout(constant_id = 1) const int b = 200; +layout(constant_id = 2) const int c = 300; +const int d = c + 50; +layout(constant_id = 3) const int e = 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(set = 1, binding = 0) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[d]; +}; + +void main() +{ + w[gl_GlobalInvocationID.x] += v[gl_GlobalInvocationID.x] + e; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/spec-constant-work-group-size.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/spec-constant-work-group-size.comp new file mode 100644 index 000000000..c86097ec7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/spec-constant-work-group-size.comp @@ -0,0 +1,19 @@ +#version 450 +layout(local_size_x_id = 10, local_size_y = 20) in; + +layout(constant_id = 0) const int a = 1; +layout(constant_id = 1) const int b = 2; + +layout(set = 1, binding = 0) writeonly buffer SSBO +{ + int v[]; +}; + +void main() +{ + int spec_const_array_size[b]; + spec_const_array_size[0] = 10; + spec_const_array_size[1] = 40; + spec_const_array_size[a] = a; + v[a + gl_WorkGroupSize.x + gl_WorkGroupSize.y + gl_GlobalInvocationID.x] = b + spec_const_array_size[1 - a]; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/ssbo-array.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/ssbo-array.comp new file mode 100644 index 000000000..38b56e9a0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/ssbo-array.comp @@ -0,0 +1,29 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + vec4 a; +} ssbo0; + +// Does not seem to work in glslang yet in HLSL output, disable for now. 
+#if 0 +layout(binding = 1, std430) buffer SSBO1 +{ + vec4 b; +} ssbo1[2]; + +layout(binding = 2, std430) buffer SSBO2 +{ + vec4 c; +} ssbo2[3][3]; +#endif + +void main() +{ +#if 0 + ssbo1[1].b = ssbo0.a; + ssbo2[1][2].c = ssbo0.a; +#endif +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp b/3rdparty/spirv-cross/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp new file mode 100644 index 000000000..81135e2a9 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/comp/subgroups.invalid.nofxc.sm60.comp @@ -0,0 +1,131 @@ +#version 450 +#extension GL_KHR_shader_subgroup_basic : require +#extension GL_KHR_shader_subgroup_ballot : require +#extension GL_KHR_shader_subgroup_vote : require +#extension GL_KHR_shader_subgroup_shuffle : require +#extension GL_KHR_shader_subgroup_shuffle_relative : require +#extension GL_KHR_shader_subgroup_arithmetic : require +#extension GL_KHR_shader_subgroup_clustered : require +#extension GL_KHR_shader_subgroup_quad : require +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + float FragColor; +}; + +void main() +{ + // basic + //FragColor = float(gl_NumSubgroups); + //FragColor = float(gl_SubgroupID); + FragColor = float(gl_SubgroupSize); + FragColor = float(gl_SubgroupInvocationID); + subgroupBarrier(); + subgroupMemoryBarrier(); + subgroupMemoryBarrierBuffer(); + subgroupMemoryBarrierShared(); + subgroupMemoryBarrierImage(); + bool elected = subgroupElect(); + + // ballot + FragColor = float(gl_SubgroupEqMask); + FragColor = float(gl_SubgroupGeMask); + FragColor = float(gl_SubgroupGtMask); + FragColor = float(gl_SubgroupLeMask); + FragColor = float(gl_SubgroupLtMask); + vec4 broadcasted = subgroupBroadcast(vec4(10.0), 8u); + vec3 first = subgroupBroadcastFirst(vec3(20.0)); + uvec4 ballot_value = subgroupBallot(true); + //bool inverse_ballot_value = subgroupInverseBallot(ballot_value); + //bool bit_extracted = subgroupBallotBitExtract(uvec4(10u), 8u); + uint bit_count = subgroupBallotBitCount(ballot_value); + //uint inclusive_bit_count = subgroupBallotInclusiveBitCount(ballot_value); + //uint exclusive_bit_count = subgroupBallotExclusiveBitCount(ballot_value); + //uint lsb = subgroupBallotFindLSB(ballot_value); + //uint msb = subgroupBallotFindMSB(ballot_value); + + // shuffle + //uint shuffled = subgroupShuffle(10u, 8u); + //uint shuffled_xor = subgroupShuffleXor(30u, 8u); + + // shuffle relative + //uint shuffled_up = subgroupShuffleUp(20u, 4u); + //uint shuffled_down = subgroupShuffleDown(20u, 4u); + + // vote + bool has_all = subgroupAll(true); + bool has_any = subgroupAny(true); + bool has_equal = subgroupAllEqual(true); + + // arithmetic + vec4 added = subgroupAdd(vec4(20.0)); + ivec4 iadded = subgroupAdd(ivec4(20)); + vec4 multiplied = subgroupMul(vec4(20.0)); + ivec4 imultiplied = subgroupMul(ivec4(20)); + vec4 lo = subgroupMin(vec4(20.0)); + vec4 hi = subgroupMax(vec4(20.0)); + ivec4 slo = subgroupMin(ivec4(20)); + ivec4 shi = subgroupMax(ivec4(20)); + uvec4 ulo = subgroupMin(uvec4(20)); + uvec4 uhi = subgroupMax(uvec4(20)); + uvec4 anded = subgroupAnd(ballot_value); + uvec4 ored = subgroupOr(ballot_value); + uvec4 xored = subgroupXor(ballot_value); + + added = subgroupInclusiveAdd(added); + iadded = subgroupInclusiveAdd(iadded); + multiplied = subgroupInclusiveMul(multiplied); + imultiplied = subgroupInclusiveMul(imultiplied); +#if 0 + lo = subgroupInclusiveMin(lo); + hi = subgroupInclusiveMax(hi); + slo = subgroupInclusiveMin(slo); + shi = subgroupInclusiveMax(shi); + ulo = 
subgroupInclusiveMin(ulo); + uhi = subgroupInclusiveMax(uhi); + anded = subgroupInclusiveAnd(anded); + ored = subgroupInclusiveOr(ored); + xored = subgroupInclusiveXor(ored); + added = subgroupExclusiveAdd(lo); +#endif + + added = subgroupExclusiveAdd(multiplied); + multiplied = subgroupExclusiveMul(multiplied); + iadded = subgroupExclusiveAdd(imultiplied); + imultiplied = subgroupExclusiveMul(imultiplied); +#if 0 + lo = subgroupExclusiveMin(lo); + hi = subgroupExclusiveMax(hi); + ulo = subgroupExclusiveMin(ulo); + uhi = subgroupExclusiveMax(uhi); + slo = subgroupExclusiveMin(slo); + shi = subgroupExclusiveMax(shi); + anded = subgroupExclusiveAnd(anded); + ored = subgroupExclusiveOr(ored); + xored = subgroupExclusiveXor(ored); +#endif + +#if 0 + // clustered + added = subgroupClusteredAdd(added, 4u); + multiplied = subgroupClusteredMul(multiplied, 4u); + iadded = subgroupClusteredAdd(iadded, 4u); + imultiplied = subgroupClusteredMul(imultiplied, 4u); + lo = subgroupClusteredMin(lo, 4u); + hi = subgroupClusteredMax(hi, 4u); + ulo = subgroupClusteredMin(ulo, 4u); + uhi = subgroupClusteredMax(uhi, 4u); + slo = subgroupClusteredMin(slo, 4u); + shi = subgroupClusteredMax(shi, 4u); + anded = subgroupClusteredAnd(anded, 4u); + ored = subgroupClusteredOr(ored, 4u); + xored = subgroupClusteredXor(xored, 4u); +#endif + + // quad + vec4 swap_horiz = subgroupQuadSwapHorizontal(vec4(20.0)); + vec4 swap_vertical = subgroupQuadSwapVertical(vec4(20.0)); + vec4 swap_diagonal = subgroupQuadSwapDiagonal(vec4(20.0)); + vec4 quad_broadcast = subgroupQuadBroadcast(vec4(20.0), 3u); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..3493e0ccc --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + float lut[5] = float[](1.0, 2.0, 3.0, 4.0, 5.0); + for (int i = 0; i < 4; i++, FragColor += lut[i]) + { + } +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/basic-color-3comp.sm30.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/basic-color-3comp.sm30.frag new file mode 100644 index 000000000..64211b650 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/basic-color-3comp.sm30.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 0) out vec3 FragColor; + +void main() +{ + FragColor = vColor.xyz; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/basic-color-3comp.sm50.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/basic-color-3comp.sm50.frag new file mode 100644 index 000000000..64211b650 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/basic-color-3comp.sm50.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 0) out vec3 FragColor; + +void main() +{ + FragColor = vColor.xyz; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/basic.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/basic.frag new file mode 100644 index 000000000..dd9a8f850 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/basic.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; +layout(binding = 0) uniform sampler2D uTex; +layout(location = 0) out vec4 
FragColor; + +void main() +{ + FragColor = vColor * texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/bit-conversions.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/bit-conversions.frag new file mode 100644 index 000000000..faacdc0f1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/bit-conversions.frag @@ -0,0 +1,12 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec2 value; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + int i = floatBitsToInt(value.x); + FragColor = vec4(1.0, 0.0, intBitsToFloat(i), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/boolean-mix.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/boolean-mix.frag new file mode 100644 index 000000000..9fd8ab347 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/boolean-mix.frag @@ -0,0 +1,10 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec2 x0; +layout(location = 0) out vec2 FragColor; + +void main() +{ + FragColor = x0.x > x0.y ? vec2(1.0, 0.0) : vec2(0.0, 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/builtins.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/builtins.frag new file mode 100644 index 000000000..99e6e2df5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/builtins.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; + +void main() +{ + FragColor = gl_FragCoord + vColor; + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/bvec-operations.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/bvec-operations.frag new file mode 100644 index 000000000..7221604d9 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/bvec-operations.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec2 value; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + bvec2 bools1 = not(bvec2(value.x == 0.0, value.y == 0.0)); + bvec2 bools2 = lessThanEqual(value, vec2(1.5, 0.5)); + FragColor = vec4(1.0, 0.0, bools1.x ? 1.0 : 0.0, bools2.x ? 
1.0 : 0.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/clip-cull-distance.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/clip-cull-distance.frag new file mode 100644 index 000000000..625a7dab3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/clip-cull-distance.frag @@ -0,0 +1,12 @@ +#version 450 + +in float gl_ClipDistance[2]; +in float gl_CullDistance[1]; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = gl_ClipDistance[0] + gl_CullDistance[0] + gl_ClipDistance[1]; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/combined-texture-sampler-parameter.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/combined-texture-sampler-parameter.frag new file mode 100644 index 000000000..e5721b902 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/combined-texture-sampler-parameter.frag @@ -0,0 +1,31 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler2D uSampler; +layout(set = 0, binding = 1) uniform mediump sampler2DShadow uSamplerShadow; +layout(location = 0) out float FragColor; + +vec4 samp2(sampler2D s) +{ + return texture(s, vec2(1.0)) + texelFetch(s, ivec2(10), 0); +} + +vec4 samp3(sampler2D s) +{ + return samp2(s); +} + +float samp4(mediump sampler2DShadow s) +{ + return texture(s, vec3(1.0)); +} + +float samp(sampler2D s0, mediump sampler2DShadow s1) +{ + return samp3(s0).x + samp4(s1); +} + +void main() +{ + FragColor = samp(uSampler, uSamplerShadow); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/combined-texture-sampler-shadow.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/combined-texture-sampler-shadow.frag new file mode 100644 index 000000000..2fabb5ea8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/combined-texture-sampler-shadow.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump samplerShadow uSampler; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; +layout(set = 0, binding = 2) uniform texture2D uDepth; +layout(location = 0) out float FragColor; + +float samp2(texture2D t, mediump samplerShadow s) +{ + return texture(sampler2DShadow(t, s), vec3(1.0)); +} + +float samp3(texture2D t, mediump sampler s) +{ + return texture(sampler2D(t, s), vec2(1.0)).x; +} + +float samp(texture2D t, mediump samplerShadow s, mediump sampler s1) +{ + float r0 = samp2(t, s); + float r1 = samp3(t, s1); + return r0 + r1; +} + +void main() +{ + FragColor = samp(uDepth, uSampler, uSampler1); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..47f93931c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; + +struct Foo +{ + vec4 a; + vec4 b; +}; + +layout(binding = 0) buffer UBO +{ + vec4 results[1024]; +}; + +layout(binding = 1) uniform highp isampler2D Buf; +layout(location = 0) flat in int vIn; +layout(location = 1) flat in int vIn2; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec4 coords = texelFetch(Buf, ivec2(gl_FragCoord.xy), 0); + vec4 foo = results[coords.x % 16]; + + int c = vIn * vIn; + int d = vIn2 * vIn2; + FragColor = foo + foo + results[c + d]; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag 
b/3rdparty/spirv-cross/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag new file mode 100644 index 000000000..d60002a0f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/constant-buffer-array.invalid.sm51.frag @@ -0,0 +1,32 @@ +#version 450 + +layout(std140, binding = 4) uniform CBO +{ + vec4 a; + vec4 b; + vec4 c; + vec4 d; +} cbo[2][4]; + +layout(std430, push_constant) uniform PushMe +{ + vec4 a; + vec4 b; + vec4 c; + vec4 d; +} push; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = cbo[1][2].a; + FragColor += cbo[1][2].b; + FragColor += cbo[1][2].c; + FragColor += cbo[1][2].d; + FragColor += push.a; + FragColor += push.b; + FragColor += push.c; + FragColor += push.d; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/constant-composites.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/constant-composites.frag new file mode 100644 index 000000000..a12e22ff4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/constant-composites.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +float lut[4] = float[](1.0, 4.0, 3.0, 2.0); + +struct Foo +{ + float a; + float b; +}; +Foo foos[2] = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0)); + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in int line; + +void main() +{ + FragColor = vec4(lut[line]); + FragColor += foos[line].a * foos[1 - line].a; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..7c75ffe1b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,36 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uSampler; +layout(location = 0) in vec4 vInput; + +void main() +{ + FragColor = vInput; + vec4 t = texture(uSampler, vInput.xy); + vec4 d0 = dFdx(vInput); + vec4 d1 = dFdy(vInput); + vec4 d2 = fwidth(vInput); + vec4 d3 = dFdxCoarse(vInput); + vec4 d4 = dFdyCoarse(vInput); + vec4 d5 = fwidthCoarse(vInput); + vec4 d6 = dFdxFine(vInput); + vec4 d7 = dFdyFine(vInput); + vec4 d8 = fwidthFine(vInput); + vec2 lod = textureQueryLod(uSampler, vInput.zw); + if (vInput.y > 10.0) + { + FragColor += t; + FragColor += d0; + FragColor += d1; + FragColor += d2; + FragColor += d3; + FragColor += d4; + FragColor += d5; + FragColor += d6; + FragColor += d7; + FragColor += d8; + FragColor += lod.xyxy; + } +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/depth-greater-than.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/depth-greater-than.frag new file mode 100644 index 000000000..88f9a4214 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/depth-greater-than.frag @@ -0,0 +1,8 @@ +#version 450 +layout(early_fragment_tests) in; +layout(depth_greater) out float gl_FragDepth; + +void main() +{ + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/depth-less-than.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/depth-less-than.frag new file mode 100644 index 000000000..87fdd4620 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/depth-less-than.frag @@ -0,0 +1,8 @@ +#version 450 +layout(early_fragment_tests) in; +layout(depth_less) out float gl_FragDepth; + +void main() +{ + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/early-fragment-test.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/early-fragment-test.frag new file mode 100644 index 
000000000..9f84e0988 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/early-fragment-test.frag @@ -0,0 +1,7 @@ +#version 420 + +layout(early_fragment_tests) in; + +void main() +{ +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/fp16-packing.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/fp16-packing.frag new file mode 100644 index 000000000..98ca24e2f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/fp16-packing.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) flat in uint FP16; +layout(location = 1) flat in vec2 FP32; +layout(location = 0) out vec2 FP32Out; +layout(location = 1) out uint FP16Out; + +void main() +{ + FP32Out = unpackHalf2x16(FP16); + FP16Out = packHalf2x16(FP32); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/fp16.invalid.desktop.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/fp16.invalid.desktop.frag new file mode 100644 index 000000000..4f92e2035 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/fp16.invalid.desktop.frag @@ -0,0 +1,156 @@ +#version 450 +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0) in float16_t v1; +layout(location = 1) in f16vec2 v2; +layout(location = 2) in f16vec3 v3; +layout(location = 3) in f16vec4 v4; + +layout(location = 0) out float o1; +layout(location = 1) out vec2 o2; +layout(location = 2) out vec3 o3; +layout(location = 3) out vec4 o4; + +#if 0 +// Doesn't work on glslang yet. +f16mat2 test_mat2(f16vec2 a, f16vec2 b, f16vec2 c, f16vec2 d) +{ + return f16mat2(a, b) * f16mat2(c, d); +} + +f16mat3 test_mat3(f16vec3 a, f16vec3 b, f16vec3 c, f16vec3 d, f16vec3 e, f16vec3 f) +{ + return f16mat3(a, b, c) * f16mat3(d, e, f); +} +#endif + +void test_constants() +{ + float16_t a = 1.0hf; + float16_t b = 1.5hf; + float16_t c = -1.5hf; // Negatives + float16_t d = (0.0hf / 0.0hf); // NaN + float16_t e = (1.0hf / 0.0hf); // +Inf + float16_t f = (-1.0hf / 0.0hf); // -Inf + float16_t g = 1014.0hf; // Large. 
+ float16_t h = 0.000001hf; // Denormal +} + +float16_t test_result() +{ + return 1.0hf; +} + +void test_conversions() +{ + float16_t one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = bool(one); + float d = float(one); + double e = double(one); + float16_t a2 = float16_t(a); + float16_t b2 = float16_t(b); + float16_t c2 = float16_t(c); + float16_t d2 = float16_t(d); + float16_t e2 = float16_t(e); +} + +void test_builtins() +{ + f16vec4 res; + res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + //res = asinh(v4); + //res = acosh(v4); + //res = atanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = inversesqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + //res = roundEven(v4); + res = ceil(v4); + res = fract(v4); + res = mod(v4, v4); + f16vec4 tmp; + res = modf(v4, tmp); + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = mix(v4, v4, v4); + res = mix(v4, v4, lessThan(v4, v4)); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + + bvec4 btmp = isnan(v4); + btmp = isinf(v4); + res = fma(v4, v4, v4); + + //ivec4 itmp; + //res = frexp(v4, itmp); + //res = ldexp(res, itmp); + + uint pack0 = packFloat2x16(v4.xy); + uint pack1 = packFloat2x16(v4.zw); + res = f16vec4(unpackFloat2x16(pack0), unpackFloat2x16(pack1)); + + float16_t t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + f16vec3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + + btmp = lessThan(v4, v4); + btmp = lessThanEqual(v4, v4); + btmp = greaterThan(v4, v4); + btmp = greaterThanEqual(v4, v4); + btmp = equal(v4, v4); + btmp = notEqual(v4, v4); + + res = dFdx(v4); + res = dFdy(v4); + res = dFdxFine(v4); + res = dFdyFine(v4); + res = dFdxCoarse(v4); + res = dFdyCoarse(v4); + res = fwidth(v4); + res = fwidthFine(v4); + res = fwidthCoarse(v4); + + //res = interpolateAtCentroid(v4); + //res = interpolateAtSample(v4, 0); + //res = interpolateAtOffset(v4, f16vec2(0.1hf)); +} + +void main() +{ + // Basic matrix tests. 
+#if 0 + f16mat2 m0 = test_mat2(v2, v2, v3.xy, v3.xy); + f16mat3 m1 = test_mat3(v3, v3, v3, v4.xyz, v4.xyz, v4.yzw); +#endif + + test_constants(); + test_conversions(); + test_builtins(); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/front-facing.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/front-facing.frag new file mode 100644 index 000000000..90ca1abf4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/front-facing.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + if (gl_FrontFacing) + FragColor = vA; + else + FragColor = vB; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/image-query-selective.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/image-query-selective.frag new file mode 100644 index 000000000..bb595bcb9 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/image-query-selective.frag @@ -0,0 +1,35 @@ +#version 450 + +layout(binding = 0) uniform usampler1D uSampler1DUint; +layout(binding = 0) uniform isampler1D uSampler1DInt; +layout(binding = 0) uniform sampler1D uSampler1DFloat; +layout(binding = 1) uniform sampler2D uSampler2D; +layout(binding = 2) uniform isampler2DArray uSampler2DArray; +layout(binding = 3) uniform sampler3D uSampler3D; +layout(binding = 4) uniform samplerCube uSamplerCube; +layout(binding = 5) uniform usamplerCubeArray uSamplerCubeArray; +layout(binding = 6) uniform samplerBuffer uSamplerBuffer; +layout(binding = 7) uniform isampler2DMS uSamplerMS; +layout(binding = 8) uniform sampler2DMSArray uSamplerMSArray; + +void main() +{ + int a = textureSize(uSampler1DUint, 0); + a = textureSize(uSampler1DInt, 0); + a = textureSize(uSampler1DFloat, 0); + + ivec3 c = textureSize(uSampler2DArray, 0); + ivec3 d = textureSize(uSampler3D, 0); + ivec2 e = textureSize(uSamplerCube, 0); + ivec3 f = textureSize(uSamplerCubeArray, 0); + int g = textureSize(uSamplerBuffer); + ivec2 h = textureSize(uSamplerMS); + ivec3 i = textureSize(uSamplerMSArray); + + int l1 = textureQueryLevels(uSampler2D); + int l2 = textureQueryLevels(uSampler2DArray); + int l3 = textureQueryLevels(uSampler3D); + int l4 = textureQueryLevels(uSamplerCube); + int s0 = textureSamples(uSamplerMS); + int s1 = textureSamples(uSamplerMSArray); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/image-query.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/image-query.frag new file mode 100644 index 000000000..8e840fba1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/image-query.frag @@ -0,0 +1,33 @@ +#version 450 + +layout(binding = 0) uniform sampler1D uSampler1D; +layout(binding = 1) uniform sampler2D uSampler2D; +layout(binding = 2) uniform sampler2DArray uSampler2DArray; +layout(binding = 3) uniform sampler3D uSampler3D; +layout(binding = 4) uniform samplerCube uSamplerCube; +layout(binding = 5) uniform samplerCubeArray uSamplerCubeArray; +layout(binding = 6) uniform samplerBuffer uSamplerBuffer; +layout(binding = 7) uniform sampler2DMS uSamplerMS; +layout(binding = 8) uniform sampler2DMSArray uSamplerMSArray; + +void main() +{ + int a = textureSize(uSampler1D, 0); + ivec2 b = textureSize(uSampler2D, 0); + ivec3 c = textureSize(uSampler2DArray, 0); + ivec3 d = textureSize(uSampler3D, 0); + ivec2 e = textureSize(uSamplerCube, 0); + ivec3 f = textureSize(uSamplerCubeArray, 0); + int g = textureSize(uSamplerBuffer); + ivec2 h = textureSize(uSamplerMS); + ivec3 i = textureSize(uSamplerMSArray); + + int l0 = 
textureQueryLevels(uSampler1D); + int l1 = textureQueryLevels(uSampler2D); + int l2 = textureQueryLevels(uSampler2DArray); + int l3 = textureQueryLevels(uSampler3D); + int l4 = textureQueryLevels(uSamplerCube); + int l5 = textureQueryLevels(uSamplerCubeArray); + int s0 = textureSamples(uSamplerMS); + int s1 = textureSamples(uSamplerMSArray); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/input-attachment-ms.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/input-attachment-ms.frag new file mode 100644 index 000000000..b3d44c943 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/input-attachment-ms.frag @@ -0,0 +1,15 @@ +#version 450 + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform subpassInputMS uSubpass1; +layout(location = 0) out vec4 FragColor; + +vec4 load_subpasses(mediump subpassInputMS uInput) +{ + return subpassLoad(uInput, gl_SampleID); +} + +void main() +{ + FragColor = subpassLoad(uSubpass0, 1) + subpassLoad(uSubpass1, 2) + load_subpasses(uSubpass0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/input-attachment.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/input-attachment.frag new file mode 100644 index 000000000..877d0525a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/input-attachment.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform mediump subpassInput uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform mediump subpassInput uSubpass1; +layout(location = 0) out vec4 FragColor; + +vec4 load_subpasses(mediump subpassInput uInput) +{ + return subpassLoad(uInput); +} + +void main() +{ + FragColor = subpassLoad(uSubpass0) + load_subpasses(uSubpass1); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/io-block.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/io-block.frag new file mode 100644 index 000000000..1e3e3d77e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/io-block.frag @@ -0,0 +1,16 @@ +#version 310 es +#extension GL_EXT_shader_io_blocks : require +precision mediump float; + +layout(location = 1) in VertexOut +{ + vec4 a; + vec4 b; +}; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = a + b; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/lut-promotion.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/lut-promotion.frag new file mode 100644 index 000000000..0cdc8148f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/lut-promotion.frag @@ -0,0 +1,44 @@ +#version 310 es +precision mediump float; +layout(location = 0) out float FragColor; +layout(location = 0) flat in int index; + +const float LUT[16] = float[]( + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0); + +void main() +{ + // Try reading LUTs, both in branches and not branch. + FragColor = LUT[index]; + if (index < 10) + FragColor += LUT[index ^ 1]; + else + FragColor += LUT[index & 1]; + + // Not declared as a LUT, but can be promoted to one. + vec4 foo[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + if (index > 30) + { + FragColor += foo[index & 3].y; + } + else + { + FragColor += foo[index & 1].x; + } + + // Not declared as a LUT, but this cannot be promoted, because we have a partial write. 
+ vec4 foobar[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + if (index > 30) + { + foobar[1].z = 20.0; + } + FragColor += foobar[index & 3].z; + + // Not declared as a LUT, but this cannot be promoted, because we have two complete writes. + vec4 baz[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + baz = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + FragColor += baz[index & 3].z; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/matrix-input.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/matrix-input.frag new file mode 100644 index 000000000..ffe242cec --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/matrix-input.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 1) in mat4 m; + +void main() +{ + FragColor = m[0] + m[1] + m[2] + m[3]; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/mod.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/mod.frag new file mode 100644 index 000000000..32edb6184 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/mod.frag @@ -0,0 +1,22 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 a4; +layout(location = 1) in vec3 a3; +layout(location = 2) in vec2 a2; +layout(location = 3) in float a1; +layout(location = 4) in vec4 b4; +layout(location = 5) in vec3 b3; +layout(location = 6) in vec2 b2; +layout(location = 7) in float b1; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec4 m0 = mod(a4, b4); + vec3 m1 = mod(a3, b3); + vec2 m2 = mod(a2, b2); + float m3 = mod(a1, b1); + FragColor = m0 + m1.xyzx + m2.xyxy + m3; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/mrt.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/mrt.frag new file mode 100644 index 000000000..77a2bb29c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/mrt.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 RT0; +layout(location = 1) out vec4 RT1; +layout(location = 2) out vec4 RT2; +layout(location = 3) out vec4 RT3; + +void main() +{ + RT0 = vec4(1.0); + RT1 = vec4(2.0); + RT2 = vec4(3.0); + RT3 = vec4(4.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/no-return.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/no-return.frag new file mode 100644 index 000000000..f0ef8702c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/no-return.frag @@ -0,0 +1,5 @@ +#version 310 es + +void main() +{ +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/no-return2.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/no-return2.frag new file mode 100644 index 000000000..46bf9fbf2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/no-return2.frag @@ -0,0 +1,9 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; + +void main() +{ + vec4 v = vColor; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/partial-write-preserve.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/partial-write-preserve.frag new file mode 100644 index 000000000..f30270b91 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/partial-write-preserve.frag @@ -0,0 +1,64 @@ +#version 310 es +precision mediump float; + +layout(std140, binding = 0) uniform UBO +{ + int some_value; +}; + +struct B +{ + float a; + float b; +}; + +void partial_inout(inout vec4 x) +{ + x.x = 10.0; +} + +void partial_inout(inout B b) +{ + b.b = 40.0; +} + +// Make a complete write, but only conditionally ... 
+void branchy_inout(inout vec4 v) +{ + v.y = 20.0; + if (some_value == 20) + { + v = vec4(50.0); + } +} + +void branchy_inout_2(out vec4 v) +{ + if (some_value == 20) + { + v = vec4(50.0); + } + else + { + v = vec4(70.0); + } + v.y = 20.0; +} + +void complete_inout(out vec4 x) +{ + x = vec4(50.0); +} + +void main() +{ + vec4 a = vec4(10.0); + partial_inout(a); + complete_inout(a); + branchy_inout(a); + branchy_inout_2(a); + + B b = B(10.0, 20.0); + partial_inout(b); +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/point-coord-compat.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/point-coord-compat.frag new file mode 100644 index 000000000..dc7d6b55f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/point-coord-compat.frag @@ -0,0 +1,10 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec2 FragColor; + +void main() +{ + FragColor = gl_PointCoord; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/query-lod.desktop.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/query-lod.desktop.frag new file mode 100644 index 000000000..0cb160402 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/query-lod.desktop.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(location = 0) in vec2 vTexCoord; +layout(binding = 0) uniform sampler2D uSampler; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureQueryLod(uSampler, vTexCoord).xyxy; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/resources.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/resources.frag new file mode 100644 index 000000000..16178bfd7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/resources.frag @@ -0,0 +1,27 @@ +#version 310 es +precision mediump float; + +layout(binding = 3, std140) uniform CBuffer +{ + vec4 a; +} cbuf; + +layout(binding = 4) uniform sampler2D uSampledImage; +layout(binding = 5) uniform mediump texture2D uTexture; +layout(binding = 6) uniform mediump sampler uSampler; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTex; + +layout(std430, push_constant) uniform PushMe +{ + vec4 d; +} registers; + +void main() +{ + vec4 c0 = texture(uSampledImage, vTex); + vec4 c1 = texture(sampler2D(uTexture, uSampler), vTex); + vec4 c2 = cbuf.a + registers.d; + FragColor = c0 + c1 + c2; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/row-major-layout-in-struct.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/row-major-layout-in-struct.frag new file mode 100644 index 000000000..3e93bb2b4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/row-major-layout-in-struct.frag @@ -0,0 +1,29 @@ +#version 450 + +struct Foo +{ + mat4 v; + mat4 w; +}; + +struct NonFoo +{ + mat4 v; + mat4 w; +}; + +layout(std140, binding = 0) uniform UBO +{ + layout(column_major) Foo foo; +}; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vUV; + +void main() +{ + NonFoo f; + f.v = foo.v; + f.w = foo.w; + FragColor = f.v * (f.w * vUV); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/sample-cmp-level-zero.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/sample-cmp-level-zero.frag new file mode 100644 index 000000000..c40d742ee --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/sample-cmp-level-zero.frag @@ -0,0 +1,27 @@ +#version 450 + +layout(location = 0) out float FragColor; +layout(binding = 0) uniform sampler2DShadow uSampler2D; +layout(binding = 1) uniform sampler2DArrayShadow uSampler2DArray; +layout(binding = 2) uniform samplerCubeShadow uSamplerCube; +layout(binding = 3) uniform 
samplerCubeArrayShadow uSamplerCubeArray; + +layout(location = 0) in vec3 vUVRef; +layout(location = 1) in vec4 vDirRef; + +void main() +{ + float s0 = textureOffset(uSampler2D, vUVRef, ivec2(-1)); + float s1 = textureOffset(uSampler2DArray, vDirRef, ivec2(-1)); + float s2 = texture(uSamplerCube, vDirRef); + float s3 = texture(uSamplerCubeArray, vDirRef, 0.5); + + float l0 = textureLodOffset(uSampler2D, vUVRef, 0.0, ivec2(-1)); + float l1 = textureGradOffset(uSampler2DArray, vDirRef, vec2(0.0), vec2(0.0), ivec2(-1)); + float l2 = textureGrad(uSamplerCube, vDirRef, vec3(0.0), vec3(0.0)); + + float p0 = textureProjOffset(uSampler2D, vDirRef, ivec2(+1)); + float p1 = textureProjLodOffset(uSampler2D, vDirRef, 0.0, ivec2(+1)); + + FragColor = s0 + s1 + s2 + s3 + l0 + l1 + l2 + p0 + p1; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/sampler-array.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/sampler-array.frag new file mode 100644 index 000000000..75910ed16 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/sampler-array.frag @@ -0,0 +1,28 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uCombined[4]; +layout(binding = 4) uniform texture2D uTex[4]; +layout(binding = 8) uniform sampler uSampler[4]; +layout(binding = 12, rgba32f) uniform writeonly image2D uImage[8]; +layout(location = 0) in vec2 vTex; +layout(location = 1) flat in int vIndex; + +vec4 sample_in_function(sampler2D samp) +{ + return texture(samp, vTex); +} + +vec4 sample_in_function2(texture2D tex, sampler samp) +{ + return texture(sampler2D(tex, samp), vTex); +} + +void main() +{ + vec4 color = texture(uCombined[vIndex], vTex); + color += texture(sampler2D(uTex[vIndex], uSampler[vIndex]), vTex); + color += sample_in_function(uCombined[vIndex + 1]); + color += sample_in_function2(uTex[vIndex + 1], uSampler[vIndex + 1]); + + imageStore(uImage[vIndex], ivec2(gl_FragCoord.xy), color); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/sampler-image-arrays.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/sampler-image-arrays.frag new file mode 100644 index 000000000..42370d972 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/sampler-image-arrays.frag @@ -0,0 +1,33 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in vec2 vTex; +layout(location = 1) flat in int vIndex; +layout(binding = 0) uniform sampler2D uSampler[4]; +layout(binding = 4) uniform sampler uSamplers[4]; +layout(binding = 8) uniform texture2D uTextures[4]; + +vec4 sample_from_argument(sampler2D samplers[4]) +{ + return texture(samplers[vIndex], vTex + 0.2); +} + +vec4 sample_single_from_argument(sampler2D samp) +{ + return texture(samp, vTex + 0.3); +} + +vec4 sample_from_global() +{ + return texture(uSampler[vIndex], vTex + 0.1); +} + +void main() +{ + FragColor = vec4(0.0); + FragColor += texture(sampler2D(uTextures[2], uSamplers[1]), vTex); + FragColor += texture(uSampler[vIndex], vTex); + FragColor += sample_from_global(); + FragColor += sample_from_argument(uSampler); + FragColor += sample_single_from_argument(uSampler[3]); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag new file mode 100644 index 000000000..22d18a26a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/separate-combined-fake-overload.sm30.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uSamp; +layout(binding = 1) uniform 
texture2D uT; +layout(binding = 2) uniform sampler uS; + +vec4 samp(sampler2D uSamp) +{ + return texture(uSamp, vec2(0.5)); +} + +vec4 samp(texture2D T, sampler S) +{ + return texture(sampler2D(T, S), vec2(0.5)); +} + +void main() +{ + FragColor = samp(uSamp) + samp(uT, uS); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/spec-constant-block-size.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/spec-constant-block-size.frag new file mode 100644 index 000000000..8d2b1f326 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/spec-constant-block-size.frag @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; + +layout(constant_id = 10) const int Value = 2; +layout(binding = 0) uniform SpecConstArray +{ + vec4 samples[Value]; +}; + +layout(location = 0) flat in int Index; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = samples[Index]; +} + diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/spec-constant-ternary.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/spec-constant-ternary.frag new file mode 100644 index 000000000..78dccbf04 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/spec-constant-ternary.frag @@ -0,0 +1,9 @@ +#version 450 +layout(location = 0) out float FragColor; +layout(constant_id = 0) const uint s = 10u; +const uint f = s > 20u ? 30u : 50u; + +void main() +{ + FragColor = float(f); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..d8aee43a6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/switch-unsigned-case.frag @@ -0,0 +1,26 @@ +#version 310 es +precision mediump float; + +#define ENUM_0 0u +#define ENUM_1 1u + +layout(set = 0, binding = 0) uniform Buff +{ + uint TestVal; +}; + +layout(location = 0) out vec4 fsout_Color; + +void main() +{ + fsout_Color = vec4(1.0); + switch (TestVal) + { + case ENUM_0: + fsout_Color = vec4(0.1); + break; + case ENUM_1: + fsout_Color = vec4(0.2); + break; + } +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/swizzle-scalar.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/swizzle-scalar.frag new file mode 100644 index 000000000..c27524f7a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/swizzle-scalar.frag @@ -0,0 +1,16 @@ +#version 450 + +layout(location = 0) flat in float vFloat; +layout(location = 1) flat in int vInt; +layout(location = 0) out vec4 Float; +layout(location = 1) out ivec4 Int; +layout(location = 2) out vec4 Float2; +layout(location = 3) out ivec4 Int2; + +void main() +{ + Float = vec4(vFloat) * 2.0; + Int = ivec4(vInt) * 2; + Float2 = vec4(10.0); + Int2 = ivec4(10); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/tex-sampling-ms.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/tex-sampling-ms.frag new file mode 100644 index 000000000..7badbb1a3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/tex-sampling-ms.frag @@ -0,0 +1,16 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2DMS uTex; + +void main() +{ + FragColor = + texelFetch(uTex, ivec2(gl_FragCoord.xy), 0); + FragColor += + texelFetch(uTex, ivec2(gl_FragCoord.xy), 1); + FragColor += + texelFetch(uTex, ivec2(gl_FragCoord.xy), 2); + FragColor += + texelFetch(uTex, ivec2(gl_FragCoord.xy), 3); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/tex-sampling.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/tex-sampling.frag new file mode 100644 index 000000000..762c60ac6 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-hlsl/frag/tex-sampling.frag @@ -0,0 +1,81 @@ +#version 450 + +layout(binding = 0) uniform sampler1D tex1d; +layout(binding = 1) uniform sampler2D tex2d; +layout(binding = 2) uniform sampler3D tex3d; +layout(binding = 3) uniform samplerCube texCube; + +layout(binding = 4) uniform sampler1DShadow tex1dShadow; +layout(binding = 5) uniform sampler2DShadow tex2dShadow; +layout(binding = 6) uniform samplerCubeShadow texCubeShadow; + +layout(binding = 7) uniform sampler1DArray tex1dArray; +layout(binding = 8) uniform sampler2DArray tex2dArray; +layout(binding = 9) uniform samplerCubeArray texCubeArray; + +layout(binding = 10) uniform samplerShadow samplerDepth; +layout(binding = 11) uniform sampler samplerNonDepth; +layout(binding = 12) uniform texture2D separateTex2d; +layout(binding = 13) uniform texture2D separateTex2dDepth; + +layout(location = 0) in float texCoord1d; +layout(location = 1) in vec2 texCoord2d; +layout(location = 2) in vec3 texCoord3d; +layout(location = 3) in vec4 texCoord4d; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec4 texcolor = texture(tex1d, texCoord1d); + texcolor += textureOffset(tex1d, texCoord1d, 1); + texcolor += textureLod(tex1d, texCoord1d, 2); + texcolor += textureGrad(tex1d, texCoord1d, 1.0, 2.0); + texcolor += textureProj(tex1d, vec2(texCoord1d, 2.0)); + texcolor += texture(tex1d, texCoord1d, 1.0); + + texcolor += texture(tex2d, texCoord2d); + texcolor += textureOffset(tex2d, texCoord2d, ivec2(1, 2)); + texcolor += textureLod(tex2d, texCoord2d, 2); + texcolor += textureGrad(tex2d, texCoord2d, vec2(1.0, 2.0), vec2(3.0, 4.0)); + texcolor += textureProj(tex2d, vec3(texCoord2d, 2.0)); + texcolor += texture(tex2d, texCoord2d, 1.0); + + texcolor += texture(tex3d, texCoord3d); + texcolor += textureOffset(tex3d, texCoord3d, ivec3(1, 2, 3)); + texcolor += textureLod(tex3d, texCoord3d, 2); + texcolor += textureGrad(tex3d, texCoord3d, vec3(1.0, 2.0, 3.0), vec3(4.0, 5.0, 6.0)); + texcolor += textureProj(tex3d, vec4(texCoord3d, 2.0)); + texcolor += texture(tex3d, texCoord3d, 1.0); + + texcolor += texture(texCube, texCoord3d); + texcolor += textureLod(texCube, texCoord3d, 2); + texcolor += texture(texCube, texCoord3d, 1.0); + + texcolor.a += texture(tex1dShadow, vec3(texCoord1d, 0.0, 0.0)); + texcolor.a += texture(tex2dShadow, vec3(texCoord2d, 0.0)); + texcolor.a += texture(texCubeShadow, vec4(texCoord3d, 0.0)); + + texcolor += texture(tex1dArray, texCoord2d); + texcolor += texture(tex2dArray, texCoord3d); + texcolor += texture(texCubeArray, texCoord4d); + + texcolor += textureGather(tex2d, texCoord2d); + texcolor += textureGather(tex2d, texCoord2d, 0); + texcolor += textureGather(tex2d, texCoord2d, 1); + texcolor += textureGather(tex2d, texCoord2d, 2); + texcolor += textureGather(tex2d, texCoord2d, 3); + + texcolor += textureGatherOffset(tex2d, texCoord2d, ivec2(1, 1)); + texcolor += textureGatherOffset(tex2d, texCoord2d, ivec2(1, 1), 0); + texcolor += textureGatherOffset(tex2d, texCoord2d, ivec2(1, 1), 1); + texcolor += textureGatherOffset(tex2d, texCoord2d, ivec2(1, 1), 2); + texcolor += textureGatherOffset(tex2d, texCoord2d, ivec2(1, 1), 3); + + texcolor += texelFetch(tex2d, ivec2(1, 2), 0); + + texcolor += texture(sampler2D(separateTex2d, samplerNonDepth), texCoord2d); + texcolor.a += texture(sampler2DShadow(separateTex2dDepth, samplerDepth), texCoord3d); + + FragColor = texcolor; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/texel-fetch-offset.frag 
b/3rdparty/spirv-cross/shaders-hlsl/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..e98748b8b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/texel-fetch-offset.frag @@ -0,0 +1,10 @@ +#version 310 es +precision mediump float; +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uTexture; + +void main() +{ + FragColor = texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(1, 1)); + FragColor += texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(-1, 1)); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/texture-proj-shadow.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/texture-proj-shadow.frag new file mode 100644 index 000000000..0c4cf8f5a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/texture-proj-shadow.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(binding = 0) uniform sampler1DShadow uShadow1D; +layout(binding = 1) uniform sampler2DShadow uShadow2D; +layout(binding = 2) uniform sampler1D uSampler1D; +layout(binding = 3) uniform sampler2D uSampler2D; +layout(binding = 4) uniform sampler3D uSampler3D; + +layout(location = 0) out float FragColor; +layout(location = 0) in vec3 vClip3; +layout(location = 1) in vec4 vClip4; +layout(location = 2) in vec2 vClip2; + +void main() +{ + FragColor = textureProj(uShadow1D, vClip4); + FragColor = textureProj(uShadow2D, vClip4); + FragColor = textureProj(uSampler1D, vClip2).x; + FragColor = textureProj(uSampler2D, vClip3).x; + FragColor = textureProj(uSampler3D, vClip4).x; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/texture-size-combined-image-sampler.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/texture-size-combined-image-sampler.frag new file mode 100644 index 000000000..948805959 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/texture-size-combined-image-sampler.frag @@ -0,0 +1,9 @@ +#version 450 +layout(set = 0, binding = 0) uniform texture2D uTex; +layout(set = 0, binding = 1) uniform sampler uSampler; +layout(location = 0) out ivec2 FooOut; + +void main() +{ + FooOut = textureSize(sampler2D(uTex, uSampler), 0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/unary-enclose.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/unary-enclose.frag new file mode 100644 index 000000000..ea502e1de --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/unary-enclose.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn; +layout(location = 1) flat in ivec4 vIn1; + +void main() +{ + FragColor = +(-(-vIn)); + ivec4 a = ~(~vIn1); + + bool b = false; + b = !!b; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/unorm-snorm-packing.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/unorm-snorm-packing.frag new file mode 100644 index 000000000..c0a01aaf8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/unorm-snorm-packing.frag @@ -0,0 +1,24 @@ +#version 450 + +layout(location = 0) flat in uint SNORM8; +layout(location = 1) flat in uint UNORM8; +layout(location = 2) flat in uint SNORM16; +layout(location = 3) flat in uint UNORM16; +layout(location = 4) flat in vec4 FP32; +layout(location = 0) out vec4 FP32Out; +layout(location = 1) out uint UNORM8Out; +layout(location = 2) out uint SNORM8Out; +layout(location = 3) out uint UNORM16Out; +layout(location = 4) out uint SNORM16Out; + +void main() +{ + FP32Out = unpackUnorm4x8(UNORM8); + FP32Out = unpackSnorm4x8(SNORM8); + FP32Out.xy = unpackUnorm2x16(UNORM16); + FP32Out.xy = unpackSnorm2x16(SNORM16); + 
UNORM8Out = packUnorm4x8(FP32); + SNORM8Out = packSnorm4x8(FP32); + UNORM16Out = packUnorm2x16(FP32.xy); + SNORM16Out = packSnorm2x16(FP32.zw); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/frag/various-glsl-ops.frag b/3rdparty/spirv-cross/shaders-hlsl/frag/various-glsl-ops.frag new file mode 100644 index 000000000..0d4af80a6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/frag/various-glsl-ops.frag @@ -0,0 +1,17 @@ +#version 450 + +layout(location = 0) in vec2 interpolant; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec4 color = vec4(0.0, 0.0, 0.0, interpolateAtOffset(interpolant, vec2(0.1, 0.1))); + + // glslang's HLSL parser currently fails here + //color += vec4(0.0, 0.0, 0.0, interpolateAtSample(interpolant, gl_SampleID)); + //color += vec4(0.0, 0.0, 0.0, interpolateAtCentroid(interpolant)); + + color += vec4(0.0, 0.0, 0.0, dFdxCoarse(interpolant.x)); + FragColor = color; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/basic.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/basic.vert new file mode 100644 index 000000000..f03114fee --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/basic.vert @@ -0,0 +1,15 @@ +#version 310 es + +layout(std140) uniform UBO +{ + uniform mat4 uMVP; +}; +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/clip-cull-distance.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/clip-cull-distance.vert new file mode 100644 index 000000000..34f65fc18 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/clip-cull-distance.vert @@ -0,0 +1,11 @@ +#version 450 +out float gl_ClipDistance[2]; +out float gl_CullDistance[1]; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_CullDistance[0] = 4.0; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/instancing.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/instancing.vert new file mode 100644 index 000000000..0e62ef6b0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/instancing.vert @@ -0,0 +1,6 @@ +#version 310 es + +void main() +{ + gl_Position = vec4(float(gl_VertexIndex + gl_InstanceIndex)); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/locations.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/locations.vert new file mode 100644 index 000000000..df1a44e92 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/locations.vert @@ -0,0 +1,51 @@ +#version 310 es +#extension GL_EXT_shader_io_blocks : require + +struct Foo +{ + vec3 a; + vec3 b; + vec3 c; +}; + +// This will lock to input location 2. +layout(location = 2) in vec4 Input2; +// This will lock to input location 4. +layout(location = 4) in vec4 Input4; +// This will pick first available, which is 0. +layout(location = 0) in vec4 Input0; + +// Locks output 0. +layout(location = 0) out float vLocation0; +// Locks output 1. +layout(location = 1) out float vLocation1; +// Picks first available two locations, so, 2 and 3. +layout(location = 2) out float vLocation2[2]; +// Picks first available location, 4. +layout(location = 4) out Foo vLocation4; +// Picks first available location 9. +layout(location = 9) out float vLocation9; + +// Locks location 7 and 8. 
+layout(location = 7) out VertexOut +{ + vec3 color; + vec3 foo; +} vout; + +void main() +{ + gl_Position = vec4(1.0) + Input2 + Input4 + Input0; + vLocation0 = 0.0; + vLocation1 = 1.0; + vLocation2[0] = 2.0; + vLocation2[1] = 2.0; + Foo foo; + foo.a = vec3(1.0); + foo.b = vec3(1.0); + foo.c = vec3(1.0); + vLocation4 = foo; + vLocation9 = 9.0; + vout.color = vec3(2.0); + vout.foo = vec3(4.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/matrix-attribute.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/matrix-attribute.vert new file mode 100644 index 000000000..8a1393f8d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/matrix-attribute.vert @@ -0,0 +1,9 @@ +#version 310 es + +layout(location = 0) in vec3 pos; +layout(location = 1) in mat4 m; + +void main() +{ + gl_Position = m * vec4(pos, 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/matrix-output.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/matrix-output.vert new file mode 100644 index 000000000..1151d4bd3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/matrix-output.vert @@ -0,0 +1,9 @@ +#version 450 + +layout(location = 0) out mat4 m; + +void main() +{ + gl_Position = vec4(1.0); + m = mat4(1.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/no-input.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/no-input.vert new file mode 100644 index 000000000..8de8e816a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/no-input.vert @@ -0,0 +1,6 @@ +#version 310 es + +void main() +{ + gl_Position = vec4(1.0); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/point-size-compat.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/point-size-compat.vert new file mode 100644 index 000000000..ed86c764a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/point-size-compat.vert @@ -0,0 +1,7 @@ +#version 310 es + +void main() +{ + gl_Position = vec4(1.0); + gl_PointSize = 1.0; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/qualifiers.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/qualifiers.vert new file mode 100644 index 000000000..080a70915 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/qualifiers.vert @@ -0,0 +1,27 @@ +#version 450 + +layout(location = 0) flat out float vFlat; +layout(location = 1) centroid out float vCentroid; +layout(location = 2) sample out float vSample; +layout(location = 3) noperspective out float vNoperspective; + +layout(location = 4) out Block +{ + flat float vFlat; + centroid float vCentroid; + sample float vSample; + noperspective float vNoperspective; +} vout; + +void main() +{ + gl_Position = vec4(1.0); + vFlat = 0.0; + vCentroid = 1.0; + vSample = 2.0; + vNoperspective = 3.0; + vout.vFlat = 0.0; + vout.vCentroid = 1.0; + vout.vSample = 2.0; + vout.vNoperspective = 3.0; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..792fb8e36 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/read-from-row-major-array.vert @@ -0,0 +1,20 @@ +#version 310 es +layout(location = 0) in highp vec4 a_position; +layout(location = 0) out mediump float v_vtxResult; + +layout(set = 0, binding = 0, std140, row_major) uniform Block +{ + highp mat2x3 var[3][4]; +}; + +mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 
1.0 : 0.0; } +mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); } +mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); } + +void main (void) +{ + gl_Position = a_position; + mediump float result = 1.0; + result *= compare_mat2x3(var[0][0], mat2x3(2.0, 6.0, -6.0, 0.0, 5.0, 5.0)); + v_vtxResult = result; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/return-array.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/return-array.vert new file mode 100644 index 000000000..708460114 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/return-array.vert @@ -0,0 +1,22 @@ +#version 310 es + +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; + +vec4[2] test() +{ + return vec4[](vec4(10.0), vec4(20.0)); +} + +vec4[2] test2() +{ + vec4 foobar[2]; + foobar[0] = vInput0; + foobar[1] = vInput1; + return foobar; +} + +void main() +{ + gl_Position = test()[0] + test2()[1]; +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/sampler-buffers.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/sampler-buffers.vert new file mode 100644 index 000000000..dccbf7784 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/sampler-buffers.vert @@ -0,0 +1,17 @@ +#version 450 + +layout(binding = 1) uniform samplerBuffer uFloatSampler; +layout(binding = 2) uniform isamplerBuffer uIntSampler; +layout(binding = 3) uniform usamplerBuffer uUintSampler; + +vec4 sample_from_function(samplerBuffer s0, isamplerBuffer s1, usamplerBuffer s2) +{ + return texelFetch(s0, 20) + + intBitsToFloat(texelFetch(s1, 40)) + + uintBitsToFloat(texelFetch(s2, 60)); +} + +void main() +{ + gl_Position = sample_from_function(uFloatSampler, uIntSampler, uUintSampler); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/struct-composite-decl.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/struct-composite-decl.vert new file mode 100644 index 000000000..c527fdf51 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/struct-composite-decl.vert @@ -0,0 +1,26 @@ +#version 310 es + +layout(location = 0) in vec4 a; +layout(location = 1) in vec4 b; +layout(location = 2) in vec4 c; +layout(location = 3) in vec4 d; + +struct VOut +{ + vec4 a; + vec4 b; + vec4 c; + vec4 d; +}; + +layout(location = 0) out VOut vout; + +void emit_result(VOut v) +{ + vout = v; +} + +void main() +{ + emit_result(VOut(a, b, c, d)); +} diff --git a/3rdparty/spirv-cross/shaders-hlsl/vert/texture_buffer.vert b/3rdparty/spirv-cross/shaders-hlsl/vert/texture_buffer.vert new file mode 100644 index 000000000..b071e0c96 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-hlsl/vert/texture_buffer.vert @@ -0,0 +1,9 @@ +#version 450 + +layout(binding = 4) uniform samplerBuffer uSamp; +layout(rgba32f, binding = 5) uniform readonly imageBuffer uSampo; + +void main() +{ + gl_Position = texelFetch(uSamp, 10) + imageLoad(uSampo, 100); +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/asm/comp/variable-pointers.asm.comp b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/comp/variable-pointers.asm.comp new file mode 100644 index 000000000..ba6267cc0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/comp/variable-pointers.asm.comp @@ -0,0 +1,152 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos SPIR-V Tools Assembler; 0 +; Bound: 89 +; Schema: 0 + OpCapability Shader + OpCapability VariablePointers + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute 
%main "main" %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %foo "foo" + OpMemberName %foo 0 "a" + OpMemberName %foo 1 "b" + OpMemberName %foo 2 "c" + OpName %bar "bar" + OpMemberName %bar 0 "d" + OpName %baz "baz" + OpMemberName %baz 0 "e" + OpName %buf "buf" + OpName %buf2 "buf2" + OpName %cb "cb" + OpName %tgsm "tgsm" + OpName %sbuf "sbuf" + OpName %sbuf2 "sbuf2" + OpName %stgsm "stgsm" + OpName %select_buffer "select_buffer" + OpName %select_buffer_null "select_buffer_null" + OpName %select_tgsm "select_tgsm" + OpName %cur "cur" + OpMemberDecorate %foo 0 Offset 0 + OpMemberDecorate %foo 1 Offset 512 + OpMemberDecorate %foo 2 Offset 520 + OpMemberDecorate %bar 0 Offset 0 + OpMemberDecorate %baz 0 Offset 0 + OpDecorate %foo Block + OpDecorate %bar Block + OpDecorate %baz Block + OpDecorate %buf DescriptorSet 0 + OpDecorate %buf Binding 0 + OpDecorate %cb DescriptorSet 0 + OpDecorate %cb Binding 3 + OpDecorate %buf2 DescriptorSet 0 + OpDecorate %buf2 Binding 4 + OpDecorate %_ptr_Workgroup_int ArrayStride 4 + OpDecorate %_ptr_StorageBuffer_int ArrayStride 4 + OpDecorate %_arr_int_uint_128 ArrayStride 4 + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + %void = OpTypeVoid + %22 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input + %uint_128 = OpConstant %uint 128 +%_arr_int_uint_128 = OpTypeArray %int %uint_128 + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %foo = OpTypeStruct %_arr_int_uint_128 %uint %v2float +%_ptr_StorageBuffer_foo = OpTypePointer StorageBuffer %foo + %buf = OpVariable %_ptr_StorageBuffer_foo StorageBuffer + %bar = OpTypeStruct %int +%_ptr_Uniform_bar = OpTypePointer Uniform %bar + %cb = OpVariable %_ptr_Uniform_bar Uniform + %baz = OpTypeStruct %_arr_int_uint_128 +%_ptr_StorageBuffer_baz = OpTypePointer StorageBuffer %baz + %buf2 = OpVariable %_ptr_StorageBuffer_baz StorageBuffer +%_ptr_Workgroup__arr_int_uint_128 = OpTypePointer Workgroup %_arr_int_uint_128 + %tgsm = OpVariable %_ptr_Workgroup__arr_int_uint_128 Workgroup +%_ptr_StorageBuffer_int = OpTypePointer StorageBuffer %int +%_ptr_Private__ptr_StorageBuffer_int = OpTypePointer Private %_ptr_StorageBuffer_int + %sbuf = OpVariable %_ptr_Private__ptr_StorageBuffer_int Private + %sbuf2 = OpVariable %_ptr_Private__ptr_StorageBuffer_int Private +%_ptr_Workgroup_int = OpTypePointer Workgroup %int +%_ptr_Private__ptr_Workgroup_int = OpTypePointer Private %_ptr_Workgroup_int + %stgsm = OpVariable %_ptr_Private__ptr_Workgroup_int Private + %uint_0 = OpConstant %uint 0 + %bool = OpTypeBool +%_ptr_Uniform_int = OpTypePointer Uniform %int + %44 = OpTypeFunction %_ptr_StorageBuffer_int + %int_0 = OpConstant %int 0 + %uint_1 = OpConstant %uint 1 + %47 = OpConstantNull %_ptr_StorageBuffer_int + %48 = OpTypeFunction %_ptr_Workgroup_int + %49 = OpConstantNull %_ptr_Workgroup_int +%_ptr_Function__ptr_Workgroup_int = OpTypePointer Function %_ptr_Workgroup_int +%select_buffer = OpFunction %_ptr_StorageBuffer_int None %44 + %51 = OpLabel + %52 = OpAccessChain %_ptr_Uniform_int %cb %uint_0 + %53 = OpLoad %int %52 + %54 = OpINotEqual %bool %53 %int_0 + %55 = OpAccessChain %_ptr_StorageBuffer_int %buf %uint_0 %uint_0 + %56 = OpAccessChain %_ptr_StorageBuffer_int %buf2 %uint_0 %uint_0 + %57 = OpSelect %_ptr_StorageBuffer_int %54 %55 %56 + OpReturnValue %57 + OpFunctionEnd 
+%select_buffer_null = OpFunction %_ptr_StorageBuffer_int None %44 + %58 = OpLabel + %59 = OpAccessChain %_ptr_Uniform_int %cb %uint_0 + %60 = OpLoad %int %59 + %61 = OpINotEqual %bool %60 %int_0 + %62 = OpAccessChain %_ptr_StorageBuffer_int %buf %uint_0 %uint_0 + %63 = OpSelect %_ptr_StorageBuffer_int %61 %62 %47 + OpReturnValue %63 + OpFunctionEnd +%select_tgsm = OpFunction %_ptr_Workgroup_int None %48 + %64 = OpLabel + %65 = OpAccessChain %_ptr_Uniform_int %cb %uint_0 + %66 = OpLoad %int %65 + %67 = OpINotEqual %bool %66 %int_0 + %68 = OpAccessChain %_ptr_Workgroup_int %tgsm %uint_0 + %69 = OpSelect %_ptr_Workgroup_int %67 %68 %49 + OpReturnValue %69 + OpFunctionEnd + %main = OpFunction %void None %22 + %70 = OpLabel + %cur = OpVariable %_ptr_Function__ptr_Workgroup_int Function + %71 = OpFunctionCall %_ptr_StorageBuffer_int %select_buffer + OpStore %sbuf %71 + %72 = OpFunctionCall %_ptr_StorageBuffer_int %select_buffer_null + OpStore %sbuf2 %72 + %73 = OpFunctionCall %_ptr_Workgroup_int %select_tgsm + OpStore %stgsm %73 + %74 = OpAccessChain %_ptr_StorageBuffer_int %buf %uint_0 %uint_0 + %75 = OpLoad %_ptr_Workgroup_int %stgsm + %76 = OpCopyObject %_ptr_Workgroup_int %75 + OpStore %cur %76 + OpBranch %77 + %77 = OpLabel + %78 = OpPhi %_ptr_StorageBuffer_int %74 %70 %79 %80 + %81 = OpLoad %_ptr_Workgroup_int %cur + %82 = OpLoad %int %78 + %83 = OpINotEqual %bool %82 %int_0 + OpLoopMerge %85 %80 None + OpBranchConditional %83 %84 %85 + %84 = OpLabel + %86 = OpLoad %int %81 + %87 = OpIAdd %int %82 %86 + OpStore %78 %87 + OpStore %81 %87 + OpBranch %80 + %80 = OpLabel + %79 = OpPtrAccessChain %_ptr_StorageBuffer_int %78 %uint_1 + %88 = OpPtrAccessChain %_ptr_Workgroup_int %81 %uint_1 + OpStore %cur %88 + OpBranch %77 + %85 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag new file mode 100644 index 000000000..8b09e5b68 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag @@ -0,0 +1,646 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 1532 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %IN_HPosition %IN_Uv_EdgeDistance1 %IN_UvStuds_EdgeDistance2 %IN_Color %IN_LightPosition_Fog %IN_View_Depth %IN_Normal_SpecPower %IN_Tangent %IN_PosLightSpace_Reflectance %IN_studIndex %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %VertexOutput "VertexOutput" + OpMemberName %VertexOutput 0 "HPosition" + OpMemberName %VertexOutput 1 "Uv_EdgeDistance1" + OpMemberName %VertexOutput 2 "UvStuds_EdgeDistance2" + OpMemberName %VertexOutput 3 "Color" + OpMemberName %VertexOutput 4 "LightPosition_Fog" + OpMemberName %VertexOutput 5 "View_Depth" + OpMemberName %VertexOutput 6 "Normal_SpecPower" + OpMemberName %VertexOutput 7 "Tangent" + OpMemberName %VertexOutput 8 "PosLightSpace_Reflectance" + OpMemberName %VertexOutput 9 "studIndex" + OpName %Surface "Surface" + OpMemberName %Surface 0 "albedo" + OpMemberName %Surface 1 "normal" + OpMemberName %Surface 2 "specular" + OpMemberName %Surface 3 "gloss" + OpMemberName %Surface 4 "reflectance" + OpMemberName %Surface 5 "opacity" + OpName %SurfaceInput "SurfaceInput" + OpMemberName %SurfaceInput 0 "Color" + OpMemberName %SurfaceInput 1 "Uv" + 
OpMemberName %SurfaceInput 2 "UvStuds" + OpName %Globals "Globals" + OpMemberName %Globals 0 "ViewProjection" + OpMemberName %Globals 1 "ViewRight" + OpMemberName %Globals 2 "ViewUp" + OpMemberName %Globals 3 "ViewDir" + OpMemberName %Globals 4 "CameraPosition" + OpMemberName %Globals 5 "AmbientColor" + OpMemberName %Globals 6 "Lamp0Color" + OpMemberName %Globals 7 "Lamp0Dir" + OpMemberName %Globals 8 "Lamp1Color" + OpMemberName %Globals 9 "FogParams" + OpMemberName %Globals 10 "FogColor" + OpMemberName %Globals 11 "LightBorder" + OpMemberName %Globals 12 "LightConfig0" + OpMemberName %Globals 13 "LightConfig1" + OpMemberName %Globals 14 "LightConfig2" + OpMemberName %Globals 15 "LightConfig3" + OpMemberName %Globals 16 "RefractionBias_FadeDistance_GlowFactor" + OpMemberName %Globals 17 "OutlineBrightness_ShadowInfo" + OpMemberName %Globals 18 "ShadowMatrix0" + OpMemberName %Globals 19 "ShadowMatrix1" + OpMemberName %Globals 20 "ShadowMatrix2" + OpName %CB0 "CB0" + OpMemberName %CB0 0 "CB0" + OpName %_ "" + OpName %LightMapTexture "LightMapTexture" + OpName %LightMapSampler "LightMapSampler" + OpName %ShadowMapSampler "ShadowMapSampler" + OpName %ShadowMapTexture "ShadowMapTexture" + OpName %EnvironmentMapTexture "EnvironmentMapTexture" + OpName %EnvironmentMapSampler "EnvironmentMapSampler" + OpName %IN_HPosition "IN.HPosition" + OpName %IN_Uv_EdgeDistance1 "IN.Uv_EdgeDistance1" + OpName %IN_UvStuds_EdgeDistance2 "IN.UvStuds_EdgeDistance2" + OpName %IN_Color "IN.Color" + OpName %IN_LightPosition_Fog "IN.LightPosition_Fog" + OpName %IN_View_Depth "IN.View_Depth" + OpName %IN_Normal_SpecPower "IN.Normal_SpecPower" + OpName %IN_Tangent "IN.Tangent" + OpName %IN_PosLightSpace_Reflectance "IN.PosLightSpace_Reflectance" + OpName %IN_studIndex "IN.studIndex" + OpName %_entryPointOutput "@entryPointOutput" + OpName %DiffuseMapSampler "DiffuseMapSampler" + OpName %DiffuseMapTexture "DiffuseMapTexture" + OpName %NormalMapSampler "NormalMapSampler" + OpName %NormalMapTexture "NormalMapTexture" + OpName %NormalDetailMapTexture "NormalDetailMapTexture" + OpName %NormalDetailMapSampler "NormalDetailMapSampler" + OpName %StudsMapTexture "StudsMapTexture" + OpName %StudsMapSampler "StudsMapSampler" + OpName %SpecularMapSampler "SpecularMapSampler" + OpName %SpecularMapTexture "SpecularMapTexture" + OpName %Params "Params" + OpMemberName %Params 0 "LqmatFarTilingFactor" + OpName %CB2 "CB2" + OpMemberName %CB2 0 "CB2" + OpMemberDecorate %Globals 0 ColMajor + OpMemberDecorate %Globals 0 Offset 0 + OpMemberDecorate %Globals 0 MatrixStride 16 + OpMemberDecorate %Globals 1 Offset 64 + OpMemberDecorate %Globals 2 Offset 80 + OpMemberDecorate %Globals 3 Offset 96 + OpMemberDecorate %Globals 4 Offset 112 + OpMemberDecorate %Globals 5 Offset 128 + OpMemberDecorate %Globals 6 Offset 144 + OpMemberDecorate %Globals 7 Offset 160 + OpMemberDecorate %Globals 8 Offset 176 + OpMemberDecorate %Globals 9 Offset 192 + OpMemberDecorate %Globals 10 Offset 208 + OpMemberDecorate %Globals 11 Offset 224 + OpMemberDecorate %Globals 12 Offset 240 + OpMemberDecorate %Globals 13 Offset 256 + OpMemberDecorate %Globals 14 Offset 272 + OpMemberDecorate %Globals 15 Offset 288 + OpMemberDecorate %Globals 16 Offset 304 + OpMemberDecorate %Globals 17 Offset 320 + OpMemberDecorate %Globals 18 Offset 336 + OpMemberDecorate %Globals 19 Offset 352 + OpMemberDecorate %Globals 20 Offset 368 + OpMemberDecorate %CB0 0 Offset 0 + OpDecorate %CB0 Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %LightMapTexture 
DescriptorSet 1 + OpDecorate %LightMapTexture Binding 6 + OpDecorate %LightMapSampler DescriptorSet 1 + OpDecorate %LightMapSampler Binding 6 + OpDecorate %ShadowMapSampler DescriptorSet 1 + OpDecorate %ShadowMapSampler Binding 1 + OpDecorate %ShadowMapTexture DescriptorSet 1 + OpDecorate %ShadowMapTexture Binding 1 + OpDecorate %EnvironmentMapTexture DescriptorSet 1 + OpDecorate %EnvironmentMapTexture Binding 2 + OpDecorate %EnvironmentMapSampler DescriptorSet 1 + OpDecorate %EnvironmentMapSampler Binding 2 + OpDecorate %IN_HPosition BuiltIn FragCoord + OpDecorate %IN_Uv_EdgeDistance1 Location 0 + OpDecorate %IN_UvStuds_EdgeDistance2 Location 1 + OpDecorate %IN_Color Location 2 + OpDecorate %IN_LightPosition_Fog Location 3 + OpDecorate %IN_View_Depth Location 4 + OpDecorate %IN_Normal_SpecPower Location 5 + OpDecorate %IN_Tangent Location 6 + OpDecorate %IN_PosLightSpace_Reflectance Location 7 + OpDecorate %IN_studIndex Location 8 + OpDecorate %_entryPointOutput Location 0 + OpDecorate %DiffuseMapSampler DescriptorSet 1 + OpDecorate %DiffuseMapSampler Binding 3 + OpDecorate %DiffuseMapTexture DescriptorSet 1 + OpDecorate %DiffuseMapTexture Binding 3 + OpDecorate %NormalMapSampler DescriptorSet 1 + OpDecorate %NormalMapSampler Binding 4 + OpDecorate %NormalMapTexture DescriptorSet 1 + OpDecorate %NormalMapTexture Binding 4 + OpDecorate %NormalDetailMapTexture DescriptorSet 1 + OpDecorate %NormalDetailMapTexture Binding 8 + OpDecorate %NormalDetailMapSampler DescriptorSet 1 + OpDecorate %NormalDetailMapSampler Binding 8 + OpDecorate %StudsMapTexture DescriptorSet 1 + OpDecorate %StudsMapTexture Binding 0 + OpDecorate %StudsMapSampler DescriptorSet 1 + OpDecorate %StudsMapSampler Binding 0 + OpDecorate %SpecularMapSampler DescriptorSet 1 + OpDecorate %SpecularMapSampler Binding 5 + OpDecorate %SpecularMapTexture DescriptorSet 1 + OpDecorate %SpecularMapTexture Binding 5 + OpMemberDecorate %Params 0 Offset 0 + OpMemberDecorate %CB2 0 Offset 0 + OpDecorate %CB2 Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %8 = OpTypeFunction %float %_ptr_Function_float + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %v3float = OpTypeVector %float 3 + %18 = OpTypeFunction %v3float %_ptr_Function_v4float +%_ptr_Function_v3float = OpTypePointer Function %v3float + %23 = OpTypeFunction %v4float %_ptr_Function_v3float + %27 = OpTypeFunction %float %_ptr_Function_v3float + %31 = OpTypeFunction %float %_ptr_Function_float %_ptr_Function_float + %36 = OpTypeSampler +%_ptr_Function_36 = OpTypePointer Function %36 + %38 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_Function_38 = OpTypePointer Function %38 + %40 = OpTypeFunction %float %_ptr_Function_36 %_ptr_Function_38 %_ptr_Function_v3float %_ptr_Function_float +%VertexOutput = OpTypeStruct %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v3float %v4float %float +%_ptr_Function_VertexOutput = OpTypePointer Function %VertexOutput + %Surface = OpTypeStruct %v3float %v3float %float %float %float %float + %50 = OpTypeFunction %Surface %_ptr_Function_VertexOutput + %54 = OpTypeFunction %v4float %_ptr_Function_VertexOutput + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %60 = OpTypeFunction %v4float %_ptr_Function_36 %_ptr_Function_38 %_ptr_Function_v2float %_ptr_Function_float %_ptr_Function_float +%SurfaceInput = OpTypeStruct %v4float %v2float %v2float 
+%_ptr_Function_SurfaceInput = OpTypePointer Function %SurfaceInput + %70 = OpTypeFunction %Surface %_ptr_Function_SurfaceInput %_ptr_Function_v2float + %float_0 = OpConstant %float 0 + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 +%mat4v4float = OpTypeMatrix %v4float 4 + %Globals = OpTypeStruct %mat4v4float %v4float %v4float %v4float %v3float %v3float %v3float %v3float %v3float %v4float %v3float %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v4float + %CB0 = OpTypeStruct %Globals +%_ptr_Uniform_CB0 = OpTypePointer Uniform %CB0 + %_ = OpVariable %_ptr_Uniform_CB0 Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %int_15 = OpConstant %int 15 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_14 = OpConstant %int 14 + %128 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %133 = OpTypeImage %float 3D 0 0 0 1 Unknown +%_ptr_UniformConstant_133 = OpTypePointer UniformConstant %133 +%LightMapTexture = OpVariable %_ptr_UniformConstant_133 UniformConstant +%_ptr_UniformConstant_36 = OpTypePointer UniformConstant %36 +%LightMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant + %140 = OpTypeSampledImage %133 + %int_11 = OpConstant %int 11 + %uint = OpTypeInt 32 0 + %float_9 = OpConstant %float 9 + %float_20 = OpConstant %float 20 + %float_0_5 = OpConstant %float 0.5 + %183 = OpTypeSampledImage %38 + %uint_0 = OpConstant %uint 0 + %uint_1 = OpConstant %uint 1 + %int_17 = OpConstant %int 17 + %uint_3 = OpConstant %uint 3 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %float_0_25 = OpConstant %float 0.25 + %int_5 = OpConstant %int 5 +%float_0_00333333 = OpConstant %float 0.00333333 + %int_16 = OpConstant %int 16 +%_ptr_Function_Surface = OpTypePointer Function %Surface + %int_6 = OpConstant %int 6 + %int_7 = OpConstant %int 7 +%_ptr_Uniform_v3float = OpTypePointer Uniform %v3float + %int_8 = OpConstant %int 8 +%ShadowMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%_ptr_UniformConstant_38 = OpTypePointer UniformConstant %38 +%ShadowMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant + %367 = OpTypeImage %float Cube 0 0 0 1 Unknown +%_ptr_UniformConstant_367 = OpTypePointer UniformConstant %367 +%EnvironmentMapTexture = OpVariable %_ptr_UniformConstant_367 UniformConstant +%EnvironmentMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant + %373 = OpTypeSampledImage %367 + %float_1_5 = OpConstant %float 1.5 + %int_10 = OpConstant %int 10 +%_ptr_Input_v4float = OpTypePointer Input %v4float +%IN_HPosition = OpVariable %_ptr_Input_v4float Input +%IN_Uv_EdgeDistance1 = OpVariable %_ptr_Input_v4float Input +%IN_UvStuds_EdgeDistance2 = OpVariable %_ptr_Input_v4float Input + %IN_Color = OpVariable %_ptr_Input_v4float Input +%IN_LightPosition_Fog = OpVariable %_ptr_Input_v4float Input +%IN_View_Depth = OpVariable %_ptr_Input_v4float Input +%IN_Normal_SpecPower = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_v3float = OpTypePointer Input %v3float + %IN_Tangent = OpVariable %_ptr_Input_v3float Input +%IN_PosLightSpace_Reflectance = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_float = OpTypePointer Input %float +%IN_studIndex = OpVariable %_ptr_Input_float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %bool = OpTypeBool +%DiffuseMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%DiffuseMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant 
+%NormalMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%NormalMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%NormalDetailMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%NormalDetailMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant + %float_0_3 = OpConstant %float 0.3 +%StudsMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%StudsMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%SpecularMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%SpecularMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant + %float_0_75 = OpConstant %float 0.75 + %float_256 = OpConstant %float 256 + %689 = OpConstantComposite %v2float %float_2 %float_256 + %float_0_01 = OpConstant %float 0.01 + %692 = OpConstantComposite %v2float %float_0 %float_0_01 + %float_0_8 = OpConstant %float 0.8 + %float_120 = OpConstant %float 120 + %697 = OpConstantComposite %v2float %float_0_8 %float_120 + %Params = OpTypeStruct %v4float + %CB2 = OpTypeStruct %Params +%_ptr_Uniform_CB2 = OpTypePointer Uniform %CB2 + %false = OpConstantFalse %bool + %1509 = OpUndef %VertexOutput + %1510 = OpUndef %SurfaceInput + %1511 = OpUndef %v2float + %1512 = OpUndef %v4float + %1531 = OpUndef %Surface + %main = OpFunction %void None %3 + %5 = OpLabel + %501 = OpLoad %v4float %IN_HPosition + %1378 = OpCompositeInsert %VertexOutput %501 %1509 0 + %504 = OpLoad %v4float %IN_Uv_EdgeDistance1 + %1380 = OpCompositeInsert %VertexOutput %504 %1378 1 + %507 = OpLoad %v4float %IN_UvStuds_EdgeDistance2 + %1382 = OpCompositeInsert %VertexOutput %507 %1380 2 + %510 = OpLoad %v4float %IN_Color + %1384 = OpCompositeInsert %VertexOutput %510 %1382 3 + %513 = OpLoad %v4float %IN_LightPosition_Fog + %1386 = OpCompositeInsert %VertexOutput %513 %1384 4 + %516 = OpLoad %v4float %IN_View_Depth + %1388 = OpCompositeInsert %VertexOutput %516 %1386 5 + %519 = OpLoad %v4float %IN_Normal_SpecPower + %1390 = OpCompositeInsert %VertexOutput %519 %1388 6 + %523 = OpLoad %v3float %IN_Tangent + %1392 = OpCompositeInsert %VertexOutput %523 %1390 7 + %526 = OpLoad %v4float %IN_PosLightSpace_Reflectance + %1394 = OpCompositeInsert %VertexOutput %526 %1392 8 + %530 = OpLoad %float %IN_studIndex + %1396 = OpCompositeInsert %VertexOutput %530 %1394 9 + %1400 = OpCompositeInsert %SurfaceInput %510 %1510 0 + %954 = OpVectorShuffle %v2float %504 %504 0 1 + %1404 = OpCompositeInsert %SurfaceInput %954 %1400 1 + %958 = OpVectorShuffle %v2float %507 %507 0 1 + %1408 = OpCompositeInsert %SurfaceInput %958 %1404 2 + %1410 = OpCompositeExtract %float %1408 2 1 + %962 = OpExtInst %float %1 Fract %1410 + %965 = OpFAdd %float %962 %530 + %966 = OpFMul %float %965 %float_0_25 + %1414 = OpCompositeInsert %SurfaceInput %966 %1408 2 1 + %1416 = OpCompositeExtract %float %1396 5 3 + %970 = OpFMul %float %1416 %float_0_00333333 + %971 = OpFSub %float %float_1 %970 + %987 = OpExtInst %float %1 FClamp %971 %float_0 %float_1 + %976 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_16 %uint_1 + %977 = OpLoad %float %976 + %978 = OpFMul %float %1416 %977 + %979 = OpFSub %float %float_1 %978 + %990 = OpExtInst %float %1 FClamp %979 %float_0 %float_1 + %1024 = OpVectorTimesScalar %v2float %954 %float_1 + %1029 = OpLoad %36 %DiffuseMapSampler + %1030 = OpLoad %38 %DiffuseMapTexture + OpBranch %1119 + %1119 = OpLabel + OpLoopMerge %1120 %1121 None + OpBranch %1122 + %1122 = OpLabel + %1124 = OpFOrdEqual %bool %float_0 %float_0 + OpSelectionMerge %1125 None + OpBranchConditional 
%1124 %1126 %1127 + %1126 = OpLabel + %1130 = OpSampledImage %183 %1030 %1029 + %1132 = OpImageSampleImplicitLod %v4float %1130 %1024 + OpBranch %1120 + %1127 = OpLabel + %1134 = OpFSub %float %float_1 %float_0 + %1135 = OpFDiv %float %float_1 %1134 + %1138 = OpSampledImage %183 %1030 %1029 + %1140 = OpVectorTimesScalar %v2float %1024 %float_0_25 + %1141 = OpImageSampleImplicitLod %v4float %1138 %1140 + %1144 = OpSampledImage %183 %1030 %1029 + %1146 = OpImageSampleImplicitLod %v4float %1144 %1024 + %1149 = OpFMul %float %987 %1135 + %1152 = OpFMul %float %float_0 %1135 + %1153 = OpFSub %float %1149 %1152 + %1161 = OpExtInst %float %1 FClamp %1153 %float_0 %float_1 + %1155 = OpCompositeConstruct %v4float %1161 %1161 %1161 %1161 + %1156 = OpExtInst %v4float %1 FMix %1141 %1146 %1155 + OpBranch %1120 + %1125 = OpLabel + %1157 = OpUndef %v4float + OpBranch %1120 + %1121 = OpLabel + OpBranchConditional %false %1119 %1120 + %1120 = OpLabel + %1517 = OpPhi %v4float %1132 %1126 %1156 %1127 %1157 %1125 %1512 %1121 + %1035 = OpVectorTimesScalar %v4float %1517 %float_1 + %1036 = OpLoad %36 %NormalMapSampler + %1037 = OpLoad %38 %NormalMapTexture + OpBranch %1165 + %1165 = OpLabel + OpLoopMerge %1166 %1167 None + OpBranch %1168 + %1168 = OpLabel + OpSelectionMerge %1171 None + OpBranchConditional %1124 %1172 %1173 + %1172 = OpLabel + %1176 = OpSampledImage %183 %1037 %1036 + %1178 = OpImageSampleImplicitLod %v4float %1176 %1024 + OpBranch %1166 + %1173 = OpLabel + %1180 = OpFSub %float %float_1 %float_0 + %1181 = OpFDiv %float %float_1 %1180 + %1184 = OpSampledImage %183 %1037 %1036 + %1186 = OpVectorTimesScalar %v2float %1024 %float_0_25 + %1187 = OpImageSampleImplicitLod %v4float %1184 %1186 + %1190 = OpSampledImage %183 %1037 %1036 + %1192 = OpImageSampleImplicitLod %v4float %1190 %1024 + %1195 = OpFMul %float %990 %1181 + %1198 = OpFMul %float %float_0 %1181 + %1199 = OpFSub %float %1195 %1198 + %1206 = OpExtInst %float %1 FClamp %1199 %float_0 %float_1 + %1201 = OpCompositeConstruct %v4float %1206 %1206 %1206 %1206 + %1202 = OpExtInst %v4float %1 FMix %1187 %1192 %1201 + OpBranch %1166 + %1171 = OpLabel + %1203 = OpUndef %v4float + OpBranch %1166 + %1167 = OpLabel + OpBranchConditional %false %1165 %1166 + %1166 = OpLabel + %1523 = OpPhi %v4float %1178 %1172 %1202 %1173 %1203 %1171 %1512 %1167 + %1210 = OpVectorShuffle %v2float %1523 %1523 3 1 + %1211 = OpVectorTimesScalar %v2float %1210 %float_2 + %1212 = OpCompositeConstruct %v2float %float_1 %float_1 + %1213 = OpFSub %v2float %1211 %1212 + %1216 = OpFNegate %v2float %1213 + %1218 = OpDot %float %1216 %1213 + %1219 = OpFAdd %float %float_1 %1218 + %1220 = OpExtInst %float %1 FClamp %1219 %float_0 %float_1 + %1221 = OpExtInst %float %1 Sqrt %1220 + %1222 = OpCompositeExtract %float %1213 0 + %1223 = OpCompositeExtract %float %1213 1 + %1224 = OpCompositeConstruct %v3float %1222 %1223 %1221 + %1042 = OpLoad %38 %NormalDetailMapTexture + %1043 = OpLoad %36 %NormalDetailMapSampler + %1044 = OpSampledImage %183 %1042 %1043 + %1046 = OpVectorTimesScalar %v2float %1024 %float_0 + %1047 = OpImageSampleImplicitLod %v4float %1044 %1046 + %1228 = OpVectorShuffle %v2float %1047 %1047 3 1 + %1229 = OpVectorTimesScalar %v2float %1228 %float_2 + %1231 = OpFSub %v2float %1229 %1212 + %1234 = OpFNegate %v2float %1231 + %1236 = OpDot %float %1234 %1231 + %1237 = OpFAdd %float %float_1 %1236 + %1238 = OpExtInst %float %1 FClamp %1237 %float_0 %float_1 + %1239 = OpExtInst %float %1 Sqrt %1238 + %1240 = OpCompositeExtract %float %1231 0 + %1241 = 
OpCompositeExtract %float %1231 1 + %1242 = OpCompositeConstruct %v3float %1240 %1241 %1239 + %1050 = OpVectorShuffle %v2float %1242 %1242 0 1 + %1051 = OpVectorTimesScalar %v2float %1050 %float_0 + %1053 = OpVectorShuffle %v2float %1224 %1224 0 1 + %1054 = OpFAdd %v2float %1053 %1051 + %1056 = OpVectorShuffle %v3float %1224 %1054 3 4 2 + %1059 = OpVectorShuffle %v2float %1056 %1056 0 1 + %1060 = OpVectorTimesScalar %v2float %1059 %990 + %1062 = OpVectorShuffle %v3float %1056 %1060 3 4 2 + %1430 = OpCompositeExtract %float %1062 0 + %1065 = OpFMul %float %1430 %float_0_3 + %1066 = OpFAdd %float %float_1 %1065 + %1069 = OpVectorShuffle %v3float %510 %510 0 1 2 + %1071 = OpVectorShuffle %v3float %1035 %1035 0 1 2 + %1072 = OpFMul %v3float %1069 %1071 + %1074 = OpVectorTimesScalar %v3float %1072 %1066 + %1075 = OpLoad %38 %StudsMapTexture + %1076 = OpLoad %36 %StudsMapSampler + %1077 = OpSampledImage %183 %1075 %1076 + %1434 = OpCompositeExtract %v2float %1414 2 + %1080 = OpImageSampleImplicitLod %v4float %1077 %1434 + %1436 = OpCompositeExtract %float %1080 0 + %1083 = OpFMul %float %1436 %float_2 + %1085 = OpVectorTimesScalar %v3float %1074 %1083 + %1086 = OpLoad %36 %SpecularMapSampler + %1087 = OpLoad %38 %SpecularMapTexture + OpBranch %1246 + %1246 = OpLabel + OpLoopMerge %1247 %1248 None + OpBranch %1249 + %1249 = OpLabel + %1251 = OpFOrdEqual %bool %float_0_75 %float_0 + OpSelectionMerge %1252 None + OpBranchConditional %1251 %1253 %1254 + %1253 = OpLabel + %1257 = OpSampledImage %183 %1087 %1086 + %1259 = OpImageSampleImplicitLod %v4float %1257 %1024 + OpBranch %1247 + %1254 = OpLabel + %1261 = OpFSub %float %float_1 %float_0_75 + %1262 = OpFDiv %float %float_1 %1261 + %1265 = OpSampledImage %183 %1087 %1086 + %1267 = OpVectorTimesScalar %v2float %1024 %float_0_25 + %1268 = OpImageSampleImplicitLod %v4float %1265 %1267 + %1271 = OpSampledImage %183 %1087 %1086 + %1273 = OpImageSampleImplicitLod %v4float %1271 %1024 + %1276 = OpFMul %float %990 %1262 + %1279 = OpFMul %float %float_0_75 %1262 + %1280 = OpFSub %float %1276 %1279 + %1287 = OpExtInst %float %1 FClamp %1280 %float_0 %float_1 + %1282 = OpCompositeConstruct %v4float %1287 %1287 %1287 %1287 + %1283 = OpExtInst %v4float %1 FMix %1268 %1273 %1282 + OpBranch %1247 + %1252 = OpLabel + %1284 = OpUndef %v4float + OpBranch %1247 + %1248 = OpLabel + OpBranchConditional %false %1246 %1247 + %1247 = OpLabel + %1530 = OpPhi %v4float %1259 %1253 %1283 %1254 %1284 %1252 %1512 %1248 + %1091 = OpVectorShuffle %v2float %1530 %1530 0 1 + %1093 = OpFMul %v2float %1091 %689 + %1094 = OpFAdd %v2float %1093 %692 + %1097 = OpCompositeConstruct %v2float %990 %990 + %1098 = OpExtInst %v2float %1 FMix %697 %1094 %1097 + %1438 = OpCompositeInsert %Surface %1085 %1531 0 + %1440 = OpCompositeInsert %Surface %1062 %1438 1 + %1442 = OpCompositeExtract %float %1098 0 + %1444 = OpCompositeInsert %Surface %1442 %1440 2 + %1446 = OpCompositeExtract %float %1098 1 + %1448 = OpCompositeInsert %Surface %1446 %1444 3 + %1450 = OpCompositeExtract %float %1091 1 + %1112 = OpFMul %float %1450 %990 + %1113 = OpFMul %float %1112 %float_0 + %1452 = OpCompositeInsert %Surface %1113 %1448 4 + %1456 = OpCompositeExtract %float %1396 3 3 + %764 = OpCompositeExtract %float %1085 0 + %765 = OpCompositeExtract %float %1085 1 + %766 = OpCompositeExtract %float %1085 2 + %767 = OpCompositeConstruct %v4float %764 %765 %766 %1456 + %770 = OpVectorShuffle %v3float %519 %519 0 1 2 + %773 = OpExtInst %v3float %1 Cross %770 %523 + %1462 = OpCompositeExtract %float %1452 1 0 + %778 = 
OpVectorTimesScalar %v3float %523 %1462 + %1466 = OpCompositeExtract %float %1452 1 1 + %782 = OpVectorTimesScalar %v3float %773 %1466 + %783 = OpFAdd %v3float %778 %782 + %1468 = OpCompositeExtract %float %1452 1 2 + %789 = OpVectorTimesScalar %v3float %770 %1468 + %790 = OpFAdd %v3float %783 %789 + %791 = OpExtInst %v3float %1 Normalize %790 + %793 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_7 + %794 = OpLoad %v3float %793 + %795 = OpFNegate %v3float %794 + %796 = OpDot %float %791 %795 + %1290 = OpExtInst %float %1 FClamp %796 %float_0 %float_1 + %799 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_6 + %800 = OpLoad %v3float %799 + %801 = OpVectorTimesScalar %v3float %800 %1290 + %803 = OpFNegate %float %796 + %804 = OpExtInst %float %1 FMax %803 %float_0 + %805 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_8 + %806 = OpLoad %v3float %805 + %807 = OpVectorTimesScalar %v3float %806 %804 + %808 = OpFAdd %v3float %801 %807 + %810 = OpExtInst %float %1 Step %float_0 %796 + %813 = OpFMul %float %810 %1442 + %820 = OpVectorShuffle %v3float %513 %513 0 1 2 + %1296 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_15 + %1297 = OpLoad %v4float %1296 + %1298 = OpVectorShuffle %v3float %1297 %1297 0 1 2 + %1300 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_14 + %1301 = OpLoad %v4float %1300 + %1302 = OpVectorShuffle %v3float %1301 %1301 0 1 2 + %1303 = OpFSub %v3float %820 %1302 + %1304 = OpExtInst %v3float %1 FAbs %1303 + %1305 = OpExtInst %v3float %1 Step %1298 %1304 + %1307 = OpDot %float %1305 %128 + %1328 = OpExtInst %float %1 FClamp %1307 %float_0 %float_1 + %1309 = OpLoad %133 %LightMapTexture + %1310 = OpLoad %36 %LightMapSampler + %1311 = OpSampledImage %140 %1309 %1310 + %1313 = OpVectorShuffle %v3float %820 %820 1 2 0 + %1317 = OpVectorTimesScalar %v3float %1313 %1328 + %1318 = OpFSub %v3float %1313 %1317 + %1319 = OpImageSampleImplicitLod %v4float %1311 %1318 + %1321 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_11 + %1322 = OpLoad %v4float %1321 + %1324 = OpCompositeConstruct %v4float %1328 %1328 %1328 %1328 + %1325 = OpExtInst %v4float %1 FMix %1319 %1322 %1324 + %822 = OpLoad %36 %ShadowMapSampler + %823 = OpLoad %38 %ShadowMapTexture + %826 = OpVectorShuffle %v3float %526 %526 0 1 2 + %1482 = OpCompositeExtract %float %1325 3 + %1337 = OpSampledImage %183 %823 %822 + %1339 = OpVectorShuffle %v2float %826 %826 0 1 + %1340 = OpImageSampleImplicitLod %v4float %1337 %1339 + %1341 = OpVectorShuffle %v2float %1340 %1340 0 1 + %1484 = OpCompositeExtract %float %826 2 + %1486 = OpCompositeExtract %float %1341 0 + %1363 = OpExtInst %float %1 Step %1486 %1484 + %1365 = OpFSub %float %1484 %float_0_5 + %1366 = OpExtInst %float %1 FAbs %1365 + %1367 = OpFMul %float %float_20 %1366 + %1368 = OpFSub %float %float_9 %1367 + %1369 = OpExtInst %float %1 FClamp %1368 %float_0 %float_1 + %1370 = OpFMul %float %1363 %1369 + %1488 = OpCompositeExtract %float %1341 1 + %1350 = OpFMul %float %1370 %1488 + %1351 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_17 %uint_3 + %1352 = OpLoad %float %1351 + %1353 = OpFMul %float %1350 %1352 + %1354 = OpFSub %float %float_1 %1353 + %1356 = OpFMul %float %1354 %1482 + %830 = OpLoad %367 %EnvironmentMapTexture + %831 = OpLoad %36 %EnvironmentMapSampler + %832 = OpSampledImage %373 %830 %831 + %835 = OpVectorShuffle %v3float %516 %516 0 1 2 + %836 = OpFNegate %v3float %835 + %838 = OpExtInst %v3float %1 Reflect %836 %791 + %839 = OpImageSampleImplicitLod %v4float %832 %838 + %840 = OpVectorShuffle %v3float %839 %839 0 1 2 + 
%842 = OpVectorShuffle %v3float %767 %767 0 1 2 + %845 = OpCompositeConstruct %v3float %1113 %1113 %1113 + %846 = OpExtInst %v3float %1 FMix %842 %840 %845 + %848 = OpVectorShuffle %v4float %767 %846 4 5 6 3 + %849 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_5 + %850 = OpLoad %v3float %849 + %853 = OpVectorTimesScalar %v3float %808 %1356 + %854 = OpFAdd %v3float %850 %853 + %856 = OpVectorShuffle %v3float %1325 %1325 0 1 2 + %857 = OpFAdd %v3float %854 %856 + %859 = OpVectorShuffle %v3float %848 %848 0 1 2 + %860 = OpFMul %v3float %857 %859 + %865 = OpFMul %float %813 %1356 + %873 = OpExtInst %v3float %1 Normalize %835 + %874 = OpFAdd %v3float %795 %873 + %875 = OpExtInst %v3float %1 Normalize %874 + %876 = OpDot %float %791 %875 + %877 = OpExtInst %float %1 FClamp %876 %float_0 %float_1 + %879 = OpExtInst %float %1 Pow %877 %1446 + %880 = OpFMul %float %865 %879 + %881 = OpVectorTimesScalar %v3float %800 %880 + %884 = OpFAdd %v3float %860 %881 + %886 = OpVectorShuffle %v4float %1512 %884 4 5 6 3 + %1494 = OpCompositeExtract %float %848 3 + %1496 = OpCompositeInsert %v4float %1494 %886 3 + %896 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_17 %uint_0 + %897 = OpLoad %float %896 + %898 = OpFMul %float %978 %897 + %899 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_17 %uint_1 + %900 = OpLoad %float %899 + %901 = OpFAdd %float %898 %900 + %1373 = OpExtInst %float %1 FClamp %901 %float_0 %float_1 + %905 = OpVectorShuffle %v2float %504 %504 3 2 + %908 = OpVectorShuffle %v2float %507 %507 3 2 + %909 = OpExtInst %v2float %1 FMin %905 %908 + %1504 = OpCompositeExtract %float %909 0 + %1506 = OpCompositeExtract %float %909 1 + %914 = OpExtInst %float %1 FMin %1504 %1506 + %916 = OpFDiv %float %914 %978 + %919 = OpFSub %float %float_1_5 %916 + %920 = OpFMul %float %1373 %919 + %922 = OpFAdd %float %920 %916 + %1376 = OpExtInst %float %1 FClamp %922 %float_0 %float_1 + %925 = OpVectorShuffle %v3float %1496 %1496 0 1 2 + %926 = OpVectorTimesScalar %v3float %925 %1376 + %928 = OpVectorShuffle %v4float %1496 %926 4 5 6 3 + %1508 = OpCompositeExtract %float %1396 4 3 + %931 = OpExtInst %float %1 FClamp %1508 %float_0 %float_1 + %932 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_10 + %933 = OpLoad %v3float %932 + %935 = OpVectorShuffle %v3float %928 %928 0 1 2 + %937 = OpCompositeConstruct %v3float %931 %931 %931 + %938 = OpExtInst %v3float %1 FMix %933 %935 %937 + %940 = OpVectorShuffle %v4float %928 %938 4 5 6 3 + OpStore %_entryPointOutput %940 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/asm/frag/texture-access.swizzle.asm.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/frag/texture-access.swizzle.asm.frag new file mode 100644 index 000000000..4c1408b38 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/frag/texture-access.swizzle.asm.frag @@ -0,0 +1,364 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 247 +; Schema: 0 + OpCapability Shader + OpCapability Sampled1D + OpCapability SampledCubeArray + OpCapability SampledBuffer + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %c "c" + OpName %tex1d "tex1d" + OpName %tex2d "tex2d" + OpName %tex3d "tex3d" + OpName %texCube "texCube" + OpName %tex2dArray "tex2dArray" + OpName %texCubeArray "texCubeArray" + OpName %depth2d "depth2d" + OpName %depthCube "depthCube" + OpName %depth2dArray 
"depth2dArray" + OpName %depthCubeArray "depthCubeArray" + OpName %texBuffer "texBuffer" + OpName %tex1dSamp "tex1dSamp" + OpName %tex2dSamp "tex2dSamp" + OpName %tex3dSamp "tex3dSamp" + OpName %texCubeSamp "texCubeSamp" + OpName %tex2dArraySamp "tex2dArraySamp" + OpName %texCubeArraySamp "texCubeArraySamp" + OpName %depth2dSamp "depth2dSamp" + OpName %depthCubeSamp "depthCubeSamp" + OpName %depth2dArraySamp "depth2dArraySamp" + OpName %depthCubeArraySamp "depthCubeArraySamp" + OpDecorate %tex1d DescriptorSet 0 + OpDecorate %tex1d Binding 0 + OpDecorate %tex2d DescriptorSet 0 + OpDecorate %tex2d Binding 1 + OpDecorate %tex3d DescriptorSet 0 + OpDecorate %tex3d Binding 2 + OpDecorate %texCube DescriptorSet 0 + OpDecorate %texCube Binding 3 + OpDecorate %tex2dArray DescriptorSet 0 + OpDecorate %tex2dArray Binding 4 + OpDecorate %texCubeArray DescriptorSet 0 + OpDecorate %texCubeArray Binding 5 + OpDecorate %depth2d DescriptorSet 0 + OpDecorate %depth2d Binding 7 + OpDecorate %depthCube DescriptorSet 0 + OpDecorate %depthCube Binding 8 + OpDecorate %depth2dArray DescriptorSet 0 + OpDecorate %depth2dArray Binding 9 + OpDecorate %depthCubeArray DescriptorSet 0 + OpDecorate %depthCubeArray Binding 10 + OpDecorate %texBuffer DescriptorSet 0 + OpDecorate %texBuffer Binding 6 + OpDecorate %tex1dSamp DescriptorSet 1 + OpDecorate %tex1dSamp Binding 0 + OpDecorate %tex2dSamp DescriptorSet 1 + OpDecorate %tex2dSamp Binding 1 + OpDecorate %tex3dSamp DescriptorSet 1 + OpDecorate %tex3dSamp Binding 2 + OpDecorate %texCubeSamp DescriptorSet 1 + OpDecorate %texCubeSamp Binding 3 + OpDecorate %tex2dArraySamp DescriptorSet 1 + OpDecorate %tex2dArraySamp Binding 4 + OpDecorate %texCubeArraySamp DescriptorSet 1 + OpDecorate %texCubeArraySamp Binding 5 + OpDecorate %depth2dSamp DescriptorSet 1 + OpDecorate %depth2dSamp Binding 7 + OpDecorate %depthCubeSamp DescriptorSet 1 + OpDecorate %depthCubeSamp Binding 8 + OpDecorate %depth2dArraySamp DescriptorSet 1 + OpDecorate %depth2dArraySamp Binding 9 + OpDecorate %depthCubeArraySamp DescriptorSet 1 + OpDecorate %depthCubeArraySamp Binding 10 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %10 = OpTypeImage %float 1D 0 0 0 1 Unknown + %11 = OpTypeSampledImage %10 + %12 = OpTypeSampler +%_ptr_UniformConstant_10 = OpTypePointer UniformConstant %10 + %tex1d = OpVariable %_ptr_UniformConstant_10 UniformConstant +%_ptr_UniformConstant_12 = OpTypePointer UniformConstant %12 + %tex1dSamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %float_0 = OpConstant %float 0 + %17 = OpTypeImage %float 2D 0 0 0 1 Unknown + %18 = OpTypeSampledImage %17 +%_ptr_UniformConstant_17 = OpTypePointer UniformConstant %17 + %tex2d = OpVariable %_ptr_UniformConstant_17 UniformConstant + %tex2dSamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %v2float = OpTypeVector %float 2 + %23 = OpConstantComposite %v2float %float_0 %float_0 + %25 = OpTypeImage %float 3D 0 0 0 1 Unknown + %26 = OpTypeSampledImage %25 +%_ptr_UniformConstant_25 = OpTypePointer UniformConstant %25 + %tex3d = OpVariable %_ptr_UniformConstant_25 UniformConstant + %tex3dSamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %v3float = OpTypeVector %float 3 + %31 = OpConstantComposite %v3float %float_0 %float_0 %float_0 + %33 = OpTypeImage %float Cube 0 0 0 1 Unknown + %34 = OpTypeSampledImage %33 +%_ptr_UniformConstant_33 = OpTypePointer UniformConstant %33 + %texCube = OpVariable 
%_ptr_UniformConstant_33 UniformConstant +%texCubeSamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %39 = OpTypeImage %float 2D 0 1 0 1 Unknown + %40 = OpTypeSampledImage %39 +%_ptr_UniformConstant_39 = OpTypePointer UniformConstant %39 + %tex2dArray = OpVariable %_ptr_UniformConstant_39 UniformConstant +%tex2dArraySamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %45 = OpTypeImage %float Cube 0 1 0 1 Unknown + %46 = OpTypeSampledImage %45 +%_ptr_UniformConstant_45 = OpTypePointer UniformConstant %45 +%texCubeArray = OpVariable %_ptr_UniformConstant_45 UniformConstant +%texCubeArraySamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %50 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 + %52 = OpTypeImage %float 2D 1 0 0 1 Unknown + %53 = OpTypeSampledImage %52 +%_ptr_UniformConstant_52 = OpTypePointer UniformConstant %52 + %depth2d = OpVariable %_ptr_UniformConstant_52 UniformConstant +%depth2dSamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %float_1 = OpConstant %float 1 + %58 = OpConstantComposite %v3float %float_0 %float_0 %float_1 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Function_float = OpTypePointer Function %float + %65 = OpTypeImage %float Cube 1 0 0 1 Unknown + %66 = OpTypeSampledImage %65 +%_ptr_UniformConstant_65 = OpTypePointer UniformConstant %65 + %depthCube = OpVariable %_ptr_UniformConstant_65 UniformConstant +%depthCubeSamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %70 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_1 + %74 = OpTypeImage %float 2D 1 1 0 1 Unknown + %75 = OpTypeSampledImage %74 +%_ptr_UniformConstant_74 = OpTypePointer UniformConstant %74 +%depth2dArray = OpVariable %_ptr_UniformConstant_74 UniformConstant +%depth2dArraySamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %82 = OpTypeImage %float Cube 1 1 0 1 Unknown + %83 = OpTypeSampledImage %82 +%_ptr_UniformConstant_82 = OpTypePointer UniformConstant %82 +%depthCubeArray = OpVariable %_ptr_UniformConstant_82 UniformConstant +%depthCubeArraySamp = OpVariable %_ptr_UniformConstant_12 UniformConstant + %97 = OpConstantComposite %v2float %float_0 %float_1 + %98 = OpConstantComposite %v4float %float_0 %float_0 %float_1 %float_1 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %v2int = OpTypeVector %int 2 + %138 = OpConstantComposite %v2int %int_0 %int_0 + %v3int = OpTypeVector %int 3 + %143 = OpConstantComposite %v3int %int_0 %int_0 %int_0 + %149 = OpTypeImage %float Buffer 0 0 0 1 Unknown +%_ptr_UniformConstant_149 = OpTypePointer UniformConstant %149 + %texBuffer = OpVariable %_ptr_UniformConstant_149 UniformConstant + %int_1 = OpConstant %int 1 + %int_2 = OpConstant %int 2 + %int_3 = OpConstant %int 3 + %main = OpFunction %void None %3 + %5 = OpLabel + %c = OpVariable %_ptr_Function_v4float Function + %13 = OpLoad %10 %tex1d + %14 = OpLoad %12 %tex1dSamp + %15 = OpSampledImage %11 %13 %14 + %16 = OpImageSampleImplicitLod %v4float %15 %float_0 + OpStore %c %16 + %19 = OpLoad %17 %tex2d + %20 = OpLoad %12 %tex2dSamp + %21 = OpSampledImage %18 %19 %20 + %24 = OpImageSampleImplicitLod %v4float %21 %23 + OpStore %c %24 + %27 = OpLoad %25 %tex3d + %28 = OpLoad %12 %tex3dSamp + %29 = OpSampledImage %26 %27 %28 + %32 = OpImageSampleImplicitLod %v4float %29 %31 + OpStore %c %32 + %35 = OpLoad %33 %texCube + %36 = OpLoad %12 %texCubeSamp + %37 = OpSampledImage %34 %35 %36 + %38 = OpImageSampleImplicitLod %v4float %37 %31 + OpStore %c %38 + %41 = OpLoad %39 %tex2dArray + %42 = OpLoad %12 
%tex2dArraySamp + %43 = OpSampledImage %40 %41 %42 + %44 = OpImageSampleImplicitLod %v4float %43 %31 + OpStore %c %44 + %47 = OpLoad %45 %texCubeArray + %48 = OpLoad %12 %texCubeArraySamp + %49 = OpSampledImage %46 %47 %48 + %51 = OpImageSampleImplicitLod %v4float %49 %50 + OpStore %c %51 + %54 = OpLoad %52 %depth2d + %55 = OpLoad %12 %depth2dSamp + %56 = OpSampledImage %53 %54 %55 + %59 = OpCompositeExtract %float %58 2 + %60 = OpImageSampleDrefImplicitLod %float %56 %58 %59 + %64 = OpAccessChain %_ptr_Function_float %c %uint_0 + OpStore %64 %60 + %67 = OpLoad %65 %depthCube + %68 = OpLoad %12 %depthCubeSamp + %69 = OpSampledImage %66 %67 %68 + %71 = OpCompositeExtract %float %70 3 + %72 = OpImageSampleDrefImplicitLod %float %69 %70 %71 + %73 = OpAccessChain %_ptr_Function_float %c %uint_0 + OpStore %73 %72 + %76 = OpLoad %74 %depth2dArray + %77 = OpLoad %12 %depth2dArraySamp + %78 = OpSampledImage %75 %76 %77 + %79 = OpCompositeExtract %float %70 3 + %80 = OpImageSampleDrefImplicitLod %float %78 %70 %79 + %81 = OpAccessChain %_ptr_Function_float %c %uint_0 + OpStore %81 %80 + %84 = OpLoad %82 %depthCubeArray + %85 = OpLoad %12 %depthCubeArraySamp + %86 = OpSampledImage %83 %84 %85 + %87 = OpImageSampleDrefImplicitLod %float %86 %50 %float_1 + %88 = OpAccessChain %_ptr_Function_float %c %uint_0 + OpStore %88 %87 + %89 = OpLoad %10 %tex1d + %90 = OpLoad %12 %tex1dSamp + %91 = OpSampledImage %11 %89 %90 + %92 = OpImageSampleProjImplicitLod %v4float %91 %97 + OpStore %c %92 + %93 = OpLoad %17 %tex2d + %94 = OpLoad %12 %tex2dSamp + %95 = OpSampledImage %18 %93 %94 + %96 = OpImageSampleProjImplicitLod %v4float %95 %58 + OpStore %c %96 + %99 = OpLoad %25 %tex3d + %100 = OpLoad %12 %tex3dSamp + %101 = OpSampledImage %26 %99 %100 + %102 = OpImageSampleProjImplicitLod %v4float %101 %70 + OpStore %c %102 + %103 = OpLoad %52 %depth2d + %104 = OpLoad %12 %depth2dSamp + %105 = OpSampledImage %53 %103 %104 + %106 = OpCompositeExtract %float %98 2 + %107 = OpCompositeExtract %float %98 3 + %108 = OpCompositeInsert %v4float %107 %98 2 + %109 = OpImageSampleProjDrefImplicitLod %float %105 %108 %106 + %110 = OpAccessChain %_ptr_Function_float %c %uint_0 + OpStore %110 %109 + %111 = OpLoad %10 %tex1d + %112 = OpLoad %12 %tex1dSamp + %113 = OpSampledImage %11 %111 %112 + %114 = OpImageSampleExplicitLod %v4float %113 %float_0 Lod %float_0 + OpStore %c %114 + %115 = OpLoad %17 %tex2d + %116 = OpLoad %12 %tex2dSamp + %117 = OpSampledImage %18 %115 %116 + %118 = OpImageSampleExplicitLod %v4float %117 %23 Lod %float_0 + OpStore %c %118 + %119 = OpLoad %25 %tex3d + %120 = OpLoad %12 %tex3dSamp + %121 = OpSampledImage %26 %119 %120 + %122 = OpImageSampleExplicitLod %v4float %121 %31 Lod %float_0 + OpStore %c %122 + %123 = OpLoad %33 %texCube + %124 = OpLoad %12 %texCubeSamp + %125 = OpSampledImage %34 %123 %124 + %126 = OpImageSampleExplicitLod %v4float %125 %31 Lod %float_0 + OpStore %c %126 + %127 = OpLoad %39 %tex2dArray + %128 = OpLoad %12 %tex2dArraySamp + %129 = OpSampledImage %40 %127 %128 + %130 = OpImageSampleExplicitLod %v4float %129 %31 Lod %float_0 + OpStore %c %130 + %131 = OpLoad %45 %texCubeArray + %132 = OpLoad %12 %texCubeArraySamp + %133 = OpSampledImage %46 %131 %132 + %134 = OpImageSampleExplicitLod %v4float %133 %50 Lod %float_0 + OpStore %c %134 + %135 = OpLoad %52 %depth2d + %136 = OpLoad %12 %depth2dSamp + %137 = OpSampledImage %53 %135 %136 + %139 = OpCompositeExtract %float %58 2 + %140 = OpImageSampleDrefExplicitLod %float %137 %58 %139 Lod %float_0 + %141 = OpAccessChain 
%_ptr_Function_float %c %uint_0 + OpStore %141 %140 + %142 = OpLoad %10 %tex1d + %144 = OpLoad %12 %tex1dSamp + %145 = OpSampledImage %11 %142 %144 + %146 = OpImageSampleProjExplicitLod %v4float %145 %97 Lod %float_0 + OpStore %c %146 + %147 = OpLoad %17 %tex2d + %148 = OpLoad %12 %tex2dSamp + %150 = OpSampledImage %18 %147 %148 + %151 = OpImageSampleProjExplicitLod %v4float %150 %58 Lod %float_0 + OpStore %c %151 + %152 = OpLoad %25 %tex3d + %153 = OpLoad %12 %tex3dSamp + %154 = OpSampledImage %26 %152 %153 + %155 = OpImageSampleProjExplicitLod %v4float %154 %70 Lod %float_0 + OpStore %c %155 + %156 = OpLoad %52 %depth2d + %157 = OpLoad %12 %depth2dSamp + %158 = OpSampledImage %53 %156 %157 + %159 = OpCompositeExtract %float %98 2 + %160 = OpCompositeExtract %float %98 3 + %161 = OpCompositeInsert %v4float %160 %98 2 + %162 = OpImageSampleProjDrefExplicitLod %float %158 %161 %159 Lod %float_0 + %163 = OpAccessChain %_ptr_Function_float %c %uint_0 + OpStore %163 %162 + %164 = OpLoad %10 %tex1d + %165 = OpImageFetch %v4float %164 %int_0 Lod %int_0 + OpStore %c %165 + %166 = OpLoad %17 %tex2d + %167 = OpImageFetch %v4float %166 %138 Lod %int_0 + OpStore %c %167 + %168 = OpLoad %25 %tex3d + %169 = OpImageFetch %v4float %168 %143 Lod %int_0 + OpStore %c %169 + %170 = OpLoad %39 %tex2dArray + %171 = OpImageFetch %v4float %170 %143 Lod %int_0 + OpStore %c %171 + %172 = OpLoad %149 %texBuffer + %173 = OpImageFetch %v4float %172 %int_0 + OpStore %c %173 + %174 = OpLoad %17 %tex2d + %175 = OpLoad %12 %tex2dSamp + %176 = OpSampledImage %18 %174 %175 + %177 = OpImageGather %v4float %176 %23 %int_0 + OpStore %c %177 + %178 = OpLoad %33 %texCube + %179 = OpLoad %12 %texCubeSamp + %180 = OpSampledImage %34 %178 %179 + %181 = OpImageGather %v4float %180 %31 %int_1 + OpStore %c %181 + %182 = OpLoad %39 %tex2dArray + %183 = OpLoad %12 %tex2dArraySamp + %184 = OpSampledImage %40 %182 %183 + %185 = OpImageGather %v4float %184 %31 %int_2 + OpStore %c %185 + %186 = OpLoad %45 %texCubeArray + %187 = OpLoad %12 %texCubeArraySamp + %188 = OpSampledImage %46 %186 %187 + %189 = OpImageGather %v4float %188 %50 %int_3 + OpStore %c %189 + %190 = OpLoad %52 %depth2d + %191 = OpLoad %12 %depth2dSamp + %192 = OpSampledImage %53 %190 %191 + %193 = OpImageDrefGather %v4float %192 %23 %float_1 + OpStore %c %193 + %194 = OpLoad %65 %depthCube + %195 = OpLoad %12 %depthCubeSamp + %196 = OpSampledImage %66 %194 %195 + %197 = OpImageDrefGather %v4float %196 %31 %float_1 + OpStore %c %197 + %198 = OpLoad %74 %depth2dArray + %199 = OpLoad %12 %depth2dArraySamp + %200 = OpSampledImage %75 %198 %199 + %201 = OpImageDrefGather %v4float %200 %31 %float_1 + OpStore %c %201 + %202 = OpLoad %82 %depthCubeArray + %203 = OpLoad %12 %depthCubeArraySamp + %204 = OpSampledImage %83 %202 %203 + %205 = OpImageDrefGather %v4float %204 %50 %float_1 + OpStore %c %205 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/asm/vert/empty-struct-composite.asm.vert b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/vert/empty-struct-composite.asm.vert new file mode 100644 index 000000000..37a2d8793 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/asm/vert/empty-struct-composite.asm.vert @@ -0,0 +1,37 @@ +; SPIR-V +; Version: 1.1 +; Generator: Google rspirv; 0 +; Bound: 17 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %2 "main" + OpExecutionMode %2 OriginUpperLeft + OpName %Test "Test" + OpName %t "t" + OpName %retvar "retvar" + OpName %main 
"main" + OpName %retvar_0 "retvar" + %void = OpTypeVoid + %6 = OpTypeFunction %void + %Test = OpTypeStruct +%_ptr_Function_Test = OpTypePointer Function %Test +%_ptr_Function_void = OpTypePointer Function %void + %2 = OpFunction %void None %6 + %7 = OpLabel + %t = OpVariable %_ptr_Function_Test Function + %retvar = OpVariable %_ptr_Function_void Function + OpBranch %4 + %4 = OpLabel + %13 = OpCompositeConstruct %Test + OpStore %t %13 + OpReturn + OpFunctionEnd + %main = OpFunction %void None %6 + %15 = OpLabel + %retvar_0 = OpVariable %_ptr_Function_void Function + OpBranch %14 + %14 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/comp/bitfield.comp b/3rdparty/spirv-cross/shaders-msl-no-opt/comp/bitfield.comp new file mode 100644 index 000000000..0cac0b257 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/comp/bitfield.comp @@ -0,0 +1,23 @@ +#version 310 es + +void main() +{ + int signed_value = 0; + uint unsigned_value = 0u; + + int s = bitfieldExtract(signed_value, 5, 20); + uint u = bitfieldExtract(unsigned_value, 6, 21); + s = bitfieldInsert(s, 40, 5, 4); + u = bitfieldInsert(u, 60u, 5, 4); + + u = bitfieldReverse(u); + s = bitfieldReverse(s); + + int v0 = bitCount(u); + int v1 = bitCount(s); + + int v2 = findMSB(u); + int v3 = findMSB(s); + int v4 = findLSB(u); + int v5 = findLSB(s); +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/comp/loop.comp b/3rdparty/spirv-cross/shaders-msl-no-opt/comp/loop.comp new file mode 100644 index 000000000..6d6c32424 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/comp/loop.comp @@ -0,0 +1,98 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idat = in_data[ident]; + + int k = 0; + uint i = 0u; + + if (idat.y == 20.0) + { + do + { + k = k * 2; + i++; + } while (i < ident); + } + + switch (k) + { + case 10: + for (;;) + { + i++; + if (i > 10u) + break; + } + break; + + default: + for (;;) + { + i += 2u; + if (i > 20u) + break; + } + break; + } + + while (k < 10) + { + idat *= 2.0; + k++; + } + + for (uint i = 0u; i < 16u; i++, k++) + for (uint j = 0u; j < 30u; j++) + idat = mvp * idat; + + k = 0; + for (;;) + { + k++; + if (k > 10) + { + k += 2; + } + else + { + k += 3; + continue; + } + + k += 10; + } + + k = 0; + do + { + k++; + } while (k > 10); + + int l = 0; + for (;; l++) + { + if (l == 5) + { + continue; + } + + idat += 1.0; + } + out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/comp/return.comp b/3rdparty/spirv-cross/shaders-msl-no-opt/comp/return.comp new file mode 100644 index 000000000..617f43718 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/comp/return.comp @@ -0,0 +1,33 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + + if (ident == 2u) + { + out_data[ident] = vec4(20.0); + } + else if (ident == 4u) + { + out_data[ident] = vec4(10.0); + return; + } + + for (int i = 0; i < 20; i++) + { + if (i == 10) + break; + + return; + } + + out_data[ident] = vec4(10.0); +} + diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/frag/in_block_assign.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/in_block_assign.frag new file mode 100644 index 000000000..760a3ba2d --- 
/dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/in_block_assign.frag @@ -0,0 +1,16 @@ +#version 450 + +struct VOUT +{ + vec4 a; +}; + +layout(location = 0) in VOUT Clip; +layout(location = 0) out vec4 FragColor; + +void main() +{ + VOUT tmp = Clip; + tmp.a += 1.0; + FragColor = tmp.a; +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-int.swizzle.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-int.swizzle.frag new file mode 100644 index 000000000..4b75d4f31 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-int.swizzle.frag @@ -0,0 +1,53 @@ +#version 450 + +layout(binding = 0) uniform isampler1D tex1d; +layout(binding = 1) uniform isampler2D tex2d; +layout(binding = 2) uniform isampler3D tex3d; +layout(binding = 3) uniform isamplerCube texCube; +layout(binding = 4) uniform isampler2DArray tex2dArray; +layout(binding = 5) uniform isamplerCubeArray texCubeArray; +layout(binding = 6) uniform isamplerBuffer texBuffer; + +void main() +{ + // OpImageSampleImplicitLod + vec4 c = texture(tex1d, 0.0); + c = texture(tex2d, vec2(0.0, 0.0)); + c = texture(tex3d, vec3(0.0, 0.0, 0.0)); + c = texture(texCube, vec3(0.0, 0.0, 0.0)); + c = texture(tex2dArray, vec3(0.0, 0.0, 0.0)); + c = texture(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0)); + + // OpImageSampleProjImplicitLod + c = textureProj(tex1d, vec2(0.0, 1.0)); + c = textureProj(tex2d, vec3(0.0, 0.0, 1.0)); + c = textureProj(tex3d, vec4(0.0, 0.0, 0.0, 1.0)); + + // OpImageSampleExplicitLod + c = textureLod(tex1d, 0.0, 0.0); + c = textureLod(tex2d, vec2(0.0, 0.0), 0.0); + c = textureLod(tex3d, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCube, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(tex2dArray, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 0.0); + + // OpImageSampleProjExplicitLod + c = textureProjLod(tex1d, vec2(0.0, 1.0), 0.0); + c = textureProjLod(tex2d, vec3(0.0, 0.0, 1.0), 0.0); + c = textureProjLod(tex3d, vec4(0.0, 0.0, 0.0, 1.0), 0.0); + + // OpImageFetch + c = texelFetch(tex1d, 0, 0); + c = texelFetch(tex2d, ivec2(0, 0), 0); + c = texelFetch(tex3d, ivec3(0, 0, 0), 0); + c = texelFetch(tex2dArray, ivec3(0, 0, 0), 0); + + // Show that this transformation doesn't apply to Buffer images. 
+ c = texelFetch(texBuffer, 0); + + // OpImageGather + c = textureGather(tex2d, vec2(0.0, 0.0), 0); + c = textureGather(texCube, vec3(0.0, 0.0, 0.0), 1); + c = textureGather(tex2dArray, vec3(0.0, 0.0, 0.0), 2); + c = textureGather(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 3); +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-leaf.swizzle.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-leaf.swizzle.frag new file mode 100644 index 000000000..2b7e9370d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-leaf.swizzle.frag @@ -0,0 +1,86 @@ +#version 450 + +layout(binding = 0) uniform sampler1D tex1d; +layout(binding = 1) uniform sampler2D tex2d; +layout(binding = 2) uniform sampler3D tex3d; +layout(binding = 3) uniform samplerCube texCube; +layout(binding = 4) uniform sampler2DArray tex2dArray; +layout(binding = 5) uniform samplerCubeArray texCubeArray; +layout(binding = 6) uniform samplerBuffer texBuffer; + +layout(binding = 7) uniform sampler2DShadow depth2d; +layout(binding = 8) uniform samplerCubeShadow depthCube; +layout(binding = 9) uniform sampler2DArrayShadow depth2dArray; +layout(binding = 10) uniform samplerCubeArrayShadow depthCubeArray; + +vec4 doSwizzle() +{ + // OpImageSampleImplicitLod + vec4 c = texture(tex1d, 0.0); + c = texture(tex2d, vec2(0.0, 0.0)); + c = texture(tex3d, vec3(0.0, 0.0, 0.0)); + c = texture(texCube, vec3(0.0, 0.0, 0.0)); + c = texture(tex2dArray, vec3(0.0, 0.0, 0.0)); + c = texture(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0)); + + // OpImageSampleDrefImplicitLod + c.r = texture(depth2d, vec3(0.0, 0.0, 1.0)); + c.r = texture(depthCube, vec4(0.0, 0.0, 0.0, 1.0)); + c.r = texture(depth2dArray, vec4(0.0, 0.0, 0.0, 1.0)); + c.r = texture(depthCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 1.0); + + // OpImageSampleProjImplicitLod + c = textureProj(tex1d, vec2(0.0, 1.0)); + c = textureProj(tex2d, vec3(0.0, 0.0, 1.0)); + c = textureProj(tex3d, vec4(0.0, 0.0, 0.0, 1.0)); + + // OpImageSampleProjDrefImplicitLod + c.r = textureProj(depth2d, vec4(0.0, 0.0, 1.0, 1.0)); + + // OpImageSampleExplicitLod + c = textureLod(tex1d, 0.0, 0.0); + c = textureLod(tex2d, vec2(0.0, 0.0), 0.0); + c = textureLod(tex3d, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCube, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(tex2dArray, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 0.0); + + // OpImageSampleDrefExplicitLod + c.r = textureLod(depth2d, vec3(0.0, 0.0, 1.0), 0.0); + + // OpImageSampleProjExplicitLod + c = textureProjLod(tex1d, vec2(0.0, 1.0), 0.0); + c = textureProjLod(tex2d, vec3(0.0, 0.0, 1.0), 0.0); + c = textureProjLod(tex3d, vec4(0.0, 0.0, 0.0, 1.0), 0.0); + + // OpImageSampleProjDrefExplicitLod + c.r = textureProjLod(depth2d, vec4(0.0, 0.0, 1.0, 1.0), 0.0); + + // OpImageFetch + c = texelFetch(tex1d, 0, 0); + c = texelFetch(tex2d, ivec2(0, 0), 0); + c = texelFetch(tex3d, ivec3(0, 0, 0), 0); + c = texelFetch(tex2dArray, ivec3(0, 0, 0), 0); + + // Show that this transformation doesn't apply to Buffer images. 
+ c = texelFetch(texBuffer, 0); + + // OpImageGather + c = textureGather(tex2d, vec2(0.0, 0.0), 0); + c = textureGather(texCube, vec3(0.0, 0.0, 0.0), 1); + c = textureGather(tex2dArray, vec3(0.0, 0.0, 0.0), 2); + c = textureGather(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 3); + + // OpImageDrefGather + c = textureGather(depth2d, vec2(0.0, 0.0), 1.0); + c = textureGather(depthCube, vec3(0.0, 0.0, 0.0), 1.0); + c = textureGather(depth2dArray, vec3(0.0, 0.0, 0.0), 1.0); + c = textureGather(depthCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 1.0); + + return c; +} + +void main() +{ + vec4 c = doSwizzle(); +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-uint.swizzle.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-uint.swizzle.frag new file mode 100644 index 000000000..5f0acf65f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access-uint.swizzle.frag @@ -0,0 +1,53 @@ +#version 450 + +layout(binding = 0) uniform usampler1D tex1d; +layout(binding = 1) uniform usampler2D tex2d; +layout(binding = 2) uniform usampler3D tex3d; +layout(binding = 3) uniform usamplerCube texCube; +layout(binding = 4) uniform usampler2DArray tex2dArray; +layout(binding = 5) uniform usamplerCubeArray texCubeArray; +layout(binding = 6) uniform usamplerBuffer texBuffer; + +void main() +{ + // OpImageSampleImplicitLod + vec4 c = texture(tex1d, 0.0); + c = texture(tex2d, vec2(0.0, 0.0)); + c = texture(tex3d, vec3(0.0, 0.0, 0.0)); + c = texture(texCube, vec3(0.0, 0.0, 0.0)); + c = texture(tex2dArray, vec3(0.0, 0.0, 0.0)); + c = texture(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0)); + + // OpImageSampleProjImplicitLod + c = textureProj(tex1d, vec2(0.0, 1.0)); + c = textureProj(tex2d, vec3(0.0, 0.0, 1.0)); + c = textureProj(tex3d, vec4(0.0, 0.0, 0.0, 1.0)); + + // OpImageSampleExplicitLod + c = textureLod(tex1d, 0.0, 0.0); + c = textureLod(tex2d, vec2(0.0, 0.0), 0.0); + c = textureLod(tex3d, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCube, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(tex2dArray, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 0.0); + + // OpImageSampleProjExplicitLod + c = textureProjLod(tex1d, vec2(0.0, 1.0), 0.0); + c = textureProjLod(tex2d, vec3(0.0, 0.0, 1.0), 0.0); + c = textureProjLod(tex3d, vec4(0.0, 0.0, 0.0, 1.0), 0.0); + + // OpImageFetch + c = texelFetch(tex1d, 0, 0); + c = texelFetch(tex2d, ivec2(0, 0), 0); + c = texelFetch(tex3d, ivec3(0, 0, 0), 0); + c = texelFetch(tex2dArray, ivec3(0, 0, 0), 0); + + // Show that this transformation doesn't apply to Buffer images. 
+ c = texelFetch(texBuffer, 0); + + // OpImageGather + c = textureGather(tex2d, vec2(0.0, 0.0), 0); + c = textureGather(texCube, vec3(0.0, 0.0, 0.0), 1); + c = textureGather(tex2dArray, vec3(0.0, 0.0, 0.0), 2); + c = textureGather(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 3); +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access.swizzle.frag b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access.swizzle.frag new file mode 100644 index 000000000..b09ebed77 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/frag/texture-access.swizzle.frag @@ -0,0 +1,79 @@ +#version 450 + +layout(binding = 0) uniform sampler1D tex1d; +layout(binding = 1) uniform sampler2D tex2d; +layout(binding = 2) uniform sampler3D tex3d; +layout(binding = 3) uniform samplerCube texCube; +layout(binding = 4) uniform sampler2DArray tex2dArray; +layout(binding = 5) uniform samplerCubeArray texCubeArray; +layout(binding = 6) uniform samplerBuffer texBuffer; + +layout(binding = 7) uniform sampler2DShadow depth2d; +layout(binding = 8) uniform samplerCubeShadow depthCube; +layout(binding = 9) uniform sampler2DArrayShadow depth2dArray; +layout(binding = 10) uniform samplerCubeArrayShadow depthCubeArray; + +void main() +{ + // OpImageSampleImplicitLod + vec4 c = texture(tex1d, 0.0); + c = texture(tex2d, vec2(0.0, 0.0)); + c = texture(tex3d, vec3(0.0, 0.0, 0.0)); + c = texture(texCube, vec3(0.0, 0.0, 0.0)); + c = texture(tex2dArray, vec3(0.0, 0.0, 0.0)); + c = texture(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0)); + + // OpImageSampleDrefImplicitLod + c.r = texture(depth2d, vec3(0.0, 0.0, 1.0)); + c.r = texture(depthCube, vec4(0.0, 0.0, 0.0, 1.0)); + c.r = texture(depth2dArray, vec4(0.0, 0.0, 0.0, 1.0)); + c.r = texture(depthCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 1.0); + + // OpImageSampleProjImplicitLod + c = textureProj(tex1d, vec2(0.0, 1.0)); + c = textureProj(tex2d, vec3(0.0, 0.0, 1.0)); + c = textureProj(tex3d, vec4(0.0, 0.0, 0.0, 1.0)); + + // OpImageSampleProjDrefImplicitLod + c.r = textureProj(depth2d, vec4(0.0, 0.0, 1.0, 1.0)); + + // OpImageSampleExplicitLod + c = textureLod(tex1d, 0.0, 0.0); + c = textureLod(tex2d, vec2(0.0, 0.0), 0.0); + c = textureLod(tex3d, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCube, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(tex2dArray, vec3(0.0, 0.0, 0.0), 0.0); + c = textureLod(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 0.0); + + // OpImageSampleDrefExplicitLod + c.r = textureLod(depth2d, vec3(0.0, 0.0, 1.0), 0.0); + + // OpImageSampleProjExplicitLod + c = textureProjLod(tex1d, vec2(0.0, 1.0), 0.0); + c = textureProjLod(tex2d, vec3(0.0, 0.0, 1.0), 0.0); + c = textureProjLod(tex3d, vec4(0.0, 0.0, 0.0, 1.0), 0.0); + + // OpImageSampleProjDrefExplicitLod + c.r = textureProjLod(depth2d, vec4(0.0, 0.0, 1.0, 1.0), 0.0); + + // OpImageFetch + c = texelFetch(tex1d, 0, 0); + c = texelFetch(tex2d, ivec2(0, 0), 0); + c = texelFetch(tex3d, ivec3(0, 0, 0), 0); + c = texelFetch(tex2dArray, ivec3(0, 0, 0), 0); + + // Show that this transformation doesn't apply to Buffer images. 
+ c = texelFetch(texBuffer, 0); + + // OpImageGather + c = textureGather(tex2d, vec2(0.0, 0.0), 0); + c = textureGather(texCube, vec3(0.0, 0.0, 0.0), 1); + c = textureGather(tex2dArray, vec3(0.0, 0.0, 0.0), 2); + c = textureGather(texCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 3); + + // OpImageDrefGather + c = textureGather(depth2d, vec2(0.0, 0.0), 1.0); + c = textureGather(depthCube, vec3(0.0, 0.0, 0.0), 1.0); + c = textureGather(depth2dArray, vec3(0.0, 0.0, 0.0), 1.0); + c = textureGather(depthCubeArray, vec4(0.0, 0.0, 0.0, 0.0), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl-no-opt/vert/functions_nested.vert b/3rdparty/spirv-cross/shaders-msl-no-opt/vert/functions_nested.vert new file mode 100644 index 000000000..2eec5ac55 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl-no-opt/vert/functions_nested.vert @@ -0,0 +1,132 @@ +#version 450 +#extension GL_ARB_separate_shader_objects : enable + +layout(std140, set = 0, binding = 0) uniform VertexBuffer +{ + mat4 scale_offset_mat; + uint vertex_base_index; + ivec4 input_attributes[16]; +}; +layout(set=0, binding=3) uniform usamplerBuffer buff_in_1; +layout(set=0, binding=4) uniform usamplerBuffer buff_in_2; + +layout(location=10) out vec4 back_color; +layout(location=0) out vec4 tc0; + +layout(std140, set=0, binding = 1) uniform VertexConstantsBuffer +{ + vec4 vc[16]; +}; + +struct attr_desc +{ + int type; + int attribute_size; + int starting_offset; + int stride; + int swap_bytes; + int is_volatile; +}; + +uint get_bits(uvec4 v, int swap) +{ + if (swap != 0) return (v.w | v.z << 8 | v.y << 16 | v.x << 24); + return (v.x | v.y << 8 | v.z << 16 | v.w << 24); +} + +vec4 fetch_attr(attr_desc desc, int vertex_id, usamplerBuffer input_stream) +{ + vec4 result = vec4(0.0f, 0.0f, 0.0f, 1.0f); + uvec4 tmp; + uint bits; + bool reverse_order = false; + + int first_byte = (vertex_id * desc.stride) + desc.starting_offset; + for (int n = 0; n < 4; n++) + { + if (n == desc.attribute_size) break; + + switch (desc.type) + { + case 0: + //signed normalized 16-bit + tmp.x = texelFetch(input_stream, first_byte++).x; + tmp.y = texelFetch(input_stream, first_byte++).x; + result[n] = get_bits(tmp, desc.swap_bytes); + break; + case 1: + //float + tmp.x = texelFetch(input_stream, first_byte++).x; + tmp.y = texelFetch(input_stream, first_byte++).x; + tmp.z = texelFetch(input_stream, first_byte++).x; + tmp.w = texelFetch(input_stream, first_byte++).x; + result[n] = uintBitsToFloat(get_bits(tmp, desc.swap_bytes)); + break; + case 2: + //unsigned byte + result[n] = texelFetch(input_stream, first_byte++).x; + reverse_order = (desc.swap_bytes != 0); + break; + } + } + + return (reverse_order)? 
result.wzyx: result; +} + +attr_desc fetch_desc(int location) +{ + attr_desc result; + int attribute_flags = input_attributes[location].w; + result.type = input_attributes[location].x; + result.attribute_size = input_attributes[location].y; + result.starting_offset = input_attributes[location].z; + result.stride = attribute_flags & 0xFF; + result.swap_bytes = (attribute_flags >> 8) & 0x1; + result.is_volatile = (attribute_flags >> 9) & 0x1; + return result; +} + +vec4 read_location(int location) +{ + attr_desc desc = fetch_desc(location); + + int vertex_id = gl_VertexIndex - int(vertex_base_index); + if (desc.is_volatile != 0) + return fetch_attr(desc, vertex_id, buff_in_2); + else + return fetch_attr(desc, vertex_id, buff_in_1); +} + +void vs_adjust(inout vec4 dst_reg0, inout vec4 dst_reg1, inout vec4 dst_reg7) +{ + vec4 tmp0; + vec4 tmp1; + vec4 in_diff_color= read_location(3); + vec4 in_pos= read_location(0); + vec4 in_tc0= read_location(8); + dst_reg1 = (in_diff_color * vc[13]); + tmp0.x = vec4(dot(vec4(in_pos.xyzx.xyz, 1.0), vc[4])).x; + tmp0.y = vec4(dot(vec4(in_pos.xyzx.xyz, 1.0), vc[5])).y; + tmp0.z = vec4(dot(vec4(in_pos.xyzx.xyz, 1.0), vc[6])).z; + tmp1.xy = in_tc0.xyxx.xy; + tmp1.z = vc[15].xxxx.z; + dst_reg7.y = vec4(dot(vec4(tmp1.xyzx.xyz, 1.0), vc[8])).y; + dst_reg7.x = vec4(dot(vec4(tmp1.xyzx.xyz, 1.0), vc[7])).x; + dst_reg0.y = vec4(dot(vec4(tmp0.xyzx.xyz, 1.0), vc[1])).y; + dst_reg0.x = vec4(dot(vec4(tmp0.xyzx.xyz, 1.0), vc[0])).x; +} + +void main () +{ + vec4 dst_reg0= vec4(0.0f, 0.0f, 0.0f, 1.0f); + vec4 dst_reg1= vec4(0.0, 0.0, 0.0, 0.0); + vec4 dst_reg7= vec4(0.0, 0.0, 0.0, 0.0); + + vs_adjust(dst_reg0, dst_reg1, dst_reg7); + + gl_Position = dst_reg0; + back_color = dst_reg1; + tc0 = dst_reg7; + gl_Position = gl_Position * scale_offset_mat; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..a87b93188 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %3 "main" %15 + OpExecutionMode %3 LocalSize 4 1 1 + OpName %3 "main" + OpName %8 "u0" + OpName %9 "u0_counters" + OpMemberName %9 0 "c" + OpName %11 "u0_counter" + OpName %15 "vThreadID" + OpName %19 "r0" + OpDecorate %8 DescriptorSet 0 + OpDecorate %8 Binding 0 + OpMemberDecorate %9 0 Offset 0 + OpDecorate %9 BufferBlock + OpDecorate %11 DescriptorSet 1 + OpDecorate %11 Binding 0 + OpDecorate %15 BuiltIn GlobalInvocationId + %1 = OpTypeVoid + %2 = OpTypeFunction %1 + %5 = OpTypeInt 32 0 + %6 = OpTypeImage %5 Buffer 0 0 0 2 R32ui + %7 = OpTypePointer UniformConstant %6 + %8 = OpVariable %7 UniformConstant + %9 = OpTypeStruct %5 + %10 = OpTypePointer Uniform %9 + %11 = OpVariable %10 Uniform + %12 = OpTypeInt 32 1 + %13 = OpTypeVector %12 3 + %14 = OpTypePointer Input %13 + %15 = OpVariable %14 Input + %16 = OpTypeFloat 32 + %17 = OpTypeVector %16 4 + %18 = OpTypePointer Function %17 + %20 = OpTypePointer Uniform %5 + %21 = OpConstant %5 0 + %23 = OpConstant %5 1 + %26 = OpTypePointer Function %16 + %33 = OpConstant %12 0 + %34 = OpConstant %5 2 + %37 = OpTypePointer Input %12 + %41 = OpTypeVector %5 4 + %3 = OpFunction %1 None %2 + %4 = OpLabel + %19 = OpVariable %18 Function + %22 = OpAccessChain %20 
%11 %21 + %24 = OpAtomicIDecrement %5 %22 %23 %21 + %25 = OpBitcast %16 %24 + %27 = OpInBoundsAccessChain %26 %19 %21 + OpStore %27 %25 + %28 = OpLoad %6 %8 + %29 = OpInBoundsAccessChain %26 %19 %21 + %30 = OpLoad %16 %29 + %31 = OpBitcast %12 %30 + %32 = OpIMul %5 %31 %23 + %35 = OpShiftRightLogical %5 %33 %34 + %36 = OpIAdd %5 %32 %35 + %38 = OpInBoundsAccessChain %37 %15 %21 + %39 = OpLoad %12 %38 + %40 = OpBitcast %5 %39 + %42 = OpCompositeConstruct %41 %40 %40 %40 %40 + OpImageWrite %28 %36 %42 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..3acb7115f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %3 "main" %15 + OpExecutionMode %3 LocalSize 4 1 1 + OpName %3 "main" + OpName %8 "u0" + OpName %9 "u0_counters" + OpMemberName %9 0 "c" + OpName %11 "u0_counter" + OpName %15 "vThreadID" + OpName %19 "r0" + OpDecorate %8 DescriptorSet 0 + OpDecorate %8 Binding 0 + OpMemberDecorate %9 0 Offset 0 + OpDecorate %9 BufferBlock + OpDecorate %11 DescriptorSet 1 + OpDecorate %11 Binding 0 + OpDecorate %15 BuiltIn GlobalInvocationId + %1 = OpTypeVoid + %2 = OpTypeFunction %1 + %5 = OpTypeInt 32 0 + %6 = OpTypeImage %5 Buffer 0 0 0 2 R32ui + %7 = OpTypePointer UniformConstant %6 + %8 = OpVariable %7 UniformConstant + %9 = OpTypeStruct %5 + %10 = OpTypePointer Uniform %9 + %11 = OpVariable %10 Uniform + %12 = OpTypeInt 32 1 + %13 = OpTypeVector %12 3 + %14 = OpTypePointer Input %13 + %15 = OpVariable %14 Input + %16 = OpTypeFloat 32 + %17 = OpTypeVector %16 4 + %18 = OpTypePointer Function %17 + %20 = OpTypePointer Uniform %5 + %21 = OpConstant %5 0 + %23 = OpConstant %5 1 + %26 = OpTypePointer Function %16 + %33 = OpConstant %12 0 + %34 = OpConstant %5 2 + %37 = OpTypePointer Input %12 + %41 = OpTypeVector %5 4 + %3 = OpFunction %1 None %2 + %4 = OpLabel + %19 = OpVariable %18 Function + %22 = OpAccessChain %20 %11 %21 + %24 = OpAtomicIIncrement %5 %22 %23 %21 + %25 = OpBitcast %16 %24 + %27 = OpInBoundsAccessChain %26 %19 %21 + OpStore %27 %25 + %28 = OpLoad %6 %8 + %29 = OpInBoundsAccessChain %26 %19 %21 + %30 = OpLoad %16 %29 + %31 = OpBitcast %12 %30 + %32 = OpIMul %5 %31 %23 + %35 = OpShiftRightLogical %5 %33 %34 + %36 = OpIAdd %5 %32 %35 + %38 = OpInBoundsAccessChain %37 %15 %21 + %39 = OpLoad %12 %38 + %40 = OpBitcast %5 %39 + %42 = OpCompositeConstruct %41 %40 %40 %40 %40 + OpImageWrite %28 %36 %42 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_iadd.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_iadd.asm.comp new file mode 100644 index 000000000..3b31ab285 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_iadd.asm.comp @@ -0,0 +1,79 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 
Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %inputs Restrict + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + OpDecorate %outputs Restrict + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of IAdd + %result_iadd_0 = OpIAdd %uvec4 %input0 %input1 + %result_iadd_1 = OpIAdd %uvec4 %input1 %input0 + %result_iadd_2 = OpIAdd %uvec4 %input0 %input0 + %result_iadd_3 = OpIAdd %uvec4 %input1 %input1 + %result_iadd_4 = OpIAdd %ivec4 %input0 %input0 + %result_iadd_5 = OpIAdd %ivec4 %input1 %input1 + %result_iadd_6 = OpIAdd %ivec4 %input0 %input1 + %result_iadd_7 = OpIAdd %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_sar.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_sar.asm.comp new file mode 100644 index 000000000..64f19fc34 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_sar.asm.comp @@ -0,0 +1,77 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = 
OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of ShiftRightArithmetic + %result_iadd_0 = OpShiftRightArithmetic %uvec4 %input0 %input1 + %result_iadd_1 = OpShiftRightArithmetic %uvec4 %input1 %input0 + %result_iadd_2 = OpShiftRightArithmetic %uvec4 %input0 %input0 + %result_iadd_3 = OpShiftRightArithmetic %uvec4 %input1 %input1 + %result_iadd_4 = OpShiftRightArithmetic %ivec4 %input0 %input0 + %result_iadd_5 = OpShiftRightArithmetic %ivec4 %input1 %input1 + %result_iadd_6 = OpShiftRightArithmetic %ivec4 %input0 %input1 + %result_iadd_7 = OpShiftRightArithmetic %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_sdiv.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_sdiv.asm.comp new file mode 100644 index 000000000..ab73ec83d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_sdiv.asm.comp @@ -0,0 +1,77 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs 
%zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of SDiv + %result_iadd_0 = OpSDiv %uvec4 %input0 %input1 + %result_iadd_1 = OpSDiv %uvec4 %input1 %input0 + %result_iadd_2 = OpSDiv %uvec4 %input0 %input0 + %result_iadd_3 = OpSDiv %uvec4 %input1 %input1 + %result_iadd_4 = OpSDiv %ivec4 %input0 %input0 + %result_iadd_5 = OpSDiv %ivec4 %input1 %input1 + %result_iadd_6 = OpSDiv %ivec4 %input0 %input1 + %result_iadd_7 = OpSDiv %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_slr.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_slr.asm.comp new file mode 100644 index 000000000..6741f5cb5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/bitcast_slr.asm.comp @@ -0,0 +1,77 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of ShiftRightLogical + %result_iadd_0 = OpShiftRightLogical %uvec4 %input0 %input1 + %result_iadd_1 = OpShiftRightLogical %uvec4 %input1 %input0 + %result_iadd_2 = OpShiftRightLogical %uvec4 %input0 %input0 + %result_iadd_3 = OpShiftRightLogical %uvec4 %input1 %input1 + %result_iadd_4 = OpShiftRightLogical %ivec4 %input0 %input0 + %result_iadd_5 = OpShiftRightLogical %ivec4 
%input1 %input1 + %result_iadd_6 = OpShiftRightLogical %ivec4 %input0 %input1 + %result_iadd_7 = OpShiftRightLogical %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..85f6cc041 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,119 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 59 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %Foo "A" + OpMemberName %Foo 0 "a" + OpMemberName %Foo 1 "b" + OpName %A "A" + OpMemberName %A 0 "Data" + OpName %C1 "C1" + OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" + OpName %Foo_0 "A" + OpMemberName %Foo_0 0 "a" + OpMemberName %Foo_0 1 "b" + OpName %A_0 "A" + OpMemberName %A_0 0 "Data" + OpName %C2 "C2" + OpName %B "B" + OpMemberName %B 0 "Data" + OpName %C3 "C3" + OpName %B_0 "B" + OpMemberName %B_0 0 "Data" + OpName %C4 "C4" + OpMemberDecorate %Foo 0 Offset 0 + OpMemberDecorate %Foo 1 Offset 4 + OpDecorate %_runtimearr_Foo ArrayStride 8 + OpMemberDecorate %A 0 Offset 0 + OpDecorate %A BufferBlock + OpDecorate %C1 DescriptorSet 0 + OpDecorate %C1 Binding 1 + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + OpMemberDecorate %Foo_0 0 Offset 0 + OpMemberDecorate %Foo_0 1 Offset 4 + OpDecorate %_arr_Foo_0_uint_1024 ArrayStride 16 + OpMemberDecorate %A_0 0 Offset 0 + OpDecorate %A_0 Block + OpDecorate %C2 DescriptorSet 0 + OpDecorate %C2 Binding 2 + OpDecorate %_runtimearr_Foo_0 ArrayStride 8 + OpMemberDecorate %B 0 Offset 0 + OpDecorate %B BufferBlock + OpDecorate %C3 DescriptorSet 0 + OpDecorate %C3 Binding 0 + OpDecorate %_arr_Foo_0_uint_1024_0 ArrayStride 16 + OpMemberDecorate %B_0 0 Offset 0 + OpDecorate %B_0 Block + OpDecorate %C4 DescriptorSet 0 + OpDecorate %C4 Binding 3 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %Foo = OpTypeStruct %int %int +%_runtimearr_Foo = OpTypeRuntimeArray %Foo + %A = OpTypeStruct %_runtimearr_Foo +%_ptr_Uniform_A = OpTypePointer Uniform %A + %C1 = OpVariable %_ptr_Uniform_A Uniform + %int_0 = OpConstant %int 0 + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input + %uint_0 = OpConstant %uint 0 +%_ptr_Input_uint = OpTypePointer Input %uint + %Foo_0 = OpTypeStruct %int %int + %uint_1024 = OpConstant %uint 1024 +%_arr_Foo_0_uint_1024 = OpTypeArray %Foo_0 %uint_1024 + %A_0 = OpTypeStruct %_arr_Foo_0_uint_1024 +%_ptr_Uniform_A_0 = OpTypePointer Uniform %A_0 + %C2 = OpVariable %_ptr_Uniform_A_0 Uniform +%_ptr_Uniform_Foo_0 = OpTypePointer Uniform %Foo_0 +%_ptr_Uniform_Foo = OpTypePointer Uniform %Foo +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_1 = OpConstant %int 1 +%_runtimearr_Foo_0 = OpTypeRuntimeArray %Foo + %B = 
OpTypeStruct %_runtimearr_Foo_0 +%_ptr_Uniform_B = OpTypePointer Uniform %B + %C3 = OpVariable %_ptr_Uniform_B Uniform +%_arr_Foo_0_uint_1024_0 = OpTypeArray %Foo_0 %uint_1024 + %B_0 = OpTypeStruct %_arr_Foo_0_uint_1024_0 +%_ptr_Uniform_B_0 = OpTypePointer Uniform %B_0 + %C4 = OpVariable %_ptr_Uniform_B_0 Uniform + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %20 = OpLoad %uint %19 + %27 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %28 = OpLoad %uint %27 + %30 = OpAccessChain %_ptr_Uniform_Foo_0 %C2 %int_0 %28 + %31 = OpLoad %Foo_0 %30 + %33 = OpAccessChain %_ptr_Uniform_Foo %C1 %int_0 %20 + %34 = OpCompositeExtract %int %31 0 + %36 = OpAccessChain %_ptr_Uniform_int %33 %int_0 + OpStore %36 %34 + %37 = OpCompositeExtract %int %31 1 + %39 = OpAccessChain %_ptr_Uniform_int %33 %int_1 + OpStore %39 %37 + %44 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %45 = OpLoad %uint %44 + %50 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %51 = OpLoad %uint %50 + %52 = OpAccessChain %_ptr_Uniform_Foo_0 %C4 %int_0 %51 + %53 = OpLoad %Foo_0 %52 + %54 = OpAccessChain %_ptr_Uniform_Foo %C3 %int_0 %45 + %55 = OpCompositeExtract %int %53 0 + %56 = OpAccessChain %_ptr_Uniform_int %54 %int_0 + OpStore %56 %55 + %57 = OpCompositeExtract %int %53 1 + %58 = OpAccessChain %_ptr_Uniform_int %54 %int_1 + OpStore %58 %57 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp new file mode 100644 index 000000000..400690b04 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/buffer-write-relative-addr.asm.comp @@ -0,0 +1,93 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 59 +; Schema: 0 + OpCapability Shader + OpCapability UniformBufferArrayDynamicIndexing + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %vThreadIDInGroup + OpExecutionMode %main LocalSize 4 1 1 + OpName %main "main" + OpName %cb5_struct "cb5_struct" + OpName %cb0_5 "cb0_5" + OpName %u0 "u0" + OpName %vThreadIDInGroup "vThreadIDInGroup" + OpName %r0 "r0" + OpDecorate %_arr_v4float_uint_5 ArrayStride 16 + OpDecorate %cb5_struct Block + OpMemberDecorate %cb5_struct 0 Offset 0 + OpDecorate %cb0_5 DescriptorSet 0 + OpDecorate %cb0_5 Binding 1 + OpDecorate %u0 DescriptorSet 0 + OpDecorate %u0 Binding 0 + OpDecorate %u0 NonReadable + OpDecorate %vThreadIDInGroup BuiltIn LocalInvocationId + %void = OpTypeVoid + %2 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %uint = OpTypeInt 32 0 + %uint_5 = OpConstant %uint 5 +%_arr_v4float_uint_5 = OpTypeArray %v4float %uint_5 + %cb5_struct = OpTypeStruct %_arr_v4float_uint_5 +%_ptr_Uniform_cb5_struct = OpTypePointer Uniform %cb5_struct + %cb0_5 = OpVariable %_ptr_Uniform_cb5_struct Uniform + %13 = OpTypeImage %uint Buffer 0 0 0 2 R32ui +%_ptr_UniformConstant_13 = OpTypePointer UniformConstant %13 + %u0 = OpVariable %_ptr_UniformConstant_13 UniformConstant + %int = OpTypeInt 32 1 + %v3int = OpTypeVector %int 3 +%_ptr_Input_v3int = OpTypePointer Input %v3int +%vThreadIDInGroup = OpVariable %_ptr_Input_v3int Input +%_ptr_Function_v4float = OpTypePointer Function %v4float +%_ptr_Input_int = OpTypePointer Input %int + %uint_0 = OpConstant %uint 0 + %int_4 = OpConstant %int 4 +%_ptr_Function_float 
= OpTypePointer Function %float + %uint_1 = OpConstant %uint 1 + %uint_2 = OpConstant %uint 2 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %v4uint = OpTypeVector %uint 4 + %uint_3 = OpConstant %uint 3 + %main = OpFunction %void None %2 + %4 = OpLabel + %r0 = OpVariable %_ptr_Function_v4float Function + %24 = OpInBoundsAccessChain %_ptr_Input_int %vThreadIDInGroup %uint_0 + %25 = OpLoad %int %24 + %27 = OpShiftLeftLogical %int %25 %int_4 + %28 = OpBitcast %float %27 + %30 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_0 + OpStore %30 %28 + %31 = OpInBoundsAccessChain %_ptr_Input_int %vThreadIDInGroup %uint_0 + %32 = OpLoad %int %31 + %33 = OpBitcast %float %32 + %35 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_1 + OpStore %35 %33 + %36 = OpLoad %13 %u0 + %37 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_0 + %38 = OpLoad %float %37 + %39 = OpBitcast %uint %38 + %41 = OpShiftRightLogical %uint %39 %uint_2 + %42 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_1 + %43 = OpLoad %float %42 + %44 = OpBitcast %int %43 + %45 = OpIAdd %uint %44 %uint_1 + %47 = OpAccessChain %_ptr_Uniform_v4float %cb0_5 %uint_0 %45 + %48 = OpLoad %v4float %47 + %50 = OpBitcast %v4uint %48 + %51 = OpVectorShuffle %v4uint %50 %50 0 0 0 0 + OpImageWrite %36 %41 %51 + %52 = OpVectorShuffle %v4uint %50 %50 1 1 1 1 + %53 = OpIAdd %uint %41 %uint_1 + OpImageWrite %36 %53 %52 + %54 = OpVectorShuffle %v4uint %50 %50 2 2 2 2 + %55 = OpIAdd %uint %41 %uint_2 + OpImageWrite %36 %55 %54 + %56 = OpVectorShuffle %v4uint %50 %50 3 3 3 3 + %58 = OpIAdd %uint %41 %uint_3 + OpImageWrite %36 %58 %56 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/buffer-write.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/buffer-write.asm.comp new file mode 100644 index 000000000..aa7b32089 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/buffer-write.asm.comp @@ -0,0 +1,58 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 63 +; Schema: 0 + OpCapability Shader + OpCapability ImageBuffer + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %group_id %group_index + OpExecutionMode %main LocalSize 32 1 1 + OpSource HLSL 500 + OpName %main "main" + OpName %cb "cb" + OpMemberName %cb 0 "value" + OpName %_ "" + OpName %buffer "buffer" + OpName %group_id "group_id" + OpName %group_index "group_index" + OpMemberDecorate %cb 0 Offset 0 + OpDecorate %cb Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 7 + OpDecorate %buffer DescriptorSet 0 + OpDecorate %group_id BuiltIn WorkgroupId + OpDecorate %group_index BuiltIn LocalInvocationIndex + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 + %uint_32 = OpConstant %uint 32 + %float = OpTypeFloat 32 + %cb = OpTypeStruct %float +%_ptr_Uniform_cb = OpTypePointer Uniform %cb + %_ = OpVariable %_ptr_Uniform_cb Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %34 = OpTypeImage %float Buffer 0 0 0 2 R32f +%_ptr_UniformConstant_34 = OpTypePointer UniformConstant %34 + %buffer = OpVariable %_ptr_UniformConstant_34 UniformConstant +%_ptr_Input_v3uint = OpTypePointer Input %v3uint + %group_id = OpVariable %_ptr_Input_v3uint Input +%_ptr_Input_uint = OpTypePointer Input %uint +%group_index = OpVariable %_ptr_Input_uint Input + %main = OpFunction %void None %3 + %5 = OpLabel + %43 = OpLoad %v3uint 
%group_id + %47 = OpLoad %uint %group_index + %56 = OpCompositeExtract %uint %43 0 + %57 = OpIMul %uint %uint_32 %56 + %59 = OpIAdd %uint %57 %47 + %60 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + %61 = OpLoad %float %60 + %62 = OpLoad %34 %buffer + OpImageWrite %62 %59 %61 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..78b1dc74e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,102 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 61 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %id_1 + OpExecutionMode %main LocalSize 1 1 1 + OpSource HLSL 500 + OpName %main "main" + OpName %Load_u1_ "Load(u1;" + OpName %size "size" + OpName %_main_vu3_ "@main(vu3;" + OpName %id "id" + OpName %data "data" + OpName %byteAddrTemp "byteAddrTemp" + OpName %ssbo "ssbo" + OpMemberName %ssbo 0 "@data" + OpName %ssbo_0 "ssbo" + OpName %param "param" + OpName %id_0 "id" + OpName %id_1 "id" + OpName %param_0 "param" + OpDecorate %_runtimearr_uint ArrayStride 4 + OpMemberDecorate %ssbo 0 NonWritable + OpMemberDecorate %ssbo 0 Offset 0 + OpDecorate %ssbo BufferBlock + OpDecorate %ssbo_0 DescriptorSet 0 + OpDecorate %ssbo_0 Binding 1 + OpDecorate %id_1 BuiltIn GlobalInvocationId + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %8 = OpTypeFunction %void %_ptr_Function_uint + %v3uint = OpTypeVector %uint 3 +%_ptr_Function_v3uint = OpTypePointer Function %v3uint + %14 = OpTypeFunction %void %_ptr_Function_v3uint + %v4uint = OpTypeVector %uint 4 +%_ptr_Function_v4uint = OpTypePointer Function %v4uint + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_2 = OpConstant %int 2 +%_runtimearr_uint = OpTypeRuntimeArray %uint + %ssbo = OpTypeStruct %_runtimearr_uint +%_ptr_Uniform_ssbo = OpTypePointer Uniform %ssbo + %ssbo_0 = OpVariable %_ptr_Uniform_ssbo Uniform + %int_0 = OpConstant %int 0 +%_ptr_Uniform_uint = OpTypePointer Uniform %uint + %int_1 = OpConstant %int 1 + %int_3 = OpConstant %int 3 + %uint_4 = OpConstant %uint 4 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint + %id_1 = OpVariable %_ptr_Input_v3uint Input + %main = OpFunction %void None %3 + %5 = OpLabel + %id_0 = OpVariable %_ptr_Function_v3uint Function + %param_0 = OpVariable %_ptr_Function_v3uint Function + %57 = OpLoad %v3uint %id_1 + OpStore %id_0 %57 + %59 = OpLoad %v3uint %id_0 + OpStore %param_0 %59 + %60 = OpFunctionCall %void %_main_vu3_ %param_0 + OpReturn + OpFunctionEnd + %Load_u1_ = OpFunction %void None %8 + %size = OpFunctionParameter %_ptr_Function_uint + %11 = OpLabel + %data = OpVariable %_ptr_Function_v4uint Function +%byteAddrTemp = OpVariable %_ptr_Function_int Function + %24 = OpLoad %uint %size + %26 = OpShiftRightLogical %int %24 %int_2 + OpStore %byteAddrTemp %26 + %32 = OpLoad %int %byteAddrTemp + %34 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %32 + %35 = OpLoad %uint %34 + %36 = OpLoad %int %byteAddrTemp + %38 = OpIAdd %int %36 %int_1 + %39 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %38 + %40 = OpLoad %uint %39 + %41 = OpLoad %int %byteAddrTemp + %42 = OpIAdd %int %41 %int_2 + %43 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 
%int_0 %42 + %44 = OpLoad %uint %43 + %45 = OpLoad %int %byteAddrTemp + %47 = OpIAdd %int %45 %int_3 + %48 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %47 + %49 = OpLoad %uint %48 + %50 = OpCompositeConstruct %v4uint %35 %40 %44 %49 + OpStore %data %50 + OpReturn + OpFunctionEnd + %_main_vu3_ = OpFunction %void None %14 + %id = OpFunctionParameter %_ptr_Function_v3uint + %17 = OpLabel + %param = OpVariable %_ptr_Function_uint Function + OpStore %param %uint_4 + %53 = OpFunctionCall %void %Load_u1_ %param + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/multiple-entry.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/multiple-entry.asm.comp new file mode 100644 index 000000000..0cfb5543d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/multiple-entry.asm.comp @@ -0,0 +1,97 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %func_alt "main2" %frag_in %frag_out + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %inputs Restrict + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + OpDecorate %outputs Restrict + OpDecorate %frag_in Location 0 + OpDecorate %frag_out Location 0 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %float = OpTypeFloat 32 + %vec4 = OpTypeVector %float 4 + %vec4_input_ptr = OpTypePointer Input %vec4 + %vec4_output_ptr = OpTypePointer Output %vec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %frag_in = OpVariable %vec4_input_ptr Input + %frag_out = OpVariable %vec4_output_ptr Output + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of IAdd + %result_iadd_0 = OpIAdd %uvec4 %input0 %input1 + %result_iadd_1 = OpIAdd %uvec4 %input1 %input0 + %result_iadd_2 = OpIAdd %uvec4 %input0 %input0 + %result_iadd_3 = OpIAdd %uvec4 %input1 %input1 + %result_iadd_4 = OpIAdd %ivec4 %input0 %input0 + %result_iadd_5 = OpIAdd %ivec4 %input1 %input1 + %result_iadd_6 = OpIAdd %ivec4 %input0 %input1 + %result_iadd_7 = OpIAdd %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 
+ OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd + + %func_alt = OpFunction %void None %main_func + %block_alt = OpLabel + %frag_input_value = OpLoad %vec4 %frag_in + OpStore %frag_out %frag_input_value + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/quantize.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/quantize.asm.comp new file mode 100644 index 000000000..f5afc6570 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/quantize.asm.comp @@ -0,0 +1,67 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 38 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %4 "main" + OpExecutionMode %4 LocalSize 1 1 1 + OpSource ESSL 310 + OpName %4 "main" + OpName %10 "SSBO0" + OpMemberName %10 0 "scalar" + OpMemberName %10 1 "vec2_val" + OpMemberName %10 2 "vec3_val" + OpMemberName %10 3 "vec4_val" + OpName %12 "" + OpMemberDecorate %10 0 Offset 0 + OpMemberDecorate %10 1 Offset 8 + OpMemberDecorate %10 2 Offset 16 + OpMemberDecorate %10 3 Offset 32 + OpDecorate %10 BufferBlock + OpDecorate %12 DescriptorSet 0 + OpDecorate %12 Binding 0 + %2 = OpTypeVoid + %3 = OpTypeFunction %2 + %6 = OpTypeFloat 32 + %7 = OpTypeVector %6 2 + %8 = OpTypeVector %6 3 + %9 = OpTypeVector %6 4 + %10 = OpTypeStruct %6 %7 %8 %9 + %11 = OpTypePointer Uniform %10 + %12 = OpVariable %11 Uniform + %13 = OpTypeInt 32 1 + %14 = OpConstant %13 0 + %15 = OpTypePointer Uniform %6 + %20 = OpConstant %13 1 + %21 = OpTypePointer Uniform %7 + %26 = OpConstant %13 2 + %27 = OpTypePointer Uniform %8 + %32 = OpConstant %13 3 + %33 = OpTypePointer Uniform %9 + %4 = OpFunction %2 None %3 + %5 = OpLabel + %16 = OpAccessChain %15 %12 %14 + %17 = OpLoad %6 %16 + %18 = OpQuantizeToF16 %6 %17 + %19 = OpAccessChain %15 %12 %14 + OpStore %19 %18 + %22 = OpAccessChain %21 %12 %20 + %23 = OpLoad %7 %22 + %24 = OpQuantizeToF16 %7 %23 + %25 = OpAccessChain %21 %12 %20 + OpStore %25 %24 + %28 = OpAccessChain %27 %12 %26 + %29 = OpLoad %8 %28 + %30 = OpQuantizeToF16 %8 %29 + %31 = OpAccessChain %27 %12 %26 + OpStore %31 %30 + %34 = OpAccessChain %33 %12 %32 + %35 = OpLoad %9 %34 + %36 = OpQuantizeToF16 %9 %35 + %37 = OpAccessChain %33 %12 %32 + OpStore %37 %36 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/relaxed-block-layout.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/relaxed-block-layout.asm.comp new file mode 100644 index 000000000..dd909426d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/relaxed-block-layout.asm.comp @@ -0,0 +1,108 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 63 +; Schema: 0 + OpCapability Shader + OpCapability StorageBuffer16BitAccess + OpCapability StorageBuffer8BitAccess + OpCapability UniformAndStorageBuffer8BitAccess + OpExtension "SPV_KHR_8bit_storage" + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_LocalInvocationID %gl_GlobalInvocationID %gl_WorkGroupID %gl_NumWorkGroups + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpSourceExtension "GL_EXT_shader_16bit_storage" + OpSourceExtension 
"GL_EXT_shader_8bit_storage" + OpName %main "main" + OpName %foo "foo" + OpMemberName %foo 0 "bar" + OpMemberName %foo 1 "baz" + OpMemberName %foo 2 "quux" + OpMemberName %foo 3 "blah" + OpMemberName %foo 4 "wibble" + OpName %_ "" + OpName %gl_LocalInvocationID "gl_LocalInvocationID" + OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" + OpName %gl_WorkGroupID "gl_WorkGroupID" + OpName %gl_NumWorkGroups "gl_NumWorkGroups" + OpMemberDecorate %foo 0 Offset 0 + OpMemberDecorate %foo 1 Offset 4 + OpMemberDecorate %foo 2 Offset 16 + OpMemberDecorate %foo 3 Offset 17 + OpMemberDecorate %foo 4 Offset 22 + OpDecorate %foo BufferBlock + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %gl_LocalInvocationID BuiltIn LocalInvocationId + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + OpDecorate %gl_WorkGroupID BuiltIn WorkgroupId + OpDecorate %gl_NumWorkGroups BuiltIn NumWorkgroups + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 + %float = OpTypeFloat 32 + %v3float = OpTypeVector %float 3 + %uchar = OpTypeInt 8 0 + %v4uchar = OpTypeVector %uchar 4 + %half = OpTypeFloat 16 + %v2half = OpTypeVector %half 2 + %foo = OpTypeStruct %uint %v3float %uchar %v4uchar %v2half +%_ptr_Uniform_foo = OpTypePointer Uniform %foo + %_ = OpVariable %_ptr_Uniform_foo Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_LocalInvocationID = OpVariable %_ptr_Input_v3uint Input + %uint_0 = OpConstant %uint 0 +%_ptr_Input_uint = OpTypePointer Input %uint +%_ptr_Uniform_uint = OpTypePointer Uniform %uint + %int_1 = OpConstant %int 1 +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input +%_ptr_Uniform_v3float = OpTypePointer Uniform %v3float + %int_3 = OpConstant %int 3 +%_ptr_Uniform_v4uchar = OpTypePointer Uniform %v4uchar + %v4uint = OpTypeVector %uint 4 +%gl_WorkGroupID = OpVariable %_ptr_Input_v3uint Input + %int_4 = OpConstant %int 4 +%_ptr_Uniform_v2half = OpTypePointer Uniform %v2half + %v2float = OpTypeVector %float 2 +%gl_NumWorkGroups = OpVariable %_ptr_Input_v3uint Input + %v2uint = OpTypeVector %uint 2 + %main = OpFunction %void None %3 + %5 = OpLabel + %23 = OpAccessChain %_ptr_Input_uint %gl_LocalInvocationID %uint_0 + %24 = OpLoad %uint %23 + %26 = OpAccessChain %_ptr_Uniform_uint %_ %int_0 + OpStore %26 %24 + %29 = OpLoad %v3uint %gl_GlobalInvocationID + %30 = OpConvertUToF %v3float %29 + %32 = OpAccessChain %_ptr_Uniform_v3float %_ %int_1 + OpStore %32 %30 + %35 = OpAccessChain %_ptr_Uniform_v4uchar %_ %int_3 + %36 = OpLoad %v4uchar %35 + %38 = OpUConvert %v4uint %36 + %39 = OpVectorShuffle %v3uint %38 %38 0 1 2 + %41 = OpLoad %v3uint %gl_WorkGroupID + %42 = OpIAdd %v3uint %39 %41 + %43 = OpCompositeExtract %uint %42 0 + %44 = OpCompositeExtract %uint %42 1 + %45 = OpCompositeExtract %uint %42 2 + %46 = OpCompositeConstruct %v4uint %43 %44 %45 %uint_0 + %47 = OpUConvert %v4uchar %46 + %48 = OpAccessChain %_ptr_Uniform_v4uchar %_ %int_3 + OpStore %48 %47 + %51 = OpAccessChain %_ptr_Uniform_v2half %_ %int_4 + %52 = OpLoad %v2half %51 + %54 = OpFConvert %v2float %52 + %57 = OpLoad %v3uint %gl_NumWorkGroups + %58 = OpVectorShuffle %v2uint %57 %57 0 1 + %59 = OpConvertUToF %v2float %58 + %60 = OpFMul %v2float %54 %59 + %61 = OpFConvert %v2half %60 + %62 = OpAccessChain %_ptr_Uniform_v2half %_ %int_4 + OpStore %62 %61 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp 
b/3rdparty/spirv-cross/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp new file mode 100644 index 000000000..188e3fec3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/specialization-constant-workgroup.asm.comp @@ -0,0 +1,47 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 24 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 20 1 + OpSource ESSL 310 + OpName %main "main" + OpName %SSBO "SSBO" + OpMemberName %SSBO 0 "a" + OpName %_ "" + OpMemberDecorate %SSBO 0 Offset 0 + OpDecorate %SSBO BufferBlock + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %19 SpecId 10 + OpDecorate %21 SpecId 12 + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %SSBO = OpTypeStruct %float +%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO + %_ = OpVariable %_ptr_Uniform_SSBO Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %uint = OpTypeInt 32 0 + %19 = OpSpecConstant %uint 9 + %uint_20 = OpConstant %uint 20 + %21 = OpSpecConstant %uint 4 + %v3uint = OpTypeVector %uint 3 +%gl_WorkGroupSize = OpSpecConstantComposite %v3uint %19 %uint_20 %21 + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + %15 = OpLoad %float %14 + %16 = OpFAdd %float %15 %float_1 + %17 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + OpStore %17 %16 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp new file mode 100644 index 000000000..bdf2027a8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/storage-buffer-basic.invalid.asm.comp @@ -0,0 +1,58 @@ +; SPIR-V +; Version: 1.0 +; Generator: Codeplay; 0 +; Bound: 31 +; Schema: 0 + OpCapability Shader + OpCapability VariablePointers + OpExtension "SPV_KHR_storage_buffer_storage_class" + OpExtension "SPV_KHR_variable_pointers" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %22 "main" %gl_WorkGroupID + OpSource OpenCL_C 120 + OpDecorate %15 SpecId 0 + ;OpDecorate %16 SpecId 1 + OpDecorate %17 SpecId 2 + OpDecorate %_runtimearr_float ArrayStride 4 + OpMemberDecorate %_struct_4 0 Offset 0 + OpDecorate %_struct_4 Block + OpDecorate %gl_WorkGroupID BuiltIn WorkgroupId + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + OpDecorate %20 DescriptorSet 0 + OpDecorate %20 Binding 0 + OpDecorate %21 DescriptorSet 0 + OpDecorate %21 Binding 1 + %float = OpTypeFloat 32 + %uint = OpTypeInt 32 0 + %size1 = OpConstant %uint 1 +%_ptr_StorageBuffer_float = OpTypePointer StorageBuffer %float +%_runtimearr_float = OpTypeArray %float %size1 ; Runtime arrays do not work yet in MSL. 
+ %_struct_4 = OpTypeStruct %_runtimearr_float +%_ptr_StorageBuffer__struct_4 = OpTypePointer StorageBuffer %_struct_4 + %void = OpTypeVoid + %8 = OpTypeFunction %void + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%_ptr_Input_uint = OpTypePointer Input %uint +%_ptr_Private_v3uint = OpTypePointer Private %v3uint + %uint_0 = OpConstant %uint 0 +%gl_WorkGroupID = OpVariable %_ptr_Input_v3uint Input + %15 = OpSpecConstant %uint 1 + %16 = OpConstant %uint 2 + %17 = OpSpecConstant %uint 3 +%gl_WorkGroupSize = OpSpecConstantComposite %v3uint %15 %16 %17 + %19 = OpVariable %_ptr_Private_v3uint Private %gl_WorkGroupSize + %20 = OpVariable %_ptr_StorageBuffer__struct_4 StorageBuffer + %21 = OpVariable %_ptr_StorageBuffer__struct_4 StorageBuffer + %22 = OpFunction %void None %8 + %23 = OpLabel + %24 = OpAccessChain %_ptr_Input_uint %gl_WorkGroupID %uint_0 + %25 = OpLoad %uint %24 + %26 = OpAccessChain %_ptr_StorageBuffer_float %21 %uint_0 %25 + %27 = OpLoad %float %26 + %28 = OpAccessChain %_ptr_StorageBuffer_float %20 %uint_0 %25 + %29 = OpLoad %float %28 + %30 = OpFAdd %float %27 %29 + OpStore %28 %30 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/variable-pointers-2.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/variable-pointers-2.asm.comp new file mode 100644 index 000000000..308162f0b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/variable-pointers-2.asm.comp @@ -0,0 +1,117 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos SPIR-V Tools Assembler; 0 +; Bound: 65 +; Schema: 0 + OpCapability Shader + OpCapability VariablePointers + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID %gl_LocalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %foo "foo" + OpMemberName %foo 0 "a" + OpMemberName %foo 1 "b" + OpMemberName %foo 2 "c" + OpName %bar "bar" + OpMemberName %bar 0 "d" + OpName %buf "buf" + OpName %cb "cb" + OpName %select_buffer "select_buffer" + OpName %select_input "select_input" + OpName %a "a" + OpMemberDecorate %foo 0 Offset 0 + OpMemberDecorate %foo 1 Offset 512 + OpMemberDecorate %foo 2 Offset 520 + OpMemberDecorate %bar 0 Offset 0 + OpDecorate %foo Block + OpDecorate %bar Block + OpDecorate %buf DescriptorSet 0 + OpDecorate %buf Binding 0 + OpDecorate %cb DescriptorSet 0 + OpDecorate %cb Binding 1 + OpDecorate %_ptr_StorageBuffer_int ArrayStride 4 + OpDecorate %_arr_int_uint_128 ArrayStride 4 + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + OpDecorate %gl_LocalInvocationID BuiltIn LocalInvocationId + %void = OpTypeVoid + %15 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input +%gl_LocalInvocationID = OpVariable %_ptr_Input_v3uint Input + %uint_128 = OpConstant %uint 128 +%_arr_int_uint_128 = OpTypeArray %int %uint_128 + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %foo = OpTypeStruct %_arr_int_uint_128 %uint %v2float +%_ptr_StorageBuffer_foo = OpTypePointer StorageBuffer %foo + %buf = OpVariable %_ptr_StorageBuffer_foo StorageBuffer + %bar = OpTypeStruct %int +%_ptr_Uniform_bar = OpTypePointer Uniform %bar + %cb = OpVariable %_ptr_Uniform_bar Uniform + %uint_0 = OpConstant %uint 0 + %bool = OpTypeBool +%_ptr_Uniform_int = OpTypePointer Uniform %int + %28 = 
OpTypeFunction %_ptr_StorageBuffer_foo %_ptr_StorageBuffer_foo + %int_0 = OpConstant %int 0 + %uint_1 = OpConstant %uint 1 + %31 = OpConstantNull %_ptr_StorageBuffer_foo + %32 = OpTypeFunction %_ptr_Input_v3uint +%_ptr_StorageBuffer_int = OpTypePointer StorageBuffer %int +%_ptr_Function__ptr_StorageBuffer_foo = OpTypePointer Function %_ptr_StorageBuffer_foo +%select_buffer = OpFunction %_ptr_StorageBuffer_foo None %28 + %a = OpFunctionParameter %_ptr_StorageBuffer_foo + %33 = OpLabel + %34 = OpAccessChain %_ptr_Uniform_int %cb %uint_0 + %35 = OpLoad %int %34 + %36 = OpINotEqual %bool %35 %int_0 + %37 = OpSelect %_ptr_StorageBuffer_foo %36 %a %31 + OpReturnValue %37 + OpFunctionEnd +%select_input = OpFunction %_ptr_Input_v3uint None %32 + %38 = OpLabel + %39 = OpAccessChain %_ptr_Uniform_int %cb %uint_0 + %40 = OpLoad %int %39 + %41 = OpINotEqual %bool %40 %int_0 + %42 = OpSelect %_ptr_Input_v3uint %41 %gl_GlobalInvocationID %gl_LocalInvocationID + OpReturnValue %42 + OpFunctionEnd + %main = OpFunction %void None %15 + %43 = OpLabel + %65 = OpVariable %_ptr_Function__ptr_StorageBuffer_foo Function + %44 = OpFunctionCall %_ptr_StorageBuffer_foo %select_buffer %buf + OpStore %65 %44 + %45 = OpFunctionCall %_ptr_Input_v3uint %select_input + %66 = OpLoad %_ptr_StorageBuffer_foo %65 + %46 = OpAccessChain %_ptr_StorageBuffer_int %66 %uint_0 %uint_0 + %47 = OpAccessChain %_ptr_StorageBuffer_int %buf %uint_0 %uint_0 + OpBranch %48 + %48 = OpLabel + %49 = OpPhi %_ptr_StorageBuffer_int %46 %43 %50 %51 + %52 = OpPhi %_ptr_StorageBuffer_int %47 %43 %53 %51 + %54 = OpLoad %int %49 + %55 = OpLoad %int %52 + %56 = OpINotEqual %bool %54 %55 + OpLoopMerge %58 %51 None + OpBranchConditional %56 %57 %58 + %57 = OpLabel + %59 = OpIAdd %int %54 %55 + %60 = OpLoad %v3uint %45 + %61 = OpCompositeExtract %uint %60 0 + %62 = OpBitcast %int %61 + %63 = OpIAdd %int %59 %62 + OpStore %49 %63 + OpStore %52 %63 + OpBranch %51 + %51 = OpLabel + %50 = OpPtrAccessChain %_ptr_StorageBuffer_int %49 %uint_1 + %53 = OpPtrAccessChain %_ptr_StorageBuffer_int %52 %uint_1 + OpBranch %48 + %58 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp new file mode 100644 index 000000000..3dcb04f02 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/variable-pointers-store-forwarding.asm.comp @@ -0,0 +1,75 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos SPIR-V Tools Assembler; 0 +; Bound: 40 +; Schema: 0 + OpCapability Shader + OpCapability VariablePointers + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %foo "foo" + OpMemberName %foo 0 "a" + OpName %bar "bar" + OpMemberName %bar 0 "b" + OpName %x "x" + OpName %y "y" + OpName %a "a" + OpName %b "b" + OpMemberDecorate %foo 0 Offset 0 + OpMemberDecorate %bar 0 Offset 0 + OpDecorate %foo Block + OpDecorate %bar Block + OpDecorate %x DescriptorSet 0 + OpDecorate %x Binding 0 + OpDecorate %y DescriptorSet 0 + OpDecorate %y Binding 1 + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + %void = OpTypeVoid + %11 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input + %foo 
= OpTypeStruct %int +%_ptr_StorageBuffer_foo = OpTypePointer StorageBuffer %foo + %x = OpVariable %_ptr_StorageBuffer_foo StorageBuffer + %bar = OpTypeStruct %int +%_ptr_StorageBuffer_bar = OpTypePointer StorageBuffer %bar + %y = OpVariable %_ptr_StorageBuffer_bar StorageBuffer + %uint_0 = OpConstant %uint 0 + %int_0 = OpConstant %int 0 + %bool = OpTypeBool +%_ptr_StorageBuffer_int = OpTypePointer StorageBuffer %int + %22 = OpTypeFunction %_ptr_StorageBuffer_int %_ptr_StorageBuffer_foo %_ptr_StorageBuffer_bar +%_ptr_Function__ptr_StorageBuffer_int = OpTypePointer Function %_ptr_StorageBuffer_int + %24 = OpFunction %_ptr_StorageBuffer_int None %22 + %a = OpFunctionParameter %_ptr_StorageBuffer_foo + %b = OpFunctionParameter %_ptr_StorageBuffer_bar + %25 = OpLabel + %26 = OpLoad %v3uint %gl_GlobalInvocationID + %27 = OpCompositeExtract %uint %26 0 + %28 = OpINotEqual %bool %27 %uint_0 + %29 = OpAccessChain %_ptr_StorageBuffer_int %a %uint_0 + %30 = OpAccessChain %_ptr_StorageBuffer_int %b %uint_0 + %31 = OpSelect %_ptr_StorageBuffer_int %28 %29 %30 + OpReturnValue %31 + OpFunctionEnd + %main = OpFunction %void None %11 + %32 = OpLabel + %33 = OpVariable %_ptr_Function__ptr_StorageBuffer_int Function + %34 = OpFunctionCall %_ptr_StorageBuffer_int %24 %x %y + OpStore %33 %34 + %35 = OpLoad %_ptr_StorageBuffer_int %33 + %36 = OpAccessChain %_ptr_StorageBuffer_int %x %uint_0 + %37 = OpLoad %int %36 + OpStore %35 %int_0 + %38 = OpIAdd %int %37 %37 + %39 = OpAccessChain %_ptr_StorageBuffer_int %y %uint_0 + OpStore %39 %38 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp new file mode 100644 index 000000000..c01432b5d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/vector-builtin-type-cast-func.asm.comp @@ -0,0 +1,147 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 90 +; Schema: 0 + OpCapability Shader + OpCapability ImageQuery + OpCapability StorageImageWriteWithoutFormat + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_LocalInvocationID + OpExecutionMode %main LocalSize 16 16 1 + OpSource GLSL 450 + OpName %main "main" + OpName %get_texcoord_vi2_vi2_ "get_texcoord(vi2;vi2;" + OpName %base "base" + OpName %index "index" + OpName %gl_LocalInvocationID "gl_LocalInvocationID" + OpName %r0 "r0" + OpName %u0 "u0" + OpName %i "i" + OpName %j "j" + OpName %param "param" + OpName %param_0 "param" + OpName %cb1_struct "cb1_struct" + OpMemberName %cb1_struct 0 "_m0" + OpName %cb0_1 "cb0_1" + OpDecorate %gl_LocalInvocationID BuiltIn LocalInvocationId + OpDecorate %u0 DescriptorSet 0 + OpDecorate %u0 Binding 1 + OpDecorate %u0 NonReadable + OpDecorate %_arr_v4float_uint_1 ArrayStride 16 + OpMemberDecorate %cb1_struct 0 Offset 0 + OpDecorate %cb1_struct Block + OpDecorate %cb0_1 DescriptorSet 0 + OpDecorate %cb0_1 Binding 0 + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 +%_ptr_Function_v2int = OpTypePointer Function %v2int + %9 = OpTypeFunction %v2int %_ptr_Function_v2int %_ptr_Function_v2int + %v3int = OpTypeVector %int 3 +%_ptr_Input_v3int = OpTypePointer Input %v3int +%gl_LocalInvocationID = OpVariable %_ptr_Input_v3int Input + %uint = OpTypeInt 32 0 + %v2uint = OpTypeVector %uint 2 + %float = OpTypeFloat 32 + %30 = OpTypeImage 
%float 2D 0 0 0 2 Unknown +%_ptr_UniformConstant_30 = OpTypePointer UniformConstant %30 + %u0 = OpVariable %_ptr_UniformConstant_30 UniformConstant + %uint_4 = OpConstant %uint 4 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %uint_1 = OpConstant %uint 1 + %bool = OpTypeBool + %uint_0 = OpConstant %uint 0 + %v4float = OpTypeVector %float 4 +%_arr_v4float_uint_1 = OpTypeArray %v4float %uint_1 + %cb1_struct = OpTypeStruct %_arr_v4float_uint_1 +%_ptr_Uniform_cb1_struct = OpTypePointer Uniform %cb1_struct + %cb0_1 = OpVariable %_ptr_Uniform_cb1_struct Uniform +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_1 = OpConstant %int 1 + %uint_16 = OpConstant %uint 16 + %v3uint = OpTypeVector %uint 3 +%gl_WorkGroupSize = OpConstantComposite %v3uint %uint_16 %uint_16 %uint_1 + %main = OpFunction %void None %3 + %5 = OpLabel + %r0 = OpVariable %_ptr_Function_v2int Function + %i = OpVariable %_ptr_Function_int Function + %j = OpVariable %_ptr_Function_int Function + %param = OpVariable %_ptr_Function_v2int Function + %param_0 = OpVariable %_ptr_Function_v2int Function + %33 = OpLoad %30 %u0 + %34 = OpImageQuerySize %v2int %33 + %36 = OpCompositeConstruct %v2uint %uint_4 %uint_4 + %37 = OpShiftRightArithmetic %v2int %34 %36 + %38 = OpCompositeExtract %int %37 0 + %39 = OpCompositeExtract %int %37 1 + %40 = OpCompositeConstruct %v2int %38 %39 + OpStore %r0 %40 + OpStore %i %int_0 + OpBranch %44 + %44 = OpLabel + OpLoopMerge %46 %47 None + OpBranch %48 + %48 = OpLabel + %49 = OpLoad %int %i + %51 = OpAccessChain %_ptr_Function_int %r0 %uint_1 + %52 = OpLoad %int %51 + %54 = OpSLessThan %bool %49 %52 + OpBranchConditional %54 %45 %46 + %45 = OpLabel + OpStore %j %int_0 + OpBranch %56 + %56 = OpLabel + OpLoopMerge %58 %59 None + OpBranch %60 + %60 = OpLabel + %61 = OpLoad %int %j + %63 = OpAccessChain %_ptr_Function_int %r0 %uint_0 + %64 = OpLoad %int %63 + %65 = OpSLessThan %bool %61 %64 + OpBranchConditional %65 %57 %58 + %57 = OpLabel + %66 = OpLoad %30 %u0 + %67 = OpLoad %int %i + %68 = OpLoad %int %j + %69 = OpCompositeConstruct %v2int %67 %68 + %71 = OpLoad %v2int %r0 + OpStore %param %71 + OpStore %param_0 %69 + %73 = OpFunctionCall %v2int %get_texcoord_vi2_vi2_ %param %param_0 + %80 = OpAccessChain %_ptr_Uniform_v4float %cb0_1 %int_0 %int_0 + %81 = OpLoad %v4float %80 + %82 = OpVectorShuffle %v4float %81 %81 0 0 0 0 + OpImageWrite %66 %73 %82 + OpBranch %59 + %59 = OpLabel + %83 = OpLoad %int %j + %85 = OpIAdd %int %83 %int_1 + OpStore %j %85 + OpBranch %56 + %58 = OpLabel + OpBranch %47 + %47 = OpLabel + %86 = OpLoad %int %i + %87 = OpIAdd %int %86 %int_1 + OpStore %i %87 + OpBranch %44 + %46 = OpLabel + OpReturn + OpFunctionEnd +%get_texcoord_vi2_vi2_ = OpFunction %v2int None %9 + %base = OpFunctionParameter %_ptr_Function_v2int + %index = OpFunctionParameter %_ptr_Function_v2int + %13 = OpLabel + %14 = OpLoad %v2int %base + %20 = OpLoad %v3int %gl_LocalInvocationID + %21 = OpVectorShuffle %v2int %20 %20 0 1 + %23 = OpIMul %v2int %14 %21 + %24 = OpLoad %v2int %index + %25 = OpIAdd %v2int %23 %24 + OpReturnValue %25 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp b/3rdparty/spirv-cross/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp new file mode 100644 index 000000000..e79354026 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/comp/vector-builtin-type-cast.asm.comp @@ -0,0 +1,128 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 78 +; 
Schema: 0 + OpCapability Shader + OpCapability ImageQuery + OpCapability StorageImageWriteWithoutFormat + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_LocalInvocationID + OpExecutionMode %main LocalSize 16 16 1 + OpSource GLSL 450 + OpName %main "main" + OpName %r0 "r0" + OpName %u0 "u0" + OpName %i "i" + OpName %j "j" + OpName %gl_LocalInvocationID "gl_LocalInvocationID" + OpName %cb1_struct "cb1_struct" + OpMemberName %cb1_struct 0 "_m0" + OpName %cb0_1 "cb0_1" + OpDecorate %u0 DescriptorSet 0 + OpDecorate %u0 Binding 1 + OpDecorate %u0 NonReadable + OpDecorate %gl_LocalInvocationID BuiltIn LocalInvocationId + OpDecorate %_arr_v4float_uint_1 ArrayStride 16 + OpMemberDecorate %cb1_struct 0 Offset 0 + OpDecorate %cb1_struct Block + OpDecorate %cb0_1 DescriptorSet 0 + OpDecorate %cb0_1 Binding 0 + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 +%_ptr_Function_v2int = OpTypePointer Function %v2int + %float = OpTypeFloat 32 + %11 = OpTypeImage %float 2D 0 0 0 2 Unknown +%_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 + %u0 = OpVariable %_ptr_UniformConstant_11 UniformConstant + %uint = OpTypeInt 32 0 + %uint_4 = OpConstant %uint 4 + %v2uint = OpTypeVector %uint 2 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %uint_1 = OpConstant %uint 1 + %bool = OpTypeBool + %uint_0 = OpConstant %uint 0 + %v3int = OpTypeVector %int 3 +%_ptr_Input_v3int = OpTypePointer Input %v3int +%gl_LocalInvocationID = OpVariable %_ptr_Input_v3int Input + %v4float = OpTypeVector %float 4 +%_arr_v4float_uint_1 = OpTypeArray %v4float %uint_1 + %cb1_struct = OpTypeStruct %_arr_v4float_uint_1 +%_ptr_Uniform_cb1_struct = OpTypePointer Uniform %cb1_struct + %cb0_1 = OpVariable %_ptr_Uniform_cb1_struct Uniform +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_1 = OpConstant %int 1 + %uint_16 = OpConstant %uint 16 + %v3uint = OpTypeVector %uint 3 +%gl_WorkGroupSize = OpConstantComposite %v3uint %uint_16 %uint_16 %uint_1 + %main = OpFunction %void None %3 + %5 = OpLabel + %r0 = OpVariable %_ptr_Function_v2int Function + %i = OpVariable %_ptr_Function_int Function + %j = OpVariable %_ptr_Function_int Function + %14 = OpLoad %11 %u0 + %15 = OpImageQuerySize %v2int %14 + %19 = OpCompositeConstruct %v2uint %uint_4 %uint_4 + %20 = OpShiftRightArithmetic %v2int %15 %19 + %21 = OpCompositeExtract %int %20 0 + %22 = OpCompositeExtract %int %20 1 + %23 = OpCompositeConstruct %v2int %21 %22 + OpStore %r0 %23 + OpStore %i %int_0 + OpBranch %27 + %27 = OpLabel + OpLoopMerge %29 %30 None + OpBranch %31 + %31 = OpLabel + %32 = OpLoad %int %i + %34 = OpAccessChain %_ptr_Function_int %r0 %uint_1 + %35 = OpLoad %int %34 + %37 = OpSLessThan %bool %32 %35 + OpBranchConditional %37 %28 %29 + %28 = OpLabel + OpStore %j %int_0 + OpBranch %39 + %39 = OpLabel + OpLoopMerge %41 %42 None + OpBranch %43 + %43 = OpLabel + %44 = OpLoad %int %j + %46 = OpAccessChain %_ptr_Function_int %r0 %uint_0 + %47 = OpLoad %int %46 + %48 = OpSLessThan %bool %44 %47 + OpBranchConditional %48 %40 %41 + %40 = OpLabel + %49 = OpLoad %11 %u0 + %50 = OpLoad %v2int %r0 + %54 = OpLoad %v3int %gl_LocalInvocationID + %55 = OpVectorShuffle %v2int %54 %54 0 1 + %57 = OpIMul %v2int %50 %55 + %58 = OpLoad %int %i + %59 = OpLoad %int %j + %60 = OpCompositeConstruct %v2int %58 %59 + %61 = OpIAdd %v2int %57 %60 + %68 = OpAccessChain %_ptr_Uniform_v4float 
%cb0_1 %int_0 %int_0 + %69 = OpLoad %v4float %68 + %70 = OpVectorShuffle %v4float %69 %69 0 0 0 0 + OpImageWrite %49 %61 %70 + OpBranch %42 + %42 = OpLabel + %71 = OpLoad %int %j + %73 = OpIAdd %int %71 %int_1 + OpStore %j %73 + OpBranch %39 + %41 = OpLabel + OpBranch %30 + %30 = OpLabel + %74 = OpLoad %int %i + %75 = OpIAdd %int %74 %int_1 + OpStore %i %75 + OpBranch %27 + %29 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag new file mode 100644 index 000000000..ba2f95b23 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/combined-sampler-reuse.asm.frag @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vUV + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uTex "uTex" + OpName %uSampler "uSampler" + OpName %vUV "vUV" + OpDecorate %FragColor Location 0 + OpDecorate %uTex DescriptorSet 0 + OpDecorate %uTex Binding 1 + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 0 + OpDecorate %vUV Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_10 = OpTypePointer UniformConstant %10 + %uTex = OpVariable %_ptr_UniformConstant_10 UniformConstant + %14 = OpTypeSampler +%_ptr_UniformConstant_14 = OpTypePointer UniformConstant %14 + %uSampler = OpVariable %_ptr_UniformConstant_14 UniformConstant + %18 = OpTypeSampledImage %10 + %v2float = OpTypeVector %float 2 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %vUV = OpVariable %_ptr_Input_v2float Input + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 + %int_1 = OpConstant %int 1 + %32 = OpConstantComposite %v2int %int_1 %int_1 + %main = OpFunction %void None %3 + %5 = OpLabel + %13 = OpLoad %10 %uTex + %17 = OpLoad %14 %uSampler + %19 = OpSampledImage %18 %13 %17 + %23 = OpLoad %v2float %vUV + %24 = OpImageSampleImplicitLod %v4float %19 %23 + OpStore %FragColor %24 + %28 = OpLoad %v2float %vUV + %33 = OpImageSampleImplicitLod %v4float %19 %28 ConstOffset %32 + %34 = OpLoad %v4float %FragColor + %35 = OpFAdd %v4float %34 %33 + OpStore %FragColor %35 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/default-member-names.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/default-member-names.asm.frag new file mode 100644 index 000000000..4d616fe49 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/default-member-names.asm.frag @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 43 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %2 "main" %3 + OpExecutionMode %2 OriginLowerLeft + OpDecorate %3 Location 0 + %void = OpTypeVoid + %9 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %12 = OpTypeFunction %v4float + %_struct_5 = OpTypeStruct %float + %_struct_6 = OpTypeStruct %float %float %float %float %float %float %float %float %float 
%float %float %float %_struct_5 +%_ptr_Function__struct_6 = OpTypePointer Function %_struct_6 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Function_float = OpTypePointer Function %float + %int_1 = OpConstant %int 1 + %int_2 = OpConstant %int 2 + %int_3 = OpConstant %int 3 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %3 = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %2 = OpFunction %void None %9 + %22 = OpLabel + %23 = OpVariable %_ptr_Function__struct_6 Function + %24 = OpAccessChain %_ptr_Function_float %23 %int_0 + %25 = OpLoad %float %24 + %26 = OpAccessChain %_ptr_Function_float %23 %int_1 + %27 = OpLoad %float %26 + %28 = OpAccessChain %_ptr_Function_float %23 %int_2 + %29 = OpLoad %float %28 + %30 = OpAccessChain %_ptr_Function_float %23 %int_3 + %31 = OpLoad %float %30 + %32 = OpCompositeConstruct %v4float %25 %27 %29 %31 + OpStore %3 %32 + OpReturn + OpFunctionEnd + %4 = OpFunction %v4float None %12 + %33 = OpLabel + %7 = OpVariable %_ptr_Function__struct_6 Function + %34 = OpAccessChain %_ptr_Function_float %7 %int_0 + %35 = OpLoad %float %34 + %36 = OpAccessChain %_ptr_Function_float %7 %int_1 + %37 = OpLoad %float %36 + %38 = OpAccessChain %_ptr_Function_float %7 %int_2 + %39 = OpLoad %float %38 + %40 = OpAccessChain %_ptr_Function_float %7 %int_3 + %41 = OpLoad %float %40 + %42 = OpCompositeConstruct %v4float %35 %37 %39 %41 + OpReturnValue %42 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..0efd3158c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/empty-struct.asm.frag @@ -0,0 +1,55 @@ +; SPIR-V +; Version: 1.2 +; Generator: Khronos; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %EntryPoint_Main "main" + OpExecutionMode %EntryPoint_Main OriginUpperLeft + OpSource Unknown 100 + OpName %EmptyStructTest "EmptyStructTest" + OpName %GetValue "GetValue" + OpName %GetValue2 "GetValue" + OpName %self "self" + OpName %self2 "self" + OpName %emptyStruct "emptyStruct" + OpName %value "value" + OpName %EntryPoint_Main "EntryPoint_Main" + +%EmptyStructTest = OpTypeStruct +%_ptr_Function_EmptyStructTest = OpTypePointer Function %EmptyStructTest + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %5 = OpTypeFunction %float %_ptr_Function_EmptyStructTest + %6 = OpTypeFunction %float %EmptyStructTest + %void = OpTypeVoid +%_ptr_Function_void = OpTypePointer Function %void + %8 = OpTypeFunction %void %_ptr_Function_EmptyStructTest + %9 = OpTypeFunction %void + %float_0 = OpConstant %float 0 + + %GetValue = OpFunction %float None %5 + %self = OpFunctionParameter %_ptr_Function_EmptyStructTest + %13 = OpLabel + OpReturnValue %float_0 + OpFunctionEnd + + %GetValue2 = OpFunction %float None %6 + %self2 = OpFunctionParameter %EmptyStructTest + %14 = OpLabel + OpReturnValue %float_0 + OpFunctionEnd + +%EntryPoint_Main = OpFunction %void None %9 + %37 = OpLabel + %emptyStruct = OpVariable %_ptr_Function_EmptyStructTest Function + %18 = OpVariable %_ptr_Function_EmptyStructTest Function + %value = OpVariable %_ptr_Function_float Function + %value2 = OpCompositeConstruct %EmptyStructTest + %22 = OpFunctionCall %float %GetValue %emptyStruct + %23 = OpFunctionCall %float %GetValue2 %value2 + OpStore %value %22 + OpStore %value %23 + OpReturn + OpFunctionEnd diff --git 
a/3rdparty/spirv-cross/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag new file mode 100644 index 000000000..e205a15c4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/extract-packed-from-composite.asm.frag @@ -0,0 +1,107 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 64 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %pos_1 %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %_main_vf4_ "@main(vf4;" + OpName %pos "pos" + OpName %Foo "Foo" + OpMemberName %Foo 0 "a" + OpMemberName %Foo 1 "b" + OpName %foo "foo" + OpName %Foo_0 "Foo" + OpMemberName %Foo_0 0 "a" + OpMemberName %Foo_0 1 "b" + OpName %buf "buf" + OpMemberName %buf 0 "results" + OpMemberName %buf 1 "bar" + OpName %_ "" + OpName %pos_0 "pos" + OpName %pos_1 "pos" + OpName %_entryPointOutput "@entryPointOutput" + OpName %param "param" + OpMemberDecorate %Foo_0 0 Offset 0 + OpMemberDecorate %Foo_0 1 Offset 12 + OpDecorate %_arr_Foo_0_uint_16 ArrayStride 16 + OpMemberDecorate %buf 0 Offset 0 + OpMemberDecorate %buf 1 Offset 256 + OpDecorate %buf Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %pos_1 BuiltIn FragCoord + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %9 = OpTypeFunction %v4float %_ptr_Function_v4float + %v3float = OpTypeVector %float 3 + %Foo = OpTypeStruct %v3float %float +%_ptr_Function_Foo = OpTypePointer Function %Foo + %Foo_0 = OpTypeStruct %v3float %float + %uint = OpTypeInt 32 0 + %uint_16 = OpConstant %uint 16 +%_arr_Foo_0_uint_16 = OpTypeArray %Foo_0 %uint_16 + %buf = OpTypeStruct %_arr_Foo_0_uint_16 %v4float +%_ptr_Uniform_buf = OpTypePointer Uniform %buf + %_ = OpVariable %_ptr_Uniform_buf Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Function_float = OpTypePointer Function %float + %int_16 = OpConstant %int 16 +%_ptr_Uniform_Foo_0 = OpTypePointer Uniform %Foo_0 +%_ptr_Function_v3float = OpTypePointer Function %v3float + %int_1 = OpConstant %int 1 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %float_0 = OpConstant %float 0 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %pos_1 = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %pos_0 = OpVariable %_ptr_Function_v4float Function + %param = OpVariable %_ptr_Function_v4float Function + %58 = OpLoad %v4float %pos_1 + OpStore %pos_0 %58 + %62 = OpLoad %v4float %pos_0 + OpStore %param %62 + %63 = OpFunctionCall %v4float %_main_vf4_ %param + OpStore %_entryPointOutput %63 + OpReturn + OpFunctionEnd + %_main_vf4_ = OpFunction %v4float None %9 + %pos = OpFunctionParameter %_ptr_Function_v4float + %12 = OpLabel + %foo = OpVariable %_ptr_Function_Foo Function + %28 = OpAccessChain %_ptr_Function_float %pos %uint_0 + %29 = OpLoad %float %28 + %30 = OpConvertFToS %int %29 + %32 = OpSMod %int %30 %int_16 + %34 = OpAccessChain %_ptr_Uniform_Foo_0 %_ %int_0 %32 + %35 = OpLoad %Foo_0 %34 + %36 = OpCompositeExtract %v3float %35 0 + %38 = OpAccessChain %_ptr_Function_v3float %foo %int_0 + 
OpStore %38 %36 + %39 = OpCompositeExtract %float %35 1 + %41 = OpAccessChain %_ptr_Function_float %foo %int_1 + OpStore %41 %39 + %42 = OpAccessChain %_ptr_Function_v3float %foo %int_0 + %43 = OpLoad %v3float %42 + %45 = OpAccessChain %_ptr_Uniform_v4float %_ %int_1 + %46 = OpLoad %v4float %45 + %47 = OpVectorShuffle %v3float %46 %46 0 1 2 + %48 = OpDot %float %43 %47 + %49 = OpAccessChain %_ptr_Function_float %foo %int_1 + %50 = OpLoad %float %49 + %52 = OpCompositeConstruct %v4float %48 %50 %float_0 %float_0 + OpReturnValue %52 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/frem.asm.frag new file mode 100644 index 000000000..8350c75c0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/frem.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 16 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vA %vB + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vA "vA" + OpName %vB "vB" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %vA RelaxedPrecision + OpDecorate %vA Location 0 + OpDecorate %12 RelaxedPrecision + OpDecorate %vB RelaxedPrecision + OpDecorate %vB Location 1 + OpDecorate %14 RelaxedPrecision + OpDecorate %15 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output +%_ptr_Input_v4float = OpTypePointer Input %v4float + %vA = OpVariable %_ptr_Input_v4float Input + %vB = OpVariable %_ptr_Input_v4float Input + %main = OpFunction %void None %3 + %5 = OpLabel + %12 = OpLoad %v4float %vA + %14 = OpLoad %v4float %vB + %15 = OpFRem %v4float %12 %14 + OpStore %FragColor %15 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..397aa98ce --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,153 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 76 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %foobar_vf4_ "foo" + OpName %a "foo" + OpName %foobar_vf3_ "foo" + OpName %a_0 "foo" + OpName %foobaz_vf4_ "foo" + OpName %a_1 "foo" + OpName %foobaz_vf2_ "foo" + OpName %a_2 "foo" + OpName %a_3 "foo" + OpName %param "foo" + OpName %b "foo" + OpName %param_0 "foo" + OpName %c "foo" + OpName %param_1 "foo" + OpName %d "foo" + OpName %param_2 "foo" + OpName %FragColor "FragColor" + OpDecorate %foobar_vf4_ RelaxedPrecision + OpDecorate %a RelaxedPrecision + OpDecorate %foobar_vf3_ RelaxedPrecision + OpDecorate %a_0 RelaxedPrecision + OpDecorate %foobaz_vf4_ RelaxedPrecision + OpDecorate %a_1 RelaxedPrecision + OpDecorate %foobaz_vf2_ RelaxedPrecision + OpDecorate %a_2 RelaxedPrecision + OpDecorate %28 RelaxedPrecision + OpDecorate %30 RelaxedPrecision + OpDecorate %31 RelaxedPrecision + OpDecorate %34 
RelaxedPrecision + OpDecorate %35 RelaxedPrecision + OpDecorate %36 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %40 RelaxedPrecision + OpDecorate %42 RelaxedPrecision + OpDecorate %43 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %48 RelaxedPrecision + OpDecorate %49 RelaxedPrecision + OpDecorate %a_3 RelaxedPrecision + OpDecorate %55 RelaxedPrecision + OpDecorate %b RelaxedPrecision + OpDecorate %59 RelaxedPrecision + OpDecorate %c RelaxedPrecision + OpDecorate %62 RelaxedPrecision + OpDecorate %d RelaxedPrecision + OpDecorate %66 RelaxedPrecision + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %69 RelaxedPrecision + OpDecorate %70 RelaxedPrecision + OpDecorate %71 RelaxedPrecision + OpDecorate %72 RelaxedPrecision + OpDecorate %73 RelaxedPrecision + OpDecorate %74 RelaxedPrecision + OpDecorate %75 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %9 = OpTypeFunction %v4float %_ptr_Function_v4float + %v3float = OpTypeVector %float 3 +%_ptr_Function_v3float = OpTypePointer Function %v3float + %15 = OpTypeFunction %v4float %_ptr_Function_v3float + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %24 = OpTypeFunction %v4float %_ptr_Function_v2float + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 + %53 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %57 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %64 = OpConstantComposite %v2float %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %a_3 = OpVariable %_ptr_Function_v4float Function + %param = OpVariable %_ptr_Function_v4float Function + %b = OpVariable %_ptr_Function_v4float Function + %param_0 = OpVariable %_ptr_Function_v3float Function + %c = OpVariable %_ptr_Function_v4float Function + %param_1 = OpVariable %_ptr_Function_v4float Function + %d = OpVariable %_ptr_Function_v4float Function + %param_2 = OpVariable %_ptr_Function_v2float Function + OpStore %param %53 + %55 = OpFunctionCall %v4float %foobar_vf4_ %param + OpStore %a_3 %55 + OpStore %param_0 %57 + %59 = OpFunctionCall %v4float %foobar_vf3_ %param_0 + OpStore %b %59 + OpStore %param_1 %53 + %62 = OpFunctionCall %v4float %foobaz_vf4_ %param_1 + OpStore %c %62 + OpStore %param_2 %64 + %66 = OpFunctionCall %v4float %foobaz_vf2_ %param_2 + OpStore %d %66 + %69 = OpLoad %v4float %a_3 + %70 = OpLoad %v4float %b + %71 = OpFAdd %v4float %69 %70 + %72 = OpLoad %v4float %c + %73 = OpFAdd %v4float %71 %72 + %74 = OpLoad %v4float %d + %75 = OpFAdd %v4float %73 %74 + OpStore %FragColor %75 + OpReturn + OpFunctionEnd +%foobar_vf4_ = OpFunction %v4float None %9 + %a = OpFunctionParameter %_ptr_Function_v4float + %12 = OpLabel + %28 = OpLoad %v4float %a + %30 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1 + %31 = OpFAdd %v4float %28 %30 + OpReturnValue %31 + OpFunctionEnd +%foobar_vf3_ = OpFunction %v4float None %15 + %a_0 = OpFunctionParameter %_ptr_Function_v3float + %18 = OpLabel + %34 = OpLoad %v3float %a_0 + %35 = OpVectorShuffle %v4float %34 %34 0 1 2 2 + %36 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1 + %37 = OpFAdd %v4float %35 %36 + OpReturnValue %37 + OpFunctionEnd 
+%foobaz_vf4_ = OpFunction %v4float None %9 + %a_1 = OpFunctionParameter %_ptr_Function_v4float + %21 = OpLabel + %40 = OpLoad %v4float %a_1 + %42 = OpCompositeConstruct %v4float %float_2 %float_2 %float_2 %float_2 + %43 = OpFAdd %v4float %40 %42 + OpReturnValue %43 + OpFunctionEnd +%foobaz_vf2_ = OpFunction %v4float None %24 + %a_2 = OpFunctionParameter %_ptr_Function_v2float + %27 = OpLabel + %46 = OpLoad %v2float %a_2 + %47 = OpVectorShuffle %v4float %46 %46 0 1 0 1 + %48 = OpCompositeConstruct %v4float %float_2 %float_2 %float_2 %float_2 + %49 = OpFAdd %v4float %47 %48 + OpReturnValue %49 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..63c8ab57a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 19 +; Schema: 0 + OpCapability Shader + OpCapability ImageQuery + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %Size + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %Size "Size" + OpName %uTexture "uTexture" + OpDecorate %Size Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 +%_ptr_Output_v2int = OpTypePointer Output %v2int + %Size = OpVariable %_ptr_Output_v2int Output + %float = OpTypeFloat 32 + %11 = OpTypeImage %float 2D 0 0 0 1 Unknown + %12 = OpTypeSampledImage %11 +%_ptr_UniformConstant_12 = OpTypePointer UniformConstant %12 + %uTexture = OpVariable %_ptr_UniformConstant_12 UniformConstant + %int_0 = OpConstant %int 0 + %int_1 = OpConstant %int 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %15 = OpLoad %12 %uTexture + %17 = OpImage %11 %15 + %18 = OpImageQuerySizeLod %v2int %17 %int_0 + %19 = OpImageQuerySizeLod %v2int %17 %int_1 + %20 = OpIAdd %v2int %18 %19 + OpStore %Size %20 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..ccdfeef58 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,81 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 60 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %v0 %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %phi "phi" + OpName %i "i" + OpName %v0 "v0" + OpName %FragColor "FragColor" + OpName %uImage "uImage" + OpDecorate %v0 Location 0 + OpDecorate %FragColor Location 0 + OpDecorate %uImage DescriptorSet 0 + OpDecorate %uImage Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %float_1 = OpConstant %float 1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool + %v4float = OpTypeVector %float 4 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %v0 = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_float = 
OpTypePointer Input %float + %float_0 = OpConstant %float 0 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %36 = OpTypeImage %float 2D 0 0 0 1 Unknown + %37 = OpTypeSampledImage %36 +%_ptr_UniformConstant_37 = OpTypePointer UniformConstant %37 + %uImage = OpVariable %_ptr_UniformConstant_37 UniformConstant + %v2float = OpTypeVector %float 2 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 + %float_2 = OpConstant %float 2 + %int_1 = OpConstant %int 1 + %float_1_vec = OpConstantComposite %v4float %float_1 %float_2 %float_1 %float_2 + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + OpStore %i %int_0 + OpBranch %loop_header + %loop_header = OpLabel + %phi = OpPhi %float %float_1 %5 %phi_plus_2 %continue_block + %tex_phi = OpPhi %v4float %float_1_vec %5 %texture_load_result %continue_block + OpLoopMerge %merge_block %continue_block None + OpBranch %loop_body + %loop_body = OpLabel + OpStore %FragColor %tex_phi + %19 = OpLoad %int %i + %22 = OpSLessThan %bool %19 %int_4 + OpBranchConditional %22 %15 %merge_block + %15 = OpLabel + %26 = OpLoad %int %i + %28 = OpAccessChain %_ptr_Input_float %v0 %26 + %29 = OpLoad %float %28 + %31 = OpFOrdGreaterThan %bool %29 %float_0 + OpBranchConditional %31 %continue_block %merge_block + %continue_block = OpLabel + %40 = OpLoad %37 %uImage + %43 = OpCompositeConstruct %v2float %phi %phi + %texture_load_result = OpImageSampleExplicitLod %v4float %40 %43 Lod %float_0 + %phi_plus_2 = OpFAdd %float %phi %float_2 + %54 = OpLoad %int %i + %56 = OpIAdd %int %54 %int_1 + OpStore %i %56 + OpBranch %loop_header + %merge_block = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..40e5d3a89 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,29 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 14 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v3float = OpTypeVector %float 3 +%_ptr_Output_v3float = OpTypePointer Output %v3float + %FragColor = OpVariable %_ptr_Output_v3float Output +%float_0x1p_128 = OpConstant %float 0x1p+128 +%float_n0x1p_128 = OpConstant %float -0x1p+128 +%float_0x1_8p_128 = OpConstant %float 0x1.8p+128 + %13 = OpConstantComposite %v3float %float_0x1p_128 %float_n0x1p_128 %float_0x1_8p_128 + %main = OpFunction %void None %3 + %5 = OpLabel + OpStore %FragColor %13 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag new file mode 100644 index 000000000..bd6d06fa7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/interpolation-qualifiers-struct.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 51 +; Schema: 0 + OpCapability Shader + OpCapability SampleRateShading + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical 
GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %inp + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %Input "Input" + OpMemberName %Input 0 "v0" + OpMemberName %Input 1 "v1" + OpMemberName %Input 2 "v2" + OpMemberName %Input 3 "v3" + OpMemberName %Input 4 "v4" + OpMemberName %Input 5 "v5" + OpMemberName %Input 6 "v6" + OpName %inp "inp" + OpDecorate %FragColor Location 0 + OpDecorate %inp Location 0 + OpMemberDecorate %Input 1 NoPerspective + OpMemberDecorate %Input 2 Centroid + OpMemberDecorate %Input 3 Centroid + OpMemberDecorate %Input 3 NoPerspective + OpMemberDecorate %Input 4 Sample + OpMemberDecorate %Input 5 Sample + OpMemberDecorate %Input 5 NoPerspective + OpMemberDecorate %Input 6 Flat + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %v2float = OpTypeVector %float 2 + %v3float = OpTypeVector %float 3 + %Input = OpTypeStruct %v2float %v2float %v3float %v4float %float %float %float +%_ptr_Input_Input = OpTypePointer Input %Input + %inp = OpVariable %_ptr_Input_Input Input + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Input_float = OpTypePointer Input %float + %int_1 = OpConstant %int 1 + %uint_1 = OpConstant %uint 1 + %int_2 = OpConstant %int 2 +%_ptr_Input_v3float = OpTypePointer Input %v3float + %int_3 = OpConstant %int 3 + %uint_3 = OpConstant %uint 3 + %int_4 = OpConstant %int 4 + %int_5 = OpConstant %int 5 + %int_6 = OpConstant %int 6 + %main = OpFunction %void None %3 + %5 = OpLabel + %20 = OpAccessChain %_ptr_Input_float %inp %int_0 %uint_0 + %21 = OpLoad %float %20 + %24 = OpAccessChain %_ptr_Input_float %inp %int_1 %uint_1 + %25 = OpLoad %float %24 + %26 = OpFAdd %float %21 %25 + %29 = OpAccessChain %_ptr_Input_v3float %inp %int_2 + %30 = OpLoad %v3float %29 + %31 = OpVectorShuffle %v2float %30 %30 0 1 + %34 = OpAccessChain %_ptr_Input_float %inp %int_3 %uint_3 + %35 = OpLoad %float %34 + %37 = OpAccessChain %_ptr_Input_float %inp %int_4 + %38 = OpLoad %float %37 + %39 = OpFMul %float %35 %38 + %41 = OpAccessChain %_ptr_Input_float %inp %int_5 + %42 = OpLoad %float %41 + %43 = OpFAdd %float %39 %42 + %45 = OpAccessChain %_ptr_Input_float %inp %int_6 + %46 = OpLoad %float %45 + %47 = OpFSub %float %43 %46 + %48 = OpCompositeExtract %float %31 0 + %49 = OpCompositeExtract %float %31 1 + %50 = OpCompositeConstruct %v4float %26 %48 %49 %47 + OpStore %FragColor %50 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/locations-components.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/locations-components.asm.frag new file mode 100644 index 000000000..bf8c6a690 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/locations-components.asm.frag @@ -0,0 +1,102 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 67 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %8 %16 %22 %28 %33 %o0 + OpName %main "main" + OpName %v1 "v1" + OpName %v2 "v2" + OpName %o0 "o0" + OpName %r0 "r0" + OpDecorate %8 Location 1 + OpDecorate %16 Location 1 + OpDecorate %16 Component 2 + OpDecorate %22 Location 2 + OpDecorate %22 Flat + OpDecorate %28 Location 2 + OpDecorate %28 Component 1 + OpDecorate %28 Flat + OpDecorate %33 Location 2 + OpDecorate %33 
Component 2 + OpDecorate %33 Flat + OpDecorate %o0 Location 0 + %void = OpTypeVoid + %2 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %8 = OpVariable %_ptr_Input_v2float Input + %v4float = OpTypeVector %float 4 +%_ptr_Private_v4float = OpTypePointer Private %v4float + %v1 = OpVariable %_ptr_Private_v4float Private +%_ptr_Input_float = OpTypePointer Input %float + %16 = OpVariable %_ptr_Input_float Input +%_ptr_Private_float = OpTypePointer Private %float + %uint = OpTypeInt 32 0 + %uint_2 = OpConstant %uint 2 + %22 = OpVariable %_ptr_Input_float Input + %v2 = OpVariable %_ptr_Private_v4float Private + %uint_0 = OpConstant %uint 0 +%_ptr_Input_uint = OpTypePointer Input %uint + %28 = OpVariable %_ptr_Input_uint Input + %uint_1 = OpConstant %uint 1 + %33 = OpVariable %_ptr_Input_uint Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %o0 = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %int = OpTypeInt 32 1 +%_ptr_Function_float = OpTypePointer Function %float +%_ptr_Output_float = OpTypePointer Output %float + %main = OpFunction %void None %2 + %4 = OpLabel + %r0 = OpVariable %_ptr_Function_v4float Function + %12 = OpLoad %v2float %8 + %13 = OpLoad %v4float %v1 + %14 = OpVectorShuffle %v4float %13 %12 4 5 2 3 + OpStore %v1 %14 + %17 = OpLoad %float %16 + %21 = OpInBoundsAccessChain %_ptr_Private_float %v1 %uint_2 + OpStore %21 %17 + %24 = OpLoad %float %22 + %26 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_0 + OpStore %26 %24 + %29 = OpLoad %uint %28 + %30 = OpBitcast %float %29 + %32 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_1 + OpStore %32 %30 + %34 = OpLoad %uint %33 + %35 = OpBitcast %float %34 + %36 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_2 + OpStore %36 %35 + %42 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_1 + %43 = OpLoad %float %42 + %44 = OpBitcast %int %43 + %45 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_2 + %46 = OpLoad %float %45 + %47 = OpBitcast %int %46 + %48 = OpIAdd %int %44 %47 + %49 = OpBitcast %float %48 + %51 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_0 + OpStore %51 %49 + %52 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_0 + %53 = OpLoad %float %52 + %54 = OpBitcast %uint %53 + %55 = OpConvertUToF %float %54 + %57 = OpInBoundsAccessChain %_ptr_Output_float %o0 %uint_1 + OpStore %57 %55 + %58 = OpInBoundsAccessChain %_ptr_Private_float %v1 %uint_1 + %59 = OpLoad %float %58 + %60 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_0 + %61 = OpLoad %float %60 + %62 = OpFAdd %float %59 %61 + %63 = OpInBoundsAccessChain %_ptr_Output_float %o0 %uint_0 + OpStore %63 %62 + %64 = OpLoad %v4float %v1 + %65 = OpLoad %v4float %o0 + %66 = OpVectorShuffle %v4float %65 %64 0 1 6 4 + OpStore %o0 %66 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..320e5ebfb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,195 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 111 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %index + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 
310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %index "index" + OpName %indexable "indexable" + OpName %indexable_0 "indexable" + OpName %indexable_1 "indexable" + OpName %foo "foo" + OpName %foobar "foobar" + OpName %baz "baz" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %index RelaxedPrecision + OpDecorate %index Flat + OpDecorate %index Location 0 + OpDecorate %20 RelaxedPrecision + OpDecorate %25 RelaxedPrecision + OpDecorate %26 RelaxedPrecision + OpDecorate %32 RelaxedPrecision + OpDecorate %34 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %38 RelaxedPrecision + OpDecorate %39 RelaxedPrecision + OpDecorate %41 RelaxedPrecision + OpDecorate %42 RelaxedPrecision + OpDecorate %45 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %foo RelaxedPrecision + OpDecorate %61 RelaxedPrecision + OpDecorate %66 RelaxedPrecision + OpDecorate %68 RelaxedPrecision + OpDecorate %71 RelaxedPrecision + OpDecorate %72 RelaxedPrecision + OpDecorate %73 RelaxedPrecision + OpDecorate %75 RelaxedPrecision + OpDecorate %76 RelaxedPrecision + OpDecorate %79 RelaxedPrecision + OpDecorate %80 RelaxedPrecision + OpDecorate %81 RelaxedPrecision + OpDecorate %foobar RelaxedPrecision + OpDecorate %83 RelaxedPrecision + OpDecorate %90 RelaxedPrecision + OpDecorate %91 RelaxedPrecision + OpDecorate %93 RelaxedPrecision + OpDecorate %94 RelaxedPrecision + OpDecorate %95 RelaxedPrecision + OpDecorate %baz RelaxedPrecision + OpDecorate %105 RelaxedPrecision + OpDecorate %106 RelaxedPrecision + OpDecorate %108 RelaxedPrecision + OpDecorate %109 RelaxedPrecision + OpDecorate %110 RelaxedPrecision + OpDecorate %16 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %uint = OpTypeInt 32 0 + %uint_16 = OpConstant %uint 16 +%_arr_float_uint_16 = OpTypeArray %float %uint_16 + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 + %float_3 = OpConstant %float 3 + %float_4 = OpConstant %float 4 + %16 = OpConstantComposite %_arr_float_uint_16 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 + %int = OpTypeInt 32 1 +%_ptr_Input_int = OpTypePointer Input %int + %index = OpVariable %_ptr_Input_int Input +%_ptr_Function__arr_float_uint_16 = OpTypePointer Function %_arr_float_uint_16 +%_ptr_Function_float = OpTypePointer Function %float + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %int_1 = OpConstant %int 1 + %v4float = OpTypeVector %float 4 + %uint_4 = OpConstant %uint 4 +%_arr_v4float_uint_4 = OpTypeArray %v4float %uint_4 +%_ptr_Function__arr_v4float_uint_4 = OpTypePointer Function %_arr_v4float_uint_4 + %float_0 = OpConstant %float 0 + %54 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 + %55 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %float_8 = OpConstant %float 8 + %57 = OpConstantComposite %v4float %float_8 %float_8 %float_8 %float_8 + %float_5 = OpConstant %float 5 + %59 = OpConstantComposite %v4float %float_5 %float_5 %float_5 %float_5 + %60 = OpConstantComposite %_arr_v4float_uint_4 %54 %55 %57 %59 + %int_30 = OpConstant %int 30 + %int_3 = OpConstant %int 3 + %uint_1 = OpConstant %uint 1 + %uint_0 = OpConstant %uint 0 + %float_20 = OpConstant %float 20 + %uint_2 = OpConstant %uint 2 + %97 = 
OpConstantComposite %v4float %float_20 %float_20 %float_20 %float_20 + %float_30 = OpConstant %float 30 + %99 = OpConstantComposite %v4float %float_30 %float_30 %float_30 %float_30 + %float_50 = OpConstant %float 50 + %101 = OpConstantComposite %v4float %float_50 %float_50 %float_50 %float_50 + %float_60 = OpConstant %float 60 + %103 = OpConstantComposite %v4float %float_60 %float_60 %float_60 %float_60 + %104 = OpConstantComposite %_arr_v4float_uint_4 %97 %99 %101 %103 + %main = OpFunction %void None %3 + %5 = OpLabel + %indexable = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 +%indexable_0 = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 +%indexable_1 = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 + %foo = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %foobar = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %baz = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %20 = OpLoad %int %index + %24 = OpAccessChain %_ptr_Function_float %indexable %20 + %25 = OpLoad %float %24 + OpStore %FragColor %25 + %26 = OpLoad %int %index + %29 = OpSLessThan %bool %26 %int_10 + OpSelectionMerge %31 None + OpBranchConditional %29 %30 %40 + %30 = OpLabel + %32 = OpLoad %int %index + %34 = OpBitwiseXor %int %32 %int_1 + %36 = OpAccessChain %_ptr_Function_float %indexable_0 %34 + %37 = OpLoad %float %36 + %38 = OpLoad %float %FragColor + %39 = OpFAdd %float %38 %37 + OpStore %FragColor %39 + OpBranch %31 + %40 = OpLabel + %41 = OpLoad %int %index + %42 = OpBitwiseAnd %int %41 %int_1 + %44 = OpAccessChain %_ptr_Function_float %indexable_1 %42 + %45 = OpLoad %float %44 + %46 = OpLoad %float %FragColor + %47 = OpFAdd %float %46 %45 + OpStore %FragColor %47 + OpBranch %31 + %31 = OpLabel + %61 = OpLoad %int %index + %63 = OpSGreaterThan %bool %61 %int_30 + OpSelectionMerge %65 None + OpBranchConditional %63 %64 %74 + %64 = OpLabel + %66 = OpLoad %int %index + %68 = OpBitwiseAnd %int %66 %int_3 + %70 = OpAccessChain %_ptr_Function_float %foo %68 %uint_1 + %71 = OpLoad %float %70 + %72 = OpLoad %float %FragColor + %73 = OpFAdd %float %72 %71 + OpStore %FragColor %73 + OpBranch %65 + %74 = OpLabel + %75 = OpLoad %int %index + %76 = OpBitwiseAnd %int %75 %int_1 + %78 = OpAccessChain %_ptr_Function_float %foo %76 %uint_0 + %79 = OpLoad %float %78 + %80 = OpLoad %float %FragColor + %81 = OpFAdd %float %80 %79 + OpStore %FragColor %81 + OpBranch %65 + %65 = OpLabel + %83 = OpLoad %int %index + %84 = OpSGreaterThan %bool %83 %int_30 + OpSelectionMerge %86 None + OpBranchConditional %84 %85 %86 + %85 = OpLabel + %89 = OpAccessChain %_ptr_Function_float %foobar %int_1 %uint_2 + OpStore %89 %float_20 + OpBranch %86 + %86 = OpLabel + %90 = OpLoad %int %index + %91 = OpBitwiseAnd %int %90 %int_3 + %92 = OpAccessChain %_ptr_Function_float %foobar %91 %uint_2 + %93 = OpLoad %float %92 + %94 = OpLoad %float %FragColor + %95 = OpFAdd %float %94 %93 + OpStore %FragColor %95 + OpStore %baz %104 + %105 = OpLoad %int %index + %106 = OpBitwiseAnd %int %105 %int_3 + %107 = OpAccessChain %_ptr_Function_float %baz %106 %uint_2 + %108 = OpLoad %float %107 + %109 = OpLoad %float %FragColor + %110 = OpFAdd %float %109 %108 + OpStore %FragColor %110 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag new file mode 100644 index 000000000..ad566615f --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-msl/asm/frag/min-max-clamp.invalid.asm.frag @@ -0,0 +1,293 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 205 +; Schema: 0 + OpCapability Shader + OpCapability Float16 + OpExtension "SPV_AMD_gpu_shader_half_float" + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %v1 %v2 %v3 %v4 %h1 %h2 %h3 %h4 + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpSourceExtension "GL_AMD_gpu_shader_half_float" + OpName %main "main" + OpName %res "res" + OpName %res2 "res2" + OpName %res3 "res3" + OpName %res4 "res4" + OpName %hres "hres" + OpName %hres2 "hres2" + OpName %hres3 "hres3" + OpName %hres4 "hres4" + OpName %v1 "v1" + OpName %v2 "v2" + OpName %v3 "v3" + OpName %v4 "v4" + OpName %h1 "h1" + OpName %h2 "h2" + OpName %h3 "h3" + OpName %h4 "h4" + OpDecorate %v1 Location 0 + OpDecorate %v2 Location 1 + OpDecorate %v3 Location 2 + OpDecorate %v4 Location 3 + OpDecorate %h1 Location 4 + OpDecorate %h2 Location 5 + OpDecorate %h3 Location 6 + OpDecorate %h4 Location 7 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %v3float = OpTypeVector %float 3 + %v4float = OpTypeVector %float 4 + %half = OpTypeFloat 16 + %v2half = OpTypeVector %half 2 + %v3half = OpTypeVector %half 3 + %v4half = OpTypeVector %half 4 +%_ptr_Function_float = OpTypePointer Function %float +%_ptr_Input_float = OpTypePointer Input %float +%_ptr_Function_v2float = OpTypePointer Function %v2float +%_ptr_Input_v2float = OpTypePointer Input %v2float +%_ptr_Function_v3float = OpTypePointer Function %v3float +%_ptr_Input_v3float = OpTypePointer Input %v3float +%_ptr_Function_v4float = OpTypePointer Function %v4float +%_ptr_Input_v4float = OpTypePointer Input %v4float +%_ptr_Function_half = OpTypePointer Function %half +%_ptr_Input_half = OpTypePointer Input %half +%_ptr_Function_v2half = OpTypePointer Function %v2half +%_ptr_Input_v2half = OpTypePointer Input %v2half +%_ptr_Function_v3half = OpTypePointer Function %v3half +%_ptr_Input_v3half = OpTypePointer Input %v3half +%_ptr_Function_v4half = OpTypePointer Function %v4half +%_ptr_Input_v4half = OpTypePointer Input %v4half + %v1 = OpVariable %_ptr_Input_float Input + %v2 = OpVariable %_ptr_Input_v2float Input + %v3 = OpVariable %_ptr_Input_v3float Input + %v4 = OpVariable %_ptr_Input_v4float Input + %h1 = OpVariable %_ptr_Input_half Input + %h2 = OpVariable %_ptr_Input_v2half Input + %h3 = OpVariable %_ptr_Input_v3half Input + %h4 = OpVariable %_ptr_Input_v4half Input + %main = OpFunction %void None %3 + %5 = OpLabel + %res = OpVariable %_ptr_Function_float Function + %46 = OpLoad %float %v1 + %47 = OpLoad %float %v1 + %48 = OpExtInst %float %1 FMin %46 %47 + OpStore %res %48 + %49 = OpLoad %float %v1 + %50 = OpLoad %float %v1 + %51 = OpExtInst %float %1 FMax %49 %50 + OpStore %res %51 + %52 = OpLoad %float %v1 + %53 = OpLoad %float %v1 + %54 = OpLoad %float %v1 + %55 = OpExtInst %float %1 FClamp %52 %53 %54 + OpStore %res %55 + %56 = OpLoad %float %v1 + %57 = OpLoad %float %v1 + %58 = OpExtInst %float %1 NMin %56 %57 + OpStore %res %58 + %59 = OpLoad %float %v1 + %60 = OpLoad %float %v1 + %61 = OpExtInst %float %1 NMax %59 %60 + OpStore %res %61 + %62 = OpLoad %float %v1 + %63 = OpLoad %float %v1 + %64 = OpLoad %float %v1 + %65 = OpExtInst %float %1 NClamp %62 %63 %64 + OpStore %res %65 + %res2 = OpVariable %_ptr_Function_v2float Function + %66 = OpLoad %v2float %v2 + %67 = OpLoad %v2float 
%v2 + %68 = OpExtInst %v2float %1 FMin %66 %67 + OpStore %res2 %68 + %69 = OpLoad %v2float %v2 + %70 = OpLoad %v2float %v2 + %71 = OpExtInst %v2float %1 FMax %69 %70 + OpStore %res2 %71 + %72 = OpLoad %v2float %v2 + %73 = OpLoad %v2float %v2 + %74 = OpLoad %v2float %v2 + %75 = OpExtInst %v2float %1 FClamp %72 %73 %74 + OpStore %res2 %75 + %76 = OpLoad %v2float %v2 + %77 = OpLoad %v2float %v2 + %78 = OpExtInst %v2float %1 NMin %76 %77 + OpStore %res2 %78 + %79 = OpLoad %v2float %v2 + %80 = OpLoad %v2float %v2 + %81 = OpExtInst %v2float %1 NMax %79 %80 + OpStore %res2 %81 + %82 = OpLoad %v2float %v2 + %83 = OpLoad %v2float %v2 + %84 = OpLoad %v2float %v2 + %85 = OpExtInst %v2float %1 NClamp %82 %83 %84 + OpStore %res2 %85 + %res3 = OpVariable %_ptr_Function_v3float Function + %86 = OpLoad %v3float %v3 + %87 = OpLoad %v3float %v3 + %88 = OpExtInst %v3float %1 FMin %86 %87 + OpStore %res3 %88 + %89 = OpLoad %v3float %v3 + %90 = OpLoad %v3float %v3 + %91 = OpExtInst %v3float %1 FMax %89 %90 + OpStore %res3 %91 + %92 = OpLoad %v3float %v3 + %93 = OpLoad %v3float %v3 + %94 = OpLoad %v3float %v3 + %95 = OpExtInst %v3float %1 FClamp %92 %93 %94 + OpStore %res3 %95 + %96 = OpLoad %v3float %v3 + %97 = OpLoad %v3float %v3 + %98 = OpExtInst %v3float %1 NMin %96 %97 + OpStore %res3 %98 + %99 = OpLoad %v3float %v3 + %100 = OpLoad %v3float %v3 + %101 = OpExtInst %v3float %1 NMax %99 %100 + OpStore %res3 %101 + %102 = OpLoad %v3float %v3 + %103 = OpLoad %v3float %v3 + %104 = OpLoad %v3float %v3 + %105 = OpExtInst %v3float %1 NClamp %102 %103 %104 + OpStore %res3 %105 + %res4 = OpVariable %_ptr_Function_v4float Function + %106 = OpLoad %v4float %v4 + %107 = OpLoad %v4float %v4 + %108 = OpExtInst %v4float %1 FMin %106 %107 + OpStore %res4 %108 + %109 = OpLoad %v4float %v4 + %110 = OpLoad %v4float %v4 + %111 = OpExtInst %v4float %1 FMax %109 %110 + OpStore %res4 %111 + %112 = OpLoad %v4float %v4 + %113 = OpLoad %v4float %v4 + %114 = OpLoad %v4float %v4 + %115 = OpExtInst %v4float %1 FClamp %112 %113 %114 + OpStore %res4 %115 + %116 = OpLoad %v4float %v4 + %117 = OpLoad %v4float %v4 + %118 = OpExtInst %v4float %1 NMin %116 %117 + OpStore %res4 %118 + %119 = OpLoad %v4float %v4 + %120 = OpLoad %v4float %v4 + %121 = OpExtInst %v4float %1 NMax %119 %120 + OpStore %res4 %121 + %122 = OpLoad %v4float %v4 + %123 = OpLoad %v4float %v4 + %124 = OpLoad %v4float %v4 + %125 = OpExtInst %v4float %1 NClamp %122 %123 %124 + OpStore %res4 %125 + %hres = OpVariable %_ptr_Function_half Function + %126 = OpLoad %half %h1 + %127 = OpLoad %half %h1 + %128 = OpExtInst %half %1 FMin %126 %127 + OpStore %hres %128 + %129 = OpLoad %half %h1 + %130 = OpLoad %half %h1 + %131 = OpExtInst %half %1 FMax %129 %130 + OpStore %hres %131 + %132 = OpLoad %half %h1 + %133 = OpLoad %half %h1 + %134 = OpLoad %half %h1 + %135 = OpExtInst %half %1 FClamp %132 %133 %134 + OpStore %hres %135 + %136 = OpLoad %half %h1 + %137 = OpLoad %half %h1 + %138 = OpExtInst %half %1 NMin %136 %137 + OpStore %hres %138 + %139 = OpLoad %half %h1 + %140 = OpLoad %half %h1 + %141 = OpExtInst %half %1 NMax %139 %140 + OpStore %hres %141 + %142 = OpLoad %half %h1 + %143 = OpLoad %half %h1 + %144 = OpLoad %half %h1 + %145 = OpExtInst %half %1 NClamp %142 %143 %144 + OpStore %hres %145 + %hres2 = OpVariable %_ptr_Function_v2half Function + %146 = OpLoad %v2half %h2 + %147 = OpLoad %v2half %h2 + %148 = OpExtInst %v2half %1 FMin %146 %147 + OpStore %hres2 %148 + %149 = OpLoad %v2half %h2 + %150 = OpLoad %v2half %h2 + %151 = OpExtInst %v2half %1 FMax %149 %150 + OpStore 
%hres2 %151 + %152 = OpLoad %v2half %h2 + %153 = OpLoad %v2half %h2 + %154 = OpLoad %v2half %h2 + %155 = OpExtInst %v2half %1 FClamp %152 %153 %154 + OpStore %hres2 %155 + %156 = OpLoad %v2half %h2 + %157 = OpLoad %v2half %h2 + %158 = OpExtInst %v2half %1 NMin %156 %157 + OpStore %hres2 %158 + %159 = OpLoad %v2half %h2 + %160 = OpLoad %v2half %h2 + %161 = OpExtInst %v2half %1 NMax %159 %160 + OpStore %hres2 %161 + %162 = OpLoad %v2half %h2 + %163 = OpLoad %v2half %h2 + %164 = OpLoad %v2half %h2 + %165 = OpExtInst %v2half %1 NClamp %162 %163 %164 + OpStore %hres2 %165 + %hres3 = OpVariable %_ptr_Function_v3half Function + %166 = OpLoad %v3half %h3 + %167 = OpLoad %v3half %h3 + %168 = OpExtInst %v3half %1 FMin %166 %167 + OpStore %hres3 %168 + %169 = OpLoad %v3half %h3 + %170 = OpLoad %v3half %h3 + %171 = OpExtInst %v3half %1 FMax %169 %170 + OpStore %hres3 %171 + %172 = OpLoad %v3half %h3 + %173 = OpLoad %v3half %h3 + %174 = OpLoad %v3half %h3 + %175 = OpExtInst %v3half %1 FClamp %172 %173 %174 + OpStore %hres3 %175 + %176 = OpLoad %v3half %h3 + %177 = OpLoad %v3half %h3 + %178 = OpExtInst %v3half %1 NMin %176 %177 + OpStore %hres3 %178 + %179 = OpLoad %v3half %h3 + %180 = OpLoad %v3half %h3 + %181 = OpExtInst %v3half %1 NMax %179 %180 + OpStore %hres3 %181 + %182 = OpLoad %v3half %h3 + %183 = OpLoad %v3half %h3 + %184 = OpLoad %v3half %h3 + %185 = OpExtInst %v3half %1 NClamp %182 %183 %184 + OpStore %hres3 %185 + %hres4 = OpVariable %_ptr_Function_v4half Function + %186 = OpLoad %v4half %h4 + %187 = OpLoad %v4half %h4 + %188 = OpExtInst %v4half %1 FMin %186 %187 + OpStore %hres4 %188 + %189 = OpLoad %v4half %h4 + %190 = OpLoad %v4half %h4 + %191 = OpExtInst %v4half %1 FMax %189 %190 + OpStore %hres4 %191 + %192 = OpLoad %v4half %h4 + %193 = OpLoad %v4half %h4 + %194 = OpLoad %v4half %h4 + %195 = OpExtInst %v4half %1 FClamp %192 %193 %194 + OpStore %hres4 %195 + %196 = OpLoad %v4half %h4 + %197 = OpLoad %v4half %h4 + %198 = OpExtInst %v4half %1 NMin %196 %197 + OpStore %hres4 %198 + %199 = OpLoad %v4half %h4 + %200 = OpLoad %v4half %h4 + %201 = OpExtInst %v4half %1 NMax %199 %200 + OpStore %hres4 %201 + %202 = OpLoad %v4half %h4 + %203 = OpLoad %v4half %h4 + %204 = OpLoad %v4half %h4 + %205 = OpExtInst %v4half %1 NClamp %202 %203 %204 + OpStore %hres4 %205 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/op-constant-null.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/op-constant-null.asm.frag new file mode 100644 index 000000000..61d2e579c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/op-constant-null.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 45 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %a "a" + OpName %b "b" + OpName %c "c" + OpName %D "D" + OpMemberName %D 0 "a" + OpMemberName %D 1 "b" + OpName %d "d" + OpName %e "e" + OpName %FragColor "FragColor" + OpDecorate %a RelaxedPrecision + OpDecorate %b RelaxedPrecision + OpDecorate %c RelaxedPrecision + OpMemberDecorate %D 0 RelaxedPrecision + OpMemberDecorate %D 1 RelaxedPrecision + OpDecorate %e RelaxedPrecision + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %44 RelaxedPrecision + OpDecorate %float_1 RelaxedPrecision + OpDecorate %14 RelaxedPrecision + OpDecorate %23 
RelaxedPrecision + OpDecorate %41 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %float_1 = OpConstantNull %float + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %float_2 = OpConstantNull %float + %14 = OpConstantNull %v4float + %v3float = OpTypeVector %float 3 +%mat2v3float = OpTypeMatrix %v3float 2 +%_ptr_Function_mat2v3float = OpTypePointer Function %mat2v3float + %float_4 = OpConstantNull %float + %20 = OpConstantNull %v3float + %float_5 = OpConstantNull %float + %22 = OpConstantNull %v3float + %23 = OpConstantNull %mat2v3float + %D = OpTypeStruct %v4float %float +%_ptr_Function_D = OpTypePointer Function %D + %27 = OpConstantNull %D + %uint = OpTypeInt 32 0 + %uint_4 = OpConstant %uint 4 +%_arr_v4float_uint_4 = OpTypeArray %v4float %uint_4 +%_ptr_Function__arr_v4float_uint_4 = OpTypePointer Function %_arr_v4float_uint_4 + %float_10 = OpConstantNull %float + %34 = OpConstantNull %v4float + %float_11 = OpConstantNull %float + %36 = OpConstantNull %v4float + %float_12 = OpConstantNull %float + %38 = OpConstantNull %v4float + %float_13 = OpConstantNull %float + %40 = OpConstantNull %v4float + %41 = OpConstantNull %_arr_v4float_uint_4 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %a = OpVariable %_ptr_Function_float Function + %b = OpVariable %_ptr_Function_v4float Function + %c = OpVariable %_ptr_Function_mat2v3float Function + %d = OpVariable %_ptr_Function_D Function + %e = OpVariable %_ptr_Function__arr_v4float_uint_4 Function + OpStore %a %float_1 + OpStore %b %14 + OpStore %c %23 + OpStore %d %27 + OpStore %e %41 + %44 = OpLoad %float %a + OpStore %FragColor %44 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/op-image-sampled-image.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/op-image-sampled-image.asm.frag new file mode 100644 index 000000000..4dbede7bb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/op-image-sampled-image.asm.frag @@ -0,0 +1,81 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 54 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %o0 + OpName %main "main" + OpName %t0 "t0" + OpName %o0 "o0" + OpName %r0 "r0" + OpName %push_cb "push_cb" + OpMemberName %push_cb 0 "cb0" + OpName %dummy_sampler "dummy_sampler" + OpDecorate %t0 DescriptorSet 0 + OpDecorate %t0 Binding 2 + OpDecorate %o0 Location 0 + OpDecorate %_arr_v4float_uint_1 ArrayStride 16 + OpDecorate %push_cb Block + OpMemberDecorate %push_cb 0 Offset 0 + OpDecorate %dummy_sampler DescriptorSet 0 + OpDecorate %dummy_sampler Binding 4 + %void = OpTypeVoid + %2 = OpTypeFunction %void + %float = OpTypeFloat 32 + %6 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_6 = OpTypePointer UniformConstant %6 + %t0 = OpVariable %_ptr_UniformConstant_6 UniformConstant + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %o0 = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_v4float_uint_1 = OpTypeArray %v4float %uint_1 + %push_cb = OpTypeStruct %_arr_v4float_uint_1 +%_ptr_PushConstant_push_cb = OpTypePointer PushConstant %push_cb + %19 = OpVariable %_ptr_PushConstant_push_cb 
PushConstant + %uint_0 = OpConstant %uint 0 +%_ptr_PushConstant_v4float = OpTypePointer PushConstant %v4float +%_ptr_PushConstant_float = OpTypePointer PushConstant %float + %int = OpTypeInt 32 1 + %v2float = OpTypeVector %float 2 + %float_0 = OpConstant %float 0 + %30 = OpConstantComposite %v2float %float_0 %float_0 + %33 = OpTypeSampler +%_ptr_UniformConstant_33 = OpTypePointer UniformConstant %33 +%dummy_sampler = OpVariable %_ptr_UniformConstant_33 UniformConstant + %38 = OpTypeSampledImage %6 + %v2int = OpTypeVector %int 2 +%_ptr_Function_float = OpTypePointer Function %float + %uint_3 = OpConstant %uint 3 + %int_n1 = OpConstant %int -1 + %int_n2 = OpConstant %int -2 + %52 = OpConstantComposite %v2int %int_n1 %int_n2 + %main = OpFunction %void None %2 + %4 = OpLabel + %r0 = OpVariable %_ptr_Function_v4float Function + %23 = OpAccessChain %_ptr_PushConstant_v4float %19 %uint_0 %uint_0 + %25 = OpLoad %v4float %23 + %26 = OpLoad %v4float %r0 + %27 = OpVectorShuffle %v4float %26 %25 6 7 2 3 + OpStore %r0 %27 + %31 = OpLoad %v4float %r0 + %32 = OpVectorShuffle %v4float %31 %30 0 1 4 5 + OpStore %r0 %32 + %36 = OpLoad %6 %t0 + %37 = OpLoad %33 %dummy_sampler + %39 = OpSampledImage %38 %36 %37 + %40 = OpImage %6 %39 + %41 = OpLoad %v4float %r0 + %42 = OpVectorShuffle %v2float %41 %41 0 1 + %44 = OpBitcast %v2int %42 + %47 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_3 + %48 = OpLoad %float %47 + %49 = OpBitcast %int %48 + %54 = OpImageFetch %v4float %40 %44 Lod|ConstOffset %49 %52 + OpStore %o0 %54 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..083c85d9b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,51 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 32 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %add_value_f1_f1_ "add_value(f1;f1;" + OpName %v "v" + OpName %w "w" + OpName %FragColor "FragColor" + OpName %Registers "Registers" + OpMemberName %Registers 0 "foo" + OpName %registers "registers" + OpDecorate %FragColor Location 0 + OpMemberDecorate %Registers 0 Offset 0 + OpDecorate %Registers Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %8 = OpTypeFunction %float %float %float +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %float_10 = OpConstant %float 10 + %Registers = OpTypeStruct %float +%_ptr_PushConstant_Registers = OpTypePointer PushConstant %Registers + %registers = OpVariable %_ptr_PushConstant_Registers PushConstant + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_PushConstant_float = OpTypePointer PushConstant %float + %main = OpFunction %void None %3 + %5 = OpLabel + %29 = OpAccessChain %_ptr_PushConstant_float %registers %int_0 + %30 = OpLoad %float %29 + %31 = OpFunctionCall %float %add_value_f1_f1_ %float_10 %30 + OpStore %FragColor %31 + OpReturn + OpFunctionEnd +%add_value_f1_f1_ = OpFunction %float None %8 + %v = OpFunctionParameter %float + %w = OpFunctionParameter %float + %12 = OpLabel + %15 = OpFAdd %float %v %w + OpReturnValue %15 + OpFunctionEnd diff --git 
a/3rdparty/spirv-cross/shaders-msl/asm/frag/phi-loop-variable.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/phi-loop-variable.asm.frag new file mode 100644 index 000000000..74c46b4af --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/phi-loop-variable.asm.frag @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 59 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %4 "main" + OpExecutionMode %4 OriginUpperLeft + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 +%mat2v2float = OpTypeMatrix %v2float 2 +%_ptr_Function_mat2v2float = OpTypePointer Function %mat2v2float + %v3float = OpTypeVector %float 3 + %11 = OpTypeFunction %v3float %_ptr_Function_mat2v2float +%_ptr_Function_v3float = OpTypePointer Function %v3float + %float_1 = OpConstant %float 1 + %18 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_35 = OpConstant %int 35 + %int_0 = OpConstant %int 0 + %bool = OpTypeBool + %int_1 = OpConstant %int 1 + %4 = OpFunction %void None %3 + %5 = OpLabel + OpBranch %48 + %48 = OpLabel + %58 = OpPhi %int %int_35 %5 %56 %50 + OpLoopMerge %49 %50 None + OpBranch %51 + %51 = OpLabel + %53 = OpSGreaterThanEqual %bool %58 %int_0 + OpBranchConditional %53 %54 %49 + %54 = OpLabel + OpBranch %50 + %50 = OpLabel + %56 = OpISub %int %58 %int_1 + OpBranch %48 + %49 = OpLabel + OpReturn + OpFunctionEnd + %13 = OpFunction %v3float None %11 + %12 = OpFunctionParameter %_ptr_Function_mat2v2float + %14 = OpLabel + %16 = OpVariable %_ptr_Function_v3float Function + %21 = OpVariable %_ptr_Function_int Function + OpStore %16 %18 + OpStore %21 %int_35 + OpBranch %23 + %23 = OpLabel + OpLoopMerge %25 %26 None + OpBranch %27 + %27 = OpLabel + %28 = OpLoad %int %21 + %31 = OpSGreaterThanEqual %bool %28 %int_0 + OpBranchConditional %31 %24 %25 + %24 = OpLabel + OpBranch %26 + %26 = OpLabel + %32 = OpLoad %int %21 + %34 = OpISub %int %32 %int_1 + OpStore %21 %34 + OpBranch %23 + %25 = OpLabel + %35 = OpLoad %v3float %16 + OpReturnValue %35 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/srem.asm.frag new file mode 100644 index 000000000..c6f8e27cb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/srem.asm.frag @@ -0,0 +1,43 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 23 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vA %vB + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vA "vA" + OpName %vB "vB" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %vA Flat + OpDecorate %vA Location 0 + OpDecorate %vB Flat + OpDecorate %vB Location 1 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %int = OpTypeInt 32 1 + %v4int = OpTypeVector %int 4 +%_ptr_Input_v4int = OpTypePointer Input %v4int + %vA = OpVariable %_ptr_Input_v4int Input + %vB = OpVariable %_ptr_Input_v4int Input + %main = OpFunction %void None %3 + %5 = 
OpLabel + %14 = OpLoad %v4int %vA + %16 = OpLoad %v4int %vB + %17 = OpLoad %v4int %vA + %18 = OpLoad %v4int %vB + %19 = OpSRem %v4int %17 %18 + %20 = OpConvertSToF %v4float %19 + OpStore %FragColor %20 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..53dc63809 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,46 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 26 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %gl_FragCoord + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uTexture "uTexture" + OpName %gl_FragCoord "gl_FragCoord" + OpDecorate %FragColor Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 0 + OpDecorate %gl_FragCoord BuiltIn FragCoord + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown + %11 = OpTypeSampledImage %10 +%_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 + %uTexture = OpVariable %_ptr_UniformConstant_11 UniformConstant +%_ptr_Input_v4float = OpTypePointer Input %v4float +%gl_FragCoord = OpVariable %_ptr_Input_v4float Input + %v2float = OpTypeVector %float 2 + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 + %int_0 = OpConstant %int 0 + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpLoad %11 %uTexture + %18 = OpLoad %v4float %gl_FragCoord + %19 = OpVectorShuffle %v2float %18 %18 0 1 + %22 = OpConvertFToS %v2int %19 + %24 = OpImage %10 %14 + %25 = OpImageFetch %v4float %24 %22 + OpStore %FragColor %25 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/undef-variable-store.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/undef-variable-store.asm.frag new file mode 100644 index 000000000..966c2d9d5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/undef-variable-store.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 50 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %fragmentProgram "main" %_entryPointOutput + OpExecutionMode %fragmentProgram OriginUpperLeft + OpSource HLSL 500 + OpName %fragmentProgram "fragmentProgram" + OpName %_fragmentProgram_ "@fragmentProgram(" + OpName %uv "uv" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %float_0 = OpConstant %float 0 + %15 = OpConstantComposite %v2float %float_0 %float_0 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Function_float = OpTypePointer Function %float + %bool = OpTypeBool + %float_1 = OpConstant %float 1 + %26 = OpConstantComposite %v4float %float_1 %float_0 %float_0 %float_1 + %29 = OpConstantComposite %v4float %float_1 %float_1 
%float_0 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %false = OpConstantFalse %bool +%fragmentProgram = OpFunction %void None %3 + %5 = OpLabel + %35 = OpVariable %_ptr_Function_v2float Function + %37 = OpVariable %_ptr_Function_v4float Function + OpBranch %38 + %38 = OpLabel + OpLoopMerge %39 %40 None + OpBranch %41 + %41 = OpLabel + OpStore %35 %15 + %42 = OpAccessChain %_ptr_Function_float %35 %uint_0 + %43 = OpLoad %float %42 + %44 = OpFOrdNotEqual %bool %43 %float_0 + OpSelectionMerge %45 None + OpBranchConditional %44 %46 %47 + %46 = OpLabel + OpStore %37 %26 + OpBranch %39 + %47 = OpLabel + OpStore %37 %29 + OpBranch %39 + %45 = OpLabel + %48 = OpUndef %v4float + OpStore %37 %48 + OpBranch %39 + %40 = OpLabel + OpBranchConditional %false %38 %39 + %39 = OpLabel + %34 = OpLoad %v4float %37 + OpStore %_entryPointOutput %34 + OpReturn + OpFunctionEnd +%_fragmentProgram_ = OpFunction %v4float None %8 + %10 = OpLabel + %uv = OpVariable %_ptr_Function_v2float Function + OpStore %uv %15 + %19 = OpAccessChain %_ptr_Function_float %uv %uint_0 + %20 = OpLoad %float %19 + %22 = OpFOrdNotEqual %bool %20 %float_0 + OpSelectionMerge %24 None + OpBranchConditional %22 %23 %28 + %23 = OpLabel + OpReturnValue %26 + %28 = OpLabel + OpReturnValue %29 + %24 = OpLabel + %31 = OpUndef %v4float + OpReturnValue %31 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/unknown-depth-state.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/unknown-depth-state.asm.frag new file mode 100644 index 000000000..89036f0eb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/unknown-depth-state.asm.frag @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 44 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %vUV %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %sample_combined_ "sample_combined(" + OpName %sample_separate_ "sample_separate(" + OpName %uShadow "uShadow" + OpName %vUV "vUV" + OpName %uTexture "uTexture" + OpName %uSampler "uSampler" + OpName %FragColor "FragColor" + OpDecorate %uShadow DescriptorSet 0 + OpDecorate %uShadow Binding 0 + OpDecorate %vUV Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 1 + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 2 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %7 = OpTypeFunction %float + %12 = OpTypeImage %float 2D 2 0 0 1 Unknown + %13 = OpTypeSampledImage %12 +%_ptr_UniformConstant_13 = OpTypePointer UniformConstant %13 + %uShadow = OpVariable %_ptr_UniformConstant_13 UniformConstant + %v3float = OpTypeVector %float 3 +%_ptr_Input_v3float = OpTypePointer Input %v3float + %vUV = OpVariable %_ptr_Input_v3float Input +%_ptr_UniformConstant_25 = OpTypePointer UniformConstant %12 + %uTexture = OpVariable %_ptr_UniformConstant_25 UniformConstant + %29 = OpTypeSampler +%_ptr_UniformConstant_29 = OpTypePointer UniformConstant %29 + %uSampler = OpVariable %_ptr_UniformConstant_29 UniformConstant +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %41 = OpFunctionCall %float %sample_combined_ + 
%42 = OpFunctionCall %float %sample_separate_ + %43 = OpFAdd %float %41 %42 + OpStore %FragColor %43 + OpReturn + OpFunctionEnd +%sample_combined_ = OpFunction %float None %7 + %9 = OpLabel + %16 = OpLoad %13 %uShadow + %20 = OpLoad %v3float %vUV + %21 = OpCompositeExtract %float %20 2 + %22 = OpImageSampleDrefImplicitLod %float %16 %20 %21 + OpReturnValue %22 + OpFunctionEnd +%sample_separate_ = OpFunction %float None %7 + %11 = OpLabel + %28 = OpLoad %12 %uTexture + %32 = OpLoad %29 %uSampler + %33 = OpSampledImage %13 %28 %32 + %34 = OpLoad %v3float %vUV + %35 = OpCompositeExtract %float %34 2 + %36 = OpImageSampleDrefImplicitLod %float %33 %34 %35 + OpReturnValue %36 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/unord-relational-op.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/unord-relational-op.asm.frag new file mode 100644 index 000000000..3e4cd6c2c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/unord-relational-op.asm.frag @@ -0,0 +1,205 @@ +; SPIR-V +; Version: 1.3 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 122 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %c %d %e %f %g %h %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 460 + OpName %main "main" + OpName %t0 "t0" + OpName %a "a" + OpName %t1 "t1" + OpName %b "b" + OpName %c1 "c1" + OpName %c2 "c2" + OpName %c3 "c3" + OpName %c4 "c4" + OpName %c5 "c5" + OpName %c6 "c6" + OpName %c7 "c7" + OpName %c "c" + OpName %d "d" + OpName %c8 "c8" + OpName %c9 "c9" + OpName %c10 "c10" + OpName %c11 "c11" + OpName %c12 "c12" + OpName %c13 "c13" + OpName %e "e" + OpName %f "f" + OpName %c14 "c14" + OpName %c15 "c15" + OpName %c16 "c16" + OpName %c17 "c17" + OpName %c18 "c18" + OpName %c19 "c19" + OpName %g "g" + OpName %h "h" + OpName %c20 "c20" + OpName %c21 "c21" + OpName %c22 "c22" + OpName %c23 "c23" + OpName %c24 "c24" + OpName %FragColor "FragColor" + OpDecorate %a SpecId 1 + OpDecorate %b SpecId 2 + OpDecorate %c Location 2 + OpDecorate %d Location 3 + OpDecorate %e Location 4 + OpDecorate %f Location 5 + OpDecorate %g Location 6 + OpDecorate %h Location 7 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %a = OpSpecConstant %float 1 + %b = OpSpecConstant %float 2 + %bool = OpTypeBool +%_ptr_Function_bool = OpTypePointer Function %bool + %v2bool = OpTypeVector %bool 2 +%_ptr_Function_v2bool = OpTypePointer Function %v2bool + %v2float = OpTypeVector %float 2 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %c = OpVariable %_ptr_Input_v2float Input + %d = OpVariable %_ptr_Input_v2float Input + %v3bool = OpTypeVector %bool 3 +%_ptr_Function_v3bool = OpTypePointer Function %v3bool + %v3float = OpTypeVector %float 3 +%_ptr_Input_v3float = OpTypePointer Input %v3float + %e = OpVariable %_ptr_Input_v3float Input + %f = OpVariable %_ptr_Input_v3float Input + %v4bool = OpTypeVector %bool 4 +%_ptr_Function_v4bool = OpTypePointer Function %v4bool + %v4float = OpTypeVector %float 4 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %g = OpVariable %_ptr_Input_v4float Input + %h = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %t0 = OpVariable %_ptr_Function_float Function + %t1 = OpVariable 
%_ptr_Function_float Function + %c1 = OpVariable %_ptr_Function_bool Function + %c2 = OpVariable %_ptr_Function_bool Function + %c3 = OpVariable %_ptr_Function_bool Function + %c4 = OpVariable %_ptr_Function_bool Function + %c5 = OpVariable %_ptr_Function_bool Function + %c6 = OpVariable %_ptr_Function_bool Function + %c7 = OpVariable %_ptr_Function_v2bool Function + %c8 = OpVariable %_ptr_Function_v2bool Function + %c9 = OpVariable %_ptr_Function_v2bool Function + %c10 = OpVariable %_ptr_Function_v2bool Function + %c11 = OpVariable %_ptr_Function_v2bool Function + %c12 = OpVariable %_ptr_Function_v2bool Function + %c13 = OpVariable %_ptr_Function_v3bool Function + %c14 = OpVariable %_ptr_Function_v3bool Function + %c15 = OpVariable %_ptr_Function_v3bool Function + %c16 = OpVariable %_ptr_Function_v3bool Function + %c17 = OpVariable %_ptr_Function_v3bool Function + %c18 = OpVariable %_ptr_Function_v3bool Function + %c19 = OpVariable %_ptr_Function_v4bool Function + %c20 = OpVariable %_ptr_Function_v4bool Function + %c21 = OpVariable %_ptr_Function_v4bool Function + %c22 = OpVariable %_ptr_Function_v4bool Function + %c23 = OpVariable %_ptr_Function_v4bool Function + %c24 = OpVariable %_ptr_Function_v4bool Function + OpStore %t0 %a + OpStore %t1 %b + %15 = OpFUnordEqual %bool %a %b + OpStore %c1 %15 + %17 = OpFUnordNotEqual %bool %a %b + OpStore %c2 %17 + %19 = OpFUnordLessThan %bool %a %b + OpStore %c3 %19 + %21 = OpFUnordGreaterThan %bool %a %b + OpStore %c4 %21 + %23 = OpFUnordLessThanEqual %bool %a %b + OpStore %c5 %23 + %25 = OpFUnordGreaterThanEqual %bool %a %b + OpStore %c6 %25 + %32 = OpLoad %v2float %c + %34 = OpLoad %v2float %d + %35 = OpFUnordEqual %v2bool %32 %34 + OpStore %c7 %35 + %37 = OpLoad %v2float %c + %38 = OpLoad %v2float %d + %39 = OpFUnordNotEqual %v2bool %37 %38 + OpStore %c8 %39 + %41 = OpLoad %v2float %c + %42 = OpLoad %v2float %d + %43 = OpFUnordLessThan %v2bool %41 %42 + OpStore %c9 %43 + %45 = OpLoad %v2float %c + %46 = OpLoad %v2float %d + %47 = OpFUnordGreaterThan %v2bool %45 %46 + OpStore %c10 %47 + %49 = OpLoad %v2float %c + %50 = OpLoad %v2float %d + %51 = OpFUnordLessThanEqual %v2bool %49 %50 + OpStore %c11 %51 + %53 = OpLoad %v2float %c + %54 = OpLoad %v2float %d + %55 = OpFUnordGreaterThanEqual %v2bool %53 %54 + OpStore %c12 %55 + %62 = OpLoad %v3float %e + %64 = OpLoad %v3float %f + %65 = OpFUnordEqual %v3bool %62 %64 + OpStore %c13 %65 + %67 = OpLoad %v3float %e + %68 = OpLoad %v3float %f + %69 = OpFUnordNotEqual %v3bool %67 %68 + OpStore %c14 %69 + %71 = OpLoad %v3float %e + %72 = OpLoad %v3float %f + %73 = OpFUnordLessThan %v3bool %71 %72 + OpStore %c15 %73 + %75 = OpLoad %v3float %e + %76 = OpLoad %v3float %f + %77 = OpFUnordGreaterThan %v3bool %75 %76 + OpStore %c16 %77 + %79 = OpLoad %v3float %e + %80 = OpLoad %v3float %f + %81 = OpFUnordLessThanEqual %v3bool %79 %80 + OpStore %c17 %81 + %83 = OpLoad %v3float %e + %84 = OpLoad %v3float %f + %85 = OpFUnordGreaterThanEqual %v3bool %83 %84 + OpStore %c18 %85 + %92 = OpLoad %v4float %g + %94 = OpLoad %v4float %h + %95 = OpFUnordEqual %v4bool %92 %94 + OpStore %c19 %95 + %97 = OpLoad %v4float %g + %98 = OpLoad %v4float %h + %99 = OpFUnordNotEqual %v4bool %97 %98 + OpStore %c20 %99 + %101 = OpLoad %v4float %g + %102 = OpLoad %v4float %h + %103 = OpFUnordLessThan %v4bool %101 %102 + OpStore %c21 %103 + %105 = OpLoad %v4float %g + %106 = OpLoad %v4float %h + %107 = OpFUnordGreaterThan %v4bool %105 %106 + OpStore %c22 %107 + %109 = OpLoad %v4float %g + %110 = OpLoad %v4float %h + %111 = 
OpFUnordLessThanEqual %v4bool %109 %110 + OpStore %c23 %111 + %113 = OpLoad %v4float %g + %114 = OpLoad %v4float %h + %115 = OpFUnordGreaterThanEqual %v4bool %113 %114 + OpStore %c24 %115 + %118 = OpLoad %float %t0 + %119 = OpLoad %float %t1 + %120 = OpFAdd %float %118 %119 + %121 = OpCompositeConstruct %v4float %120 %120 %120 %120 + OpStore %FragColor %121 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..e2ce2eb56 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/unreachable.asm.frag @@ -0,0 +1,61 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 47 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %counter %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %counter "counter" + OpName %FragColor "FragColor" + OpDecorate %counter Flat + OpDecorate %counter Location 0 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %int = OpTypeInt 32 1 +%_ptr_Input_int = OpTypePointer Input %int + %counter = OpVariable %_ptr_Input_int Input + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %float_10 = OpConstant %float 10 + %21 = OpConstantComposite %v4float %float_10 %float_10 %float_10 %float_10 + %float_30 = OpConstant %float 30 + %25 = OpConstantComposite %v4float %float_30 %float_30 %float_30 %float_30 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %false = OpConstantFalse %bool + %44 = OpUndef %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + OpBranch %33 + %33 = OpLabel + %45 = OpPhi %v4float %44 %5 %44 %35 + OpLoopMerge %34 %35 None + OpBranch %36 + %36 = OpLabel + %37 = OpLoad %int %counter + %38 = OpIEqual %bool %37 %int_10 + OpSelectionMerge %39 None + OpBranchConditional %38 %40 %41 + %40 = OpLabel + OpBranch %34 + %41 = OpLabel + OpBranch %34 + %39 = OpLabel + OpUnreachable + %35 = OpLabel + OpBranchConditional %false %33 %34 + %34 = OpLabel + %46 = OpPhi %v4float %21 %40 %25 %41 %44 %35 + OpStore %FragColor %46 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag b/3rdparty/spirv-cross/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag new file mode 100644 index 000000000..92652161c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/frag/vector-shuffle-oom.asm.frag @@ -0,0 +1,886 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 25007 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %5663 "main" %5800 %gl_FragCoord %4317 + OpExecutionMode %5663 OriginUpperLeft + OpMemberDecorate %_struct_1116 0 Offset 0 + OpMemberDecorate %_struct_1116 1 Offset 16 + OpMemberDecorate %_struct_1116 2 Offset 32 + OpDecorate %_struct_1116 Block + OpDecorate %22044 DescriptorSet 0 + OpDecorate %22044 Binding 0 + OpDecorate %5785 DescriptorSet 0 + OpDecorate %5785 Binding 14 + OpDecorate %5688 DescriptorSet 0 + OpDecorate %5688 Binding 6 + OpMemberDecorate %_struct_994 0 Offset 0 + OpMemberDecorate %_struct_994 1 Offset 16 + 
OpMemberDecorate %_struct_994 2 Offset 28 + OpMemberDecorate %_struct_994 3 Offset 32 + OpMemberDecorate %_struct_994 4 Offset 44 + OpMemberDecorate %_struct_994 5 Offset 48 + OpMemberDecorate %_struct_994 6 Offset 60 + OpMemberDecorate %_struct_994 7 Offset 64 + OpMemberDecorate %_struct_994 8 Offset 76 + OpMemberDecorate %_struct_994 9 Offset 80 + OpMemberDecorate %_struct_994 10 Offset 92 + OpMemberDecorate %_struct_994 11 Offset 96 + OpMemberDecorate %_struct_994 12 Offset 108 + OpMemberDecorate %_struct_994 13 Offset 112 + OpMemberDecorate %_struct_994 14 Offset 120 + OpMemberDecorate %_struct_994 15 Offset 128 + OpMemberDecorate %_struct_994 16 Offset 140 + OpMemberDecorate %_struct_994 17 Offset 144 + OpMemberDecorate %_struct_994 18 Offset 148 + OpMemberDecorate %_struct_994 19 Offset 152 + OpMemberDecorate %_struct_994 20 Offset 156 + OpMemberDecorate %_struct_994 21 Offset 160 + OpMemberDecorate %_struct_994 22 Offset 176 + OpMemberDecorate %_struct_994 23 RowMajor + OpMemberDecorate %_struct_994 23 Offset 192 + OpMemberDecorate %_struct_994 23 MatrixStride 16 + OpMemberDecorate %_struct_994 24 Offset 256 + OpDecorate %_struct_994 Block + OpDecorate %12348 DescriptorSet 0 + OpDecorate %12348 Binding 2 + OpDecorate %3312 DescriptorSet 0 + OpDecorate %3312 Binding 13 + OpDecorate %4646 DescriptorSet 0 + OpDecorate %4646 Binding 5 + OpDecorate %4862 DescriptorSet 0 + OpDecorate %4862 Binding 4 + OpDecorate %3594 DescriptorSet 0 + OpDecorate %3594 Binding 3 + OpDecorate %_arr_mat4v4float_uint_2 ArrayStride 64 + OpDecorate %_arr_v4float_uint_2 ArrayStride 16 + OpMemberDecorate %_struct_408 0 RowMajor + OpMemberDecorate %_struct_408 0 Offset 0 + OpMemberDecorate %_struct_408 0 MatrixStride 16 + OpMemberDecorate %_struct_408 1 RowMajor + OpMemberDecorate %_struct_408 1 Offset 64 + OpMemberDecorate %_struct_408 1 MatrixStride 16 + OpMemberDecorate %_struct_408 2 RowMajor + OpMemberDecorate %_struct_408 2 Offset 128 + OpMemberDecorate %_struct_408 2 MatrixStride 16 + OpMemberDecorate %_struct_408 3 RowMajor + OpMemberDecorate %_struct_408 3 Offset 192 + OpMemberDecorate %_struct_408 3 MatrixStride 16 + OpMemberDecorate %_struct_408 4 Offset 256 + OpMemberDecorate %_struct_408 5 Offset 272 + OpMemberDecorate %_struct_408 6 Offset 288 + OpMemberDecorate %_struct_408 7 Offset 292 + OpMemberDecorate %_struct_408 8 Offset 296 + OpMemberDecorate %_struct_408 9 Offset 300 + OpMemberDecorate %_struct_408 10 Offset 304 + OpMemberDecorate %_struct_408 11 Offset 316 + OpMemberDecorate %_struct_408 12 Offset 320 + OpMemberDecorate %_struct_408 13 Offset 332 + OpMemberDecorate %_struct_408 14 Offset 336 + OpMemberDecorate %_struct_408 15 Offset 348 + OpMemberDecorate %_struct_408 16 Offset 352 + OpMemberDecorate %_struct_408 17 Offset 364 + OpMemberDecorate %_struct_408 18 Offset 368 + OpMemberDecorate %_struct_408 19 Offset 372 + OpMemberDecorate %_struct_408 20 Offset 376 + OpMemberDecorate %_struct_408 21 Offset 384 + OpMemberDecorate %_struct_408 22 Offset 392 + OpMemberDecorate %_struct_408 23 Offset 400 + OpMemberDecorate %_struct_408 24 Offset 416 + OpMemberDecorate %_struct_408 25 Offset 424 + OpMemberDecorate %_struct_408 26 Offset 432 + OpMemberDecorate %_struct_408 27 Offset 448 + OpMemberDecorate %_struct_408 28 Offset 460 + OpMemberDecorate %_struct_408 29 Offset 464 + OpMemberDecorate %_struct_408 30 Offset 468 + OpMemberDecorate %_struct_408 31 Offset 472 + OpMemberDecorate %_struct_408 32 Offset 476 + OpMemberDecorate %_struct_408 33 Offset 480 + OpMemberDecorate %_struct_408 34 Offset 
488 + OpMemberDecorate %_struct_408 35 Offset 492 + OpMemberDecorate %_struct_408 36 Offset 496 + OpMemberDecorate %_struct_408 37 RowMajor + OpMemberDecorate %_struct_408 37 Offset 512 + OpMemberDecorate %_struct_408 37 MatrixStride 16 + OpMemberDecorate %_struct_408 38 Offset 640 + OpDecorate %_struct_408 Block + OpDecorate %15259 DescriptorSet 0 + OpDecorate %15259 Binding 1 + OpDecorate %5800 Location 0 + OpDecorate %gl_FragCoord BuiltIn FragCoord + OpDecorate %4317 Location 0 + OpMemberDecorate %_struct_1395 0 Offset 0 + OpMemberDecorate %_struct_1395 1 Offset 16 + OpMemberDecorate %_struct_1395 2 Offset 32 + OpMemberDecorate %_struct_1395 3 Offset 40 + OpMemberDecorate %_struct_1395 4 Offset 48 + OpMemberDecorate %_struct_1395 5 Offset 60 + OpMemberDecorate %_struct_1395 6 Offset 64 + OpMemberDecorate %_struct_1395 7 Offset 76 + OpMemberDecorate %_struct_1395 8 Offset 80 + OpMemberDecorate %_struct_1395 9 Offset 96 + OpMemberDecorate %_struct_1395 10 Offset 112 + OpMemberDecorate %_struct_1395 11 Offset 128 + OpMemberDecorate %_struct_1395 12 Offset 140 + OpMemberDecorate %_struct_1395 13 Offset 144 + OpMemberDecorate %_struct_1395 14 Offset 156 + OpMemberDecorate %_struct_1395 15 Offset 160 + OpMemberDecorate %_struct_1395 16 Offset 176 + OpMemberDecorate %_struct_1395 17 Offset 192 + OpMemberDecorate %_struct_1395 18 Offset 204 + OpMemberDecorate %_struct_1395 19 Offset 208 + OpMemberDecorate %_struct_1395 20 Offset 224 + OpDecorate %_struct_1395 Block + OpMemberDecorate %_struct_1018 0 Offset 0 + OpDecorate %_struct_1018 Block + %void = OpTypeVoid + %1282 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %v4float = OpTypeVector %float 4 + %v3float = OpTypeVector %float 3 +%_struct_1017 = OpTypeStruct %v4float +%_struct_1116 = OpTypeStruct %v4float %float %v4float +%_ptr_Uniform__struct_1116 = OpTypePointer Uniform %_struct_1116 + %22044 = OpVariable %_ptr_Uniform__struct_1116 Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %150 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_150 = OpTypePointer UniformConstant %150 + %5785 = OpVariable %_ptr_UniformConstant_150 UniformConstant + %508 = OpTypeSampler +%_ptr_UniformConstant_508 = OpTypePointer UniformConstant %508 + %5688 = OpVariable %_ptr_UniformConstant_508 UniformConstant + %510 = OpTypeSampledImage %150 + %float_0 = OpConstant %float 0 + %uint = OpTypeInt 32 0 + %int_1 = OpConstant %int 1 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %float_1 = OpConstant %float 1 +%mat4v4float = OpTypeMatrix %v4float 4 +%_struct_994 = OpTypeStruct %v3float %v3float %float %v3float %float %v3float %float %v3float %float %v3float %float %v3float %float %v2float %v2float %v3float %float %float %float %float %float %v4float %v4float %mat4v4float %v4float +%_ptr_Uniform__struct_994 = OpTypePointer Uniform %_struct_994 + %12348 = OpVariable %_ptr_Uniform__struct_994 Uniform + %int_5 = OpConstant %int 5 +%_ptr_Uniform_v3float = OpTypePointer Uniform %v3float + %3312 = OpVariable %_ptr_UniformConstant_150 UniformConstant + %4646 = OpVariable %_ptr_UniformConstant_508 UniformConstant + %bool = OpTypeBool + %4862 = OpVariable %_ptr_UniformConstant_150 UniformConstant + %3594 = OpVariable %_ptr_UniformConstant_508 UniformConstant + %uint_2 = OpConstant %uint 2 + %2938 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 +%_arr_mat4v4float_uint_2 = OpTypeArray %mat4v4float %uint_2 +%_arr_v4float_uint_2 = 
OpTypeArray %v4float %uint_2 +%_struct_408 = OpTypeStruct %mat4v4float %mat4v4float %mat4v4float %mat4v4float %v4float %v4float %float %float %float %float %v3float %float %v3float %float %v3float %float %v3float %float %float %float %v2float %v2float %v2float %v4float %v2float %v2float %v2float %v3float %float %float %float %float %float %v2float %float %float %v3float %_arr_mat4v4float_uint_2 %_arr_v4float_uint_2 +%_ptr_Uniform__struct_408 = OpTypePointer Uniform %_struct_408 + %15259 = OpVariable %_ptr_Uniform__struct_408 Uniform + %int_23 = OpConstant %int 23 + %int_2 = OpConstant %int 2 + %float_n2 = OpConstant %float -2 + %float_0_5 = OpConstant %float 0.5 + %1196 = OpConstantComposite %v3float %float_0 %float_n2 %float_0_5 + %float_n1 = OpConstant %float -1 + %836 = OpConstantComposite %v3float %float_n1 %float_n1 %float_0_5 + %float_0_75 = OpConstant %float 0.75 + %1367 = OpConstantComposite %v3float %float_0 %float_n1 %float_0_75 + %141 = OpConstantComposite %v3float %float_1 %float_n1 %float_0_5 + %38 = OpConstantComposite %v3float %float_n2 %float_0 %float_0_5 + %95 = OpConstantComposite %v3float %float_n1 %float_0 %float_0_75 + %626 = OpConstantComposite %v3float %float_0 %float_0 %float_1 + %2411 = OpConstantComposite %v3float %float_1 %float_0 %float_0_75 + %float_2 = OpConstant %float 2 + %2354 = OpConstantComposite %v3float %float_2 %float_0 %float_0_5 + %837 = OpConstantComposite %v3float %float_n1 %float_1 %float_0_5 + %1368 = OpConstantComposite %v3float %float_0 %float_1 %float_0_75 + %142 = OpConstantComposite %v3float %float_1 %float_1 %float_0_5 + %1197 = OpConstantComposite %v3float %float_0 %float_2 %float_0_5 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %5800 = OpVariable %_ptr_Input_v2float Input +%_ptr_Input_v4float = OpTypePointer Input %v4float +%gl_FragCoord = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %4317 = OpVariable %_ptr_Output_v4float Output +%_struct_1395 = OpTypeStruct %v4float %v4float %v2float %v2float %v3float %float %v3float %float %v4float %v4float %v4float %v3float %float %v3float %float %v3float %v4float %v3float %float %v3float %v2float +%_struct_1018 = OpTypeStruct %v4float + %10264 = OpUndef %_struct_1017 + %5663 = OpFunction %void None %1282 + %25006 = OpLabel + %17463 = OpLoad %v4float %gl_FragCoord + %13863 = OpCompositeInsert %_struct_1017 %2938 %10264 0 + %22969 = OpVectorShuffle %v2float %17463 %17463 0 1 + %13206 = OpAccessChain %_ptr_Uniform_v4float %15259 %int_23 + %10343 = OpLoad %v4float %13206 + %7422 = OpVectorShuffle %v2float %10343 %10343 0 1 + %19927 = OpFMul %v2float %22969 %7422 + %18174 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_2 + %16206 = OpLoad %v4float %18174 + %20420 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %21354 = OpLoad %v4float %20420 + %7688 = OpVectorShuffle %v4float %21354 %21354 0 1 0 1 + %17581 = OpFMul %v4float %16206 %7688 + %10673 = OpVectorShuffle %v2float %1196 %1196 0 1 + %18824 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10344 = OpLoad %v4float %18824 + %8638 = OpVectorShuffle %v2float %10344 %10344 0 1 + %9197 = OpFMul %v2float %10673 %8638 + %18505 = OpFAdd %v2float %19927 %9197 + %7011 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21058 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13149 = OpExtInst %v2float %1 FClamp %18505 %7011 %21058 + %23584 = OpLoad %150 %5785 + %10339 = OpLoad %508 %5688 + %12147 = OpSampledImage %510 %23584 %10339 + %15371 = OpImageSampleExplicitLod %v4float %12147 %13149 Lod 
%float_0 + %15266 = OpCompositeExtract %float %15371 3 + %12116 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12972 = OpLoad %float %12116 + %15710 = OpFMul %float %15266 %12972 + %15279 = OpExtInst %float %1 FClamp %15710 %float_0 %float_1 + %22213 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11756 = OpLoad %v3float %22213 + %12103 = OpVectorTimesScalar %v3float %11756 %15279 + %15516 = OpLoad %150 %3312 + %24569 = OpLoad %508 %4646 + %12148 = OpSampledImage %510 %15516 %24569 + %17670 = OpImageSampleExplicitLod %v4float %12148 %13149 Lod %float_0 + %16938 = OpCompositeExtract %float %17670 1 + %14185 = OpFOrdGreaterThan %bool %16938 %float_0 + OpSelectionMerge %22307 DontFlatten + OpBranchConditional %14185 %12821 %22307 + %12821 = OpLabel + %13239 = OpLoad %150 %4862 + %19960 = OpLoad %508 %3594 + %12149 = OpSampledImage %510 %13239 %19960 + %15675 = OpImageSampleExplicitLod %v4float %12149 %13149 Lod %float_0 + %13866 = OpCompositeExtract %float %17670 1 + %12427 = OpCompositeExtract %float %17670 2 + %23300 = OpFMul %float %13866 %12427 + %17612 = OpExtInst %float %1 FClamp %23300 %float_0 %float_1 + %20291 = OpVectorShuffle %v3float %15675 %15675 0 1 2 + %11186 = OpVectorTimesScalar %v3float %20291 %17612 + %15293 = OpFAdd %v3float %12103 %11186 + OpBranch %22307 + %22307 = OpLabel + %7719 = OpPhi %v3float %12103 %25006 %15293 %12821 + %23399 = OpVectorTimesScalar %v3float %7719 %float_0_5 + %9339 = OpFAdd %float %float_0 %float_0_5 + %16235 = OpVectorShuffle %v3float %2938 %2938 0 1 2 + %22177 = OpFAdd %v3float %16235 %23399 + %15527 = OpVectorShuffle %v4float %2938 %22177 4 5 6 3 + %6434 = OpCompositeInsert %_struct_1017 %15527 %13863 0 + %24572 = OpVectorShuffle %v2float %836 %836 0 1 + %13207 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10345 = OpLoad %v4float %13207 + %8639 = OpVectorShuffle %v2float %10345 %10345 0 1 + %9198 = OpFMul %v2float %24572 %8639 + %18506 = OpFAdd %v2float %19927 %9198 + %7012 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21059 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13150 = OpExtInst %v2float %1 FClamp %18506 %7012 %21059 + %23585 = OpLoad %150 %5785 + %10340 = OpLoad %508 %5688 + %12150 = OpSampledImage %510 %23585 %10340 + %15372 = OpImageSampleExplicitLod %v4float %12150 %13150 Lod %float_0 + %15267 = OpCompositeExtract %float %15372 3 + %12117 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12973 = OpLoad %float %12117 + %15711 = OpFMul %float %15267 %12973 + %15280 = OpExtInst %float %1 FClamp %15711 %float_0 %float_1 + %22214 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11757 = OpLoad %v3float %22214 + %12104 = OpVectorTimesScalar %v3float %11757 %15280 + %15517 = OpLoad %150 %3312 + %24570 = OpLoad %508 %4646 + %12151 = OpSampledImage %510 %15517 %24570 + %17671 = OpImageSampleExplicitLod %v4float %12151 %13150 Lod %float_0 + %16939 = OpCompositeExtract %float %17671 1 + %14186 = OpFOrdGreaterThan %bool %16939 %float_0 + OpSelectionMerge %22308 DontFlatten + OpBranchConditional %14186 %12822 %22308 + %12822 = OpLabel + %13240 = OpLoad %150 %4862 + %19961 = OpLoad %508 %3594 + %12152 = OpSampledImage %510 %13240 %19961 + %15676 = OpImageSampleExplicitLod %v4float %12152 %13150 Lod %float_0 + %13867 = OpCompositeExtract %float %17671 1 + %12428 = OpCompositeExtract %float %17671 2 + %23301 = OpFMul %float %13867 %12428 + %17613 = OpExtInst %float %1 FClamp %23301 %float_0 %float_1 + %20292 = OpVectorShuffle %v3float %15676 %15676 0 1 2 + %11187 = OpVectorTimesScalar %v3float %20292 %17613 + 
%15294 = OpFAdd %v3float %12104 %11187 + OpBranch %22308 + %22308 = OpLabel + %7720 = OpPhi %v3float %12104 %22307 %15294 %12822 + %23400 = OpVectorTimesScalar %v3float %7720 %float_0_5 + %9340 = OpFAdd %float %9339 %float_0_5 + %16236 = OpVectorShuffle %v3float %15527 %15527 0 1 2 + %22178 = OpFAdd %v3float %16236 %23400 + %15528 = OpVectorShuffle %v4float %15527 %22178 4 5 6 3 + %6435 = OpCompositeInsert %_struct_1017 %15528 %6434 0 + %24573 = OpVectorShuffle %v2float %1367 %1367 0 1 + %13208 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10346 = OpLoad %v4float %13208 + %8640 = OpVectorShuffle %v2float %10346 %10346 0 1 + %9199 = OpFMul %v2float %24573 %8640 + %18507 = OpFAdd %v2float %19927 %9199 + %7013 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21060 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13151 = OpExtInst %v2float %1 FClamp %18507 %7013 %21060 + %23586 = OpLoad %150 %5785 + %10341 = OpLoad %508 %5688 + %12153 = OpSampledImage %510 %23586 %10341 + %15373 = OpImageSampleExplicitLod %v4float %12153 %13151 Lod %float_0 + %15268 = OpCompositeExtract %float %15373 3 + %12118 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12974 = OpLoad %float %12118 + %15712 = OpFMul %float %15268 %12974 + %15281 = OpExtInst %float %1 FClamp %15712 %float_0 %float_1 + %22215 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11758 = OpLoad %v3float %22215 + %12105 = OpVectorTimesScalar %v3float %11758 %15281 + %15518 = OpLoad %150 %3312 + %24571 = OpLoad %508 %4646 + %12154 = OpSampledImage %510 %15518 %24571 + %17672 = OpImageSampleExplicitLod %v4float %12154 %13151 Lod %float_0 + %16940 = OpCompositeExtract %float %17672 1 + %14187 = OpFOrdGreaterThan %bool %16940 %float_0 + OpSelectionMerge %22309 DontFlatten + OpBranchConditional %14187 %12823 %22309 + %12823 = OpLabel + %13241 = OpLoad %150 %4862 + %19962 = OpLoad %508 %3594 + %12155 = OpSampledImage %510 %13241 %19962 + %15677 = OpImageSampleExplicitLod %v4float %12155 %13151 Lod %float_0 + %13868 = OpCompositeExtract %float %17672 1 + %12429 = OpCompositeExtract %float %17672 2 + %23302 = OpFMul %float %13868 %12429 + %17614 = OpExtInst %float %1 FClamp %23302 %float_0 %float_1 + %20293 = OpVectorShuffle %v3float %15677 %15677 0 1 2 + %11188 = OpVectorTimesScalar %v3float %20293 %17614 + %15295 = OpFAdd %v3float %12105 %11188 + OpBranch %22309 + %22309 = OpLabel + %7721 = OpPhi %v3float %12105 %22308 %15295 %12823 + %23401 = OpVectorTimesScalar %v3float %7721 %float_0_75 + %9341 = OpFAdd %float %9340 %float_0_75 + %16237 = OpVectorShuffle %v3float %15528 %15528 0 1 2 + %22179 = OpFAdd %v3float %16237 %23401 + %15529 = OpVectorShuffle %v4float %15528 %22179 4 5 6 3 + %6436 = OpCompositeInsert %_struct_1017 %15529 %6435 0 + %24574 = OpVectorShuffle %v2float %141 %141 0 1 + %13209 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10347 = OpLoad %v4float %13209 + %8641 = OpVectorShuffle %v2float %10347 %10347 0 1 + %9200 = OpFMul %v2float %24574 %8641 + %18508 = OpFAdd %v2float %19927 %9200 + %7014 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21061 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13152 = OpExtInst %v2float %1 FClamp %18508 %7014 %21061 + %23587 = OpLoad %150 %5785 + %10342 = OpLoad %508 %5688 + %12156 = OpSampledImage %510 %23587 %10342 + %15374 = OpImageSampleExplicitLod %v4float %12156 %13152 Lod %float_0 + %15269 = OpCompositeExtract %float %15374 3 + %12119 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12975 = OpLoad %float %12119 + %15713 = OpFMul %float %15269 %12975 + %15282 = 
OpExtInst %float %1 FClamp %15713 %float_0 %float_1 + %22216 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11759 = OpLoad %v3float %22216 + %12106 = OpVectorTimesScalar %v3float %11759 %15282 + %15519 = OpLoad %150 %3312 + %24575 = OpLoad %508 %4646 + %12157 = OpSampledImage %510 %15519 %24575 + %17673 = OpImageSampleExplicitLod %v4float %12157 %13152 Lod %float_0 + %16941 = OpCompositeExtract %float %17673 1 + %14188 = OpFOrdGreaterThan %bool %16941 %float_0 + OpSelectionMerge %22310 DontFlatten + OpBranchConditional %14188 %12824 %22310 + %12824 = OpLabel + %13242 = OpLoad %150 %4862 + %19963 = OpLoad %508 %3594 + %12158 = OpSampledImage %510 %13242 %19963 + %15678 = OpImageSampleExplicitLod %v4float %12158 %13152 Lod %float_0 + %13869 = OpCompositeExtract %float %17673 1 + %12430 = OpCompositeExtract %float %17673 2 + %23303 = OpFMul %float %13869 %12430 + %17615 = OpExtInst %float %1 FClamp %23303 %float_0 %float_1 + %20294 = OpVectorShuffle %v3float %15678 %15678 0 1 2 + %11189 = OpVectorTimesScalar %v3float %20294 %17615 + %15296 = OpFAdd %v3float %12106 %11189 + OpBranch %22310 + %22310 = OpLabel + %7722 = OpPhi %v3float %12106 %22309 %15296 %12824 + %23402 = OpVectorTimesScalar %v3float %7722 %float_0_5 + %9342 = OpFAdd %float %9341 %float_0_5 + %16238 = OpVectorShuffle %v3float %15529 %15529 0 1 2 + %22180 = OpFAdd %v3float %16238 %23402 + %15530 = OpVectorShuffle %v4float %15529 %22180 4 5 6 3 + %6437 = OpCompositeInsert %_struct_1017 %15530 %6436 0 + %24576 = OpVectorShuffle %v2float %38 %38 0 1 + %13210 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10348 = OpLoad %v4float %13210 + %8642 = OpVectorShuffle %v2float %10348 %10348 0 1 + %9201 = OpFMul %v2float %24576 %8642 + %18509 = OpFAdd %v2float %19927 %9201 + %7015 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21062 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13153 = OpExtInst %v2float %1 FClamp %18509 %7015 %21062 + %23588 = OpLoad %150 %5785 + %10349 = OpLoad %508 %5688 + %12159 = OpSampledImage %510 %23588 %10349 + %15375 = OpImageSampleExplicitLod %v4float %12159 %13153 Lod %float_0 + %15270 = OpCompositeExtract %float %15375 3 + %12120 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12976 = OpLoad %float %12120 + %15714 = OpFMul %float %15270 %12976 + %15283 = OpExtInst %float %1 FClamp %15714 %float_0 %float_1 + %22217 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11760 = OpLoad %v3float %22217 + %12107 = OpVectorTimesScalar %v3float %11760 %15283 + %15520 = OpLoad %150 %3312 + %24577 = OpLoad %508 %4646 + %12160 = OpSampledImage %510 %15520 %24577 + %17674 = OpImageSampleExplicitLod %v4float %12160 %13153 Lod %float_0 + %16942 = OpCompositeExtract %float %17674 1 + %14189 = OpFOrdGreaterThan %bool %16942 %float_0 + OpSelectionMerge %22311 DontFlatten + OpBranchConditional %14189 %12825 %22311 + %12825 = OpLabel + %13243 = OpLoad %150 %4862 + %19964 = OpLoad %508 %3594 + %12161 = OpSampledImage %510 %13243 %19964 + %15679 = OpImageSampleExplicitLod %v4float %12161 %13153 Lod %float_0 + %13870 = OpCompositeExtract %float %17674 1 + %12431 = OpCompositeExtract %float %17674 2 + %23304 = OpFMul %float %13870 %12431 + %17616 = OpExtInst %float %1 FClamp %23304 %float_0 %float_1 + %20295 = OpVectorShuffle %v3float %15679 %15679 0 1 2 + %11190 = OpVectorTimesScalar %v3float %20295 %17616 + %15297 = OpFAdd %v3float %12107 %11190 + OpBranch %22311 + %22311 = OpLabel + %7723 = OpPhi %v3float %12107 %22310 %15297 %12825 + %23403 = OpVectorTimesScalar %v3float %7723 %float_0_5 + %9343 = OpFAdd 
%float %9342 %float_0_5 + %16239 = OpVectorShuffle %v3float %15530 %15530 0 1 2 + %22181 = OpFAdd %v3float %16239 %23403 + %15531 = OpVectorShuffle %v4float %15530 %22181 4 5 6 3 + %6438 = OpCompositeInsert %_struct_1017 %15531 %6437 0 + %24578 = OpVectorShuffle %v2float %95 %95 0 1 + %13211 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10350 = OpLoad %v4float %13211 + %8643 = OpVectorShuffle %v2float %10350 %10350 0 1 + %9202 = OpFMul %v2float %24578 %8643 + %18510 = OpFAdd %v2float %19927 %9202 + %7016 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21063 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13154 = OpExtInst %v2float %1 FClamp %18510 %7016 %21063 + %23589 = OpLoad %150 %5785 + %10351 = OpLoad %508 %5688 + %12162 = OpSampledImage %510 %23589 %10351 + %15376 = OpImageSampleExplicitLod %v4float %12162 %13154 Lod %float_0 + %15271 = OpCompositeExtract %float %15376 3 + %12121 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12977 = OpLoad %float %12121 + %15715 = OpFMul %float %15271 %12977 + %15284 = OpExtInst %float %1 FClamp %15715 %float_0 %float_1 + %22218 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11761 = OpLoad %v3float %22218 + %12108 = OpVectorTimesScalar %v3float %11761 %15284 + %15521 = OpLoad %150 %3312 + %24579 = OpLoad %508 %4646 + %12163 = OpSampledImage %510 %15521 %24579 + %17675 = OpImageSampleExplicitLod %v4float %12163 %13154 Lod %float_0 + %16943 = OpCompositeExtract %float %17675 1 + %14190 = OpFOrdGreaterThan %bool %16943 %float_0 + OpSelectionMerge %22312 DontFlatten + OpBranchConditional %14190 %12826 %22312 + %12826 = OpLabel + %13244 = OpLoad %150 %4862 + %19965 = OpLoad %508 %3594 + %12164 = OpSampledImage %510 %13244 %19965 + %15680 = OpImageSampleExplicitLod %v4float %12164 %13154 Lod %float_0 + %13871 = OpCompositeExtract %float %17675 1 + %12432 = OpCompositeExtract %float %17675 2 + %23305 = OpFMul %float %13871 %12432 + %17617 = OpExtInst %float %1 FClamp %23305 %float_0 %float_1 + %20296 = OpVectorShuffle %v3float %15680 %15680 0 1 2 + %11191 = OpVectorTimesScalar %v3float %20296 %17617 + %15298 = OpFAdd %v3float %12108 %11191 + OpBranch %22312 + %22312 = OpLabel + %7724 = OpPhi %v3float %12108 %22311 %15298 %12826 + %23404 = OpVectorTimesScalar %v3float %7724 %float_0_75 + %9344 = OpFAdd %float %9343 %float_0_75 + %16240 = OpVectorShuffle %v3float %15531 %15531 0 1 2 + %22182 = OpFAdd %v3float %16240 %23404 + %15532 = OpVectorShuffle %v4float %15531 %22182 4 5 6 3 + %6439 = OpCompositeInsert %_struct_1017 %15532 %6438 0 + %24580 = OpVectorShuffle %v2float %626 %626 0 1 + %13212 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10352 = OpLoad %v4float %13212 + %8644 = OpVectorShuffle %v2float %10352 %10352 0 1 + %9203 = OpFMul %v2float %24580 %8644 + %18511 = OpFAdd %v2float %19927 %9203 + %7017 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21064 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13155 = OpExtInst %v2float %1 FClamp %18511 %7017 %21064 + %23590 = OpLoad %150 %5785 + %10353 = OpLoad %508 %5688 + %12165 = OpSampledImage %510 %23590 %10353 + %15377 = OpImageSampleExplicitLod %v4float %12165 %13155 Lod %float_0 + %15272 = OpCompositeExtract %float %15377 3 + %12122 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12978 = OpLoad %float %12122 + %15716 = OpFMul %float %15272 %12978 + %15285 = OpExtInst %float %1 FClamp %15716 %float_0 %float_1 + %22219 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11762 = OpLoad %v3float %22219 + %12109 = OpVectorTimesScalar %v3float %11762 %15285 + %15522 = 
OpLoad %150 %3312 + %24581 = OpLoad %508 %4646 + %12166 = OpSampledImage %510 %15522 %24581 + %17676 = OpImageSampleExplicitLod %v4float %12166 %13155 Lod %float_0 + %16944 = OpCompositeExtract %float %17676 1 + %14191 = OpFOrdGreaterThan %bool %16944 %float_0 + OpSelectionMerge %22313 DontFlatten + OpBranchConditional %14191 %12827 %22313 + %12827 = OpLabel + %13245 = OpLoad %150 %4862 + %19966 = OpLoad %508 %3594 + %12167 = OpSampledImage %510 %13245 %19966 + %15681 = OpImageSampleExplicitLod %v4float %12167 %13155 Lod %float_0 + %13872 = OpCompositeExtract %float %17676 1 + %12433 = OpCompositeExtract %float %17676 2 + %23306 = OpFMul %float %13872 %12433 + %17618 = OpExtInst %float %1 FClamp %23306 %float_0 %float_1 + %20297 = OpVectorShuffle %v3float %15681 %15681 0 1 2 + %11192 = OpVectorTimesScalar %v3float %20297 %17618 + %15299 = OpFAdd %v3float %12109 %11192 + OpBranch %22313 + %22313 = OpLabel + %7725 = OpPhi %v3float %12109 %22312 %15299 %12827 + %23405 = OpVectorTimesScalar %v3float %7725 %float_1 + %9345 = OpFAdd %float %9344 %float_1 + %16241 = OpVectorShuffle %v3float %15532 %15532 0 1 2 + %22183 = OpFAdd %v3float %16241 %23405 + %15533 = OpVectorShuffle %v4float %15532 %22183 4 5 6 3 + %6440 = OpCompositeInsert %_struct_1017 %15533 %6439 0 + %24582 = OpVectorShuffle %v2float %2411 %2411 0 1 + %13213 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10354 = OpLoad %v4float %13213 + %8645 = OpVectorShuffle %v2float %10354 %10354 0 1 + %9204 = OpFMul %v2float %24582 %8645 + %18512 = OpFAdd %v2float %19927 %9204 + %7018 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21065 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13156 = OpExtInst %v2float %1 FClamp %18512 %7018 %21065 + %23591 = OpLoad %150 %5785 + %10355 = OpLoad %508 %5688 + %12168 = OpSampledImage %510 %23591 %10355 + %15378 = OpImageSampleExplicitLod %v4float %12168 %13156 Lod %float_0 + %15273 = OpCompositeExtract %float %15378 3 + %12123 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12979 = OpLoad %float %12123 + %15717 = OpFMul %float %15273 %12979 + %15286 = OpExtInst %float %1 FClamp %15717 %float_0 %float_1 + %22220 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11763 = OpLoad %v3float %22220 + %12110 = OpVectorTimesScalar %v3float %11763 %15286 + %15523 = OpLoad %150 %3312 + %24583 = OpLoad %508 %4646 + %12169 = OpSampledImage %510 %15523 %24583 + %17677 = OpImageSampleExplicitLod %v4float %12169 %13156 Lod %float_0 + %16945 = OpCompositeExtract %float %17677 1 + %14192 = OpFOrdGreaterThan %bool %16945 %float_0 + OpSelectionMerge %22314 DontFlatten + OpBranchConditional %14192 %12828 %22314 + %12828 = OpLabel + %13246 = OpLoad %150 %4862 + %19967 = OpLoad %508 %3594 + %12170 = OpSampledImage %510 %13246 %19967 + %15682 = OpImageSampleExplicitLod %v4float %12170 %13156 Lod %float_0 + %13873 = OpCompositeExtract %float %17677 1 + %12434 = OpCompositeExtract %float %17677 2 + %23307 = OpFMul %float %13873 %12434 + %17619 = OpExtInst %float %1 FClamp %23307 %float_0 %float_1 + %20298 = OpVectorShuffle %v3float %15682 %15682 0 1 2 + %11193 = OpVectorTimesScalar %v3float %20298 %17619 + %15300 = OpFAdd %v3float %12110 %11193 + OpBranch %22314 + %22314 = OpLabel + %7726 = OpPhi %v3float %12110 %22313 %15300 %12828 + %23406 = OpVectorTimesScalar %v3float %7726 %float_0_75 + %9346 = OpFAdd %float %9345 %float_0_75 + %16242 = OpVectorShuffle %v3float %15533 %15533 0 1 2 + %22184 = OpFAdd %v3float %16242 %23406 + %15534 = OpVectorShuffle %v4float %15533 %22184 4 5 6 3 + %6441 = OpCompositeInsert 
%_struct_1017 %15534 %6440 0 + %24584 = OpVectorShuffle %v2float %2354 %2354 0 1 + %13214 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10356 = OpLoad %v4float %13214 + %8646 = OpVectorShuffle %v2float %10356 %10356 0 1 + %9205 = OpFMul %v2float %24584 %8646 + %18513 = OpFAdd %v2float %19927 %9205 + %7019 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21066 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13157 = OpExtInst %v2float %1 FClamp %18513 %7019 %21066 + %23592 = OpLoad %150 %5785 + %10357 = OpLoad %508 %5688 + %12171 = OpSampledImage %510 %23592 %10357 + %15379 = OpImageSampleExplicitLod %v4float %12171 %13157 Lod %float_0 + %15274 = OpCompositeExtract %float %15379 3 + %12124 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12980 = OpLoad %float %12124 + %15718 = OpFMul %float %15274 %12980 + %15287 = OpExtInst %float %1 FClamp %15718 %float_0 %float_1 + %22221 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11764 = OpLoad %v3float %22221 + %12111 = OpVectorTimesScalar %v3float %11764 %15287 + %15524 = OpLoad %150 %3312 + %24585 = OpLoad %508 %4646 + %12172 = OpSampledImage %510 %15524 %24585 + %17678 = OpImageSampleExplicitLod %v4float %12172 %13157 Lod %float_0 + %16946 = OpCompositeExtract %float %17678 1 + %14193 = OpFOrdGreaterThan %bool %16946 %float_0 + OpSelectionMerge %22315 DontFlatten + OpBranchConditional %14193 %12829 %22315 + %12829 = OpLabel + %13247 = OpLoad %150 %4862 + %19968 = OpLoad %508 %3594 + %12173 = OpSampledImage %510 %13247 %19968 + %15683 = OpImageSampleExplicitLod %v4float %12173 %13157 Lod %float_0 + %13874 = OpCompositeExtract %float %17678 1 + %12435 = OpCompositeExtract %float %17678 2 + %23308 = OpFMul %float %13874 %12435 + %17620 = OpExtInst %float %1 FClamp %23308 %float_0 %float_1 + %20299 = OpVectorShuffle %v3float %15683 %15683 0 1 2 + %11194 = OpVectorTimesScalar %v3float %20299 %17620 + %15301 = OpFAdd %v3float %12111 %11194 + OpBranch %22315 + %22315 = OpLabel + %7727 = OpPhi %v3float %12111 %22314 %15301 %12829 + %23407 = OpVectorTimesScalar %v3float %7727 %float_0_5 + %9347 = OpFAdd %float %9346 %float_0_5 + %16243 = OpVectorShuffle %v3float %15534 %15534 0 1 2 + %22185 = OpFAdd %v3float %16243 %23407 + %15535 = OpVectorShuffle %v4float %15534 %22185 4 5 6 3 + %6442 = OpCompositeInsert %_struct_1017 %15535 %6441 0 + %24586 = OpVectorShuffle %v2float %837 %837 0 1 + %13215 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10358 = OpLoad %v4float %13215 + %8647 = OpVectorShuffle %v2float %10358 %10358 0 1 + %9206 = OpFMul %v2float %24586 %8647 + %18514 = OpFAdd %v2float %19927 %9206 + %7020 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21067 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13158 = OpExtInst %v2float %1 FClamp %18514 %7020 %21067 + %23593 = OpLoad %150 %5785 + %10359 = OpLoad %508 %5688 + %12174 = OpSampledImage %510 %23593 %10359 + %15380 = OpImageSampleExplicitLod %v4float %12174 %13158 Lod %float_0 + %15275 = OpCompositeExtract %float %15380 3 + %12125 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12981 = OpLoad %float %12125 + %15719 = OpFMul %float %15275 %12981 + %15288 = OpExtInst %float %1 FClamp %15719 %float_0 %float_1 + %22222 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11765 = OpLoad %v3float %22222 + %12112 = OpVectorTimesScalar %v3float %11765 %15288 + %15525 = OpLoad %150 %3312 + %24587 = OpLoad %508 %4646 + %12175 = OpSampledImage %510 %15525 %24587 + %17679 = OpImageSampleExplicitLod %v4float %12175 %13158 Lod %float_0 + %16947 = OpCompositeExtract %float 
%17679 1 + %14194 = OpFOrdGreaterThan %bool %16947 %float_0 + OpSelectionMerge %22316 DontFlatten + OpBranchConditional %14194 %12830 %22316 + %12830 = OpLabel + %13248 = OpLoad %150 %4862 + %19969 = OpLoad %508 %3594 + %12176 = OpSampledImage %510 %13248 %19969 + %15684 = OpImageSampleExplicitLod %v4float %12176 %13158 Lod %float_0 + %13875 = OpCompositeExtract %float %17679 1 + %12436 = OpCompositeExtract %float %17679 2 + %23309 = OpFMul %float %13875 %12436 + %17621 = OpExtInst %float %1 FClamp %23309 %float_0 %float_1 + %20300 = OpVectorShuffle %v3float %15684 %15684 0 1 2 + %11195 = OpVectorTimesScalar %v3float %20300 %17621 + %15302 = OpFAdd %v3float %12112 %11195 + OpBranch %22316 + %22316 = OpLabel + %7728 = OpPhi %v3float %12112 %22315 %15302 %12830 + %23408 = OpVectorTimesScalar %v3float %7728 %float_0_5 + %9348 = OpFAdd %float %9347 %float_0_5 + %16244 = OpVectorShuffle %v3float %15535 %15535 0 1 2 + %22186 = OpFAdd %v3float %16244 %23408 + %15536 = OpVectorShuffle %v4float %15535 %22186 4 5 6 3 + %6443 = OpCompositeInsert %_struct_1017 %15536 %6442 0 + %24588 = OpVectorShuffle %v2float %1368 %1368 0 1 + %13216 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10360 = OpLoad %v4float %13216 + %8648 = OpVectorShuffle %v2float %10360 %10360 0 1 + %9207 = OpFMul %v2float %24588 %8648 + %18515 = OpFAdd %v2float %19927 %9207 + %7021 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21068 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13159 = OpExtInst %v2float %1 FClamp %18515 %7021 %21068 + %23594 = OpLoad %150 %5785 + %10361 = OpLoad %508 %5688 + %12177 = OpSampledImage %510 %23594 %10361 + %15381 = OpImageSampleExplicitLod %v4float %12177 %13159 Lod %float_0 + %15276 = OpCompositeExtract %float %15381 3 + %12126 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12982 = OpLoad %float %12126 + %15720 = OpFMul %float %15276 %12982 + %15289 = OpExtInst %float %1 FClamp %15720 %float_0 %float_1 + %22223 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11766 = OpLoad %v3float %22223 + %12113 = OpVectorTimesScalar %v3float %11766 %15289 + %15526 = OpLoad %150 %3312 + %24589 = OpLoad %508 %4646 + %12178 = OpSampledImage %510 %15526 %24589 + %17680 = OpImageSampleExplicitLod %v4float %12178 %13159 Lod %float_0 + %16948 = OpCompositeExtract %float %17680 1 + %14195 = OpFOrdGreaterThan %bool %16948 %float_0 + OpSelectionMerge %22317 DontFlatten + OpBranchConditional %14195 %12831 %22317 + %12831 = OpLabel + %13249 = OpLoad %150 %4862 + %19970 = OpLoad %508 %3594 + %12179 = OpSampledImage %510 %13249 %19970 + %15685 = OpImageSampleExplicitLod %v4float %12179 %13159 Lod %float_0 + %13876 = OpCompositeExtract %float %17680 1 + %12437 = OpCompositeExtract %float %17680 2 + %23310 = OpFMul %float %13876 %12437 + %17622 = OpExtInst %float %1 FClamp %23310 %float_0 %float_1 + %20301 = OpVectorShuffle %v3float %15685 %15685 0 1 2 + %11196 = OpVectorTimesScalar %v3float %20301 %17622 + %15303 = OpFAdd %v3float %12113 %11196 + OpBranch %22317 + %22317 = OpLabel + %7729 = OpPhi %v3float %12113 %22316 %15303 %12831 + %23409 = OpVectorTimesScalar %v3float %7729 %float_0_75 + %9349 = OpFAdd %float %9348 %float_0_75 + %16245 = OpVectorShuffle %v3float %15536 %15536 0 1 2 + %22187 = OpFAdd %v3float %16245 %23409 + %15537 = OpVectorShuffle %v4float %15536 %22187 4 5 6 3 + %6444 = OpCompositeInsert %_struct_1017 %15537 %6443 0 + %24590 = OpVectorShuffle %v2float %142 %142 0 1 + %13217 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10362 = OpLoad %v4float %13217 + %8649 = OpVectorShuffle 
%v2float %10362 %10362 0 1 + %9208 = OpFMul %v2float %24590 %8649 + %18516 = OpFAdd %v2float %19927 %9208 + %7022 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21069 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13160 = OpExtInst %v2float %1 FClamp %18516 %7022 %21069 + %23595 = OpLoad %150 %5785 + %10363 = OpLoad %508 %5688 + %12180 = OpSampledImage %510 %23595 %10363 + %15382 = OpImageSampleExplicitLod %v4float %12180 %13160 Lod %float_0 + %15277 = OpCompositeExtract %float %15382 3 + %12127 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12983 = OpLoad %float %12127 + %15721 = OpFMul %float %15277 %12983 + %15290 = OpExtInst %float %1 FClamp %15721 %float_0 %float_1 + %22224 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11767 = OpLoad %v3float %22224 + %12114 = OpVectorTimesScalar %v3float %11767 %15290 + %15538 = OpLoad %150 %3312 + %24591 = OpLoad %508 %4646 + %12181 = OpSampledImage %510 %15538 %24591 + %17681 = OpImageSampleExplicitLod %v4float %12181 %13160 Lod %float_0 + %16949 = OpCompositeExtract %float %17681 1 + %14196 = OpFOrdGreaterThan %bool %16949 %float_0 + OpSelectionMerge %22318 DontFlatten + OpBranchConditional %14196 %12832 %22318 + %12832 = OpLabel + %13250 = OpLoad %150 %4862 + %19971 = OpLoad %508 %3594 + %12182 = OpSampledImage %510 %13250 %19971 + %15686 = OpImageSampleExplicitLod %v4float %12182 %13160 Lod %float_0 + %13877 = OpCompositeExtract %float %17681 1 + %12438 = OpCompositeExtract %float %17681 2 + %23311 = OpFMul %float %13877 %12438 + %17623 = OpExtInst %float %1 FClamp %23311 %float_0 %float_1 + %20302 = OpVectorShuffle %v3float %15686 %15686 0 1 2 + %11197 = OpVectorTimesScalar %v3float %20302 %17623 + %15304 = OpFAdd %v3float %12114 %11197 + OpBranch %22318 + %22318 = OpLabel + %7730 = OpPhi %v3float %12114 %22317 %15304 %12832 + %23410 = OpVectorTimesScalar %v3float %7730 %float_0_5 + %9350 = OpFAdd %float %9349 %float_0_5 + %16246 = OpVectorShuffle %v3float %15537 %15537 0 1 2 + %22188 = OpFAdd %v3float %16246 %23410 + %15539 = OpVectorShuffle %v4float %15537 %22188 4 5 6 3 + %6445 = OpCompositeInsert %_struct_1017 %15539 %6444 0 + %24592 = OpVectorShuffle %v2float %1197 %1197 0 1 + %13218 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10364 = OpLoad %v4float %13218 + %8650 = OpVectorShuffle %v2float %10364 %10364 0 1 + %9209 = OpFMul %v2float %24592 %8650 + %18517 = OpFAdd %v2float %19927 %9209 + %7023 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21070 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13161 = OpExtInst %v2float %1 FClamp %18517 %7023 %21070 + %23596 = OpLoad %150 %5785 + %10365 = OpLoad %508 %5688 + %12183 = OpSampledImage %510 %23596 %10365 + %15383 = OpImageSampleExplicitLod %v4float %12183 %13161 Lod %float_0 + %15278 = OpCompositeExtract %float %15383 3 + %12128 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12984 = OpLoad %float %12128 + %15722 = OpFMul %float %15278 %12984 + %15291 = OpExtInst %float %1 FClamp %15722 %float_0 %float_1 + %22225 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11768 = OpLoad %v3float %22225 + %12115 = OpVectorTimesScalar %v3float %11768 %15291 + %15540 = OpLoad %150 %3312 + %24593 = OpLoad %508 %4646 + %12184 = OpSampledImage %510 %15540 %24593 + %17682 = OpImageSampleExplicitLod %v4float %12184 %13161 Lod %float_0 + %16950 = OpCompositeExtract %float %17682 1 + %14197 = OpFOrdGreaterThan %bool %16950 %float_0 + OpSelectionMerge %22319 DontFlatten + OpBranchConditional %14197 %12833 %22319 + %12833 = OpLabel + %13251 = OpLoad %150 %4862 + %19972 = 
OpLoad %508 %3594 + %12185 = OpSampledImage %510 %13251 %19972 + %15687 = OpImageSampleExplicitLod %v4float %12185 %13161 Lod %float_0 + %13878 = OpCompositeExtract %float %17682 1 + %12439 = OpCompositeExtract %float %17682 2 + %23312 = OpFMul %float %13878 %12439 + %17624 = OpExtInst %float %1 FClamp %23312 %float_0 %float_1 + %20303 = OpVectorShuffle %v3float %15687 %15687 0 1 2 + %11198 = OpVectorTimesScalar %v3float %20303 %17624 + %15305 = OpFAdd %v3float %12115 %11198 + OpBranch %22319 + %22319 = OpLabel + %7731 = OpPhi %v3float %12115 %22318 %15305 %12833 + %23411 = OpVectorTimesScalar %v3float %7731 %float_0_5 + %9351 = OpFAdd %float %9350 %float_0_5 + %16247 = OpVectorShuffle %v3float %15539 %15539 0 1 2 + %22189 = OpFAdd %v3float %16247 %23411 + %15541 = OpVectorShuffle %v4float %15539 %22189 4 5 6 3 + %6719 = OpCompositeInsert %_struct_1017 %15541 %6445 0 + %23412 = OpVectorShuffle %v3float %15541 %15541 0 1 2 + %10833 = OpCompositeConstruct %v3float %9351 %9351 %9351 + %13750 = OpFDiv %v3float %23412 %10833 + %24033 = OpVectorShuffle %v4float %15541 %13750 4 5 6 3 + %8636 = OpCompositeInsert %_struct_1017 %24033 %6719 0 + %16315 = OpCompositeInsert %_struct_1017 %float_1 %8636 0 3 + %11544 = OpCompositeExtract %v4float %16315 0 + OpStore %4317 %11544 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/vert/packing-test.asm.vert b/3rdparty/spirv-cross/shaders-msl/asm/vert/packing-test.asm.vert new file mode 100644 index 000000000..8acdebc7d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/vert/packing-test.asm.vert @@ -0,0 +1,43 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 18 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" + OpSource HLSL 500 + OpName %main "main" + OpName %TestStruct "TestStruct" + OpMemberName %TestStruct 0 "transforms" + OpName %CB0 "CB0" + OpMemberName %CB0 0 "CB0" + OpName %_ "" + OpDecorate %_arr_mat4v4float_uint_6 ArrayStride 64 + OpMemberDecorate %TestStruct 0 RowMajor + OpMemberDecorate %TestStruct 0 Offset 0 + OpMemberDecorate %TestStruct 0 MatrixStride 16 + OpDecorate %_arr_TestStruct_uint_16 ArrayStride 384 + OpMemberDecorate %CB0 0 Offset 0 + OpDecorate %CB0 Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%mat4v4float = OpTypeMatrix %v4float 4 + %uint = OpTypeInt 32 0 + %uint_6 = OpConstant %uint 6 +%_arr_mat4v4float_uint_6 = OpTypeArray %mat4v4float %uint_6 + %TestStruct = OpTypeStruct %_arr_mat4v4float_uint_6 + %uint_16 = OpConstant %uint 16 +%_arr_TestStruct_uint_16 = OpTypeArray %TestStruct %uint_16 + %CB0 = OpTypeStruct %_arr_TestStruct_uint_16 +%_ptr_Uniform_CB0 = OpTypePointer Uniform %CB0 + %_ = OpVariable %_ptr_Uniform_CB0 Uniform + %main = OpFunction %void None %3 + %5 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert b/3rdparty/spirv-cross/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert new file mode 100644 index 000000000..b566a3d1a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/vert/spec-constant-op-composite.asm.vert @@ -0,0 +1,98 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 58 +; Schema: 0 + OpCapability Shader + OpCapability ClipDistance + OpCapability CullDistance + %1 = OpExtInstImport 
"GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %4 "main" %52 %output + OpSource GLSL 450 + OpName %4 "main" + OpName %9 "pos" + OpName %50 "gl_PerVertex" + OpMemberName %50 0 "gl_Position" + OpMemberName %50 1 "gl_PointSize" + OpMemberName %50 2 "gl_ClipDistance" + OpMemberName %50 3 "gl_CullDistance" + OpName %52 "" + OpDecorate %13 SpecId 201 + OpDecorate %24 SpecId 202 + OpMemberDecorate %50 0 BuiltIn Position + OpMemberDecorate %50 1 BuiltIn PointSize + OpMemberDecorate %50 2 BuiltIn ClipDistance + OpMemberDecorate %50 3 BuiltIn CullDistance + OpDecorate %50 Block + OpDecorate %57 SpecId 200 + OpDecorate %output Flat + OpDecorate %output Location 0 + %2 = OpTypeVoid + %3 = OpTypeFunction %2 + %6 = OpTypeFloat 32 + %7 = OpTypeVector %6 4 + %8 = OpTypePointer Function %7 + %10 = OpConstant %6 0 + %11 = OpConstantComposite %7 %10 %10 %10 %10 + %12 = OpTypeInt 32 1 + %int_ptr = OpTypePointer Output %12 + %13 = OpSpecConstant %12 -10 + %14 = OpConstant %12 2 + %15 = OpSpecConstantOp %12 IAdd %13 %14 + %17 = OpTypeInt 32 0 + %18 = OpConstant %17 1 + %19 = OpTypePointer Function %6 + %24 = OpSpecConstant %17 100 + %25 = OpConstant %17 5 + %26 = OpSpecConstantOp %17 UMod %24 %25 + %28 = OpConstant %17 2 + %33 = OpConstant %12 20 + %34 = OpConstant %12 30 + %35 = OpTypeVector %12 4 + %36 = OpSpecConstantComposite %35 %33 %34 %15 %15 + %40 = OpTypeVector %12 2 + %41 = OpSpecConstantOp %40 VectorShuffle %36 %36 1 0 + %foo = OpSpecConstantOp %12 CompositeExtract %36 1 + %42 = OpTypeVector %6 2 + %49 = OpTypeArray %6 %18 + %50 = OpTypeStruct %7 %6 %49 %49 + %51 = OpTypePointer Output %50 + %52 = OpVariable %51 Output + %output = OpVariable %int_ptr Output + %53 = OpConstant %12 0 + %55 = OpTypePointer Output %7 + %57 = OpSpecConstant %6 3.14159 + %4 = OpFunction %2 None %3 + %5 = OpLabel + %9 = OpVariable %8 Function + OpStore %9 %11 + %16 = OpConvertSToF %6 %15 + %20 = OpAccessChain %19 %9 %18 + %21 = OpLoad %6 %20 + %22 = OpFAdd %6 %21 %16 + %23 = OpAccessChain %19 %9 %18 + OpStore %23 %22 + %27 = OpConvertUToF %6 %26 + %29 = OpAccessChain %19 %9 %28 + %30 = OpLoad %6 %29 + %31 = OpFAdd %6 %30 %27 + %32 = OpAccessChain %19 %9 %28 + OpStore %32 %31 + %37 = OpConvertSToF %7 %36 + %38 = OpLoad %7 %9 + %39 = OpFAdd %7 %38 %37 + OpStore %9 %39 + %43 = OpConvertSToF %42 %41 + %44 = OpLoad %7 %9 + %45 = OpVectorShuffle %42 %44 %44 0 1 + %46 = OpFAdd %42 %45 %43 + %47 = OpLoad %7 %9 + %48 = OpVectorShuffle %7 %47 %46 4 5 2 3 + OpStore %9 %48 + %54 = OpLoad %7 %9 + %56 = OpAccessChain %55 %52 %53 + OpStore %56 %54 + OpStore %output %foo + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..29b0076a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,65 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %vid_1 %iid_1 %_entryPointOutput + OpSource HLSL 500 + OpName %main "main" + OpName %_main_u1_u1_ "@main(u1;u1;" + OpName %vid "vid" + OpName %iid "iid" + OpName %vid_0 "vid" + OpName %vid_1 "vid" + OpName %iid_0 "iid" + OpName %iid_1 "iid" + OpName %_entryPointOutput "@entryPointOutput" + OpName %param "param" + OpName %param_0 "param" + OpDecorate %vid_1 BuiltIn 
VertexIndex + OpDecorate %iid_1 BuiltIn InstanceIndex + OpDecorate %_entryPointOutput BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %10 = OpTypeFunction %v4float %_ptr_Function_uint %_ptr_Function_uint +%_ptr_Input_uint = OpTypePointer Input %uint + %vid_1 = OpVariable %_ptr_Input_uint Input + %iid_1 = OpVariable %_ptr_Input_uint Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %vid_0 = OpVariable %_ptr_Function_uint Function + %iid_0 = OpVariable %_ptr_Function_uint Function + %param = OpVariable %_ptr_Function_uint Function + %param_0 = OpVariable %_ptr_Function_uint Function + %25 = OpLoad %uint %vid_1 + OpStore %vid_0 %25 + %28 = OpLoad %uint %iid_1 + OpStore %iid_0 %28 + %32 = OpLoad %uint %vid_0 + OpStore %param %32 + %34 = OpLoad %uint %iid_0 + OpStore %param_0 %34 + %35 = OpFunctionCall %v4float %_main_u1_u1_ %param %param_0 + OpStore %_entryPointOutput %35 + OpReturn + OpFunctionEnd +%_main_u1_u1_ = OpFunction %v4float None %10 + %vid = OpFunctionParameter %_ptr_Function_uint + %iid = OpFunctionParameter %_ptr_Function_uint + %14 = OpLabel + %15 = OpLoad %uint %vid + %16 = OpLoad %uint %iid + %17 = OpIAdd %uint %15 %16 + %18 = OpConvertUToF %float %17 + %19 = OpCompositeConstruct %v4float %18 %18 %18 %18 + OpReturnValue %19 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-msl/comp/access-private-workgroup-in-function.comp b/3rdparty/spirv-cross/shaders-msl/comp/access-private-workgroup-in-function.comp new file mode 100644 index 000000000..7cb1e6f13 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/access-private-workgroup-in-function.comp @@ -0,0 +1,31 @@ +#version 450 +layout(local_size_x = 1) in; + +int f; +shared int u; + +void set_f() +{ + f = 40; +} + +void set_shared_u() +{ + u = 50; +} + +void main() +{ + set_f(); + set_shared_u(); + if (gl_LocalInvocationIndex == 0u) + { + f = 10; + } + else + { + f = 30; + u = 20; + } +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/atomic.comp b/3rdparty/spirv-cross/shaders-msl/comp/atomic.comp new file mode 100644 index 000000000..e25c4f6d2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/atomic.comp @@ -0,0 +1,56 @@ +#version 310 es +#extension GL_OES_shader_image_atomic : require +layout(local_size_x = 1) in; + +layout(r32ui, binding = 0) uniform highp uimage2D uImage; +layout(r32i, binding = 1) uniform highp iimage2D iImage; +layout(binding = 2, std430) buffer SSBO +{ + uint u32; + int i32; +} ssbo; + +shared uint shared_u32; +shared int shared_i32; + +void main() +{ + atomicAdd(ssbo.u32, 1u); + atomicOr(ssbo.u32, 1u); + atomicXor(ssbo.u32, 1u); + atomicAnd(ssbo.u32, 1u); + atomicMin(ssbo.u32, 1u); + atomicMax(ssbo.u32, 1u); + atomicExchange(ssbo.u32, 1u); + atomicCompSwap(ssbo.u32, 10u, 2u); + + atomicAdd(ssbo.i32, 1); + atomicOr(ssbo.i32, 1); + atomicXor(ssbo.i32, 1); + atomicAnd(ssbo.i32, 1); + atomicMin(ssbo.i32, 1); + atomicMax(ssbo.i32, 1); + atomicExchange(ssbo.i32, 1); + atomicCompSwap(ssbo.i32, 10, 2); + + shared_u32 = 10u; + shared_i32 = 10; + atomicAdd(shared_u32, 1u); + atomicOr(shared_u32, 1u); + atomicXor(shared_u32, 1u); + atomicAnd(shared_u32, 1u); + atomicMin(shared_u32, 1u); + atomicMax(shared_u32, 1u); + atomicExchange(shared_u32, 1u); + atomicCompSwap(shared_u32, 10u, 2u); + + atomicAdd(shared_i32, 1); 
+ atomicOr(shared_i32, 1); + atomicXor(shared_i32, 1); + atomicAnd(shared_i32, 1); + atomicMin(shared_i32, 1); + atomicMax(shared_i32, 1); + atomicExchange(shared_i32, 1); + atomicCompSwap(shared_i32, 10, 2); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/barriers.comp b/3rdparty/spirv-cross/shaders-msl/comp/barriers.comp new file mode 100644 index 000000000..c49b62636 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/barriers.comp @@ -0,0 +1,83 @@ +#version 310 es +layout(local_size_x = 4) in; + +void barrier_shared() +{ + memoryBarrierShared(); +} + +void full_barrier() +{ + memoryBarrier(); +} + +#if 0 +void image_barrier() +{ + memoryBarrierImage(); +} +#endif + +void buffer_barrier() +{ + memoryBarrierBuffer(); +} + +void group_barrier() +{ + groupMemoryBarrier(); +} + +void barrier_shared_exec() +{ + memoryBarrierShared(); + barrier(); +} + +void full_barrier_exec() +{ + memoryBarrier(); + barrier(); +} + +#if 0 +void image_barrier_exec() +{ + memoryBarrierImage(); + barrier(); +} +#endif + +void buffer_barrier_exec() +{ + memoryBarrierBuffer(); + barrier(); +} + +void group_barrier_exec() +{ + groupMemoryBarrier(); + barrier(); +} + +void exec_barrier() +{ + barrier(); +} + +void main() +{ + barrier_shared(); + full_barrier(); + //image_barrier(); + buffer_barrier(); + group_barrier(); + + barrier_shared_exec(); + full_barrier_exec(); + //image_barrier_exec(); + buffer_barrier_exec(); + group_barrier_exec(); + + exec_barrier(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/basic.comp b/3rdparty/spirv-cross/shaders-msl/comp/basic.comp new file mode 100644 index 000000000..f9bf55670 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/basic.comp @@ -0,0 +1,28 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +layout(std430, binding = 2) buffer SSBO3 +{ + uint counter; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idata = in_data[ident]; + if (dot(idata, vec4(1.0, 5.0, 6.0, 2.0)) > 8.2) + { + out_data[atomicAdd(counter, 1u)] = idata; + } +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/bitcast-16bit-1.invalid.comp b/3rdparty/spirv-cross/shaders-msl/comp/bitcast-16bit-1.invalid.comp new file mode 100644 index 000000000..0c21cda30 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/bitcast-16bit-1.invalid.comp @@ -0,0 +1,23 @@ +#version 450 core +#extension GL_AMD_gpu_shader_half_float : require +#extension GL_AMD_gpu_shader_int16 : require +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + i16vec4 inputs[]; +}; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + f16vec2 a = int16BitsToFloat16(inputs[ident].xy); + outputs[ident].x = int(packFloat2x16(a + f16vec2(1, 1))); + outputs[ident].y = packInt2x16(inputs[ident].zw); + outputs[ident].z = int(packUint2x16(u16vec2(inputs[ident].xy))); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/bitcast-16bit-2.invalid.comp b/3rdparty/spirv-cross/shaders-msl/comp/bitcast-16bit-2.invalid.comp new file mode 100644 index 000000000..6bb662412 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/bitcast-16bit-2.invalid.comp @@ -0,0 +1,26 @@ +#version 450 core +#extension GL_AMD_gpu_shader_half_float : require +#extension GL_AMD_gpu_shader_int16 : require +layout(local_size_x = 1) in; + +layout(binding = 0, std430) 
buffer SSBO0 +{ + ivec4 inputs[]; +}; + +layout(binding = 1, std430) buffer SSBO1 +{ + i16vec4 outputs[]; +}; + +layout(binding = 2) uniform UBO +{ + f16vec4 const0; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + outputs[ident].xy = unpackInt2x16(inputs[ident].x) + float16BitsToInt16(const0.xy); + outputs[ident].zw = i16vec2(unpackUint2x16(uint(inputs[ident].y)) - float16BitsToUint16(const0.zw)); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/builtins.comp b/3rdparty/spirv-cross/shaders-msl/comp/builtins.comp new file mode 100644 index 000000000..88bb5951e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/builtins.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 8, local_size_y = 4, local_size_z = 2) in; + +void main() +{ + uvec3 local_id = gl_LocalInvocationID; + uvec3 global_id = gl_GlobalInvocationID; + uint local_index = gl_LocalInvocationIndex; + uvec3 work_group_size = gl_WorkGroupSize; + uvec3 num_work_groups = gl_NumWorkGroups; + uvec3 work_group_id = gl_WorkGroupID; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/cfg-preserve-parameter.comp b/3rdparty/spirv-cross/shaders-msl/comp/cfg-preserve-parameter.comp new file mode 100644 index 000000000..9ef909200 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/cfg-preserve-parameter.comp @@ -0,0 +1,54 @@ +#version 310 es + +// We write in all paths (and no reads), so should just be out. +void out_test_0(int cond, inout int i) +{ + if (cond == 0) + i = 40; + else + i = 60; +} + +// We write in all paths (and no reads), so should just be out. +void out_test_1(int cond, inout int i) +{ + switch (cond) + { + case 40: + i = 40; + break; + + default: + i = 70; + break; + } +} + +// We don't write in all paths, so should be inout. +void inout_test_0(int cond, inout int i) +{ + if (cond == 0) + i = 40; +} + +void inout_test_1(int cond, inout int i) +{ + switch (cond) + { + case 40: + i = 40; + break; + } +} + + +void main() +{ + int cond = 40; + int i = 50; + + out_test_0(cond, i); + out_test_1(cond, i); + inout_test_0(cond, i); + inout_test_1(cond, i); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/coherent-block.comp b/3rdparty/spirv-cross/shaders-msl/comp/coherent-block.comp new file mode 100644 index 000000000..0a174e8ef --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/coherent-block.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 1) coherent restrict writeonly buffer SSBO +{ + vec4 value; +}; + +void main() +{ + value = vec4(20.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/coherent-image.comp b/3rdparty/spirv-cross/shaders-msl/comp/coherent-image.comp new file mode 100644 index 000000000..fd6e28018 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/coherent-image.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 1) coherent restrict writeonly buffer SSBO +{ + ivec4 value; +}; + +layout(r32i, binding = 3) coherent readonly restrict uniform mediump iimage2D uImage; + +void main() +{ + value = imageLoad(uImage, ivec2(10)); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/shaders-msl/comp/composite-array-initialization.comp new file mode 100644 index 000000000..1ecf4bcd4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/composite-array-initialization.comp @@ -0,0 +1,28 @@ +#version 450 +layout(local_size_x = 2) in; + +struct Data +{ + float a; + float b; +}; + +layout(std430, binding = 0) buffer SSBO +{ 
+ Data outdata[]; +}; + +layout(constant_id = 0) const float X = 4.0; + +Data data[2] = Data[](Data(1.0, 2.0), Data(3.0, 4.0)); +Data data2[2] = Data[](Data(X, 2.0), Data(3.0, 5.0)); + +Data combine(Data a, Data b) +{ + return Data(a.a + b.a, a.b + b.b); +} + +void main() +{ + outdata[gl_WorkGroupID.x] = combine(data[gl_LocalInvocationID.x], data2[gl_LocalInvocationID.x]); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/composite-construct.comp b/3rdparty/spirv-cross/shaders-msl/comp/composite-construct.comp new file mode 100644 index 000000000..305477532 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/composite-construct.comp @@ -0,0 +1,31 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO0 +{ + vec4 as[]; +}; + +layout(std430, binding = 1) buffer SSBO1 +{ + vec4 bs[]; +}; + +struct Composite +{ + vec4 a; + vec4 b; +}; + +const vec4 const_values[2] = vec4[](vec4(20.0), vec4(40.0)); + +void main() +{ + vec4 values[2] = vec4[](as[gl_GlobalInvocationID.x], bs[gl_GlobalInvocationID.x]); + vec4 copy_values[2]; + copy_values = const_values; + Composite c = Composite(values[0], copy_values[1]); + + as[0] = values[gl_LocalInvocationIndex]; + bs[1] = c.b; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/copy-array-of-arrays.comp b/3rdparty/spirv-cross/shaders-msl/comp/copy-array-of-arrays.comp new file mode 100644 index 000000000..edf87195b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/copy-array-of-arrays.comp @@ -0,0 +1,21 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(set = 0, binding = 0, std430) buffer BUF +{ + int a; + float b; + float c; +} o; + +void main() +{ + const float a[2][2][2] = float[][][](float[][](float[](1.0, 2.0), float[](3.0, 4.0)), float[][](float[](1.0, 2.0), float[](3.0, 4.0))); + float b[2][2][2] = a; + float c[2][2][2] = b; + o.a = int(c[1][1][1]); + + float d[2][2][2] = float[][][](float[][](float[](o.b, o.c), float[](o.b, o.b)), float[][](float[](o.c, o.c), float[](o.c, o.b))); + float e[2][2][2] = d; + o.b = e[1][0][1]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/culling.comp b/3rdparty/spirv-cross/shaders-msl/comp/culling.comp new file mode 100644 index 000000000..9f8331b10 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/culling.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 4) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + float in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + float out_data[]; +}; + +layout(std430, binding = 2) buffer SSBO3 +{ + uint count; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = in_data[ident]; + if (idata > 12.0) + out_data[atomicAdd(count, 1u)] = idata; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/defer-parens.comp b/3rdparty/spirv-cross/shaders-msl/comp/defer-parens.comp new file mode 100644 index 000000000..4e8ea6b39 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/defer-parens.comp @@ -0,0 +1,30 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + vec4 data; + int index; +}; + +void main() +{ + // Tests defer-parens behavior where a binary expression is OpCompositeExtracted chained together + // with an OpCompositeConstruct optimization. + vec4 d = data; + data = vec4(d.x, d.yz + 10.0, d.w); + + // Verify binary ops. + data = d + d + d; + + // Verify swizzles. 
+ data = (d.yz + 10.0).xxyy; + + // OpCompositeExtract + float t = (d.yz + 10.0).y; + data = vec4(t); + + // OpVectorExtractDynamic + t = (d.zw + 10.0)[index]; + data = vec4(t); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/dowhile.comp b/3rdparty/spirv-cross/shaders-msl/comp/dowhile.comp new file mode 100644 index 000000000..709db75a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/dowhile.comp @@ -0,0 +1,31 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +int i; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + + i = 0; + vec4 idat = in_data[ident]; + do + { + idat = mvp * idat; + i++; + } while(i < 16); + + out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/functions.comp b/3rdparty/spirv-cross/shaders-msl/comp/functions.comp new file mode 100644 index 000000000..478c8ebe8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/functions.comp @@ -0,0 +1,12 @@ +#version 450 +shared int foo[1337]; + +void myfunc() +{ + foo[0]=13; +} + +void main() +{ + myfunc(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp b/3rdparty/spirv-cross/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp new file mode 100644 index 000000000..2fe074df7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/global-invocation-id-writable-ssbo-in-function.comp @@ -0,0 +1,12 @@ +#version 450 +layout(set = 0, binding = 0) buffer myBlock { + int a; + float b[1]; +} myStorage; +float getB() { + return myStorage.b[gl_GlobalInvocationID.x]; +} +void main() { + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_GlobalInvocationID.x] = mod((getB() + 0.02), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/global-invocation-id.comp b/3rdparty/spirv-cross/shaders-msl/comp/global-invocation-id.comp new file mode 100644 index 000000000..f484637e1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/global-invocation-id.comp @@ -0,0 +1,9 @@ +#version 450 +layout(set = 0, binding = 0) buffer myBlock { + int a; + float b[1]; +} myStorage; +void main() { + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_GlobalInvocationID.x] = mod((myStorage.b[gl_GlobalInvocationID.x] + 0.02), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/image-cube-array-load-store.comp b/3rdparty/spirv-cross/shaders-msl/comp/image-cube-array-load-store.comp new file mode 100644 index 000000000..36a9ffd8e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/image-cube-array-load-store.comp @@ -0,0 +1,13 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(r32f, binding = 0) uniform readonly imageCubeArray uImageIn; +layout(r32f, binding = 1) uniform writeonly imageCubeArray uImageOut; + +void main() +{ + ivec3 coord = ivec3(9, 7, 11); + vec4 indata = imageLoad(uImageIn, coord); + imageStore(uImageOut, coord, indata); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/image.comp b/3rdparty/spirv-cross/shaders-msl/comp/image.comp new file mode 100644 index 000000000..e375534a5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/image.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(rgba8, binding = 0) uniform readonly mediump image2D uImageIn; +layout(rgba8, binding = 1) uniform writeonly mediump image2D uImageOut; + +void main() +{ + vec4 v = imageLoad(uImageIn, 
ivec2(gl_GlobalInvocationID.xy) + imageSize(uImageIn)); + imageStore(uImageOut, ivec2(gl_GlobalInvocationID.xy), v); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/insert.comp b/3rdparty/spirv-cross/shaders-msl/comp/insert.comp new file mode 100644 index 000000000..07c1f8d7a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/insert.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) writeonly buffer SSBO +{ + vec4 out_data[]; +}; + +void main() +{ + vec4 v; + v.x = 10.0; + v.y = 30.0; + v.z = 70.0; + v.w = 90.0; + out_data[gl_GlobalInvocationID.x] = v; + out_data[gl_GlobalInvocationID.x].y = 20.0; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/inverse.comp b/3rdparty/spirv-cross/shaders-msl/comp/inverse.comp new file mode 100644 index 000000000..03b06d646 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/inverse.comp @@ -0,0 +1,23 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(std430, binding = 0) writeonly buffer MatrixOut +{ + mat2 m2out; + mat3 m3out; + mat4 m4out; +}; + +layout(std430, binding = 1) readonly buffer MatrixIn +{ + mat2 m2in; + mat3 m3in; + mat4 m4in; +}; + +void main() +{ + m2out = inverse(m2in); + m3out = inverse(m3in); + m4out = inverse(m4in); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/local-invocation-id.comp b/3rdparty/spirv-cross/shaders-msl/comp/local-invocation-id.comp new file mode 100644 index 000000000..281700f19 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/local-invocation-id.comp @@ -0,0 +1,9 @@ +#version 450 +layout(set = 0, binding = 0) buffer myBlock { + int a; + float b[1]; +} myStorage; +void main() { + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_LocalInvocationID.x] = mod((myStorage.b[gl_LocalInvocationID.x] + 0.02), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/local-invocation-index.comp b/3rdparty/spirv-cross/shaders-msl/comp/local-invocation-index.comp new file mode 100644 index 000000000..68942da8e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/local-invocation-index.comp @@ -0,0 +1,9 @@ +#version 450 +layout(set = 0, binding = 0) buffer myBlock { + int a; + float b[1]; +} myStorage; +void main() { + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b[gl_LocalInvocationIndex.x] = mod((myStorage.b[gl_LocalInvocationIndex.x] + 0.02), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/mat3.comp b/3rdparty/spirv-cross/shaders-msl/comp/mat3.comp new file mode 100644 index 000000000..7c5bb1e4f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/mat3.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + mat3 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + out_data[ident] = mat3(vec3(10.0), vec3(20.0), vec3(40.0)); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/mod.comp b/3rdparty/spirv-cross/shaders-msl/comp/mod.comp new file mode 100644 index 000000000..1631456e3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/mod.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 v = mod(in_data[ident], out_data[ident]); + out_data[ident] = v; + + uvec4 vu = floatBitsToUint(in_data[ident]) % floatBitsToUint(out_data[ident]); + out_data[ident] = 
uintBitsToFloat(vu); + + ivec4 vi = floatBitsToInt(in_data[ident]) % floatBitsToInt(out_data[ident]); + out_data[ident] = intBitsToFloat(vi); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/modf.comp b/3rdparty/spirv-cross/shaders-msl/comp/modf.comp new file mode 100644 index 000000000..edadefcf0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/modf.comp @@ -0,0 +1,23 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 i; + //vec4 v = frexp(in_data[ident], i); + //out_data[ident] = ldexp(v, i); + vec4 v = modf(in_data[ident], i); + out_data[ident] = v; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/packing-test-1.comp b/3rdparty/spirv-cross/shaders-msl/comp/packing-test-1.comp new file mode 100644 index 000000000..1a8a39e21 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/packing-test-1.comp @@ -0,0 +1,18 @@ +#version 450 +struct T1 +{ + vec3 a; + float b; +}; + +layout(std430, binding = 1) buffer Buffer0 { T1 buf0[]; }; +layout(std430, binding = 2) buffer Buffer1 { float buf1[]; }; + +layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in; +void main() +{ + // broken case in Metal! + T1 v = buf0[0]; + float x = v.b; + buf1[gl_GlobalInvocationID.x] = x; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/packing-test-2.comp b/3rdparty/spirv-cross/shaders-msl/comp/packing-test-2.comp new file mode 100644 index 000000000..73268beec --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/packing-test-2.comp @@ -0,0 +1,16 @@ +#version 450 +struct T1 +{ + vec3 a; + float b; +}; + +layout(std430, binding = 1) buffer Buffer0 { T1 buf0[]; }; +layout(std430, binding = 2) buffer Buffer1 { float buf1[]; }; + +layout(local_size_x = 32, local_size_y = 1, local_size_z = 1) in; +void main() +{ + float x = buf0[0].b; + buf1[gl_GlobalInvocationID.x] = x; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/read-write-only.comp b/3rdparty/spirv-cross/shaders-msl/comp/read-write-only.comp new file mode 100644 index 000000000..b224b6f12 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/read-write-only.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO0 +{ + vec4 data0; + vec4 data1; +}; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + vec4 data2; + vec4 data3; +}; + +layout(binding = 2, std430) restrict writeonly buffer SSBO2 +{ + vec4 data4; + vec4 data5; +}; + +void main() +{ + data4 = data0 + data2; + data5 = data1 + data3; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/rmw-matrix.comp b/3rdparty/spirv-cross/shaders-msl/comp/rmw-matrix.comp new file mode 100644 index 000000000..c158ab4dd --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + float a; + vec4 b; + mat4 c; + + float a1; + vec4 b1; + mat4 c1; +}; + +void main() +{ + a *= a1; + b *= b1; + c *= c1; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/rmw-opt.comp b/3rdparty/spirv-cross/shaders-msl/comp/rmw-opt.comp new file mode 100644 index 000000000..a6e1e7fe7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/rmw-opt.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + int a; +}; + 
+void main() +{ + a += 10; + a -= 10; + a *= 10; + a /= 10; + a <<= 2; + a >>= 3; + a &= 40; + a ^= 10; + a %= 40; + a |= 1; + + bool c = false; + bool d = true; + c = c && d; + d = d || c; + a = c && d ? 1 : 0; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/shared-array-of-arrays.comp b/3rdparty/spirv-cross/shaders-msl/comp/shared-array-of-arrays.comp new file mode 100644 index 000000000..009b4e41d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/shared-array-of-arrays.comp @@ -0,0 +1,29 @@ +#version 310 es +layout(local_size_x = 4, local_size_y = 4) in; + +shared float foo[4][4]; + +layout(binding = 0, std430) buffer SSBO +{ + float out_data[]; +}; + +void work() +{ + foo[gl_LocalInvocationID.x][gl_LocalInvocationID.y] = float(gl_LocalInvocationIndex); + memoryBarrierShared(); + barrier(); + + float x = 0.0; + x += foo[gl_LocalInvocationID.x][0]; + x += foo[gl_LocalInvocationID.x][1]; + x += foo[gl_LocalInvocationID.x][2]; + x += foo[gl_LocalInvocationID.x][3]; + out_data[gl_GlobalInvocationID.x] = x; +} + +void main() +{ + work(); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/shared.comp b/3rdparty/spirv-cross/shaders-msl/comp/shared.comp new file mode 100644 index 000000000..4deff9359 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/shared.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 4) in; + +shared float sShared[gl_WorkGroupSize.x]; + +layout(std430, binding = 0) readonly buffer SSBO +{ + float in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + float out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = in_data[ident]; + + sShared[gl_LocalInvocationIndex] = idata; + memoryBarrierShared(); + barrier(); + + out_data[ident] = sShared[gl_WorkGroupSize.x - gl_LocalInvocationIndex - 1u]; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/spec-constant-op-member-array.comp b/3rdparty/spirv-cross/shaders-msl/comp/spec-constant-op-member-array.comp new file mode 100644 index 000000000..0b428eb0c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/spec-constant-op-member-array.comp @@ -0,0 +1,33 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(constant_id = 0) const int a = 100; +layout(constant_id = 1) const int b = 200; +layout(constant_id = 2) const int c = 300; +const int d = c + 50; +layout(constant_id = 3) const int e = 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(set = 1, binding = 0) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[d]; +}; + +void main() +{ + w[gl_GlobalInvocationID.x] += v[gl_GlobalInvocationID.x] + e; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/spec-constant-work-group-size.comp b/3rdparty/spirv-cross/shaders-msl/comp/spec-constant-work-group-size.comp new file mode 100644 index 000000000..09b65dc99 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/spec-constant-work-group-size.comp @@ -0,0 +1,17 @@ +#version 450 +layout(local_size_x_id = 10, local_size_y = 20) in; + +layout(constant_id = 0) const int a = 1; +layout(constant_id = 1) const int b = 2; + +layout(set = 1, binding = 0) writeonly buffer SSBO +{ + int v[]; +}; + +void main() +{ + int spec_const_array_size[b]; + spec_const_array_size[a] = a; + v[a + gl_WorkGroupSize.x + gl_WorkGroupSize.y] = b + spec_const_array_size[1 - a]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/struct-layout.comp b/3rdparty/spirv-cross/shaders-msl/comp/struct-layout.comp new file mode 
100644 index 000000000..5a2b7802d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/struct-layout.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct Foo +{ + mat4 m; +}; + +layout(std430, binding = 0) readonly buffer SSBO +{ + Foo in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + Foo out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + out_data[ident].m = in_data[ident].m * in_data[ident].m; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/struct-nested.comp b/3rdparty/spirv-cross/shaders-msl/comp/struct-nested.comp new file mode 100644 index 000000000..d9645cbc4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/struct-nested.comp @@ -0,0 +1,20 @@ +#version 450 + +struct s1 +{ + int a; +}; + +struct s2 +{ + s1 b; +}; + +layout(std430, binding = 1) buffer dstbuffer{ s2 test[]; }; +layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in; +void main() +{ + s2 testVal; + testVal.b.a = 0; + test[0] = testVal; +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/shaders-msl/comp/struct-packing.comp b/3rdparty/spirv-cross/shaders-msl/comp/struct-packing.comp new file mode 100644 index 000000000..5baf45cb3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/struct-packing.comp @@ -0,0 +1,77 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct S0 +{ + vec2 a[1]; + float b; +}; + +struct S1 +{ + vec3 a; + float b; +}; + +struct S2 +{ + vec3 a[1]; + float b; +}; + +struct S3 +{ + vec2 a; + float b; +}; + +struct S4 +{ + vec2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + float m4; + + S4 m3s[8]; +}; + +layout(binding = 1, std430) buffer SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + + layout(column_major) mat2 m0; + layout(column_major) mat2 m1; + layout(column_major) mat2x3 m2[4]; + layout(column_major) mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_430; + +layout(binding = 0, std140) buffer SSBO0 +{ + Content content; + Content content1[2]; + Content content2; + float array[]; +} ssbo_140; + +void main() +{ + ssbo_430.content = ssbo_140.content; + ssbo_430.content.m1.a = ssbo_430.m6[1][1] * ssbo_430.content.m3.a; // test packed matrix access +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/torture-loop.comp b/3rdparty/spirv-cross/shaders-msl/comp/torture-loop.comp new file mode 100644 index 000000000..54a1221a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/torture-loop.comp @@ -0,0 +1,40 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idat = in_data[ident]; + + int k = 0; + + // Continue with side effects. + while (++k < 10) + { + idat *= 2.0; + k++; + } + + // Again used here ... 
+ for (uint i = 0u; i < 16u; i++, k++) + for (uint j = 0u; j < 30u; j++) + idat = mvp * idat; + + do + { + k++; + } while (k > 10); + out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/comp/type-alias.comp b/3rdparty/spirv-cross/shaders-msl/comp/type-alias.comp new file mode 100644 index 000000000..343d350a2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/type-alias.comp @@ -0,0 +1,45 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct S0 +{ + vec4 a; +}; + +struct S1 +{ + vec4 a; +}; + +vec4 overload(S0 s0) +{ + return s0.a; +} + +vec4 overload(S1 s1) +{ + return s1.a; +} + +layout(std430, binding = 0) buffer SSBO0 +{ + S0 s0s[]; +}; + +layout(std430, binding = 1) buffer SSBO1 +{ + S1 s1s[]; +}; + +layout(std430, binding = 2) buffer SSBO2 +{ + vec4 outputs[]; +}; + + +void main() +{ + S0 s0 = s0s[gl_GlobalInvocationID.x]; + S1 s1 = s1s[gl_GlobalInvocationID.x]; + outputs[gl_GlobalInvocationID.x] = overload(s0) + overload(s1); +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/udiv.comp b/3rdparty/spirv-cross/shaders-msl/comp/udiv.comp new file mode 100644 index 000000000..d4e1133bc --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/udiv.comp @@ -0,0 +1,17 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + uint inputs[]; +}; + +layout(std430, binding = 1) buffer SSBO2 +{ + uint outputs[]; +}; + +void main() +{ + outputs[gl_GlobalInvocationID.x] = inputs[gl_GlobalInvocationID.x] / 29u; +} diff --git a/3rdparty/spirv-cross/shaders-msl/comp/writable-ssbo.comp b/3rdparty/spirv-cross/shaders-msl/comp/writable-ssbo.comp new file mode 100644 index 000000000..1d5128ce8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/comp/writable-ssbo.comp @@ -0,0 +1,9 @@ +#version 450 +layout(set = 0, binding = 0) buffer myBlock { + int a; + float b; +} myStorage; +void main() { + myStorage.a = (myStorage.a + 1) % 256; + myStorage.b = mod((myStorage.b + 0.02), 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp b/3rdparty/spirv-cross/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp new file mode 100644 index 000000000..9623751b6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/desktop-only/comp/extended-arithmetic.desktop.comp @@ -0,0 +1,41 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBOUint +{ + uint a, b, c, d; + uvec2 a2, b2, c2, d2; + uvec3 a3, b3, c3, d3; + uvec4 a4, b4, c4, d4; +} u; + +layout(binding = 1, std430) buffer SSBOInt +{ + int a, b, c, d; + ivec2 a2, b2, c2, d2; + ivec3 a3, b3, c3, d3; + ivec4 a4, b4, c4, d4; +} i; + +void main() +{ + u.c = uaddCarry(u.a, u.b, u.d); + u.c2 = uaddCarry(u.a2, u.b2, u.d2); + u.c3 = uaddCarry(u.a3, u.b3, u.d3); + u.c4 = uaddCarry(u.a4, u.b4, u.d4); + + u.c = usubBorrow(u.a, u.b, u.d); + u.c2 = usubBorrow(u.a2, u.b2, u.d2); + u.c3 = usubBorrow(u.a3, u.b3, u.d3); + u.c4 = usubBorrow(u.a4, u.b4, u.d4); + + umulExtended(u.a, u.b, u.c, u.d); + umulExtended(u.a2, u.b2, u.c2, u.d2); + umulExtended(u.a3, u.b3, u.c3, u.d3); + umulExtended(u.a4, u.b4, u.c4, u.d4); + + imulExtended(i.a, i.b, i.c, i.d); + imulExtended(i.a2, i.b2, i.c2, i.d2); + imulExtended(i.a3, i.b3, i.c3, i.d3); + imulExtended(i.a4, i.b4, i.c4, i.d4); +} diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/image-ms.desktop.frag b/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/image-ms.desktop.frag new file mode 100644 index 000000000..e145cb8f0 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/image-ms.desktop.frag @@ -0,0 +1,13 @@ +#version 450 + +layout(rgba8, binding = 0) uniform image2D uImage; +layout(rgba8, binding = 1) uniform image2DArray uImageArray; +layout(rgba8, binding = 2) uniform image2DMS uImageMS; + +void main() +{ + vec4 a = imageLoad(uImageMS, ivec2(1, 2), 2); + vec4 b = imageLoad(uImageArray, ivec3(1, 2, 4)); + imageStore(uImage, ivec2(2, 3), a); + imageStore(uImageArray, ivec3(2, 3, 7), b); +} diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/query-levels.desktop.frag b/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/query-levels.desktop.frag new file mode 100644 index 000000000..4a80cbf81 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/query-levels.desktop.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(float(textureQueryLevels(uSampler))); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag b/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag new file mode 100644 index 000000000..4c8dcf976 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/desktop-only/frag/sampler-ms-query.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(binding = 0) uniform sampler2DMS uSampler; +layout(binding = 2, rgba8) uniform readonly writeonly image2DMS uImage; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(float(textureSamples(uSampler) + imageSamples(uImage))); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/basic.desktop.sso.vert b/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/basic.desktop.sso.vert new file mode 100644 index 000000000..9ddab08cd --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/basic.desktop.sso.vert @@ -0,0 +1,20 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; +}; +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert b/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert new file mode 100644 index 000000000..9e4a0b7ac --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/clip-cull-distance.desktop.vert @@ -0,0 +1,10 @@ +#version 450 + +void main() +{ + gl_Position = vec4(10.0); + gl_ClipDistance[0] = 1.0; + gl_ClipDistance[1] = 4.0; + gl_CullDistance[0] = 4.0; + gl_CullDistance[1] = 9.0; +} diff --git a/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert b/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert new file mode 100644 index 000000000..fadd1e73b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/desktop-only/vert/shader-draw-parameters.desktop.vert @@ -0,0 +1,11 @@ +#version 460 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = vec4(gl_BaseVertex, gl_BaseInstance, 0, 1); +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/basic.flatten.vert b/3rdparty/spirv-cross/shaders-msl/flatten/basic.flatten.vert new file mode 100644 index 000000000..e60a9067b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/basic.flatten.vert @@ -0,0 
+1,16 @@ +#version 310 es + +layout(std140) uniform UBO +{ + mat4 uMVP; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/multiindex.flatten.vert b/3rdparty/spirv-cross/shaders-msl/flatten/multiindex.flatten.vert new file mode 100644 index 000000000..0b471d86e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/multiindex.flatten.vert @@ -0,0 +1,13 @@ +#version 310 es + +layout(std140) uniform UBO +{ + vec4 Data[3][5]; +}; + +layout(location = 0) in ivec2 aIndex; + +void main() +{ + gl_Position = Data[aIndex.x][aIndex.y]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/push-constant.flatten.vert b/3rdparty/spirv-cross/shaders-msl/flatten/push-constant.flatten.vert new file mode 100644 index 000000000..c7b1b42e1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/push-constant.flatten.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(push_constant, std430) uniform PushMe +{ + mat4 MVP; + mat2 Rot; // The MatrixStride will be 8 here. + float Arr[4]; +} registers; + +layout(location = 0) in vec2 Rot; +layout(location = 1) in vec4 Pos; +layout(location = 0) out vec2 vRot; +void main() +{ + gl_Position = registers.MVP * Pos; + vRot = registers.Rot * Rot + registers.Arr[2]; // Constant access should work even if array stride is just 4 here. +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/rowmajor.flatten.vert b/3rdparty/spirv-cross/shaders-msl/flatten/rowmajor.flatten.vert new file mode 100644 index 000000000..88c468c8f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/rowmajor.flatten.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(std140) uniform UBO +{ + layout(column_major) mat4 uMVPR; + layout(row_major) mat4 uMVPC; + layout(row_major) mat2x4 uMVP; +}; + +layout(location = 0) in vec4 aVertex; + +void main() +{ + vec2 v = aVertex * uMVP; + gl_Position = uMVPR * aVertex + uMVPC * aVertex; +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/struct.flatten.vert b/3rdparty/spirv-cross/shaders-msl/flatten/struct.flatten.vert new file mode 100644 index 000000000..936bb41b8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/struct.flatten.vert @@ -0,0 +1,30 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + + vec4 Color; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; + + Light light; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec4 vColor; + +void main() +{ + gl_Position = uMVP * aVertex; + + vColor = vec4(0.0); + + vec3 L = aVertex.xyz - light.Position; + vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / light.Radius, 0.0, 1.0) * light.Color); +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/swizzle.flatten.vert b/3rdparty/spirv-cross/shaders-msl/flatten/swizzle.flatten.vert new file mode 100644 index 000000000..e310cdf33 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/swizzle.flatten.vert @@ -0,0 +1,47 @@ +#version 310 es + +// comments note the 16b alignment boundaries (see GL spec 7.6.2.2 Standard Uniform Block Layout) +layout(std140) uniform UBO +{ + // 16b boundary + vec4 A; + // 16b boundary + vec2 B0; + vec2 B1; + // 16b boundary + float C0; + // 16b boundary (vec3 is aligned to 16b) + vec3 C1; + // 16b boundary + vec3 D0; + float D1; + // 16b boundary + float E0; + float E1; + float E2; + float E3; + // 
16b boundary + float F0; + vec2 F1; + // 16b boundary (vec2 before us is aligned to 8b) + float F2; +}; + +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; +layout(location = 5) out vec4 oF; + +void main() +{ + gl_Position = vec4(0.0); + + oA = A; + oB = vec4(B0, B1); + oC = vec4(C0, C1) + vec4(C1.xy, C1.z, C0); // not packed + oD = vec4(D0, D1) + vec4(D0.xy, D0.z, D1); // packed - must convert for swizzle + oE = vec4(E0, E1, E2, E3); + oF = vec4(F0, F1, F2); +} diff --git a/3rdparty/spirv-cross/shaders-msl/flatten/types.flatten.frag b/3rdparty/spirv-cross/shaders-msl/flatten/types.flatten.frag new file mode 100644 index 000000000..c1231445f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/flatten/types.flatten.frag @@ -0,0 +1,27 @@ +#version 310 es +precision mediump float; + +layout(std140, binding = 0) uniform UBO0 +{ + vec4 a; + vec4 b; +}; + +layout(std140, binding = 1) uniform UBO1 +{ + ivec4 c; + ivec4 d; +}; + +layout(std140, binding = 2) uniform UBO2 +{ + uvec4 e; + uvec4 f; +}; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(c) + vec4(d) + vec4(e) + vec4(f) + a + b; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/16bit-constants.frag b/3rdparty/spirv-cross/shaders-msl/frag/16bit-constants.frag new file mode 100644 index 000000000..c53091b5b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/16bit-constants.frag @@ -0,0 +1,14 @@ +#version 450 core + +#extension GL_AMD_gpu_shader_int16 : require +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0) out float16_t foo; +layout(location = 1) out int16_t bar; +layout(location = 2) out uint16_t baz; + +void main() { + foo = 1.0hf; + bar = 2s; + baz = 3us; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/shaders-msl/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..3493e0ccc --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + float lut[5] = float[](1.0, 2.0, 3.0, 4.0, 5.0); + for (int i = 0; i < 4; i++, FragColor += lut[i]) + { + } +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/basic.frag b/3rdparty/spirv-cross/shaders-msl/frag/basic.frag new file mode 100644 index 000000000..dd9a8f850 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/basic.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; +layout(binding = 0) uniform sampler2D uTex; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vColor * texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/frag/binary-func-unpack-pack-arguments.frag b/3rdparty/spirv-cross/shaders-msl/frag/binary-func-unpack-pack-arguments.frag new file mode 100644 index 000000000..c0e5dabd2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/binary-func-unpack-pack-arguments.frag @@ -0,0 +1,15 @@ +#version 450 +layout(location = 0) out float FragColor; + +layout(binding = 0, std140) uniform UBO +{ + vec3 color; + float v; +}; + +layout(location = 0) in vec3 vIn; + +void main() +{ + FragColor = dot(vIn, color); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/binary-unpack-pack-arguments.frag 
b/3rdparty/spirv-cross/shaders-msl/frag/binary-unpack-pack-arguments.frag new file mode 100644 index 000000000..be30f84df --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/binary-unpack-pack-arguments.frag @@ -0,0 +1,15 @@ +#version 450 +layout(location = 0) out vec3 FragColor; + +layout(binding = 0, std140) uniform UBO +{ + vec3 color; + float v; +}; + +layout(location = 0) in vec3 vIn; + +void main() +{ + FragColor = cross(vIn, color - vIn); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/bitcasting.frag b/3rdparty/spirv-cross/shaders-msl/frag/bitcasting.frag new file mode 100644 index 000000000..5dac78ef3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/bitcasting.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; + +layout(binding = 0) uniform sampler2D TextureBase; +layout(binding = 1) uniform sampler2D TextureDetail; + +layout(location = 0) in vec4 VertGeom; + +layout(location = 0) out vec4 FragColor0; +layout(location = 1) out vec4 FragColor1; + +void main() +{ + vec4 texSample0 = texture(TextureBase, VertGeom.xy); + vec4 texSample1 = textureOffset(TextureDetail, VertGeom.xy, ivec2(3, 2)); + + ivec4 iResult0 = floatBitsToInt(texSample0); + ivec4 iResult1 = floatBitsToInt(texSample1); + FragColor0 = (intBitsToFloat(iResult0) * intBitsToFloat(iResult1)); + + uvec4 uResult0 = floatBitsToUint(texSample0); + uvec4 uResult1 = floatBitsToUint(texSample1); + FragColor1 = (uintBitsToFloat(uResult0) * uintBitsToFloat(uResult1)); +} \ No newline at end of file diff --git a/3rdparty/spirv-cross/shaders-msl/frag/buffer-read.frag b/3rdparty/spirv-cross/shaders-msl/frag/buffer-read.frag new file mode 100644 index 000000000..297f0a71c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/buffer-read.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(rgba8, binding = 0) uniform readonly imageBuffer buf; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = imageLoad(buf, 0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/builtins.frag b/3rdparty/spirv-cross/shaders-msl/frag/builtins.frag new file mode 100644 index 000000000..99e6e2df5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/builtins.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vColor; + +void main() +{ + FragColor = gl_FragCoord + vColor; + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/shaders-msl/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..47f93931c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; + +struct Foo +{ + vec4 a; + vec4 b; +}; + +layout(binding = 0) buffer UBO +{ + vec4 results[1024]; +}; + +layout(binding = 1) uniform highp isampler2D Buf; +layout(location = 0) flat in int vIn; +layout(location = 1) flat in int vIn2; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec4 coords = texelFetch(Buf, ivec2(gl_FragCoord.xy), 0); + vec4 foo = results[coords.x % 16]; + + int c = vIn * vIn; + int d = vIn2 * vIn2; + FragColor = foo + foo + results[c + d]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/composite-extract-forced-temporary.frag b/3rdparty/spirv-cross/shaders-msl/frag/composite-extract-forced-temporary.frag new file mode 100644 index 000000000..35fdbe862 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-msl/frag/composite-extract-forced-temporary.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +layout(binding = 0) uniform sampler2D Texture; +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTexCoord; + +void main() +{ + float f = texture(Texture, vTexCoord).x; + FragColor = vec4(f * f); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/constant-array.frag b/3rdparty/spirv-cross/shaders-msl/frag/constant-array.frag new file mode 100644 index 000000000..b862cb1db --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/constant-array.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +layout(location = 0) out vec4 FragColor; + +layout(location = 0) flat in int index; + +struct Foobar { float a; float b; }; + +vec4 resolve(Foobar f) +{ + return vec4(f.a + f.b); +} + +void main() +{ + const vec4 foo[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0)); + const vec4 foobars[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0))); + const Foobar foos[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0)); + + FragColor = foo[index] + foobars[index][index + 1] + resolve(Foobar(10.0, 20.0)) + resolve(foos[index]); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/constant-composites.frag b/3rdparty/spirv-cross/shaders-msl/frag/constant-composites.frag new file mode 100644 index 000000000..a12e22ff4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/constant-composites.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +float lut[4] = float[](1.0, 4.0, 3.0, 2.0); + +struct Foo +{ + float a; + float b; +}; +Foo foos[2] = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0)); + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in int line; + +void main() +{ + FragColor = vec4(lut[line]); + FragColor += foos[line].a * foos[1 - line].a; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/shaders-msl/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..1f21bef8c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,34 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uSampler; +layout(location = 0) in vec4 vInput; + +void main() +{ + FragColor = vInput; + vec4 t = texture(uSampler, vInput.xy); + vec4 d0 = dFdx(vInput); + vec4 d1 = dFdy(vInput); + vec4 d2 = fwidth(vInput); + vec4 d3 = dFdxCoarse(vInput); + vec4 d4 = dFdyCoarse(vInput); + vec4 d5 = fwidthCoarse(vInput); + vec4 d6 = dFdxFine(vInput); + vec4 d7 = dFdyFine(vInput); + vec4 d8 = fwidthFine(vInput); + if (vInput.y > 10.0) + { + FragColor += t; + FragColor += d0; + FragColor += d1; + FragColor += d2; + FragColor += d3; + FragColor += d4; + FragColor += d5; + FragColor += d6; + FragColor += d7; + FragColor += d8; + } +} + diff --git a/3rdparty/spirv-cross/shaders-msl/frag/depth-greater-than.frag b/3rdparty/spirv-cross/shaders-msl/frag/depth-greater-than.frag new file mode 100644 index 000000000..8dcbcfdb4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/depth-greater-than.frag @@ -0,0 +1,7 @@ +#version 450 +layout(depth_greater) out float gl_FragDepth; + +void main() +{ + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/depth-less-than.frag b/3rdparty/spirv-cross/shaders-msl/frag/depth-less-than.frag new file mode 100644 index 000000000..cdcb80f77 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-msl/frag/depth-less-than.frag @@ -0,0 +1,7 @@ +#version 450 +layout(depth_less) out float gl_FragDepth; + +void main() +{ + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/dual-source-blending.frag b/3rdparty/spirv-cross/shaders-msl/frag/dual-source-blending.frag new file mode 100644 index 000000000..f322cf4c3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/dual-source-blending.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(location = 0, index = 0) out vec4 FragColor0; +layout(location = 0, index = 1) out vec4 FragColor1; + +void main() +{ + FragColor0 = vec4(1.0); + FragColor1 = vec4(2.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/early-fragment-tests.frag b/3rdparty/spirv-cross/shaders-msl/frag/early-fragment-tests.frag new file mode 100644 index 000000000..1a0acb950 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/early-fragment-tests.frag @@ -0,0 +1,9 @@ +#version 450 +layout(early_fragment_tests) in; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/false-loop-init.frag b/3rdparty/spirv-cross/shaders-msl/frag/false-loop-init.frag new file mode 100644 index 000000000..7ce5b52bd --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/false-loop-init.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 accum; +layout(location = 0) out vec4 result; + +void main() +{ + result = vec4(0.0); + uint j; + for (int i = 0; i < 4; i += int(j)) + { + if (accum.y > 10.0) + j = 40u; + else + j = 30u; + result += accum; + } +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/flush_params.frag b/3rdparty/spirv-cross/shaders-msl/frag/flush_params.frag new file mode 100644 index 000000000..8a26ad3a2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/flush_params.frag @@ -0,0 +1,27 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; + +struct Structy +{ + vec4 c; +}; + +void foo2(out Structy f) +{ + f.c = vec4(10.0); +} + +Structy foo() +{ + Structy f; + foo2(f); + return f; +} + +void main() +{ + Structy s = foo(); + FragColor = s.c; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/for-loop-init.frag b/3rdparty/spirv-cross/shaders-msl/frag/for-loop-init.frag new file mode 100644 index 000000000..0cde26765 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/for-loop-init.frag @@ -0,0 +1,52 @@ +#version 310 es +precision mediump float; +layout(location = 0) out int FragColor; + +void main() +{ + FragColor = 16; + + // Basic loop variable. + for (int i = 0; i < 25; i++) + FragColor += 10; + + // Multiple loop variables. + for (int i = 1, j = 4; i < 30; i++, j += 4) + FragColor += 11; + + // A potential loop variables, but we access it outside the loop, + // so cannot be one. + int k = 0; + for (; k < 20; k++) + FragColor += 12; + k += 3; + FragColor += k; + + // Potential loop variables, but the dominator is not trivial. + int l; + if (k == 40) + { + for (l = 0; l < 40; l++) + FragColor += 13; + return; + } + else + { + l = k; + FragColor += l; + } + + // Vectors cannot be loop variables + for (ivec2 i = ivec2(0); i.x < 10; i.x += 4) + { + FragColor += i.y; + } + + // Check that static expressions can be used before the loop header. 
+ int m = 0; + m = k; + int o = m; + for (; m < 40; m++) + FragColor += m; + FragColor += o; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/fp16-packing.frag b/3rdparty/spirv-cross/shaders-msl/frag/fp16-packing.frag new file mode 100644 index 000000000..98ca24e2f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/fp16-packing.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) flat in uint FP16; +layout(location = 1) flat in vec2 FP32; +layout(location = 0) out vec2 FP32Out; +layout(location = 1) out uint FP16Out; + +void main() +{ + FP32Out = unpackHalf2x16(FP16); + FP16Out = packHalf2x16(FP32); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/fp16.desktop.invalid.frag b/3rdparty/spirv-cross/shaders-msl/frag/fp16.desktop.invalid.frag new file mode 100644 index 000000000..1e4026eb2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/fp16.desktop.invalid.frag @@ -0,0 +1,151 @@ +#version 450 +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0) in float16_t v1; +layout(location = 1) in f16vec2 v2; +layout(location = 2) in f16vec3 v3; +layout(location = 3) in f16vec4 v4; + +layout(location = 0) out float o1; +layout(location = 1) out vec2 o2; +layout(location = 2) out vec3 o3; +layout(location = 3) out vec4 o4; + +f16mat2 test_mat2(f16vec2 a, f16vec2 b, f16vec2 c, f16vec2 d) +{ + return f16mat2(a, b) * f16mat2(c, d); +} + +f16mat3 test_mat3(f16vec3 a, f16vec3 b, f16vec3 c, f16vec3 d, f16vec3 e, f16vec3 f) +{ + return f16mat3(a, b, c) * f16mat3(d, e, f); +} + +void test_constants() +{ + float16_t a = 1.0hf; + float16_t b = 1.5hf; + float16_t c = -1.5hf; // Negatives + float16_t d = (0.0hf / 0.0hf); // NaN + float16_t e = (1.0hf / 0.0hf); // +Inf + float16_t f = (-1.0hf / 0.0hf); // -Inf + float16_t g = 1014.0hf; // Large. 
+ float16_t h = 0.000001hf; // Denormal +} + +float16_t test_result() +{ + return 1.0hf; +} + +void test_conversions() +{ + float16_t one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = bool(one); + float d = float(one); + //double e = double(one); + float16_t a2 = float16_t(a); + float16_t b2 = float16_t(b); + float16_t c2 = float16_t(c); + float16_t d2 = float16_t(d); + //float16_t e2 = float16_t(e); +} + +void test_builtins() +{ + f16vec4 res; + res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + res = asinh(v4); + res = acosh(v4); + res = atanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = inversesqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + res = roundEven(v4); + res = ceil(v4); + res = fract(v4); + res = mod(v4, v4); + f16vec4 tmp; + res = modf(v4, tmp); + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = mix(v4, v4, v4); + res = mix(v4, v4, lessThan(v4, v4)); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + + bvec4 btmp = isnan(v4); + btmp = isinf(v4); + res = fma(v4, v4, v4); + + ivec4 itmp; + res = frexp(v4, itmp); + res = ldexp(res, itmp); + + uint pack0 = packFloat2x16(v4.xy); + uint pack1 = packFloat2x16(v4.zw); + res = f16vec4(unpackFloat2x16(pack0), unpackFloat2x16(pack1)); + + float16_t t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + f16vec3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + + btmp = lessThan(v4, v4); + btmp = lessThanEqual(v4, v4); + btmp = greaterThan(v4, v4); + btmp = greaterThanEqual(v4, v4); + btmp = equal(v4, v4); + btmp = notEqual(v4, v4); + + res = dFdx(v4); + res = dFdy(v4); + res = dFdxFine(v4); + res = dFdyFine(v4); + res = dFdxCoarse(v4); + res = dFdyCoarse(v4); + res = fwidth(v4); + res = fwidthFine(v4); + res = fwidthCoarse(v4); + + //res = interpolateAtCentroid(v4); + //res = interpolateAtSample(v4, 0); + //res = interpolateAtOffset(v4, f16vec2(0.1hf)); +} + +void main() +{ + // Basic matrix tests. 
+ f16mat2 m0 = test_mat2(v2, v2, v3.xy, v3.xy); + f16mat3 m1 = test_mat3(v3, v3, v3, v4.xyz, v4.xyz, v4.yzw); + + test_constants(); + test_conversions(); + test_builtins(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/front-facing.frag b/3rdparty/spirv-cross/shaders-msl/frag/front-facing.frag new file mode 100644 index 000000000..90ca1abf4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/front-facing.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + if (gl_FrontFacing) + FragColor = vA; + else + FragColor = vB; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/gather-dref.frag b/3rdparty/spirv-cross/shaders-msl/frag/gather-dref.frag new file mode 100644 index 000000000..a8aac56cb --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/gather-dref.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(binding = 0) uniform mediump sampler2DShadow uT; +layout(location = 0) in vec3 vUV; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureGather(uT, vUV.xy, vUV.z); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/gather-offset.frag b/3rdparty/spirv-cross/shaders-msl/frag/gather-offset.frag new file mode 100644 index 000000000..409317ab5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/gather-offset.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uT; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureGather(uT, vec2(0.5), 3); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/helper-invocation.msl21.frag b/3rdparty/spirv-cross/shaders-msl/frag/helper-invocation.msl21.frag new file mode 100644 index 000000000..1da8c5763 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/helper-invocation.msl21.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vUV; +layout(binding = 0) uniform sampler2D uSampler; + +vec4 foo() +{ + vec4 color; + if (!gl_HelperInvocation) + color = textureLod(uSampler, vUV, 0.0); + else + color = vec4(1.0); + return color; +} + +void main() +{ + FragColor = foo(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/illegal-name-test-0.frag b/3rdparty/spirv-cross/shaders-msl/frag/illegal-name-test-0.frag new file mode 100644 index 000000000..8e6c11d1d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/illegal-name-test-0.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + vec4 fragment = vec4(10.0); + vec4 compute = vec4(10.0); + vec4 kernel = vec4(10.0); + vec4 vertex = vec4(10.0); + FragColor = fragment + compute + kernel + vertex; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/in_block.frag b/3rdparty/spirv-cross/shaders-msl/frag/in_block.frag new file mode 100644 index 000000000..59b97074e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/in_block.frag @@ -0,0 +1,14 @@ +#version 450 + +layout(location = 2) in VertexOut +{ + vec4 color; + vec4 color2; +} inputs; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = inputs.color + inputs.color2; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/in_mat.frag b/3rdparty/spirv-cross/shaders-msl/frag/in_mat.frag new file mode 100644 index 000000000..dd0b5d035 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/in_mat.frag @@ -0,0 +1,19 @@ +#version 
450 + +layout(binding = 1) uniform samplerCube samplerColor; + +layout(location = 0) in vec3 inPos; +layout(location = 1) in vec3 inNormal; +layout(location = 2) in mat4 inInvModelView; +layout(location = 6) in float inLodBias; +layout(location = 0) out vec4 outFragColor; + +void main() +{ + vec3 cI = normalize(inPos); + vec3 cR = reflect(cI, normalize(inNormal)); + cR = vec3((inInvModelView * vec4(cR, 0.0)).xyz); + cR.x *= (-1.0); + outFragColor = texture(samplerColor, cR, inLodBias); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/frag/input-attachment-ms.frag b/3rdparty/spirv-cross/shaders-msl/frag/input-attachment-ms.frag new file mode 100644 index 000000000..b3d44c943 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/input-attachment-ms.frag @@ -0,0 +1,15 @@ +#version 450 + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform subpassInputMS uSubpass1; +layout(location = 0) out vec4 FragColor; + +vec4 load_subpasses(mediump subpassInputMS uInput) +{ + return subpassLoad(uInput, gl_SampleID); +} + +void main() +{ + FragColor = subpassLoad(uSubpass0, 1) + subpassLoad(uSubpass1, 2) + load_subpasses(uSubpass0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/input-attachment.frag b/3rdparty/spirv-cross/shaders-msl/frag/input-attachment.frag new file mode 100644 index 000000000..877d0525a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/input-attachment.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform mediump subpassInput uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform mediump subpassInput uSubpass1; +layout(location = 0) out vec4 FragColor; + +vec4 load_subpasses(mediump subpassInput uInput) +{ + return subpassLoad(uInput); +} + +void main() +{ + FragColor = subpassLoad(uSubpass0) + load_subpasses(uSubpass1); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/interpolation-qualifiers-block.frag b/3rdparty/spirv-cross/shaders-msl/frag/interpolation-qualifiers-block.frag new file mode 100644 index 000000000..484208932 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/interpolation-qualifiers-block.frag @@ -0,0 +1,19 @@ +#version 450 + +struct Input { + vec2 v0; + vec2 v1; + vec3 v2; + vec4 v3; + float v4; + float v5; + float v6; +}; + +layout(location=0) in centroid noperspective Input inp; + +layout(location=0) out vec4 FragColor; + +void main() { + FragColor = vec4(inp.v0.x + inp.v1.y, inp.v2.xy, inp.v3.w * inp.v4 + inp.v5 - inp.v6); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/interpolation-qualifiers.frag b/3rdparty/spirv-cross/shaders-msl/frag/interpolation-qualifiers.frag new file mode 100644 index 000000000..ef8a4807f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/interpolation-qualifiers.frag @@ -0,0 +1,15 @@ +#version 450 + +layout(location=0) in vec2 v0; +layout(location=1) in noperspective vec2 v1; +layout(location=2) in centroid vec3 v2; +layout(location=3) in centroid noperspective vec4 v3; +layout(location=4) in sample float v4; +layout(location=5) in sample noperspective float v5; +layout(location=6) in flat float v6; + +layout(location=0) out vec4 FragColor; + +void main() { + FragColor = vec4(v0.x + v1.y, v2.xy, v3.w * v4 + v5 - v6); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/lut-promotion.frag b/3rdparty/spirv-cross/shaders-msl/frag/lut-promotion.frag new file mode 100644 index 
000000000..0cdc8148f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/lut-promotion.frag @@ -0,0 +1,44 @@ +#version 310 es +precision mediump float; +layout(location = 0) out float FragColor; +layout(location = 0) flat in int index; + +const float LUT[16] = float[]( + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0); + +void main() +{ + // Try reading LUTs, both in branches and not branch. + FragColor = LUT[index]; + if (index < 10) + FragColor += LUT[index ^ 1]; + else + FragColor += LUT[index & 1]; + + // Not declared as a LUT, but can be promoted to one. + vec4 foo[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + if (index > 30) + { + FragColor += foo[index & 3].y; + } + else + { + FragColor += foo[index & 1].x; + } + + // Not declared as a LUT, but this cannot be promoted, because we have a partial write. + vec4 foobar[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + if (index > 30) + { + foobar[1].z = 20.0; + } + FragColor += foobar[index & 3].z; + + // Not declared as a LUT, but this cannot be promoted, because we have two complete writes. + vec4 baz[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + baz = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + FragColor += baz[index & 3].z; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/mix.frag b/3rdparty/spirv-cross/shaders-msl/frag/mix.frag new file mode 100644 index 000000000..a5d589dd0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/mix.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vIn0; +layout(location = 1) in vec4 vIn1; +layout(location = 2) in float vIn2; +layout(location = 3) in float vIn3; +layout(location = 0) out vec4 FragColor; + +void main() +{ + bvec4 l = bvec4(false, true, false, false); + FragColor = mix(vIn0, vIn1, l); + + bool f = true; + FragColor = vec4(mix(vIn2, vIn3, f)); + + FragColor = f ? vIn0 : vIn1; + FragColor = vec4(f ? 
vIn2 : vIn3); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/mrt-array.frag b/3rdparty/spirv-cross/shaders-msl/frag/mrt-array.frag new file mode 100644 index 000000000..0460c72ab --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/mrt-array.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor[4]; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void write_deeper_in_function() +{ + FragColor[3] = vA * vB; +} + +void write_in_function() +{ + FragColor[2] = vA - vB; + write_deeper_in_function(); +} + +void main() +{ + FragColor[0] = mod(vA, vB); + FragColor[1] = vA + vB; + write_in_function(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/packed-expression-vector-shuffle.frag b/3rdparty/spirv-cross/shaders-msl/frag/packed-expression-vector-shuffle.frag new file mode 100644 index 000000000..995844381 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/packed-expression-vector-shuffle.frag @@ -0,0 +1,15 @@ +#version 450 +layout(location = 0) out vec4 FragColor; + +layout(binding = 0, std140) uniform UBO +{ + vec3 color; + float v; +}; + +void main() +{ + vec4 f = vec4(1.0); + f.rgb = color; + FragColor = f; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/packing-test-3.frag b/3rdparty/spirv-cross/shaders-msl/frag/packing-test-3.frag new file mode 100644 index 000000000..56ad6f5f1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/packing-test-3.frag @@ -0,0 +1,36 @@ +#version 450 + +struct VertexOutput +{ + vec4 HPosition; +}; + +struct TestStruct +{ + vec3 position; + float radius; +}; + +layout(binding = 0, std140) uniform CB0 +{ + TestStruct CB0[16]; +} _24; + +layout(location = 0) out vec4 _entryPointOutput; + +vec4 _main(VertexOutput IN) +{ + TestStruct st; + st.position = _24.CB0[1].position; + st.radius = _24.CB0[1].radius; + vec4 col = vec4(st.position, st.radius); + return col; +} + +void main() +{ + VertexOutput IN; + IN.HPosition = gl_FragCoord; + VertexOutput param = IN; + _entryPointOutput = _main(param); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/pls.frag b/3rdparty/spirv-cross/shaders-msl/frag/pls.frag new file mode 100644 index 000000000..e3863e4e0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/pls.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 PLSIn0; +layout(location = 1) in vec4 PLSIn1; +layout(location = 2) in vec4 PLSIn2; +layout(location = 3) in vec4 PLSIn3; + +layout(location = 0) out vec4 PLSOut0; +layout(location = 1) out vec4 PLSOut1; +layout(location = 2) out vec4 PLSOut2; +layout(location = 3) out vec4 PLSOut3; + +void main() +{ + PLSOut0 = 2.0 * PLSIn0; + PLSOut1 = 6.0 * PLSIn1; + PLSOut2 = 7.0 * PLSIn2; + PLSOut3 = 4.0 * PLSIn3; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/readonly-ssbo.frag b/3rdparty/spirv-cross/shaders-msl/frag/readonly-ssbo.frag new file mode 100644 index 000000000..9d7cff66f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/readonly-ssbo.frag @@ -0,0 +1,16 @@ +#version 450 +layout(location = 0) out vec4 FragColor; +layout(binding = 0, std430) readonly buffer SSBO +{ + vec4 v; +}; + +vec4 read_from_function() +{ + return v; +} + +void main() +{ + FragColor = v + read_from_function(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sample-depth-separate-image-sampler.frag b/3rdparty/spirv-cross/shaders-msl/frag/sample-depth-separate-image-sampler.frag new file mode 100644 index 000000000..db1f5e983 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-msl/frag/sample-depth-separate-image-sampler.frag @@ -0,0 +1,22 @@ +#version 450 + +layout(set = 0, binding = 0) uniform texture2D uDepth; +layout(set = 0, binding = 1) uniform texture2D uColor; +layout(set = 0, binding = 2) uniform sampler uSampler; +layout(set = 0, binding = 3) uniform samplerShadow uSamplerShadow; +layout(location = 0) out float FragColor; + +float sample_depth_from_function(texture2D uT, samplerShadow uS) +{ + return texture(sampler2DShadow(uT, uS), vec3(0.5)); +} + +float sample_color_from_function(texture2D uT, sampler uS) +{ + return texture(sampler2D(uT, uS), vec2(0.5)).x; +} + +void main() +{ + FragColor = sample_depth_from_function(uDepth, uSamplerShadow) + sample_color_from_function(uColor, uSampler); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sample-mask.frag b/3rdparty/spirv-cross/shaders-msl/frag/sample-mask.frag new file mode 100644 index 000000000..33ff0b2e6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sample-mask.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(1.0); + gl_SampleMask[0] = 0; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sample-position-func.frag b/3rdparty/spirv-cross/shaders-msl/frag/sample-position-func.frag new file mode 100644 index 000000000..d34b8968d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sample-position-func.frag @@ -0,0 +1,15 @@ +#version 450 + +layout(location = 0) in flat int index; + +layout(location = 0) out vec4 FragColor; + +vec4 getColor(int i) +{ + return vec4(gl_SamplePosition, i, 1.0); +} + +void main() +{ + FragColor = getColor(index); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sample-position.frag b/3rdparty/spirv-cross/shaders-msl/frag/sample-position.frag new file mode 100644 index 000000000..6f7502814 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sample-position.frag @@ -0,0 +1,8 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(gl_SamplePosition, gl_SampleID, 1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sampler-1d-lod.frag b/3rdparty/spirv-cross/shaders-msl/frag/sampler-1d-lod.frag new file mode 100644 index 000000000..f4526f39d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sampler-1d-lod.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in float vTex; +layout(binding = 0) uniform sampler1D uSampler; + +void main() +{ + FragColor += texture(uSampler, vTex, 2.0) + + textureLod(uSampler, vTex, 3.0) + + textureGrad(uSampler, vTex, 5.0, 8.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sampler-compare-cascade-gradient.frag b/3rdparty/spirv-cross/shaders-msl/frag/sampler-compare-cascade-gradient.frag new file mode 100644 index 000000000..9fd9e3ca0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sampler-compare-cascade-gradient.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform texture2DArray uTex; +layout(binding = 1) uniform samplerShadow uShadow; +layout(location = 0) in vec4 vUV; +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = textureGrad(sampler2DArrayShadow(uTex, uShadow), vUV, vec2(0.0), vec2(0.0)); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag b/3rdparty/spirv-cross/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag new file mode 100644 index 000000000..9fd9e3ca0 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-msl/frag/sampler-compare-cascade-gradient.ios.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(binding = 0) uniform texture2DArray uTex; +layout(binding = 1) uniform samplerShadow uShadow; +layout(location = 0) in vec4 vUV; +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = textureGrad(sampler2DArrayShadow(uTex, uShadow), vUV, vec2(0.0), vec2(0.0)); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sampler-image-arrays.msl2.frag b/3rdparty/spirv-cross/shaders-msl/frag/sampler-image-arrays.msl2.frag new file mode 100644 index 000000000..42370d972 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sampler-image-arrays.msl2.frag @@ -0,0 +1,33 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in vec2 vTex; +layout(location = 1) flat in int vIndex; +layout(binding = 0) uniform sampler2D uSampler[4]; +layout(binding = 4) uniform sampler uSamplers[4]; +layout(binding = 8) uniform texture2D uTextures[4]; + +vec4 sample_from_argument(sampler2D samplers[4]) +{ + return texture(samplers[vIndex], vTex + 0.2); +} + +vec4 sample_single_from_argument(sampler2D samp) +{ + return texture(samp, vTex + 0.3); +} + +vec4 sample_from_global() +{ + return texture(uSampler[vIndex], vTex + 0.1); +} + +void main() +{ + FragColor = vec4(0.0); + FragColor += texture(sampler2D(uTextures[2], uSamplers[1]), vTex); + FragColor += texture(uSampler[vIndex], vTex); + FragColor += sample_from_global(); + FragColor += sample_from_argument(uSampler); + FragColor += sample_single_from_argument(uSampler[3]); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sampler-ms.frag b/3rdparty/spirv-cross/shaders-msl/frag/sampler-ms.frag new file mode 100644 index 000000000..659392827 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sampler-ms.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2DMS uSampler; +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec2 coord = ivec2(gl_FragCoord.xy); + FragColor = + texelFetch(uSampler, coord, 0) + + texelFetch(uSampler, coord, 1) + + texelFetch(uSampler, coord, 2) + + texelFetch(uSampler, coord, 3); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/sampler.frag b/3rdparty/spirv-cross/shaders-msl/frag/sampler.frag new file mode 100644 index 000000000..e38f76886 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/sampler.frag @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; +layout(binding = 0) uniform sampler2D uTex; +layout(location = 0) out vec4 FragColor; + +vec4 sample_texture(sampler2D tex, vec2 uv) +{ + return texture(tex, uv); +} + +void main() +{ + FragColor = vColor * sample_texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/frag/separate-image-sampler-argument.frag b/3rdparty/spirv-cross/shaders-msl/frag/separate-image-sampler-argument.frag new file mode 100644 index 000000000..0475b019d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/separate-image-sampler-argument.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 1) uniform mediump texture2D uDepth; +layout(location = 0) out vec4 FragColor; + +vec4 samp(texture2D t, mediump sampler s) +{ + return texture(sampler2D(t, s), vec2(0.5)); +} + +void main() +{ + FragColor = samp(uDepth, uSampler); +} 
diff --git a/3rdparty/spirv-cross/shaders-msl/frag/shadow-compare-global-alias.invalid.frag b/3rdparty/spirv-cross/shaders-msl/frag/shadow-compare-global-alias.invalid.frag new file mode 100644 index 000000000..d885a7847 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/shadow-compare-global-alias.invalid.frag @@ -0,0 +1,38 @@ +#version 450 + +layout(location = 0) out float FragColor; +layout(binding = 0) uniform sampler2DShadow uSampler; +layout(location = 0) in vec3 vUV; + +layout(binding = 1) uniform texture2D uTex; +layout(binding = 2) uniform samplerShadow uSamp; + +float Samp(vec3 uv) +{ + return texture(sampler2DShadow(uTex, uSamp), uv); +} + +float Samp2(vec3 uv) +{ + return texture(uSampler, vUV); +} + +float Samp3(texture2D uT, samplerShadow uS, vec3 uv) +{ + return texture(sampler2DShadow(uT, uS), vUV); +} + +float Samp4(sampler2DShadow uS, vec3 uv) +{ + return texture(uS, vUV); +} + +void main() +{ + FragColor = texture(uSampler, vUV); + FragColor += texture(sampler2DShadow(uTex, uSamp), vUV); + FragColor += Samp(vUV); + FragColor += Samp2(vUV); + FragColor += Samp3(uTex, uSamp, vUV); + FragColor += Samp4(uSampler, vUV); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/spec-constant-block-size.frag b/3rdparty/spirv-cross/shaders-msl/frag/spec-constant-block-size.frag new file mode 100644 index 000000000..8d2b1f326 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/spec-constant-block-size.frag @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; + +layout(constant_id = 10) const int Value = 2; +layout(binding = 0) uniform SpecConstArray +{ + vec4 samples[Value]; +}; + +layout(location = 0) flat in int Index; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = samples[Index]; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/frag/spec-constant-ternary.frag b/3rdparty/spirv-cross/shaders-msl/frag/spec-constant-ternary.frag new file mode 100644 index 000000000..78dccbf04 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/spec-constant-ternary.frag @@ -0,0 +1,9 @@ +#version 450 +layout(location = 0) out float FragColor; +layout(constant_id = 0) const uint s = 10u; +const uint f = s > 20u ? 
30u : 50u; + +void main() +{ + FragColor = float(f); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/shaders-msl/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..d8aee43a6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/switch-unsigned-case.frag @@ -0,0 +1,26 @@ +#version 310 es +precision mediump float; + +#define ENUM_0 0u +#define ENUM_1 1u + +layout(set = 0, binding = 0) uniform Buff +{ + uint TestVal; +}; + +layout(location = 0) out vec4 fsout_Color; + +void main() +{ + fsout_Color = vec4(1.0); + switch (TestVal) + { + case ENUM_0: + fsout_Color = vec4(0.1); + break; + case ENUM_1: + fsout_Color = vec4(0.2); + break; + } +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/swizzle.frag b/3rdparty/spirv-cross/shaders-msl/frag/swizzle.frag new file mode 100644 index 000000000..af22dd655 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/swizzle.frag @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; + +layout(binding = 0) uniform sampler2D samp; +layout(location = 0) out vec4 FragColor; +layout(location = 1) in vec3 vNormal; +layout(location = 2) in vec2 vUV; + +void main() +{ + FragColor = vec4(texture(samp, vUV).xyz, 1.0); + FragColor = vec4(texture(samp, vUV).xz, 1.0, 4.0); + FragColor = vec4(texture(samp, vUV).xx, texture(samp, vUV + vec2(0.1)).yy); + FragColor = vec4(vNormal, 1.0); + FragColor = vec4(vNormal + 1.8, 1.0); + FragColor = vec4(vUV, vUV + 1.8); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/shaders-msl/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..e98748b8b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/texel-fetch-offset.frag @@ -0,0 +1,10 @@ +#version 310 es +precision mediump float; +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uTexture; + +void main() +{ + FragColor = texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(1, 1)); + FragColor += texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(-1, 1)); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/texture-multisample-array.msl21.frag b/3rdparty/spirv-cross/shaders-msl/frag/texture-multisample-array.msl21.frag new file mode 100644 index 000000000..ede809bd7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/texture-multisample-array.msl21.frag @@ -0,0 +1,10 @@ +#version 450 +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2DMSArray uTexture; +layout(location = 0) flat in ivec3 vCoord; +layout(location = 1) flat in int vSample; + +void main() +{ + FragColor = texelFetch(uTexture, vCoord, vSample); +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/texture-proj-shadow.frag b/3rdparty/spirv-cross/shaders-msl/frag/texture-proj-shadow.frag new file mode 100644 index 000000000..547532e64 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/texture-proj-shadow.frag @@ -0,0 +1,19 @@ +#version 450 + +layout(binding = 1) uniform sampler2DShadow uShadow2D; +layout(binding = 2) uniform sampler1D uSampler1D; +layout(binding = 3) uniform sampler2D uSampler2D; +layout(binding = 4) uniform sampler3D uSampler3D; + +layout(location = 0) out float FragColor; +layout(location = 0) in vec3 vClip3; +layout(location = 1) in vec4 vClip4; +layout(location = 2) in vec2 vClip2; + +void main() +{ + FragColor = textureProj(uShadow2D, vClip4); + FragColor = textureProj(uSampler1D, vClip2).x; + FragColor = textureProj(uSampler2D, vClip3).x; + FragColor = 
textureProj(uSampler3D, vClip4).x; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/ubo_layout.frag b/3rdparty/spirv-cross/shaders-msl/frag/ubo_layout.frag new file mode 100644 index 000000000..80f9f16d3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/ubo_layout.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; + +struct Str +{ + mat4 foo; +}; + +layout(binding = 0, std140) uniform UBO1 +{ + layout(row_major) Str foo; +} ubo1; + +layout(binding = 1, std140) uniform UBO2 +{ + layout(column_major) Str foo; +} ubo0; + +void main() +{ + FragColor = ubo1.foo.foo[0] + ubo0.foo.foo[0]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/unary-enclose.frag b/3rdparty/spirv-cross/shaders-msl/frag/unary-enclose.frag new file mode 100644 index 000000000..ea502e1de --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/unary-enclose.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn; +layout(location = 1) flat in ivec4 vIn1; + +void main() +{ + FragColor = +(-(-vIn)); + ivec4 a = ~(~vIn1); + + bool b = false; + b = !!b; +} diff --git a/3rdparty/spirv-cross/shaders-msl/frag/write-depth-in-function.frag b/3rdparty/spirv-cross/shaders-msl/frag/write-depth-in-function.frag new file mode 100644 index 000000000..1af7f5d9b --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/frag/write-depth-in-function.frag @@ -0,0 +1,14 @@ +#version 450 +layout(location = 0) in float v; +layout(location = 0) out float FragColor; + +void set_output_depth() +{ + gl_FragDepth = 0.2; +} + +void main() +{ + FragColor = 1.0; + set_output_depth(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/legacy/vert/transpose.legacy.vert b/3rdparty/spirv-cross/shaders-msl/legacy/vert/transpose.legacy.vert new file mode 100644 index 000000000..84f618262 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/legacy/vert/transpose.legacy.vert @@ -0,0 +1,20 @@ +#version 310 es + +uniform Buffer +{ + layout(row_major) mat4 MVPRowMajor; + layout(column_major) mat4 MVPColMajor; + mat4 M; +}; + +layout(location = 0) in vec4 Position; + +void main() +{ + vec4 c0 = M * (MVPRowMajor * Position); + vec4 c1 = M * (MVPColMajor * Position); + vec4 c2 = M * (Position * MVPRowMajor); + vec4 c3 = M * (Position * MVPColMajor); + gl_Position = c0 + c1 + c2 + c3; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vert/basic.vert b/3rdparty/spirv-cross/shaders-msl/vert/basic.vert new file mode 100644 index 000000000..8191dc2d0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/basic.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(std140) uniform UBO +{ + uniform mat4 uMVP; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/copy.flatten.vert b/3rdparty/spirv-cross/shaders-msl/vert/copy.flatten.vert new file mode 100644 index 000000000..4f1b8805e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/copy.flatten.vert @@ -0,0 +1,34 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + + vec4 Color; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; + + Light lights[4]; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec4 vColor; + +void main() +{ + gl_Position = uMVP * aVertex; + + vColor = vec4(0.0); + + for 
(int i = 0; i < 4; ++i) + { + Light light = lights[i]; + vec3 L = aVertex.xyz - light.Position; + vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / light.Radius, 0.0, 1.0) * lights[i].Color); + } +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/dynamic.flatten.vert b/3rdparty/spirv-cross/shaders-msl/vert/dynamic.flatten.vert new file mode 100644 index 000000000..a341d4528 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/dynamic.flatten.vert @@ -0,0 +1,33 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + + vec4 Color; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; + + Light lights[4]; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec4 vColor; + +void main() +{ + gl_Position = uMVP * aVertex; + + vColor = vec4(0.0); + + for (int i = 0; i < 4; ++i) + { + vec3 L = aVertex.xyz - lights[i].Position; + vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / lights[i].Radius, 0.0, 1.0) * lights[i].Color); + } +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/functions.vert b/3rdparty/spirv-cross/shaders-msl/vert/functions.vert new file mode 100644 index 000000000..b92074f46 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/functions.vert @@ -0,0 +1,28 @@ +#version 310 es + +layout(std140) uniform UBO +{ + uniform mat4 uMVP; + uniform vec3 rotDeg; + uniform vec3 rotRad; + uniform ivec2 bits; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; + +layout(location = 0) out vec3 vNormal; +layout(location = 1) out vec3 vRotDeg; +layout(location = 2) out vec3 vRotRad; +layout(location = 3) out ivec2 vLSB; +layout(location = 4) out ivec2 vMSB; + +void main() +{ + gl_Position = inverse(uMVP) * aVertex; + vNormal = aNormal; + vRotDeg = degrees(rotRad); + vRotRad = radians(rotDeg); + vLSB = findLSB(bits); + vMSB = findMSB(bits); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/in_out_array_mat.vert b/3rdparty/spirv-cross/shaders-msl/vert/in_out_array_mat.vert new file mode 100644 index 000000000..bdff3d280 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/in_out_array_mat.vert @@ -0,0 +1,41 @@ +#version 450 + +layout(binding = 0, std140) uniform UBO +{ + mat4 projection; + mat4 model; + float lodBias; +} ubo; + +layout(location = 0) in vec3 inPos; +layout(location = 1) in vec4 colors[3]; +layout(location = 4) in vec3 inNormal; +layout(location = 5) in mat4 inViewMat; +layout(location = 0) out vec3 outPos; +layout(location = 1) out vec3 outNormal; +layout(location = 2) out mat4 outTransModel; +layout(location = 6) out float outLodBias; +layout(location = 7) out vec4 color; + +void write_deeper_in_function() +{ + outTransModel[1][1] = ubo.lodBias; + color = colors[2]; +} + +void write_in_function() +{ + outTransModel[2] = vec4(inNormal, 1.0); + write_deeper_in_function(); +} + +void main() +{ + gl_Position = (ubo.projection * ubo.model) * vec4(inPos, 1.0); + outPos = vec3((ubo.model * vec4(inPos, 1.0)).xyz); + outNormal = mat3(vec3(ubo.model[0].x, ubo.model[0].y, ubo.model[0].z), vec3(ubo.model[1].x, ubo.model[1].y, ubo.model[1].z), vec3(ubo.model[2].x, ubo.model[2].y, ubo.model[2].z)) * inNormal; + outLodBias = ubo.lodBias; + outTransModel = transpose(ubo.model) * inViewMat; + write_in_function(); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vert/interface-block-block-composites.frag b/3rdparty/spirv-cross/shaders-msl/vert/interface-block-block-composites.frag new file mode 100644 index 000000000..a0fb7c975 --- /dev/null 
+++ b/3rdparty/spirv-cross/shaders-msl/vert/interface-block-block-composites.frag @@ -0,0 +1,17 @@ +#version 450 +layout(location = 0) in mat3 vMatrix; +layout(location = 4) in Vert +{ + mat3 wMatrix; + vec4 wTmp; + float arr[4]; +}; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = wMatrix[0].xxyy + wTmp + vMatrix[1].yyzz; + for (int i = 0; i < 4; i++) + FragColor += arr[i]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/interface-block-block-composites.vert b/3rdparty/spirv-cross/shaders-msl/vert/interface-block-block-composites.vert new file mode 100644 index 000000000..899a85221 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/interface-block-block-composites.vert @@ -0,0 +1,22 @@ +#version 450 +layout(location = 0) out mat3 vMatrix; +layout(location = 0) in mat3 Matrix; +layout(location = 4) in vec4 Pos; + +layout(location = 4) out Vert +{ + float arr[3]; + mat3 wMatrix; + vec4 wTmp; +}; + +void main() +{ + vMatrix = Matrix; + wMatrix = Matrix; + arr[0] = 1.0; + arr[1] = 2.0; + arr[2] = 3.0; + wTmp = Pos; + gl_Position = Pos; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/interpolation-qualifiers-block.vert b/3rdparty/spirv-cross/shaders-msl/vert/interpolation-qualifiers-block.vert new file mode 100644 index 000000000..73d6cbbd0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/interpolation-qualifiers-block.vert @@ -0,0 +1,26 @@ +#version 450 + +layout(location=0) in vec4 Position; + +struct Output { + vec2 v0; + vec2 v1; + vec3 v2; + vec4 v3; + float v4; + float v5; + float v6; +}; + +layout(location=0) out centroid noperspective Output outp; + +void main() { + outp.v0 = Position.xy; + outp.v1 = Position.zw; + outp.v2 = vec3(Position.x, Position.z * Position.y, Position.x); + outp.v3 = Position.xxyy; + outp.v4 = Position.w; + outp.v5 = Position.y; + outp.v6 = Position.x * Position.w; + gl_Position = Position; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/interpolation-qualifiers.vert b/3rdparty/spirv-cross/shaders-msl/vert/interpolation-qualifiers.vert new file mode 100644 index 000000000..541285f2c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/interpolation-qualifiers.vert @@ -0,0 +1,22 @@ +#version 450 + +layout(location=0) in vec4 Position; + +layout(location=0) out vec2 v0; +layout(location=1) out noperspective vec2 v1; +layout(location=2) out centroid vec3 v2; +layout(location=3) out centroid noperspective vec4 v3; +layout(location=4) out sample float v4; +layout(location=5) out sample noperspective float v5; +layout(location=6) out flat float v6; + +void main() { + v0 = Position.xy; + v1 = Position.zw; + v2 = vec3(Position.x, Position.z * Position.y, Position.x); + v3 = Position.xxyy; + v4 = Position.w; + v5 = Position.y; + v6 = Position.x * Position.w; + gl_Position = Position; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/layer.msl11.invalid.vert b/3rdparty/spirv-cross/shaders-msl/vert/layer.msl11.invalid.vert new file mode 100644 index 000000000..73b918c80 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/layer.msl11.invalid.vert @@ -0,0 +1,10 @@ +#version 450 +#extension GL_ARB_shader_viewport_layer_array : require + +layout(location = 0) in vec4 coord; + +void main() +{ + gl_Position = coord; + gl_Layer = int(coord.z); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.vert b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.vert new file mode 100644 index 000000000..3c2573a62 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.vert @@ -0,0 
+1,14 @@ +#version 450 + +layout(binding = 0, std430) writeonly buffer _10_12 +{ + uvec4 _m0[1024]; +} _12; + +layout(location = 0) in uvec4 _19; + +void main() +{ + _12._m0[gl_VertexIndex] = _19; +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_buff.vert b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_buff.vert new file mode 100644 index 000000000..96ed2bd1f --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_buff.vert @@ -0,0 +1,23 @@ +#version 450 + +layout(binding = 1, std430) writeonly buffer _33_35 +{ + uvec4 _m0[1024]; +} _35; + +layout(binding = 0, std140) uniform _38_40 +{ + uvec4 _m0[1024]; +} _40; + +layout(location = 0) in vec4 _14; + +void main() +{ + gl_Position = _14; + for (int _19 = 0; _19 < 1024; _19++) + { + _35._m0[_19] = _40._m0[_19]; + } +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_buff_atomic.vert b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_buff_atomic.vert new file mode 100644 index 000000000..607ac9f34 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_buff_atomic.vert @@ -0,0 +1,15 @@ +#version 450 + +layout(binding = 0, std430) coherent buffer _19_21 +{ + uint _m0; +} _21; + +layout(location = 0) in vec4 _14; + +void main() +{ + gl_Position = _14; + uint _26 = atomicAdd(_21._m0, 1u); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_tex.vert b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_tex.vert new file mode 100644 index 000000000..19c667391 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/no_stage_out.write_tex.vert @@ -0,0 +1,16 @@ +#version 450 + +layout(binding = 1, r32ui) uniform writeonly uimage1D _32; +layout(binding = 0, r32ui) uniform readonly uimage1D _35; + +layout(location = 0) in vec4 _14; + +void main() +{ + gl_Position = _14; + for (int _19 = 0; _19 < 128; _19++) + { + imageStore(_32, _19, imageLoad(_35, _19)); + } +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vert/out_block.vert b/3rdparty/spirv-cross/shaders-msl/vert/out_block.vert new file mode 100644 index 000000000..d7a50c783 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/out_block.vert @@ -0,0 +1,22 @@ +#version 450 + +uniform Transform +{ + mat4 transform; +} block; + +layout(location = 0) in vec3 position; +layout(location = 1) in vec4 color; + +layout(location = 2) out VertexOut +{ + vec4 color; + vec4 color2; +} outputs; + +void main() +{ + gl_Position = block.transform * vec4(position, 1.0); + outputs.color = color; + outputs.color2 = color + vec4(1.0); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/packed_matrix.vert b/3rdparty/spirv-cross/shaders-msl/vert/packed_matrix.vert new file mode 100644 index 000000000..4d99d2190 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/packed_matrix.vert @@ -0,0 +1,41 @@ +#version 450 + +layout(binding = 13, std140) uniform _1365_18812 +{ + layout(row_major) mat4x3 _m0; + layout(row_major) mat4x3 _m1; +} _18812; + +layout(binding = 12, std140) uniform _1126_22044 +{ + layout(row_major) mat4 _m0; + layout(row_major) mat4 _m1; + float _m9; + vec3 _m10; + float _m11; + vec3 _m12; + float _m17; + float _m18; + float _m19; + vec2 _m20; +} _22044; + +layout(location = 0) out vec3 _3976; +layout(location = 0) in vec4 _5275; + +vec3 _2; + +void main() +{ + vec3 _23783; + do + { + _23783 = normalize(_18812._m1 * vec4(_5275.xyz, 0.0)); + break; + } while (false); + vec4 _14995 = vec4(_22044._m10 + (_5275.xyz * (_22044._m17 + _22044._m18)), 1.0) * 
_22044._m0; + _3976 = _23783; + vec4 _6282 = _14995; + _6282.y = -_14995.y; + gl_Position = _6282; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/pointsize.vert b/3rdparty/spirv-cross/shaders-msl/vert/pointsize.vert new file mode 100644 index 000000000..0fc713638 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/pointsize.vert @@ -0,0 +1,15 @@ +#version 450 +uniform params { + mat4 mvp; + float psize; +}; + +layout(location = 0) in vec4 position; +layout(location = 1) in vec4 color0; +layout(location = 0) out vec4 color; + +void main() { + gl_Position = mvp * position; + gl_PointSize = psize; + color = color0; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/shaders-msl/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..792fb8e36 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/read-from-row-major-array.vert @@ -0,0 +1,20 @@ +#version 310 es +layout(location = 0) in highp vec4 a_position; +layout(location = 0) out mediump float v_vtxResult; + +layout(set = 0, binding = 0, std140, row_major) uniform Block +{ + highp mat2x3 var[3][4]; +}; + +mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 1.0 : 0.0; } +mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); } +mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); } + +void main (void) +{ + gl_Position = a_position; + mediump float result = 1.0; + result *= compare_mat2x3(var[0][0], mat2x3(2.0, 6.0, -6.0, 0.0, 5.0, 5.0)); + v_vtxResult = result; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/resource-arrays-leaf.ios.vert b/3rdparty/spirv-cross/shaders-msl/vert/resource-arrays-leaf.ios.vert new file mode 100644 index 000000000..d097d5983 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/resource-arrays-leaf.ios.vert @@ -0,0 +1,26 @@ +#version 450 + +layout(constant_id = 0) const int arraySize = 3; + +layout(binding = 0, rgba32i) uniform iimage2D images[arraySize]; +uniform constant_block +{ + vec4 foo; + int bar; +} constants[4]; +buffer storage_block +{ + uvec4 baz; + ivec2 quux; +} storage[2]; + +void doWork() +{ + storage[0].baz = uvec4(constants[3].foo); + storage[1].quux = imageLoad(images[2], ivec2(constants[1].bar)).xy; +} + +void main() +{ + doWork(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/resource-arrays.ios.vert b/3rdparty/spirv-cross/shaders-msl/vert/resource-arrays.ios.vert new file mode 100644 index 000000000..a2082a437 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/resource-arrays.ios.vert @@ -0,0 +1,21 @@ +#version 450 + +layout(constant_id = 0) const int arraySize = 3; + +layout(binding = 0, rgba32i) uniform iimage2D images[arraySize]; +uniform constant_block +{ + vec4 foo; + int bar; +} constants[4]; +buffer storage_block +{ + uvec4 baz; + ivec2 quux; +} storage[2]; + +void main() +{ + storage[0].baz = uvec4(constants[3].foo); + storage[1].quux = imageLoad(images[2], ivec2(constants[1].bar)).xy; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/return-array.vert b/3rdparty/spirv-cross/shaders-msl/vert/return-array.vert new file mode 100644 index 000000000..708460114 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/return-array.vert @@ -0,0 +1,22 @@ +#version 310 es + +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; + +vec4[2] test() +{ + return 
vec4[](vec4(10.0), vec4(20.0)); +} + +vec4[2] test2() +{ + vec4 foobar[2]; + foobar[0] = vInput0; + foobar[1] = vInput1; + return foobar; +} + +void main() +{ + gl_Position = test()[0] + test2()[1]; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/set_builtin_in_func.vert b/3rdparty/spirv-cross/shaders-msl/vert/set_builtin_in_func.vert new file mode 100644 index 000000000..dd991e354 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/set_builtin_in_func.vert @@ -0,0 +1,12 @@ +#version 450 + +void write_outblock() +{ + gl_PointSize = 1.0; + gl_Position = vec4(gl_PointSize); +} + +void main() +{ + write_outblock(); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/sign-int-types.vert b/3rdparty/spirv-cross/shaders-msl/vert/sign-int-types.vert new file mode 100644 index 000000000..f8530a466 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/sign-int-types.vert @@ -0,0 +1,38 @@ +#version 310 es + +layout(std140) uniform UBO +{ + uniform mat4 uMVP; + uniform vec4 uFloatVec4; + uniform vec3 uFloatVec3; + uniform vec2 uFloatVec2; + uniform float uFloat; + uniform ivec4 uIntVec4; + uniform ivec3 uIntVec3; + uniform ivec2 uIntVec2; + uniform int uInt; +}; + +layout(location = 0) in vec4 aVertex; + +layout(location = 0) out vec4 vFloatVec4; +layout(location = 1) out vec3 vFloatVec3; +layout(location = 2) out vec2 vFloatVec2; +layout(location = 3) out float vFloat; +layout(location = 4) flat out ivec4 vIntVec4; +layout(location = 5) flat out ivec3 vIntVec3; +layout(location = 6) flat out ivec2 vIntVec2; +layout(location = 7) flat out int vInt; + +void main() +{ + gl_Position = uMVP * aVertex; + vFloatVec4 = sign(uFloatVec4); + vFloatVec3 = sign(uFloatVec3); + vFloatVec2 = sign(uFloatVec2); + vFloat = sign(uFloat); + vIntVec4 = sign(uIntVec4); + vIntVec3 = sign(uIntVec3); + vIntVec2 = sign(uIntVec2); + vInt = sign(uInt); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/texture_buffer.vert b/3rdparty/spirv-cross/shaders-msl/vert/texture_buffer.vert new file mode 100644 index 000000000..6bc7ddfae --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/texture_buffer.vert @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_OES_texture_buffer : require + +layout(binding = 4) uniform highp samplerBuffer uSamp; +layout(rgba32f, binding = 5) uniform readonly highp imageBuffer uSampo; + +void main() +{ + gl_Position = texelFetch(uSamp, 10) + imageLoad(uSampo, 100); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/ubo.alignment.vert b/3rdparty/spirv-cross/shaders-msl/vert/ubo.alignment.vert new file mode 100644 index 000000000..2e9d16df4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/ubo.alignment.vert @@ -0,0 +1,23 @@ +#version 310 es + +layout(binding = 0, std140) uniform UBO +{ + mat4 mvp; + vec2 targSize; + vec3 color; // vec3 following vec2 should cause MSL to add pad if float3 is packed + float opacity; // Single float following vec3 should cause MSL float3 to pack +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; +layout(location = 1) out vec3 vColor; +layout(location = 2) out vec2 vSize; + +void main() +{ + gl_Position = mvp * aVertex; + vNormal = aNormal; + vColor = color * opacity; + vSize = targSize * opacity; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/ubo.vert b/3rdparty/spirv-cross/shaders-msl/vert/ubo.vert new file mode 100644 index 000000000..82e4626e1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/ubo.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(binding = 
0, std140) uniform UBO +{ + mat4 mvp; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = mvp * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vert/viewport-index.msl2.invalid.vert b/3rdparty/spirv-cross/shaders-msl/vert/viewport-index.msl2.invalid.vert new file mode 100644 index 000000000..c05c10484 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vert/viewport-index.msl2.invalid.vert @@ -0,0 +1,10 @@ +#version 450 +#extension GL_ARB_shader_viewport_layer_array : require + +layout(location = 0) in vec4 coord; + +void main() +{ + gl_Position = coord; + gl_ViewportIndex = int(coord.z); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vulkan/frag/push-constant.vk.frag b/3rdparty/spirv-cross/shaders-msl/vulkan/frag/push-constant.vk.frag new file mode 100644 index 000000000..6180faba3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vulkan/frag/push-constant.vk.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; + +layout(push_constant, std430) uniform PushConstants +{ + vec4 value0; + vec4 value1; +} push; + +layout(location = 0) in vec4 vColor; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vColor + push.value0 + push.value1; +} diff --git a/3rdparty/spirv-cross/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag b/3rdparty/spirv-cross/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag new file mode 100644 index 000000000..3cb75da5c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vulkan/frag/spec-constant.msl11.vk.frag @@ -0,0 +1,67 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(constant_id = 1) const float a = 1.0; +layout(constant_id = 2) const float b = 2.0; +layout(constant_id = 3) const int c = 3; +layout(constant_id = 4) const int d = 4; +layout(constant_id = 5) const uint e = 5u; +layout(constant_id = 6) const uint f = 6u; +layout(constant_id = 7) const bool g = false; +layout(constant_id = 8) const bool h = true; +// glslang doesn't seem to support partial spec constants or composites yet, so only test the basics. + +void main() +{ + float t0 = a; + float t1 = b; + + uint c0 = uint(c); // OpIAdd with different types. + // FConvert, float-to-double. + int c1 = -c; // SNegate + int c2 = ~c; // OpNot + int c3 = c + d; // OpIAdd + int c4 = c - d; // OpISub + int c5 = c * d; // OpIMul + int c6 = c / d; // OpSDiv + uint c7 = e / f; // OpUDiv + int c8 = c % d; // OpSMod + uint c9 = e % f; // OpUMod + // TODO: OpSRem, any way to access this in GLSL? + int c10 = c >> d; // OpShiftRightArithmetic + uint c11 = e >> f; // OpShiftRightLogical + int c12 = c << d; // OpShiftLeftLogical + int c13 = c | d; // OpBitwiseOr + int c14 = c ^ d; // OpBitwiseXor + int c15 = c & d; // OpBitwiseAnd + // VectorShuffle, CompositeExtract, CompositeInsert, not testable atm. + bool c16 = g || h; // OpLogicalOr + bool c17 = g && h; // OpLogicalAnd + bool c18 = !g; // OpLogicalNot + bool c19 = g == h; // OpLogicalEqual + bool c20 = g != h; // OpLogicalNotEqual + // OpSelect not testable atm. 
+ bool c21 = c == d; // OpIEqual + bool c22 = c != d; // OpINotEqual + bool c23 = c < d; // OpSLessThan + bool c24 = e < f; // OpULessThan + bool c25 = c > d; // OpSGreaterThan + bool c26 = e > f; // OpUGreaterThan + bool c27 = c <= d; // OpSLessThanEqual + bool c28 = e <= f; // OpULessThanEqual + bool c29 = c >= d; // OpSGreaterThanEqual + bool c30 = e >= f; // OpUGreaterThanEqual + // OpQuantizeToF16 not testable atm. + + int c31 = c8 + c3; + + int c32 = int(e); // OpIAdd with different types. + bool c33 = bool(c); // int -> bool + bool c34 = bool(e); // uint -> bool + int c35 = int(g); // bool -> int + uint c36 = uint(g); // bool -> uint + float c37 = float(g); // bool -> float + + FragColor = vec4(t0 + t1); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vulkan/frag/spec-constant.vk.frag b/3rdparty/spirv-cross/shaders-msl/vulkan/frag/spec-constant.vk.frag new file mode 100644 index 000000000..3cb75da5c --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vulkan/frag/spec-constant.vk.frag @@ -0,0 +1,67 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(constant_id = 1) const float a = 1.0; +layout(constant_id = 2) const float b = 2.0; +layout(constant_id = 3) const int c = 3; +layout(constant_id = 4) const int d = 4; +layout(constant_id = 5) const uint e = 5u; +layout(constant_id = 6) const uint f = 6u; +layout(constant_id = 7) const bool g = false; +layout(constant_id = 8) const bool h = true; +// glslang doesn't seem to support partial spec constants or composites yet, so only test the basics. + +void main() +{ + float t0 = a; + float t1 = b; + + uint c0 = uint(c); // OpIAdd with different types. + // FConvert, float-to-double. + int c1 = -c; // SNegate + int c2 = ~c; // OpNot + int c3 = c + d; // OpIAdd + int c4 = c - d; // OpISub + int c5 = c * d; // OpIMul + int c6 = c / d; // OpSDiv + uint c7 = e / f; // OpUDiv + int c8 = c % d; // OpSMod + uint c9 = e % f; // OpUMod + // TODO: OpSRem, any way to access this in GLSL? + int c10 = c >> d; // OpShiftRightArithmetic + uint c11 = e >> f; // OpShiftRightLogical + int c12 = c << d; // OpShiftLeftLogical + int c13 = c | d; // OpBitwiseOr + int c14 = c ^ d; // OpBitwiseXor + int c15 = c & d; // OpBitwiseAnd + // VectorShuffle, CompositeExtract, CompositeInsert, not testable atm. + bool c16 = g || h; // OpLogicalOr + bool c17 = g && h; // OpLogicalAnd + bool c18 = !g; // OpLogicalNot + bool c19 = g == h; // OpLogicalEqual + bool c20 = g != h; // OpLogicalNotEqual + // OpSelect not testable atm. + bool c21 = c == d; // OpIEqual + bool c22 = c != d; // OpINotEqual + bool c23 = c < d; // OpSLessThan + bool c24 = e < f; // OpULessThan + bool c25 = c > d; // OpSGreaterThan + bool c26 = e > f; // OpUGreaterThan + bool c27 = c <= d; // OpSLessThanEqual + bool c28 = e <= f; // OpULessThanEqual + bool c29 = c >= d; // OpSGreaterThanEqual + bool c30 = e >= f; // OpUGreaterThanEqual + // OpQuantizeToF16 not testable atm. + + int c31 = c8 + c3; + + int c32 = int(e); // OpIAdd with different types. 
+ bool c33 = bool(c); // int -> bool + bool c34 = bool(e); // uint -> bool + int c35 = int(g); // bool -> int + uint c36 = uint(g); // bool -> uint + float c37 = float(g); // bool -> float + + FragColor = vec4(t0 + t1); +} diff --git a/3rdparty/spirv-cross/shaders-msl/vulkan/vert/small-storage.vk.vert b/3rdparty/spirv-cross/shaders-msl/vulkan/vert/small-storage.vk.vert new file mode 100644 index 000000000..0ca8144ce --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vulkan/vert/small-storage.vk.vert @@ -0,0 +1,38 @@ +#version 450 core + +// GL_EXT_shader_16bit_storage doesn't support input/output. +#extension GL_EXT_shader_8bit_storage : require +#extension GL_AMD_gpu_shader_int16 : require +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0) in int16_t foo; +layout(location = 1) in uint16_t bar; +layout(location = 2) in float16_t baz; + +layout(binding = 0) uniform block { + i16vec2 a; + u16vec2 b; + i8vec2 c; + u8vec2 d; + f16vec2 e; +}; + +layout(binding = 1) readonly buffer storage { + i16vec3 f; + u16vec3 g; + i8vec3 h; + u8vec3 i; + f16vec3 j; +}; + +layout(location = 0) out i16vec4 p; +layout(location = 1) out u16vec4 q; +layout(location = 2) out f16vec4 r; + +void main() { + p = i16vec4(int(foo) + ivec4(ivec2(a), ivec2(c)) - ivec4(ivec3(f) / ivec3(h), 1)); + q = u16vec4(uint(bar) + uvec4(uvec2(b), uvec2(d)) - uvec4(uvec3(g) / uvec3(i), 1)); + r = f16vec4(float(baz) + vec4(vec2(e), 0, 1) - vec4(vec3(j), 1)); + gl_Position = vec4(0, 0, 0, 1); +} + diff --git a/3rdparty/spirv-cross/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert b/3rdparty/spirv-cross/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert new file mode 100644 index 000000000..4d0438ace --- /dev/null +++ b/3rdparty/spirv-cross/shaders-msl/vulkan/vert/vulkan-vertex.vk.vert @@ -0,0 +1,6 @@ +#version 310 es + +void main() +{ + gl_Position = float(gl_VertexIndex + gl_InstanceIndex) * vec4(1.0, 2.0, 3.0, 4.0); +} diff --git a/3rdparty/spirv-cross/shaders-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag b/3rdparty/spirv-cross/shaders-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag new file mode 100644 index 000000000..8b09e5b68 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/asm/frag/inliner-dominator-inside-loop.asm.frag @@ -0,0 +1,646 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 1532 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %IN_HPosition %IN_Uv_EdgeDistance1 %IN_UvStuds_EdgeDistance2 %IN_Color %IN_LightPosition_Fog %IN_View_Depth %IN_Normal_SpecPower %IN_Tangent %IN_PosLightSpace_Reflectance %IN_studIndex %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %VertexOutput "VertexOutput" + OpMemberName %VertexOutput 0 "HPosition" + OpMemberName %VertexOutput 1 "Uv_EdgeDistance1" + OpMemberName %VertexOutput 2 "UvStuds_EdgeDistance2" + OpMemberName %VertexOutput 3 "Color" + OpMemberName %VertexOutput 4 "LightPosition_Fog" + OpMemberName %VertexOutput 5 "View_Depth" + OpMemberName %VertexOutput 6 "Normal_SpecPower" + OpMemberName %VertexOutput 7 "Tangent" + OpMemberName %VertexOutput 8 "PosLightSpace_Reflectance" + OpMemberName %VertexOutput 9 "studIndex" + OpName %Surface "Surface" + OpMemberName %Surface 0 "albedo" + OpMemberName %Surface 1 "normal" + OpMemberName %Surface 2 "specular" + OpMemberName %Surface 3 "gloss" + OpMemberName %Surface 4 "reflectance" + OpMemberName %Surface 5 
"opacity" + OpName %SurfaceInput "SurfaceInput" + OpMemberName %SurfaceInput 0 "Color" + OpMemberName %SurfaceInput 1 "Uv" + OpMemberName %SurfaceInput 2 "UvStuds" + OpName %Globals "Globals" + OpMemberName %Globals 0 "ViewProjection" + OpMemberName %Globals 1 "ViewRight" + OpMemberName %Globals 2 "ViewUp" + OpMemberName %Globals 3 "ViewDir" + OpMemberName %Globals 4 "CameraPosition" + OpMemberName %Globals 5 "AmbientColor" + OpMemberName %Globals 6 "Lamp0Color" + OpMemberName %Globals 7 "Lamp0Dir" + OpMemberName %Globals 8 "Lamp1Color" + OpMemberName %Globals 9 "FogParams" + OpMemberName %Globals 10 "FogColor" + OpMemberName %Globals 11 "LightBorder" + OpMemberName %Globals 12 "LightConfig0" + OpMemberName %Globals 13 "LightConfig1" + OpMemberName %Globals 14 "LightConfig2" + OpMemberName %Globals 15 "LightConfig3" + OpMemberName %Globals 16 "RefractionBias_FadeDistance_GlowFactor" + OpMemberName %Globals 17 "OutlineBrightness_ShadowInfo" + OpMemberName %Globals 18 "ShadowMatrix0" + OpMemberName %Globals 19 "ShadowMatrix1" + OpMemberName %Globals 20 "ShadowMatrix2" + OpName %CB0 "CB0" + OpMemberName %CB0 0 "CB0" + OpName %_ "" + OpName %LightMapTexture "LightMapTexture" + OpName %LightMapSampler "LightMapSampler" + OpName %ShadowMapSampler "ShadowMapSampler" + OpName %ShadowMapTexture "ShadowMapTexture" + OpName %EnvironmentMapTexture "EnvironmentMapTexture" + OpName %EnvironmentMapSampler "EnvironmentMapSampler" + OpName %IN_HPosition "IN.HPosition" + OpName %IN_Uv_EdgeDistance1 "IN.Uv_EdgeDistance1" + OpName %IN_UvStuds_EdgeDistance2 "IN.UvStuds_EdgeDistance2" + OpName %IN_Color "IN.Color" + OpName %IN_LightPosition_Fog "IN.LightPosition_Fog" + OpName %IN_View_Depth "IN.View_Depth" + OpName %IN_Normal_SpecPower "IN.Normal_SpecPower" + OpName %IN_Tangent "IN.Tangent" + OpName %IN_PosLightSpace_Reflectance "IN.PosLightSpace_Reflectance" + OpName %IN_studIndex "IN.studIndex" + OpName %_entryPointOutput "@entryPointOutput" + OpName %DiffuseMapSampler "DiffuseMapSampler" + OpName %DiffuseMapTexture "DiffuseMapTexture" + OpName %NormalMapSampler "NormalMapSampler" + OpName %NormalMapTexture "NormalMapTexture" + OpName %NormalDetailMapTexture "NormalDetailMapTexture" + OpName %NormalDetailMapSampler "NormalDetailMapSampler" + OpName %StudsMapTexture "StudsMapTexture" + OpName %StudsMapSampler "StudsMapSampler" + OpName %SpecularMapSampler "SpecularMapSampler" + OpName %SpecularMapTexture "SpecularMapTexture" + OpName %Params "Params" + OpMemberName %Params 0 "LqmatFarTilingFactor" + OpName %CB2 "CB2" + OpMemberName %CB2 0 "CB2" + OpMemberDecorate %Globals 0 ColMajor + OpMemberDecorate %Globals 0 Offset 0 + OpMemberDecorate %Globals 0 MatrixStride 16 + OpMemberDecorate %Globals 1 Offset 64 + OpMemberDecorate %Globals 2 Offset 80 + OpMemberDecorate %Globals 3 Offset 96 + OpMemberDecorate %Globals 4 Offset 112 + OpMemberDecorate %Globals 5 Offset 128 + OpMemberDecorate %Globals 6 Offset 144 + OpMemberDecorate %Globals 7 Offset 160 + OpMemberDecorate %Globals 8 Offset 176 + OpMemberDecorate %Globals 9 Offset 192 + OpMemberDecorate %Globals 10 Offset 208 + OpMemberDecorate %Globals 11 Offset 224 + OpMemberDecorate %Globals 12 Offset 240 + OpMemberDecorate %Globals 13 Offset 256 + OpMemberDecorate %Globals 14 Offset 272 + OpMemberDecorate %Globals 15 Offset 288 + OpMemberDecorate %Globals 16 Offset 304 + OpMemberDecorate %Globals 17 Offset 320 + OpMemberDecorate %Globals 18 Offset 336 + OpMemberDecorate %Globals 19 Offset 352 + OpMemberDecorate %Globals 20 Offset 368 + OpMemberDecorate %CB0 0 Offset 
0 + OpDecorate %CB0 Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %LightMapTexture DescriptorSet 1 + OpDecorate %LightMapTexture Binding 6 + OpDecorate %LightMapSampler DescriptorSet 1 + OpDecorate %LightMapSampler Binding 6 + OpDecorate %ShadowMapSampler DescriptorSet 1 + OpDecorate %ShadowMapSampler Binding 1 + OpDecorate %ShadowMapTexture DescriptorSet 1 + OpDecorate %ShadowMapTexture Binding 1 + OpDecorate %EnvironmentMapTexture DescriptorSet 1 + OpDecorate %EnvironmentMapTexture Binding 2 + OpDecorate %EnvironmentMapSampler DescriptorSet 1 + OpDecorate %EnvironmentMapSampler Binding 2 + OpDecorate %IN_HPosition BuiltIn FragCoord + OpDecorate %IN_Uv_EdgeDistance1 Location 0 + OpDecorate %IN_UvStuds_EdgeDistance2 Location 1 + OpDecorate %IN_Color Location 2 + OpDecorate %IN_LightPosition_Fog Location 3 + OpDecorate %IN_View_Depth Location 4 + OpDecorate %IN_Normal_SpecPower Location 5 + OpDecorate %IN_Tangent Location 6 + OpDecorate %IN_PosLightSpace_Reflectance Location 7 + OpDecorate %IN_studIndex Location 8 + OpDecorate %_entryPointOutput Location 0 + OpDecorate %DiffuseMapSampler DescriptorSet 1 + OpDecorate %DiffuseMapSampler Binding 3 + OpDecorate %DiffuseMapTexture DescriptorSet 1 + OpDecorate %DiffuseMapTexture Binding 3 + OpDecorate %NormalMapSampler DescriptorSet 1 + OpDecorate %NormalMapSampler Binding 4 + OpDecorate %NormalMapTexture DescriptorSet 1 + OpDecorate %NormalMapTexture Binding 4 + OpDecorate %NormalDetailMapTexture DescriptorSet 1 + OpDecorate %NormalDetailMapTexture Binding 8 + OpDecorate %NormalDetailMapSampler DescriptorSet 1 + OpDecorate %NormalDetailMapSampler Binding 8 + OpDecorate %StudsMapTexture DescriptorSet 1 + OpDecorate %StudsMapTexture Binding 0 + OpDecorate %StudsMapSampler DescriptorSet 1 + OpDecorate %StudsMapSampler Binding 0 + OpDecorate %SpecularMapSampler DescriptorSet 1 + OpDecorate %SpecularMapSampler Binding 5 + OpDecorate %SpecularMapTexture DescriptorSet 1 + OpDecorate %SpecularMapTexture Binding 5 + OpMemberDecorate %Params 0 Offset 0 + OpMemberDecorate %CB2 0 Offset 0 + OpDecorate %CB2 Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %8 = OpTypeFunction %float %_ptr_Function_float + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %v3float = OpTypeVector %float 3 + %18 = OpTypeFunction %v3float %_ptr_Function_v4float +%_ptr_Function_v3float = OpTypePointer Function %v3float + %23 = OpTypeFunction %v4float %_ptr_Function_v3float + %27 = OpTypeFunction %float %_ptr_Function_v3float + %31 = OpTypeFunction %float %_ptr_Function_float %_ptr_Function_float + %36 = OpTypeSampler +%_ptr_Function_36 = OpTypePointer Function %36 + %38 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_Function_38 = OpTypePointer Function %38 + %40 = OpTypeFunction %float %_ptr_Function_36 %_ptr_Function_38 %_ptr_Function_v3float %_ptr_Function_float +%VertexOutput = OpTypeStruct %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v3float %v4float %float +%_ptr_Function_VertexOutput = OpTypePointer Function %VertexOutput + %Surface = OpTypeStruct %v3float %v3float %float %float %float %float + %50 = OpTypeFunction %Surface %_ptr_Function_VertexOutput + %54 = OpTypeFunction %v4float %_ptr_Function_VertexOutput + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %60 = OpTypeFunction %v4float %_ptr_Function_36 %_ptr_Function_38 %_ptr_Function_v2float 
%_ptr_Function_float %_ptr_Function_float +%SurfaceInput = OpTypeStruct %v4float %v2float %v2float +%_ptr_Function_SurfaceInput = OpTypePointer Function %SurfaceInput + %70 = OpTypeFunction %Surface %_ptr_Function_SurfaceInput %_ptr_Function_v2float + %float_0 = OpConstant %float 0 + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 +%mat4v4float = OpTypeMatrix %v4float 4 + %Globals = OpTypeStruct %mat4v4float %v4float %v4float %v4float %v3float %v3float %v3float %v3float %v3float %v4float %v3float %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v4float %v4float + %CB0 = OpTypeStruct %Globals +%_ptr_Uniform_CB0 = OpTypePointer Uniform %CB0 + %_ = OpVariable %_ptr_Uniform_CB0 Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %int_15 = OpConstant %int 15 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_14 = OpConstant %int 14 + %128 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %133 = OpTypeImage %float 3D 0 0 0 1 Unknown +%_ptr_UniformConstant_133 = OpTypePointer UniformConstant %133 +%LightMapTexture = OpVariable %_ptr_UniformConstant_133 UniformConstant +%_ptr_UniformConstant_36 = OpTypePointer UniformConstant %36 +%LightMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant + %140 = OpTypeSampledImage %133 + %int_11 = OpConstant %int 11 + %uint = OpTypeInt 32 0 + %float_9 = OpConstant %float 9 + %float_20 = OpConstant %float 20 + %float_0_5 = OpConstant %float 0.5 + %183 = OpTypeSampledImage %38 + %uint_0 = OpConstant %uint 0 + %uint_1 = OpConstant %uint 1 + %int_17 = OpConstant %int 17 + %uint_3 = OpConstant %uint 3 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %float_0_25 = OpConstant %float 0.25 + %int_5 = OpConstant %int 5 +%float_0_00333333 = OpConstant %float 0.00333333 + %int_16 = OpConstant %int 16 +%_ptr_Function_Surface = OpTypePointer Function %Surface + %int_6 = OpConstant %int 6 + %int_7 = OpConstant %int 7 +%_ptr_Uniform_v3float = OpTypePointer Uniform %v3float + %int_8 = OpConstant %int 8 +%ShadowMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%_ptr_UniformConstant_38 = OpTypePointer UniformConstant %38 +%ShadowMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant + %367 = OpTypeImage %float Cube 0 0 0 1 Unknown +%_ptr_UniformConstant_367 = OpTypePointer UniformConstant %367 +%EnvironmentMapTexture = OpVariable %_ptr_UniformConstant_367 UniformConstant +%EnvironmentMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant + %373 = OpTypeSampledImage %367 + %float_1_5 = OpConstant %float 1.5 + %int_10 = OpConstant %int 10 +%_ptr_Input_v4float = OpTypePointer Input %v4float +%IN_HPosition = OpVariable %_ptr_Input_v4float Input +%IN_Uv_EdgeDistance1 = OpVariable %_ptr_Input_v4float Input +%IN_UvStuds_EdgeDistance2 = OpVariable %_ptr_Input_v4float Input + %IN_Color = OpVariable %_ptr_Input_v4float Input +%IN_LightPosition_Fog = OpVariable %_ptr_Input_v4float Input +%IN_View_Depth = OpVariable %_ptr_Input_v4float Input +%IN_Normal_SpecPower = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_v3float = OpTypePointer Input %v3float + %IN_Tangent = OpVariable %_ptr_Input_v3float Input +%IN_PosLightSpace_Reflectance = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_float = OpTypePointer Input %float +%IN_studIndex = OpVariable %_ptr_Input_float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %bool = OpTypeBool +%DiffuseMapSampler = OpVariable %_ptr_UniformConstant_36 
UniformConstant +%DiffuseMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%NormalMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%NormalMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%NormalDetailMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%NormalDetailMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant + %float_0_3 = OpConstant %float 0.3 +%StudsMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant +%StudsMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%SpecularMapSampler = OpVariable %_ptr_UniformConstant_36 UniformConstant +%SpecularMapTexture = OpVariable %_ptr_UniformConstant_38 UniformConstant + %float_0_75 = OpConstant %float 0.75 + %float_256 = OpConstant %float 256 + %689 = OpConstantComposite %v2float %float_2 %float_256 + %float_0_01 = OpConstant %float 0.01 + %692 = OpConstantComposite %v2float %float_0 %float_0_01 + %float_0_8 = OpConstant %float 0.8 + %float_120 = OpConstant %float 120 + %697 = OpConstantComposite %v2float %float_0_8 %float_120 + %Params = OpTypeStruct %v4float + %CB2 = OpTypeStruct %Params +%_ptr_Uniform_CB2 = OpTypePointer Uniform %CB2 + %false = OpConstantFalse %bool + %1509 = OpUndef %VertexOutput + %1510 = OpUndef %SurfaceInput + %1511 = OpUndef %v2float + %1512 = OpUndef %v4float + %1531 = OpUndef %Surface + %main = OpFunction %void None %3 + %5 = OpLabel + %501 = OpLoad %v4float %IN_HPosition + %1378 = OpCompositeInsert %VertexOutput %501 %1509 0 + %504 = OpLoad %v4float %IN_Uv_EdgeDistance1 + %1380 = OpCompositeInsert %VertexOutput %504 %1378 1 + %507 = OpLoad %v4float %IN_UvStuds_EdgeDistance2 + %1382 = OpCompositeInsert %VertexOutput %507 %1380 2 + %510 = OpLoad %v4float %IN_Color + %1384 = OpCompositeInsert %VertexOutput %510 %1382 3 + %513 = OpLoad %v4float %IN_LightPosition_Fog + %1386 = OpCompositeInsert %VertexOutput %513 %1384 4 + %516 = OpLoad %v4float %IN_View_Depth + %1388 = OpCompositeInsert %VertexOutput %516 %1386 5 + %519 = OpLoad %v4float %IN_Normal_SpecPower + %1390 = OpCompositeInsert %VertexOutput %519 %1388 6 + %523 = OpLoad %v3float %IN_Tangent + %1392 = OpCompositeInsert %VertexOutput %523 %1390 7 + %526 = OpLoad %v4float %IN_PosLightSpace_Reflectance + %1394 = OpCompositeInsert %VertexOutput %526 %1392 8 + %530 = OpLoad %float %IN_studIndex + %1396 = OpCompositeInsert %VertexOutput %530 %1394 9 + %1400 = OpCompositeInsert %SurfaceInput %510 %1510 0 + %954 = OpVectorShuffle %v2float %504 %504 0 1 + %1404 = OpCompositeInsert %SurfaceInput %954 %1400 1 + %958 = OpVectorShuffle %v2float %507 %507 0 1 + %1408 = OpCompositeInsert %SurfaceInput %958 %1404 2 + %1410 = OpCompositeExtract %float %1408 2 1 + %962 = OpExtInst %float %1 Fract %1410 + %965 = OpFAdd %float %962 %530 + %966 = OpFMul %float %965 %float_0_25 + %1414 = OpCompositeInsert %SurfaceInput %966 %1408 2 1 + %1416 = OpCompositeExtract %float %1396 5 3 + %970 = OpFMul %float %1416 %float_0_00333333 + %971 = OpFSub %float %float_1 %970 + %987 = OpExtInst %float %1 FClamp %971 %float_0 %float_1 + %976 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_16 %uint_1 + %977 = OpLoad %float %976 + %978 = OpFMul %float %1416 %977 + %979 = OpFSub %float %float_1 %978 + %990 = OpExtInst %float %1 FClamp %979 %float_0 %float_1 + %1024 = OpVectorTimesScalar %v2float %954 %float_1 + %1029 = OpLoad %36 %DiffuseMapSampler + %1030 = OpLoad %38 %DiffuseMapTexture + OpBranch %1119 + %1119 = OpLabel + OpLoopMerge %1120 %1121 None + OpBranch %1122 + %1122 = OpLabel + %1124 = 
OpFOrdEqual %bool %float_0 %float_0 + OpSelectionMerge %1125 None + OpBranchConditional %1124 %1126 %1127 + %1126 = OpLabel + %1130 = OpSampledImage %183 %1030 %1029 + %1132 = OpImageSampleImplicitLod %v4float %1130 %1024 + OpBranch %1120 + %1127 = OpLabel + %1134 = OpFSub %float %float_1 %float_0 + %1135 = OpFDiv %float %float_1 %1134 + %1138 = OpSampledImage %183 %1030 %1029 + %1140 = OpVectorTimesScalar %v2float %1024 %float_0_25 + %1141 = OpImageSampleImplicitLod %v4float %1138 %1140 + %1144 = OpSampledImage %183 %1030 %1029 + %1146 = OpImageSampleImplicitLod %v4float %1144 %1024 + %1149 = OpFMul %float %987 %1135 + %1152 = OpFMul %float %float_0 %1135 + %1153 = OpFSub %float %1149 %1152 + %1161 = OpExtInst %float %1 FClamp %1153 %float_0 %float_1 + %1155 = OpCompositeConstruct %v4float %1161 %1161 %1161 %1161 + %1156 = OpExtInst %v4float %1 FMix %1141 %1146 %1155 + OpBranch %1120 + %1125 = OpLabel + %1157 = OpUndef %v4float + OpBranch %1120 + %1121 = OpLabel + OpBranchConditional %false %1119 %1120 + %1120 = OpLabel + %1517 = OpPhi %v4float %1132 %1126 %1156 %1127 %1157 %1125 %1512 %1121 + %1035 = OpVectorTimesScalar %v4float %1517 %float_1 + %1036 = OpLoad %36 %NormalMapSampler + %1037 = OpLoad %38 %NormalMapTexture + OpBranch %1165 + %1165 = OpLabel + OpLoopMerge %1166 %1167 None + OpBranch %1168 + %1168 = OpLabel + OpSelectionMerge %1171 None + OpBranchConditional %1124 %1172 %1173 + %1172 = OpLabel + %1176 = OpSampledImage %183 %1037 %1036 + %1178 = OpImageSampleImplicitLod %v4float %1176 %1024 + OpBranch %1166 + %1173 = OpLabel + %1180 = OpFSub %float %float_1 %float_0 + %1181 = OpFDiv %float %float_1 %1180 + %1184 = OpSampledImage %183 %1037 %1036 + %1186 = OpVectorTimesScalar %v2float %1024 %float_0_25 + %1187 = OpImageSampleImplicitLod %v4float %1184 %1186 + %1190 = OpSampledImage %183 %1037 %1036 + %1192 = OpImageSampleImplicitLod %v4float %1190 %1024 + %1195 = OpFMul %float %990 %1181 + %1198 = OpFMul %float %float_0 %1181 + %1199 = OpFSub %float %1195 %1198 + %1206 = OpExtInst %float %1 FClamp %1199 %float_0 %float_1 + %1201 = OpCompositeConstruct %v4float %1206 %1206 %1206 %1206 + %1202 = OpExtInst %v4float %1 FMix %1187 %1192 %1201 + OpBranch %1166 + %1171 = OpLabel + %1203 = OpUndef %v4float + OpBranch %1166 + %1167 = OpLabel + OpBranchConditional %false %1165 %1166 + %1166 = OpLabel + %1523 = OpPhi %v4float %1178 %1172 %1202 %1173 %1203 %1171 %1512 %1167 + %1210 = OpVectorShuffle %v2float %1523 %1523 3 1 + %1211 = OpVectorTimesScalar %v2float %1210 %float_2 + %1212 = OpCompositeConstruct %v2float %float_1 %float_1 + %1213 = OpFSub %v2float %1211 %1212 + %1216 = OpFNegate %v2float %1213 + %1218 = OpDot %float %1216 %1213 + %1219 = OpFAdd %float %float_1 %1218 + %1220 = OpExtInst %float %1 FClamp %1219 %float_0 %float_1 + %1221 = OpExtInst %float %1 Sqrt %1220 + %1222 = OpCompositeExtract %float %1213 0 + %1223 = OpCompositeExtract %float %1213 1 + %1224 = OpCompositeConstruct %v3float %1222 %1223 %1221 + %1042 = OpLoad %38 %NormalDetailMapTexture + %1043 = OpLoad %36 %NormalDetailMapSampler + %1044 = OpSampledImage %183 %1042 %1043 + %1046 = OpVectorTimesScalar %v2float %1024 %float_0 + %1047 = OpImageSampleImplicitLod %v4float %1044 %1046 + %1228 = OpVectorShuffle %v2float %1047 %1047 3 1 + %1229 = OpVectorTimesScalar %v2float %1228 %float_2 + %1231 = OpFSub %v2float %1229 %1212 + %1234 = OpFNegate %v2float %1231 + %1236 = OpDot %float %1234 %1231 + %1237 = OpFAdd %float %float_1 %1236 + %1238 = OpExtInst %float %1 FClamp %1237 %float_0 %float_1 + %1239 = OpExtInst 
%float %1 Sqrt %1238 + %1240 = OpCompositeExtract %float %1231 0 + %1241 = OpCompositeExtract %float %1231 1 + %1242 = OpCompositeConstruct %v3float %1240 %1241 %1239 + %1050 = OpVectorShuffle %v2float %1242 %1242 0 1 + %1051 = OpVectorTimesScalar %v2float %1050 %float_0 + %1053 = OpVectorShuffle %v2float %1224 %1224 0 1 + %1054 = OpFAdd %v2float %1053 %1051 + %1056 = OpVectorShuffle %v3float %1224 %1054 3 4 2 + %1059 = OpVectorShuffle %v2float %1056 %1056 0 1 + %1060 = OpVectorTimesScalar %v2float %1059 %990 + %1062 = OpVectorShuffle %v3float %1056 %1060 3 4 2 + %1430 = OpCompositeExtract %float %1062 0 + %1065 = OpFMul %float %1430 %float_0_3 + %1066 = OpFAdd %float %float_1 %1065 + %1069 = OpVectorShuffle %v3float %510 %510 0 1 2 + %1071 = OpVectorShuffle %v3float %1035 %1035 0 1 2 + %1072 = OpFMul %v3float %1069 %1071 + %1074 = OpVectorTimesScalar %v3float %1072 %1066 + %1075 = OpLoad %38 %StudsMapTexture + %1076 = OpLoad %36 %StudsMapSampler + %1077 = OpSampledImage %183 %1075 %1076 + %1434 = OpCompositeExtract %v2float %1414 2 + %1080 = OpImageSampleImplicitLod %v4float %1077 %1434 + %1436 = OpCompositeExtract %float %1080 0 + %1083 = OpFMul %float %1436 %float_2 + %1085 = OpVectorTimesScalar %v3float %1074 %1083 + %1086 = OpLoad %36 %SpecularMapSampler + %1087 = OpLoad %38 %SpecularMapTexture + OpBranch %1246 + %1246 = OpLabel + OpLoopMerge %1247 %1248 None + OpBranch %1249 + %1249 = OpLabel + %1251 = OpFOrdEqual %bool %float_0_75 %float_0 + OpSelectionMerge %1252 None + OpBranchConditional %1251 %1253 %1254 + %1253 = OpLabel + %1257 = OpSampledImage %183 %1087 %1086 + %1259 = OpImageSampleImplicitLod %v4float %1257 %1024 + OpBranch %1247 + %1254 = OpLabel + %1261 = OpFSub %float %float_1 %float_0_75 + %1262 = OpFDiv %float %float_1 %1261 + %1265 = OpSampledImage %183 %1087 %1086 + %1267 = OpVectorTimesScalar %v2float %1024 %float_0_25 + %1268 = OpImageSampleImplicitLod %v4float %1265 %1267 + %1271 = OpSampledImage %183 %1087 %1086 + %1273 = OpImageSampleImplicitLod %v4float %1271 %1024 + %1276 = OpFMul %float %990 %1262 + %1279 = OpFMul %float %float_0_75 %1262 + %1280 = OpFSub %float %1276 %1279 + %1287 = OpExtInst %float %1 FClamp %1280 %float_0 %float_1 + %1282 = OpCompositeConstruct %v4float %1287 %1287 %1287 %1287 + %1283 = OpExtInst %v4float %1 FMix %1268 %1273 %1282 + OpBranch %1247 + %1252 = OpLabel + %1284 = OpUndef %v4float + OpBranch %1247 + %1248 = OpLabel + OpBranchConditional %false %1246 %1247 + %1247 = OpLabel + %1530 = OpPhi %v4float %1259 %1253 %1283 %1254 %1284 %1252 %1512 %1248 + %1091 = OpVectorShuffle %v2float %1530 %1530 0 1 + %1093 = OpFMul %v2float %1091 %689 + %1094 = OpFAdd %v2float %1093 %692 + %1097 = OpCompositeConstruct %v2float %990 %990 + %1098 = OpExtInst %v2float %1 FMix %697 %1094 %1097 + %1438 = OpCompositeInsert %Surface %1085 %1531 0 + %1440 = OpCompositeInsert %Surface %1062 %1438 1 + %1442 = OpCompositeExtract %float %1098 0 + %1444 = OpCompositeInsert %Surface %1442 %1440 2 + %1446 = OpCompositeExtract %float %1098 1 + %1448 = OpCompositeInsert %Surface %1446 %1444 3 + %1450 = OpCompositeExtract %float %1091 1 + %1112 = OpFMul %float %1450 %990 + %1113 = OpFMul %float %1112 %float_0 + %1452 = OpCompositeInsert %Surface %1113 %1448 4 + %1456 = OpCompositeExtract %float %1396 3 3 + %764 = OpCompositeExtract %float %1085 0 + %765 = OpCompositeExtract %float %1085 1 + %766 = OpCompositeExtract %float %1085 2 + %767 = OpCompositeConstruct %v4float %764 %765 %766 %1456 + %770 = OpVectorShuffle %v3float %519 %519 0 1 2 + %773 = OpExtInst %v3float 
%1 Cross %770 %523 + %1462 = OpCompositeExtract %float %1452 1 0 + %778 = OpVectorTimesScalar %v3float %523 %1462 + %1466 = OpCompositeExtract %float %1452 1 1 + %782 = OpVectorTimesScalar %v3float %773 %1466 + %783 = OpFAdd %v3float %778 %782 + %1468 = OpCompositeExtract %float %1452 1 2 + %789 = OpVectorTimesScalar %v3float %770 %1468 + %790 = OpFAdd %v3float %783 %789 + %791 = OpExtInst %v3float %1 Normalize %790 + %793 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_7 + %794 = OpLoad %v3float %793 + %795 = OpFNegate %v3float %794 + %796 = OpDot %float %791 %795 + %1290 = OpExtInst %float %1 FClamp %796 %float_0 %float_1 + %799 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_6 + %800 = OpLoad %v3float %799 + %801 = OpVectorTimesScalar %v3float %800 %1290 + %803 = OpFNegate %float %796 + %804 = OpExtInst %float %1 FMax %803 %float_0 + %805 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_8 + %806 = OpLoad %v3float %805 + %807 = OpVectorTimesScalar %v3float %806 %804 + %808 = OpFAdd %v3float %801 %807 + %810 = OpExtInst %float %1 Step %float_0 %796 + %813 = OpFMul %float %810 %1442 + %820 = OpVectorShuffle %v3float %513 %513 0 1 2 + %1296 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_15 + %1297 = OpLoad %v4float %1296 + %1298 = OpVectorShuffle %v3float %1297 %1297 0 1 2 + %1300 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_14 + %1301 = OpLoad %v4float %1300 + %1302 = OpVectorShuffle %v3float %1301 %1301 0 1 2 + %1303 = OpFSub %v3float %820 %1302 + %1304 = OpExtInst %v3float %1 FAbs %1303 + %1305 = OpExtInst %v3float %1 Step %1298 %1304 + %1307 = OpDot %float %1305 %128 + %1328 = OpExtInst %float %1 FClamp %1307 %float_0 %float_1 + %1309 = OpLoad %133 %LightMapTexture + %1310 = OpLoad %36 %LightMapSampler + %1311 = OpSampledImage %140 %1309 %1310 + %1313 = OpVectorShuffle %v3float %820 %820 1 2 0 + %1317 = OpVectorTimesScalar %v3float %1313 %1328 + %1318 = OpFSub %v3float %1313 %1317 + %1319 = OpImageSampleImplicitLod %v4float %1311 %1318 + %1321 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %int_11 + %1322 = OpLoad %v4float %1321 + %1324 = OpCompositeConstruct %v4float %1328 %1328 %1328 %1328 + %1325 = OpExtInst %v4float %1 FMix %1319 %1322 %1324 + %822 = OpLoad %36 %ShadowMapSampler + %823 = OpLoad %38 %ShadowMapTexture + %826 = OpVectorShuffle %v3float %526 %526 0 1 2 + %1482 = OpCompositeExtract %float %1325 3 + %1337 = OpSampledImage %183 %823 %822 + %1339 = OpVectorShuffle %v2float %826 %826 0 1 + %1340 = OpImageSampleImplicitLod %v4float %1337 %1339 + %1341 = OpVectorShuffle %v2float %1340 %1340 0 1 + %1484 = OpCompositeExtract %float %826 2 + %1486 = OpCompositeExtract %float %1341 0 + %1363 = OpExtInst %float %1 Step %1486 %1484 + %1365 = OpFSub %float %1484 %float_0_5 + %1366 = OpExtInst %float %1 FAbs %1365 + %1367 = OpFMul %float %float_20 %1366 + %1368 = OpFSub %float %float_9 %1367 + %1369 = OpExtInst %float %1 FClamp %1368 %float_0 %float_1 + %1370 = OpFMul %float %1363 %1369 + %1488 = OpCompositeExtract %float %1341 1 + %1350 = OpFMul %float %1370 %1488 + %1351 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_17 %uint_3 + %1352 = OpLoad %float %1351 + %1353 = OpFMul %float %1350 %1352 + %1354 = OpFSub %float %float_1 %1353 + %1356 = OpFMul %float %1354 %1482 + %830 = OpLoad %367 %EnvironmentMapTexture + %831 = OpLoad %36 %EnvironmentMapSampler + %832 = OpSampledImage %373 %830 %831 + %835 = OpVectorShuffle %v3float %516 %516 0 1 2 + %836 = OpFNegate %v3float %835 + %838 = OpExtInst %v3float %1 Reflect %836 %791 + %839 = 
OpImageSampleImplicitLod %v4float %832 %838 + %840 = OpVectorShuffle %v3float %839 %839 0 1 2 + %842 = OpVectorShuffle %v3float %767 %767 0 1 2 + %845 = OpCompositeConstruct %v3float %1113 %1113 %1113 + %846 = OpExtInst %v3float %1 FMix %842 %840 %845 + %848 = OpVectorShuffle %v4float %767 %846 4 5 6 3 + %849 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_5 + %850 = OpLoad %v3float %849 + %853 = OpVectorTimesScalar %v3float %808 %1356 + %854 = OpFAdd %v3float %850 %853 + %856 = OpVectorShuffle %v3float %1325 %1325 0 1 2 + %857 = OpFAdd %v3float %854 %856 + %859 = OpVectorShuffle %v3float %848 %848 0 1 2 + %860 = OpFMul %v3float %857 %859 + %865 = OpFMul %float %813 %1356 + %873 = OpExtInst %v3float %1 Normalize %835 + %874 = OpFAdd %v3float %795 %873 + %875 = OpExtInst %v3float %1 Normalize %874 + %876 = OpDot %float %791 %875 + %877 = OpExtInst %float %1 FClamp %876 %float_0 %float_1 + %879 = OpExtInst %float %1 Pow %877 %1446 + %880 = OpFMul %float %865 %879 + %881 = OpVectorTimesScalar %v3float %800 %880 + %884 = OpFAdd %v3float %860 %881 + %886 = OpVectorShuffle %v4float %1512 %884 4 5 6 3 + %1494 = OpCompositeExtract %float %848 3 + %1496 = OpCompositeInsert %v4float %1494 %886 3 + %896 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_17 %uint_0 + %897 = OpLoad %float %896 + %898 = OpFMul %float %978 %897 + %899 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_17 %uint_1 + %900 = OpLoad %float %899 + %901 = OpFAdd %float %898 %900 + %1373 = OpExtInst %float %1 FClamp %901 %float_0 %float_1 + %905 = OpVectorShuffle %v2float %504 %504 3 2 + %908 = OpVectorShuffle %v2float %507 %507 3 2 + %909 = OpExtInst %v2float %1 FMin %905 %908 + %1504 = OpCompositeExtract %float %909 0 + %1506 = OpCompositeExtract %float %909 1 + %914 = OpExtInst %float %1 FMin %1504 %1506 + %916 = OpFDiv %float %914 %978 + %919 = OpFSub %float %float_1_5 %916 + %920 = OpFMul %float %1373 %919 + %922 = OpFAdd %float %920 %916 + %1376 = OpExtInst %float %1 FClamp %922 %float_0 %float_1 + %925 = OpVectorShuffle %v3float %1496 %1496 0 1 2 + %926 = OpVectorTimesScalar %v3float %925 %1376 + %928 = OpVectorShuffle %v4float %1496 %926 4 5 6 3 + %1508 = OpCompositeExtract %float %1396 4 3 + %931 = OpExtInst %float %1 FClamp %1508 %float_0 %float_1 + %932 = OpAccessChain %_ptr_Uniform_v3float %_ %int_0 %int_10 + %933 = OpLoad %v3float %932 + %935 = OpVectorShuffle %v3float %928 %928 0 1 2 + %937 = OpCompositeConstruct %v3float %931 %931 %931 + %938 = OpExtInst %v3float %1 FMix %933 %935 %937 + %940 = OpVectorShuffle %v4float %928 %938 4 5 6 3 + OpStore %_entryPointOutput %940 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-no-opt/asm/vert/empty-struct-composite.asm.vert b/3rdparty/spirv-cross/shaders-no-opt/asm/vert/empty-struct-composite.asm.vert new file mode 100644 index 000000000..37a2d8793 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/asm/vert/empty-struct-composite.asm.vert @@ -0,0 +1,37 @@ +; SPIR-V +; Version: 1.1 +; Generator: Google rspirv; 0 +; Bound: 17 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %2 "main" + OpExecutionMode %2 OriginUpperLeft + OpName %Test "Test" + OpName %t "t" + OpName %retvar "retvar" + OpName %main "main" + OpName %retvar_0 "retvar" + %void = OpTypeVoid + %6 = OpTypeFunction %void + %Test = OpTypeStruct +%_ptr_Function_Test = OpTypePointer Function %Test +%_ptr_Function_void = OpTypePointer Function %void + %2 = OpFunction %void None %6 + %7 = OpLabel + %t = OpVariable 
%_ptr_Function_Test Function + %retvar = OpVariable %_ptr_Function_void Function + OpBranch %4 + %4 = OpLabel + %13 = OpCompositeConstruct %Test + OpStore %t %13 + OpReturn + OpFunctionEnd + %main = OpFunction %void None %6 + %15 = OpLabel + %retvar_0 = OpVariable %_ptr_Function_void Function + OpBranch %14 + %14 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-no-opt/asm/vert/semantic-decoration.asm.vert b/3rdparty/spirv-cross/shaders-no-opt/asm/vert/semantic-decoration.asm.vert new file mode 100644 index 000000000..76007c30a --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/asm/vert/semantic-decoration.asm.vert @@ -0,0 +1,68 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + OpExtension "SPV_GOOGLE_decorate_string" + OpExtension "SPV_GOOGLE_hlsl_functionality1" + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_entryPointOutput_p %_entryPointOutput_c + OpSource HLSL 500 + OpName %main "main" + OpName %VOut "VOut" + OpMemberName %VOut 0 "p" + OpMemberName %VOut 1 "c" + OpName %_main_ "@main(" + OpName %v "v" + OpName %flattenTemp "flattenTemp" + OpName %_entryPointOutput_p "@entryPointOutput.p" + OpName %_entryPointOutput_c "@entryPointOutput.c" + OpMemberDecorateStringGOOGLE %VOut 0 HlslSemanticGOOGLE "SV_POSITION" + OpMemberDecorateStringGOOGLE %VOut 1 HlslSemanticGOOGLE "COLOR" + OpDecorate %_entryPointOutput_p BuiltIn Position + OpDecorateStringGOOGLE %_entryPointOutput_p HlslSemanticGOOGLE "SV_POSITION" + OpDecorate %_entryPointOutput_c Location 0 + OpDecorateStringGOOGLE %_entryPointOutput_c HlslSemanticGOOGLE "COLOR" + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %VOut = OpTypeStruct %v4float %v4float + %9 = OpTypeFunction %VOut +%_ptr_Function_VOut = OpTypePointer Function %VOut + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 + %17 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %int_1 = OpConstant %int 1 + %float_2 = OpConstant %float 2 + %22 = OpConstantComposite %v4float %float_2 %float_2 %float_2 %float_2 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput_p = OpVariable %_ptr_Output_v4float Output +%_entryPointOutput_c = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel +%flattenTemp = OpVariable %_ptr_Function_VOut Function + %28 = OpFunctionCall %VOut %_main_ + OpStore %flattenTemp %28 + %31 = OpAccessChain %_ptr_Function_v4float %flattenTemp %int_0 + %32 = OpLoad %v4float %31 + OpStore %_entryPointOutput_p %32 + %34 = OpAccessChain %_ptr_Function_v4float %flattenTemp %int_1 + %35 = OpLoad %v4float %34 + OpStore %_entryPointOutput_c %35 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %VOut None %9 + %11 = OpLabel + %v = OpVariable %_ptr_Function_VOut Function + %19 = OpAccessChain %_ptr_Function_v4float %v %int_0 + OpStore %19 %17 + %23 = OpAccessChain %_ptr_Function_v4float %v %int_1 + OpStore %23 %22 + %24 = OpLoad %VOut %v + OpReturnValue %24 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-no-opt/comp/bitfield.comp b/3rdparty/spirv-cross/shaders-no-opt/comp/bitfield.comp new file mode 100644 index 000000000..d75b556b6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/comp/bitfield.comp @@ -0,0 +1,21 @@ +#version 310 es + +void main() +{ + int 
signed_value = 0; + uint unsigned_value = 0u; + + int s = bitfieldExtract(signed_value, 5, 20); + uint u = bitfieldExtract(unsigned_value, 6, 21); + s = bitfieldInsert(s, 40, 5, 4); + u = bitfieldInsert(u, 60u, 5, 4); + + u = bitfieldReverse(u); + s = bitfieldReverse(s); + + int v0 = bitCount(u); + int v1 = bitCount(s); + + int v2 = findMSB(u); + int v3 = findLSB(s); +} diff --git a/3rdparty/spirv-cross/shaders-no-opt/comp/loop.comp b/3rdparty/spirv-cross/shaders-no-opt/comp/loop.comp new file mode 100644 index 000000000..6d6c32424 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/comp/loop.comp @@ -0,0 +1,98 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idat = in_data[ident]; + + int k = 0; + uint i = 0u; + + if (idat.y == 20.0) + { + do + { + k = k * 2; + i++; + } while (i < ident); + } + + switch (k) + { + case 10: + for (;;) + { + i++; + if (i > 10u) + break; + } + break; + + default: + for (;;) + { + i += 2u; + if (i > 20u) + break; + } + break; + } + + while (k < 10) + { + idat *= 2.0; + k++; + } + + for (uint i = 0u; i < 16u; i++, k++) + for (uint j = 0u; j < 30u; j++) + idat = mvp * idat; + + k = 0; + for (;;) + { + k++; + if (k > 10) + { + k += 2; + } + else + { + k += 3; + continue; + } + + k += 10; + } + + k = 0; + do + { + k++; + } while (k > 10); + + int l = 0; + for (;; l++) + { + if (l == 5) + { + continue; + } + + idat += 1.0; + } + out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/shaders-no-opt/comp/return.comp b/3rdparty/spirv-cross/shaders-no-opt/comp/return.comp new file mode 100644 index 000000000..617f43718 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/comp/return.comp @@ -0,0 +1,33 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + + if (ident == 2u) + { + out_data[ident] = vec4(20.0); + } + else if (ident == 4u) + { + out_data[ident] = vec4(10.0); + return; + } + + for (int i = 0; i < 20; i++) + { + if (i == 10) + break; + + return; + } + + out_data[ident] = vec4(10.0); +} + diff --git a/3rdparty/spirv-cross/shaders-no-opt/vulkan/frag/spec-constant.vk.frag b/3rdparty/spirv-cross/shaders-no-opt/vulkan/frag/spec-constant.vk.frag new file mode 100644 index 000000000..2002c1272 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-no-opt/vulkan/frag/spec-constant.vk.frag @@ -0,0 +1,77 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(constant_id = 1) const float a = 1.0; +layout(constant_id = 2) const float b = 2.0; +layout(constant_id = 3) const int c = 3; +layout(constant_id = 4) const int d = 4; +layout(constant_id = 5) const uint e = 5u; +layout(constant_id = 6) const uint f = 6u; +layout(constant_id = 7) const bool g = false; +layout(constant_id = 8) const bool h = true; +// glslang doesn't seem to support partial spec constants or composites yet, so only test the basics. + +struct Foo +{ + float elems[d + 2]; +}; + +void main() +{ + float t0 = a; + float t1 = b; + + uint c0 = uint(c); // OpIAdd with different types. + // FConvert, float-to-double. 
+ int c1 = -c; // SNegate + int c2 = ~c; // OpNot + int c3 = c + d; // OpIAdd + int c4 = c - d; // OpISub + int c5 = c * d; // OpIMul + int c6 = c / d; // OpSDiv + uint c7 = e / f; // OpUDiv + int c8 = c % d; // OpSMod + uint c9 = e % f; // OpUMod + // TODO: OpSRem, any way to access this in GLSL? + int c10 = c >> d; // OpShiftRightArithmetic + uint c11 = e >> f; // OpShiftRightLogical + int c12 = c << d; // OpShiftLeftLogical + int c13 = c | d; // OpBitwiseOr + int c14 = c ^ d; // OpBitwiseXor + int c15 = c & d; // OpBitwiseAnd + // VectorShuffle, CompositeExtract, CompositeInsert, not testable atm. + bool c16 = g || h; // OpLogicalOr + bool c17 = g && h; // OpLogicalAnd + bool c18 = !g; // OpLogicalNot + bool c19 = g == h; // OpLogicalEqual + bool c20 = g != h; // OpLogicalNotEqual + // OpSelect not testable atm. + bool c21 = c == d; // OpIEqual + bool c22 = c != d; // OpINotEqual + bool c23 = c < d; // OpSLessThan + bool c24 = e < f; // OpULessThan + bool c25 = c > d; // OpSGreaterThan + bool c26 = e > f; // OpUGreaterThan + bool c27 = c <= d; // OpSLessThanEqual + bool c28 = e <= f; // OpULessThanEqual + bool c29 = c >= d; // OpSGreaterThanEqual + bool c30 = e >= f; // OpUGreaterThanEqual + // OpQuantizeToF16 not testable atm. + + int c31 = c8 + c3; + + int c32 = int(e); // OpIAdd with different types. + bool c33 = bool(c); // int -> bool + bool c34 = bool(e); // uint -> bool + int c35 = int(g); // bool -> int + uint c36 = uint(g); // bool -> uint + float c37 = float(g); // bool -> float + + // Flexible sized arrays with spec constants and spec constant ops. + float vec0[c + 3][8]; + float vec1[c + 2]; + + Foo foo; + FragColor = vec4(t0 + t1) + vec0[0][0] + vec1[0] + foo.elems[c]; +} diff --git a/3rdparty/spirv-cross/shaders-other/README.md b/3rdparty/spirv-cross/shaders-other/README.md new file mode 100644 index 000000000..6d454813e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-other/README.md @@ -0,0 +1,4 @@ +These shaders are not actually run yet as part of any test suite, +but are kept here because they have been used to manually test various aspects of SPIRV-Cross in the past. + +These would ideally be part of the test suite in some way. 
diff --git a/3rdparty/spirv-cross/shaders-other/aliased-entry-point-names.asm b/3rdparty/spirv-cross/shaders-other/aliased-entry-point-names.asm new file mode 100644 index 000000000..d60cf3039 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-other/aliased-entry-point-names.asm @@ -0,0 +1,60 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 20 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_ + OpEntryPoint Vertex %main2 "main2" %_ + OpEntryPoint Fragment %main3 "main" %FragColor + OpEntryPoint Fragment %main4 "main2" %FragColor + OpSource GLSL 450 + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance + OpDecorate %FragColor Location 0 + OpDecorate %gl_PerVertex Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %v4floatptr = OpTypePointer Output %v4float + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex + %_ = OpVariable %_ptr_Output_gl_PerVertex Output + %FragColor = OpVariable %v4floatptr Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 + %17 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %float_2 = OpConstant %float 2 + %18 = OpConstantComposite %v4float %float_2 %float_2 %float_2 %float_2 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %19 %17 + OpReturn + OpFunctionEnd + %main2 = OpFunction %void None %3 + %6 = OpLabel + %20 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %20 %18 + OpReturn + OpFunctionEnd + %main3 = OpFunction %void None %3 + %7 = OpLabel + OpStore %FragColor %17 + OpReturn + OpFunctionEnd + %main4 = OpFunction %void None %3 + %8 = OpLabel + OpStore %FragColor %18 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-reflection/asm/aliased-entry-point-names.asm.multi b/3rdparty/spirv-cross/shaders-reflection/asm/aliased-entry-point-names.asm.multi new file mode 100644 index 000000000..d60cf3039 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/asm/aliased-entry-point-names.asm.multi @@ -0,0 +1,60 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 20 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_ + OpEntryPoint Vertex %main2 "main2" %_ + OpEntryPoint Fragment %main3 "main" %FragColor + OpEntryPoint Fragment %main4 "main2" %FragColor + OpSource GLSL 450 + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance + OpDecorate %FragColor Location 0 + OpDecorate %gl_PerVertex Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %v4floatptr = OpTypePointer Output %v4float + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = 
OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex + %_ = OpVariable %_ptr_Output_gl_PerVertex Output + %FragColor = OpVariable %v4floatptr Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 + %17 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %float_2 = OpConstant %float 2 + %18 = OpConstantComposite %v4float %float_2 %float_2 %float_2 %float_2 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %19 %17 + OpReturn + OpFunctionEnd + %main2 = OpFunction %void None %3 + %6 = OpLabel + %20 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %20 %18 + OpReturn + OpFunctionEnd + %main3 = OpFunction %void None %3 + %7 = OpLabel + OpStore %FragColor %17 + OpReturn + OpFunctionEnd + %main4 = OpFunction %void None %3 + %8 = OpLabel + OpStore %FragColor %18 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-reflection/comp/struct-layout.comp b/3rdparty/spirv-cross/shaders-reflection/comp/struct-layout.comp new file mode 100644 index 000000000..5a2b7802d --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/comp/struct-layout.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct Foo +{ + mat4 m; +}; + +layout(std430, binding = 0) readonly buffer SSBO +{ + Foo in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + Foo out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + out_data[ident].m = in_data[ident].m * in_data[ident].m; +} + diff --git a/3rdparty/spirv-cross/shaders-reflection/comp/struct-packing.comp b/3rdparty/spirv-cross/shaders-reflection/comp/struct-packing.comp new file mode 100644 index 000000000..d2ffbaef5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/comp/struct-packing.comp @@ -0,0 +1,87 @@ +#version 450 + +layout(local_size_x = 1) in; + +struct S0 +{ + vec2 a[1]; + float b; +}; + +struct S1 +{ + vec3 a; + float b; +}; + +struct S2 +{ + vec3 a[1]; + float b; +}; + +struct S3 +{ + vec2 a; + float b; +}; + +struct S4 +{ + vec2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + float m4; + + S4 m3s[8]; +}; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + + layout(column_major) mat2 m0; + layout(column_major) mat2 m1; + layout(column_major) mat2x3 m2[4]; + layout(column_major) mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_430; + +layout(binding = 0, std140) restrict buffer SSBO0 +{ + Content content; + Content content1[2]; + Content content2; + + layout(column_major) mat2 m0; + layout(column_major) mat2 m1; + layout(column_major) mat2x3 m2[4]; + layout(column_major) mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + + float array[]; +} ssbo_140; + +void main() +{ + ssbo_430.content = ssbo_140.content; +} + diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/combined-texture-sampler-shadow.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/combined-texture-sampler-shadow.vk.frag new file mode 100644 index 000000000..2fabb5ea8 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders-reflection/frag/combined-texture-sampler-shadow.vk.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump samplerShadow uSampler; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; +layout(set = 0, binding = 2) uniform texture2D uDepth; +layout(location = 0) out float FragColor; + +float samp2(texture2D t, mediump samplerShadow s) +{ + return texture(sampler2DShadow(t, s), vec3(1.0)); +} + +float samp3(texture2D t, mediump sampler s) +{ + return texture(sampler2D(t, s), vec2(1.0)).x; +} + +float samp(texture2D t, mediump samplerShadow s, mediump sampler s1) +{ + float r0 = samp2(t, s); + float r1 = samp3(t, s1); + return r0 + r1; +} + +void main() +{ + FragColor = samp(uDepth, uSampler, uSampler1); +} diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/combined-texture-sampler.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/combined-texture-sampler.vk.frag new file mode 100644 index 000000000..b7de8d47e --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/combined-texture-sampler.vk.frag @@ -0,0 +1,47 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler uSampler0; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; +layout(set = 0, binding = 2) uniform mediump texture2D uTexture0; +layout(set = 0, binding = 3) uniform mediump texture2D uTexture1; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTex; + +vec4 sample_dual(mediump sampler samp, mediump texture2D tex) +{ + return texture(sampler2D(tex, samp), vTex); +} + +vec4 sample_global_tex(mediump sampler samp) +{ + vec4 a = texture(sampler2D(uTexture0, samp), vTex); + vec4 b = sample_dual(samp, uTexture1); + return a + b; +} + +vec4 sample_global_sampler(mediump texture2D tex) +{ + vec4 a = texture(sampler2D(tex, uSampler0), vTex); + vec4 b = sample_dual(uSampler1, tex); + return a + b; +} + +vec4 sample_duals() +{ + vec4 a = sample_dual(uSampler0, uTexture0); + vec4 b = sample_dual(uSampler1, uTexture1); + return a + b; +} + +void main() +{ + vec4 c0 = sample_duals(); + vec4 c1 = sample_global_tex(uSampler0); + vec4 c2 = sample_global_tex(uSampler1); + vec4 c3 = sample_global_sampler(uTexture0); + vec4 c4 = sample_global_sampler(uTexture1); + + FragColor = c0 + c1 + c2 + c3 + c4; +} diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/image-load-store-uint-coord.asm.frag b/3rdparty/spirv-cross/shaders-reflection/frag/image-load-store-uint-coord.asm.frag new file mode 100644 index 000000000..a9bf1a749 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/image-load-store-uint-coord.asm.frag @@ -0,0 +1,103 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 63 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %storeTemp "storeTemp" + OpName %RWIm "RWIm" + OpName %v "v" + OpName %RWBuf "RWBuf" + OpName %ROIm "ROIm" + OpName %ROBuf "ROBuf" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %RWIm DescriptorSet 0 + OpDecorate %RWIm Binding 1 + OpDecorate %RWBuf DescriptorSet 0 + OpDecorate %RWBuf Binding 0 + OpDecorate %ROIm DescriptorSet 0 + OpDecorate %ROIm Binding 1 + OpDecorate %ROBuf 
DescriptorSet 0 + OpDecorate %ROBuf Binding 0 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float +%_ptr_Function_v4float = OpTypePointer Function %v4float + %float_10 = OpConstant %float 10 + %float_0_5 = OpConstant %float 0.5 + %float_8 = OpConstant %float 8 + %float_2 = OpConstant %float 2 + %17 = OpConstantComposite %v4float %float_10 %float_0_5 %float_8 %float_2 + %18 = OpTypeImage %float 2D 0 0 0 2 Rgba32f +%_ptr_UniformConstant_18 = OpTypePointer UniformConstant %18 + %RWIm = OpVariable %_ptr_UniformConstant_18 UniformConstant + %uint = OpTypeInt 32 0 + %v2uint = OpTypeVector %uint 2 + %uint_10 = OpConstant %uint 10 + %25 = OpConstantComposite %v2uint %uint_10 %uint_10 + %uint_30 = OpConstant %uint 30 + %30 = OpConstantComposite %v2uint %uint_30 %uint_30 + %32 = OpTypeImage %float Buffer 0 0 0 2 Rgba32f +%_ptr_UniformConstant_32 = OpTypePointer UniformConstant %32 + %RWBuf = OpVariable %_ptr_UniformConstant_32 UniformConstant + %uint_80 = OpConstant %uint 80 + %38 = OpTypeImage %float 2D 0 0 0 1 Unknown + %SampledImage = OpTypeSampledImage %38 +%_ptr_UniformConstant_38 = OpTypePointer UniformConstant %SampledImage + %ROIm = OpVariable %_ptr_UniformConstant_38 UniformConstant + %uint_50 = OpConstant %uint 50 + %uint_60 = OpConstant %uint 60 + %44 = OpConstantComposite %v2uint %uint_50 %uint_60 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %50 = OpTypeImage %float Buffer 0 0 0 1 Rgba32f +%_ptr_UniformConstant_50 = OpTypePointer UniformConstant %50 + %ROBuf = OpVariable %_ptr_UniformConstant_50 UniformConstant +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %62 = OpFunctionCall %v4float %_main_ + OpStore %_entryPointOutput %62 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v4float None %8 + %10 = OpLabel + %storeTemp = OpVariable %_ptr_Function_v4float Function + %v = OpVariable %_ptr_Function_v4float Function + OpStore %storeTemp %17 + %21 = OpLoad %18 %RWIm + %26 = OpLoad %v4float %storeTemp + OpImageWrite %21 %25 %26 + %28 = OpLoad %18 %RWIm + %31 = OpImageRead %v4float %28 %30 + OpStore %v %31 + %35 = OpLoad %32 %RWBuf + %37 = OpLoad %v4float %v + OpImageWrite %35 %uint_80 %37 + %41 = OpLoad %SampledImage %ROIm + %ROImage = OpImage %38 %41 + %47 = OpImageFetch %v4float %ROImage %44 Lod %int_0 + %48 = OpLoad %v4float %v + %49 = OpFAdd %v4float %48 %47 + OpStore %v %49 + %53 = OpLoad %50 %ROBuf + %54 = OpImageFetch %v4float %53 %uint_80 + %55 = OpLoad %v4float %v + %56 = OpFAdd %v4float %55 %54 + OpStore %v %56 + %57 = OpLoad %v4float %v + OpReturnValue %57 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/input-attachment-ms.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/input-attachment-ms.vk.frag new file mode 100644 index 000000000..e06073884 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/input-attachment-ms.vk.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform subpassInputMS uSubpass1; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = subpassLoad(uSubpass0, 1) + subpassLoad(uSubpass1, 2) + subpassLoad(uSubpass0, gl_SampleID); +} diff --git 
a/3rdparty/spirv-cross/shaders-reflection/frag/input-attachment.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/input-attachment.vk.frag new file mode 100644 index 000000000..f082d15b2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/input-attachment.vk.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform mediump subpassInput uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform mediump subpassInput uSubpass1; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = subpassLoad(uSubpass0) + subpassLoad(uSubpass1); +} diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/push-constant.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/push-constant.vk.frag new file mode 100644 index 000000000..6180faba3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/push-constant.vk.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; + +layout(push_constant, std430) uniform PushConstants +{ + vec4 value0; + vec4 value1; +} push; + +layout(location = 0) in vec4 vColor; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vColor + push.value0 + push.value1; +} diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/separate-sampler-texture-array.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/separate-sampler-texture-array.vk.frag new file mode 100644 index 000000000..b3501c1d8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/separate-sampler-texture-array.vk.frag @@ -0,0 +1,42 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 1) uniform mediump texture2D uTexture[4]; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D[4]; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube[4]; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray[4]; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; + +vec4 sample_func(mediump sampler samp, vec2 uv) +{ + return texture(sampler2D(uTexture[2], samp), uv); +} + +vec4 sample_func_dual(mediump sampler samp, mediump texture2D tex, vec2 uv) +{ + return texture(sampler2D(tex, samp), uv); +} + +vec4 sample_func_dual_array(mediump sampler samp, mediump texture2D tex[4], vec2 uv) +{ + return texture(sampler2D(tex[1], samp), uv); +} + +void main() +{ + vec2 off = 1.0 / vec2(textureSize(sampler2D(uTexture[1], uSampler), 0)); + vec2 off2 = 1.0 / vec2(textureSize(sampler2D(uTexture[2], uSampler), 1)); + + vec4 c0 = sample_func(uSampler, vTex + off + off2); + vec4 c1 = sample_func_dual(uSampler, uTexture[1], vTex + off + off2); + vec4 c2 = sample_func_dual_array(uSampler, uTexture, vTex + off + off2); + vec4 c3 = texture(sampler2DArray(uTextureArray[3], uSampler), vTex3); + vec4 c4 = texture(samplerCube(uTextureCube[1], uSampler), vTex3); + vec4 c5 = texture(sampler3D(uTexture3D[2], uSampler), vTex3); + + FragColor = c0 + c1 + c2 + c3 + c4 + c5; +} diff --git a/3rdparty/spirv-cross/shaders-reflection/frag/spec-constant.vk.frag b/3rdparty/spirv-cross/shaders-reflection/frag/spec-constant.vk.frag new file mode 100644 index 000000000..e62a26059 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/frag/spec-constant.vk.frag @@ -0,0 +1,78 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(constant_id = 1) const 
float a = 1.5; +layout(constant_id = 2) const float b = 2.5; +layout(constant_id = 3) const int c = 3; +layout(constant_id = 4) const int d = 4; +layout(constant_id = 5) const uint e = 5u; +layout(constant_id = 6) const uint f = 6u; +layout(constant_id = 7) const bool g = false; +layout(constant_id = 8) const bool h = true; + +// glslang doesn't seem to support partial spec constants or composites yet, so only test the basics. + +struct Foo +{ + float elems[d + 2]; +}; + +void main() +{ + float t0 = a; + float t1 = b; + + uint c0 = uint(c); // OpIAdd with different types. + // FConvert, float-to-double. + int c1 = -c; // SNegate + int c2 = ~c; // OpNot + int c3 = c + d; // OpIAdd + int c4 = c - d; // OpISub + int c5 = c * d; // OpIMul + int c6 = c / d; // OpSDiv + uint c7 = e / f; // OpUDiv + int c8 = c % d; // OpSMod + uint c9 = e % f; // OpUMod + // TODO: OpSRem, any way to access this in GLSL? + int c10 = c >> d; // OpShiftRightArithmetic + uint c11 = e >> f; // OpShiftRightLogical + int c12 = c << d; // OpShiftLeftLogical + int c13 = c | d; // OpBitwiseOr + int c14 = c ^ d; // OpBitwiseXor + int c15 = c & d; // OpBitwiseAnd + // VectorShuffle, CompositeExtract, CompositeInsert, not testable atm. + bool c16 = g || h; // OpLogicalOr + bool c17 = g && h; // OpLogicalAnd + bool c18 = !g; // OpLogicalNot + bool c19 = g == h; // OpLogicalEqual + bool c20 = g != h; // OpLogicalNotEqual + // OpSelect not testable atm. + bool c21 = c == d; // OpIEqual + bool c22 = c != d; // OpINotEqual + bool c23 = c < d; // OpSLessThan + bool c24 = e < f; // OpULessThan + bool c25 = c > d; // OpSGreaterThan + bool c26 = e > f; // OpUGreaterThan + bool c27 = c <= d; // OpSLessThanEqual + bool c28 = e <= f; // OpULessThanEqual + bool c29 = c >= d; // OpSGreaterThanEqual + bool c30 = e >= f; // OpUGreaterThanEqual + // OpQuantizeToF16 not testable atm. + + int c31 = c8 + c3; + + int c32 = int(e); // OpIAdd with different types. + bool c33 = bool(c); // int -> bool + bool c34 = bool(e); // uint -> bool + int c35 = int(g); // bool -> int + uint c36 = uint(g); // bool -> uint + float c37 = float(g); // bool -> float + + // Flexible sized arrays with spec constants and spec constant ops. + float vec0[c + 3][8]; + float vec1[c + 2]; + + Foo foo; + FragColor = vec4(t0 + t1) + vec0[0][0] + vec1[0] + foo.elems[c]; +} diff --git a/3rdparty/spirv-cross/shaders-reflection/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/shaders-reflection/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..792fb8e36 --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/vert/read-from-row-major-array.vert @@ -0,0 +1,20 @@ +#version 310 es +layout(location = 0) in highp vec4 a_position; +layout(location = 0) out mediump float v_vtxResult; + +layout(set = 0, binding = 0, std140, row_major) uniform Block +{ + highp mat2x3 var[3][4]; +}; + +mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 
1.0 : 0.0; } +mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); } +mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); } + +void main (void) +{ + gl_Position = a_position; + mediump float result = 1.0; + result *= compare_mat2x3(var[0][0], mat2x3(2.0, 6.0, -6.0, 0.0, 5.0, 5.0)); + v_vtxResult = result; +} diff --git a/3rdparty/spirv-cross/shaders-reflection/vert/texture_buffer.vert b/3rdparty/spirv-cross/shaders-reflection/vert/texture_buffer.vert new file mode 100644 index 000000000..6bc7ddfae --- /dev/null +++ b/3rdparty/spirv-cross/shaders-reflection/vert/texture_buffer.vert @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_OES_texture_buffer : require + +layout(binding = 4) uniform highp samplerBuffer uSamp; +layout(rgba32f, binding = 5) uniform readonly highp imageBuffer uSampo; + +void main() +{ + gl_Position = texelFetch(uSamp, 10) + imageLoad(uSampo, 100); +} diff --git a/3rdparty/spirv-cross/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag b/3rdparty/spirv-cross/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag new file mode 100644 index 000000000..a3f03664c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/fragmentMaskFetch_subpassInput.vk.nocompat.invalid.frag @@ -0,0 +1,10 @@ +#version 450 +#extension GL_AMD_shader_fragment_mask : require + +layout(input_attachment_index = 0, binding = 0) uniform subpassInputMS t; + +void main () +{ + vec4 test2 = fragmentFetchAMD(t, 4); + uint testi2 = fragmentMaskFetchAMD(t); +} diff --git a/3rdparty/spirv-cross/shaders/amd/fs.invalid.frag b/3rdparty/spirv-cross/shaders/amd/fs.invalid.frag new file mode 100644 index 000000000..1ff82de06 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/fs.invalid.frag @@ -0,0 +1,14 @@ +#version 450 +#extension GL_AMD_shader_fragment_mask : require +#extension GL_AMD_shader_explicit_vertex_parameter : require + +layout(binding = 0) uniform sampler2DMS texture1; +layout(location = 0) __explicitInterpAMD in vec4 vary; + +void main() +{ + uint testi1 = fragmentMaskFetchAMD(texture1, ivec2(0)); + vec4 test1 = fragmentFetchAMD(texture1, ivec2(1), 2); + + vec4 pos = interpolateAtVertexAMD(vary, 0u); +} diff --git a/3rdparty/spirv-cross/shaders/amd/gcn_shader.comp b/3rdparty/spirv-cross/shaders/amd/gcn_shader.comp new file mode 100644 index 000000000..037cdde6b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/gcn_shader.comp @@ -0,0 +1,13 @@ +#version 450 +#extension GL_AMD_gcn_shader : require +#extension GL_ARB_gpu_shader_int64 : require + +layout (local_size_x = 64) in; + +void main () +{ + float cubeFace = cubeFaceIndexAMD(vec3(0.0)); + vec2 cubeFaceCoord = cubeFaceCoordAMD(vec3(1.0)); + + uint64_t time = timeAMD(); +} diff --git a/3rdparty/spirv-cross/shaders/amd/shader_ballot.comp b/3rdparty/spirv-cross/shaders/amd/shader_ballot.comp new file mode 100644 index 000000000..d2a727112 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/shader_ballot.comp @@ -0,0 +1,33 @@ +#version 450 +#extension GL_AMD_shader_ballot : require +#extension GL_ARB_shader_ballot : require + +layout (local_size_x = 64) in; +layout (std430, binding = 0) buffer inputData +{ + float inputDataArray[]; +}; + +layout (std430, binding = 1) buffer outputData +{ + float outputDataArray[]; +}; + +void main () +{ + float thisLaneData = inputDataArray [gl_LocalInvocationID.x]; + bool laneActive = (thisLaneData > 0); + + uint thisLaneOutputSlot = 
mbcntAMD (ballotARB (laneActive)); + + int firstInvocation = readFirstInvocationARB(1); + int invocation = readInvocationARB(1, 0); + + vec3 swizzleInvocations = swizzleInvocationsAMD(vec3(0.0, 2.0, 1.0), uvec4(3)); + vec3 swizzelInvocationsMasked = swizzleInvocationsMaskedAMD(vec3(0.0, 2.0, 1.0), uvec3(2)); + vec3 writeInvocation = writeInvocationAMD(swizzleInvocations, swizzelInvocationsMasked, 0); + + if (laneActive) { + outputDataArray[thisLaneOutputSlot] = thisLaneData; + } +} diff --git a/3rdparty/spirv-cross/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp b/3rdparty/spirv-cross/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp new file mode 100644 index 000000000..afcc31d99 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/shader_ballot_nonuniform_invocations.invalid.comp @@ -0,0 +1,9 @@ +#version 450 +#extension GL_AMD_shader_ballot : require + +void main () +{ + float addInvocations = addInvocationsNonUniformAMD(0.0); + int minInvocations = minInvocationsNonUniformAMD(1); + uint maxInvocations = maxInvocationsNonUniformAMD(4); +} diff --git a/3rdparty/spirv-cross/shaders/amd/shader_group_vote.comp b/3rdparty/spirv-cross/shaders/amd/shader_group_vote.comp new file mode 100644 index 000000000..d24aa92f8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/shader_group_vote.comp @@ -0,0 +1,18 @@ +#version 450 +#extension GL_ARB_shader_group_vote : require + +layout (local_size_x = 64) in; +layout (std430, binding = 0) buffer inputData +{ + float inputDataArray[]; +}; + +void main () +{ + float thisLaneData = inputDataArray [gl_LocalInvocationID.x]; + bool laneActive = (thisLaneData > 0); + + bool allInvocations = allInvocationsARB(laneActive); + bool anyInvocations = anyInvocationARB(laneActive); + bool allInvocationsEqual = allInvocationsEqualARB(laneActive); +} diff --git a/3rdparty/spirv-cross/shaders/amd/shader_trinary_minmax.comp b/3rdparty/spirv-cross/shaders/amd/shader_trinary_minmax.comp new file mode 100644 index 000000000..f836146a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/amd/shader_trinary_minmax.comp @@ -0,0 +1,11 @@ +#version 450 +#extension GL_AMD_shader_trinary_minmax : require + +layout (local_size_x = 64) in; + +void main () +{ + int t11 = min3(0, 3, 2); + int t12 = max3(0, 3, 2); + int t13 = mid3(0, 3, 2); +} diff --git a/3rdparty/spirv-cross/shaders/asm/comp/atomic-decrement.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/atomic-decrement.asm.comp new file mode 100644 index 000000000..a87b93188 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/atomic-decrement.asm.comp @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %3 "main" %15 + OpExecutionMode %3 LocalSize 4 1 1 + OpName %3 "main" + OpName %8 "u0" + OpName %9 "u0_counters" + OpMemberName %9 0 "c" + OpName %11 "u0_counter" + OpName %15 "vThreadID" + OpName %19 "r0" + OpDecorate %8 DescriptorSet 0 + OpDecorate %8 Binding 0 + OpMemberDecorate %9 0 Offset 0 + OpDecorate %9 BufferBlock + OpDecorate %11 DescriptorSet 1 + OpDecorate %11 Binding 0 + OpDecorate %15 BuiltIn GlobalInvocationId + %1 = OpTypeVoid + %2 = OpTypeFunction %1 + %5 = OpTypeInt 32 0 + %6 = OpTypeImage %5 Buffer 0 0 0 2 R32ui + %7 = OpTypePointer UniformConstant %6 + %8 = OpVariable %7 UniformConstant + %9 = OpTypeStruct %5 + %10 = OpTypePointer Uniform %9 + %11 = OpVariable %10 Uniform + %12 = OpTypeInt 
32 1 + %13 = OpTypeVector %12 3 + %14 = OpTypePointer Input %13 + %15 = OpVariable %14 Input + %16 = OpTypeFloat 32 + %17 = OpTypeVector %16 4 + %18 = OpTypePointer Function %17 + %20 = OpTypePointer Uniform %5 + %21 = OpConstant %5 0 + %23 = OpConstant %5 1 + %26 = OpTypePointer Function %16 + %33 = OpConstant %12 0 + %34 = OpConstant %5 2 + %37 = OpTypePointer Input %12 + %41 = OpTypeVector %5 4 + %3 = OpFunction %1 None %2 + %4 = OpLabel + %19 = OpVariable %18 Function + %22 = OpAccessChain %20 %11 %21 + %24 = OpAtomicIDecrement %5 %22 %23 %21 + %25 = OpBitcast %16 %24 + %27 = OpInBoundsAccessChain %26 %19 %21 + OpStore %27 %25 + %28 = OpLoad %6 %8 + %29 = OpInBoundsAccessChain %26 %19 %21 + %30 = OpLoad %16 %29 + %31 = OpBitcast %12 %30 + %32 = OpIMul %5 %31 %23 + %35 = OpShiftRightLogical %5 %33 %34 + %36 = OpIAdd %5 %32 %35 + %38 = OpInBoundsAccessChain %37 %15 %21 + %39 = OpLoad %12 %38 + %40 = OpBitcast %5 %39 + %42 = OpCompositeConstruct %41 %40 %40 %40 %40 + OpImageWrite %28 %36 %42 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/atomic-increment.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/atomic-increment.asm.comp new file mode 100644 index 000000000..3acb7115f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/atomic-increment.asm.comp @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %3 "main" %15 + OpExecutionMode %3 LocalSize 4 1 1 + OpName %3 "main" + OpName %8 "u0" + OpName %9 "u0_counters" + OpMemberName %9 0 "c" + OpName %11 "u0_counter" + OpName %15 "vThreadID" + OpName %19 "r0" + OpDecorate %8 DescriptorSet 0 + OpDecorate %8 Binding 0 + OpMemberDecorate %9 0 Offset 0 + OpDecorate %9 BufferBlock + OpDecorate %11 DescriptorSet 1 + OpDecorate %11 Binding 0 + OpDecorate %15 BuiltIn GlobalInvocationId + %1 = OpTypeVoid + %2 = OpTypeFunction %1 + %5 = OpTypeInt 32 0 + %6 = OpTypeImage %5 Buffer 0 0 0 2 R32ui + %7 = OpTypePointer UniformConstant %6 + %8 = OpVariable %7 UniformConstant + %9 = OpTypeStruct %5 + %10 = OpTypePointer Uniform %9 + %11 = OpVariable %10 Uniform + %12 = OpTypeInt 32 1 + %13 = OpTypeVector %12 3 + %14 = OpTypePointer Input %13 + %15 = OpVariable %14 Input + %16 = OpTypeFloat 32 + %17 = OpTypeVector %16 4 + %18 = OpTypePointer Function %17 + %20 = OpTypePointer Uniform %5 + %21 = OpConstant %5 0 + %23 = OpConstant %5 1 + %26 = OpTypePointer Function %16 + %33 = OpConstant %12 0 + %34 = OpConstant %5 2 + %37 = OpTypePointer Input %12 + %41 = OpTypeVector %5 4 + %3 = OpFunction %1 None %2 + %4 = OpLabel + %19 = OpVariable %18 Function + %22 = OpAccessChain %20 %11 %21 + %24 = OpAtomicIIncrement %5 %22 %23 %21 + %25 = OpBitcast %16 %24 + %27 = OpInBoundsAccessChain %26 %19 %21 + OpStore %27 %25 + %28 = OpLoad %6 %8 + %29 = OpInBoundsAccessChain %26 %19 %21 + %30 = OpLoad %16 %29 + %31 = OpBitcast %12 %30 + %32 = OpIMul %5 %31 %23 + %35 = OpShiftRightLogical %5 %33 %34 + %36 = OpIAdd %5 %32 %35 + %38 = OpInBoundsAccessChain %37 %15 %21 + %39 = OpLoad %12 %38 + %40 = OpBitcast %5 %39 + %42 = OpCompositeConstruct %41 %40 %40 %40 %40 + OpImageWrite %28 %36 %42 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/bitcast_iadd.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_iadd.asm.comp new file mode 100644 index 000000000..3b31ab285 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_iadd.asm.comp @@ -0,0 +1,79 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %inputs Restrict + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + OpDecorate %outputs Restrict + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of IAdd + %result_iadd_0 = OpIAdd %uvec4 %input0 %input1 + %result_iadd_1 = OpIAdd %uvec4 %input1 %input0 + %result_iadd_2 = OpIAdd %uvec4 %input0 %input0 + %result_iadd_3 = OpIAdd %uvec4 %input1 %input1 + %result_iadd_4 = OpIAdd %ivec4 %input0 %input0 + %result_iadd_5 = OpIAdd %ivec4 %input1 %input1 + %result_iadd_6 = OpIAdd %ivec4 %input0 %input1 + %result_iadd_7 = OpIAdd %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/bitcast_iequal.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_iequal.asm.comp new file mode 100644 index 000000000..c98f52c5a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_iequal.asm.comp @@ -0,0 +1,90 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 
0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + %bool = OpTypeBool + %bvec4 = OpTypeVector %bool 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + %uone = OpConstant %uint 1 + %uzero = OpConstant %uint 0 + %uvec41 = OpConstantComposite %uvec4 %uone %uone %uone %uone + %ivec41 = OpConstantComposite %ivec4 %one %one %one %one + %uvec40 = OpConstantComposite %uvec4 %uzero %uzero %uzero %uzero + %ivec40 = OpConstantComposite %ivec4 %zero %zero %zero %zero + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of IEqual + %result_iequal0 = OpIEqual %bvec4 %input0 %input1 + %result_iequal1 = OpIEqual %bvec4 %input1 %input0 + %result_iequal2 = OpIEqual %bvec4 %input0 %input0 + %result_iequal3 = OpIEqual %bvec4 %input1 %input1 + %result_0 = OpSelect %uvec4 %result_iequal0 %uvec41 %uvec40 + %result_1 = OpSelect %uvec4 %result_iequal1 %uvec41 %uvec40 + %result_2 = OpSelect %uvec4 %result_iequal2 %uvec41 %uvec40 + %result_3 = OpSelect %uvec4 %result_iequal3 %uvec41 %uvec40 + %result_4 = OpSelect %ivec4 %result_iequal0 %ivec41 %ivec40 + %result_5 = OpSelect %ivec4 %result_iequal1 %ivec41 %ivec40 + %result_6 = OpSelect %ivec4 %result_iequal2 %ivec41 %ivec40 + %result_7 = OpSelect %ivec4 %result_iequal3 %ivec41 %ivec40 + + OpStore %output_ptr_uvec4 %result_0 + OpStore %output_ptr_uvec4 %result_1 + OpStore %output_ptr_uvec4 %result_2 + OpStore %output_ptr_uvec4 %result_3 + OpStore %output_ptr_ivec4 %result_4 + OpStore %output_ptr_ivec4 %result_5 + OpStore %output_ptr_ivec4 %result_6 + OpStore %output_ptr_ivec4 %result_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/bitcast_sar.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_sar.asm.comp new file mode 100644 index 000000000..64f19fc34 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_sar.asm.comp @@ -0,0 +1,77 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate 
%output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of ShiftRightArithmetic + %result_iadd_0 = OpShiftRightArithmetic %uvec4 %input0 %input1 + %result_iadd_1 = OpShiftRightArithmetic %uvec4 %input1 %input0 + %result_iadd_2 = OpShiftRightArithmetic %uvec4 %input0 %input0 + %result_iadd_3 = OpShiftRightArithmetic %uvec4 %input1 %input1 + %result_iadd_4 = OpShiftRightArithmetic %ivec4 %input0 %input0 + %result_iadd_5 = OpShiftRightArithmetic %ivec4 %input1 %input1 + %result_iadd_6 = OpShiftRightArithmetic %ivec4 %input0 %input1 + %result_iadd_7 = OpShiftRightArithmetic %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/bitcast_sdiv.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_sdiv.asm.comp new file mode 100644 index 000000000..ab73ec83d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_sdiv.asm.comp @@ -0,0 +1,77 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = 
OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of SDiv + %result_iadd_0 = OpSDiv %uvec4 %input0 %input1 + %result_iadd_1 = OpSDiv %uvec4 %input1 %input0 + %result_iadd_2 = OpSDiv %uvec4 %input0 %input0 + %result_iadd_3 = OpSDiv %uvec4 %input1 %input1 + %result_iadd_4 = OpSDiv %ivec4 %input0 %input0 + %result_iadd_5 = OpSDiv %ivec4 %input1 %input1 + %result_iadd_6 = OpSDiv %ivec4 %input0 %input1 + %result_iadd_7 = OpSDiv %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/bitcast_slr.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_slr.asm.comp new file mode 100644 index 000000000..6741f5cb5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/bitcast_slr.asm.comp @@ -0,0 +1,77 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + 
%output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of ShiftRightLogical + %result_iadd_0 = OpShiftRightLogical %uvec4 %input0 %input1 + %result_iadd_1 = OpShiftRightLogical %uvec4 %input1 %input0 + %result_iadd_2 = OpShiftRightLogical %uvec4 %input0 %input0 + %result_iadd_3 = OpShiftRightLogical %uvec4 %input1 %input1 + %result_iadd_4 = OpShiftRightLogical %ivec4 %input0 %input0 + %result_iadd_5 = OpShiftRightLogical %ivec4 %input1 %input1 + %result_iadd_6 = OpShiftRightLogical %ivec4 %input0 %input1 + %result_iadd_7 = OpShiftRightLogical %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/block-name-alias-global.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/block-name-alias-global.asm.comp new file mode 100644 index 000000000..85f6cc041 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/block-name-alias-global.asm.comp @@ -0,0 +1,119 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 59 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %Foo "A" + OpMemberName %Foo 0 "a" + OpMemberName %Foo 1 "b" + OpName %A "A" + OpMemberName %A 0 "Data" + OpName %C1 "C1" + OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" + OpName %Foo_0 "A" + OpMemberName %Foo_0 0 "a" + OpMemberName %Foo_0 1 "b" + OpName %A_0 "A" + OpMemberName %A_0 0 "Data" + OpName %C2 "C2" + OpName %B "B" + OpMemberName %B 0 "Data" + OpName %C3 "C3" + OpName %B_0 "B" + OpMemberName %B_0 0 "Data" + OpName %C4 "C4" + OpMemberDecorate %Foo 0 Offset 0 + OpMemberDecorate %Foo 1 Offset 4 + OpDecorate %_runtimearr_Foo ArrayStride 8 + OpMemberDecorate %A 0 Offset 0 + OpDecorate %A BufferBlock + OpDecorate %C1 DescriptorSet 0 + OpDecorate %C1 Binding 1 + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + OpMemberDecorate %Foo_0 0 Offset 0 + OpMemberDecorate %Foo_0 1 Offset 4 + OpDecorate %_arr_Foo_0_uint_1024 ArrayStride 16 + OpMemberDecorate %A_0 0 Offset 0 + OpDecorate %A_0 Block + OpDecorate %C2 DescriptorSet 0 + OpDecorate %C2 Binding 2 + OpDecorate %_runtimearr_Foo_0 ArrayStride 8 + OpMemberDecorate %B 0 Offset 0 + OpDecorate %B BufferBlock + OpDecorate %C3 DescriptorSet 0 + OpDecorate %C3 Binding 0 + OpDecorate %_arr_Foo_0_uint_1024_0 ArrayStride 16 + OpMemberDecorate %B_0 0 Offset 0 + OpDecorate %B_0 Block + OpDecorate %C4 DescriptorSet 0 + OpDecorate %C4 Binding 3 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %Foo = OpTypeStruct %int %int +%_runtimearr_Foo = OpTypeRuntimeArray %Foo + %A = OpTypeStruct %_runtimearr_Foo +%_ptr_Uniform_A = OpTypePointer Uniform %A + %C1 = OpVariable %_ptr_Uniform_A Uniform + %int_0 = OpConstant %int 0 + %uint = OpTypeInt 32 0 + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input + %uint_0 = OpConstant %uint 0 +%_ptr_Input_uint = OpTypePointer Input %uint + %Foo_0 = OpTypeStruct %int %int + %uint_1024 
= OpConstant %uint 1024 +%_arr_Foo_0_uint_1024 = OpTypeArray %Foo_0 %uint_1024 + %A_0 = OpTypeStruct %_arr_Foo_0_uint_1024 +%_ptr_Uniform_A_0 = OpTypePointer Uniform %A_0 + %C2 = OpVariable %_ptr_Uniform_A_0 Uniform +%_ptr_Uniform_Foo_0 = OpTypePointer Uniform %Foo_0 +%_ptr_Uniform_Foo = OpTypePointer Uniform %Foo +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_1 = OpConstant %int 1 +%_runtimearr_Foo_0 = OpTypeRuntimeArray %Foo + %B = OpTypeStruct %_runtimearr_Foo_0 +%_ptr_Uniform_B = OpTypePointer Uniform %B + %C3 = OpVariable %_ptr_Uniform_B Uniform +%_arr_Foo_0_uint_1024_0 = OpTypeArray %Foo_0 %uint_1024 + %B_0 = OpTypeStruct %_arr_Foo_0_uint_1024_0 +%_ptr_Uniform_B_0 = OpTypePointer Uniform %B_0 + %C4 = OpVariable %_ptr_Uniform_B_0 Uniform + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %20 = OpLoad %uint %19 + %27 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %28 = OpLoad %uint %27 + %30 = OpAccessChain %_ptr_Uniform_Foo_0 %C2 %int_0 %28 + %31 = OpLoad %Foo_0 %30 + %33 = OpAccessChain %_ptr_Uniform_Foo %C1 %int_0 %20 + %34 = OpCompositeExtract %int %31 0 + %36 = OpAccessChain %_ptr_Uniform_int %33 %int_0 + OpStore %36 %34 + %37 = OpCompositeExtract %int %31 1 + %39 = OpAccessChain %_ptr_Uniform_int %33 %int_1 + OpStore %39 %37 + %44 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %45 = OpLoad %uint %44 + %50 = OpAccessChain %_ptr_Input_uint %gl_GlobalInvocationID %uint_0 + %51 = OpLoad %uint %50 + %52 = OpAccessChain %_ptr_Uniform_Foo_0 %C4 %int_0 %51 + %53 = OpLoad %Foo_0 %52 + %54 = OpAccessChain %_ptr_Uniform_Foo %C3 %int_0 %45 + %55 = OpCompositeExtract %int %53 0 + %56 = OpAccessChain %_ptr_Uniform_int %54 %int_0 + OpStore %56 %55 + %57 = OpCompositeExtract %int %53 1 + %58 = OpAccessChain %_ptr_Uniform_int %54 %int_1 + OpStore %58 %57 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/builtin-compute-bitcast.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/builtin-compute-bitcast.asm.comp new file mode 100644 index 000000000..4bc9202fc --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/builtin-compute-bitcast.asm.comp @@ -0,0 +1,50 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 26 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_WorkGroupID %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %BUF "BUF" + OpMemberName %BUF 0 "values" + OpName %_ "" + OpName %gl_WorkGroupID "gl_WorkGroupID" + OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" + OpDecorate %_runtimearr_int ArrayStride 4 + OpMemberDecorate %BUF 0 Offset 0 + OpDecorate %BUF BufferBlock + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %gl_WorkGroupID BuiltIn WorkgroupId + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + %void = OpTypeVoid + %int = OpTypeInt 32 1 +%_runtimearr_int = OpTypeRuntimeArray %int + %3 = OpTypeFunction %void + %BUF = OpTypeStruct %_runtimearr_int +%_ptr_Uniform_BUF = OpTypePointer Uniform %BUF + %_ = OpVariable %_ptr_Uniform_BUF Uniform + %int_0 = OpConstant %int 0 + %v3int = OpTypeVector %int 3 +%_ptr_Input_v3int = OpTypePointer Input %v3int +%gl_WorkGroupID = OpVariable %_ptr_Input_v3int Input + %int_1 = OpConstant %int 1 +%_ptr_Input_int = OpTypePointer Input %int +%gl_GlobalInvocationID = OpVariable 
%_ptr_Input_v3int Input + %int_2 = OpConstant %int 2 +%_ptr_Uniform_int = OpTypePointer Uniform %int + %main = OpFunction %void None %3 + %5 = OpLabel + %18 = OpAccessChain %_ptr_Input_int %gl_WorkGroupID %int_1 + %19 = OpLoad %int %18 + %22 = OpAccessChain %_ptr_Input_int %gl_GlobalInvocationID %int_2 + %23 = OpLoad %int %22 + %25 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %19 + OpStore %25 %23 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/decoration-group.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/decoration-group.asm.comp new file mode 100644 index 000000000..b597b4bd8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/decoration-group.asm.comp @@ -0,0 +1,99 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos SPIR-V Tools Assembler; 0 +; Bound: 58 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %1 "main" %2 + OpExecutionMode %1 LocalSize 1 1 1 + OpSource GLSL 430 + OpName %1 "main" + OpName %2 "gl_GlobalInvocationID" + OpDecorate %2 BuiltIn GlobalInvocationId + OpDecorate %3 ArrayStride 4 + OpDecorate %4 BufferBlock + OpDecorate %5 Offset 0 + %4 = OpDecorationGroup + %5 = OpDecorationGroup + OpGroupDecorate %4 %6 %7 %8 %9 %10 %11 + OpGroupMemberDecorate %5 %6 0 %7 0 %8 0 %9 0 %10 0 %11 0 + OpDecorate %12 DescriptorSet 0 + OpDecorate %13 DescriptorSet 0 + OpDecorate %13 NonWritable + OpDecorate %13 Restrict + %14 = OpDecorationGroup + %12 = OpDecorationGroup + %13 = OpDecorationGroup + OpGroupDecorate %12 %15 + OpGroupDecorate %12 %15 + OpGroupDecorate %12 %15 + OpDecorate %15 DescriptorSet 0 + OpDecorate %15 Binding 5 + OpGroupDecorate %14 %16 + OpDecorate %16 DescriptorSet 0 + OpDecorate %16 Binding 0 + OpGroupDecorate %12 %17 + OpDecorate %17 Binding 1 + OpGroupDecorate %13 %18 %19 + OpDecorate %18 Binding 2 + OpDecorate %19 Binding 3 + OpGroupDecorate %14 %20 + OpGroupDecorate %12 %20 + OpGroupDecorate %13 %20 + OpDecorate %20 Binding 4 + %21 = OpTypeBool + %22 = OpTypeVoid + %23 = OpTypeFunction %22 + %24 = OpTypeInt 32 0 + %25 = OpTypeInt 32 1 + %26 = OpTypeFloat 32 + %27 = OpTypeVector %24 3 + %28 = OpTypeVector %26 3 + %29 = OpTypePointer Input %27 + %30 = OpTypePointer Uniform %25 + %31 = OpTypePointer Uniform %26 + %32 = OpTypeRuntimeArray %25 + %3 = OpTypeRuntimeArray %26 + %2 = OpVariable %29 Input + %33 = OpConstant %25 0 + %6 = OpTypeStruct %3 + %34 = OpTypePointer Uniform %6 + %15 = OpVariable %34 Uniform + %7 = OpTypeStruct %3 + %35 = OpTypePointer Uniform %7 + %16 = OpVariable %35 Uniform + %8 = OpTypeStruct %3 + %36 = OpTypePointer Uniform %8 + %17 = OpVariable %36 Uniform + %9 = OpTypeStruct %3 + %37 = OpTypePointer Uniform %9 + %18 = OpVariable %37 Uniform + %10 = OpTypeStruct %3 + %38 = OpTypePointer Uniform %10 + %19 = OpVariable %38 Uniform + %11 = OpTypeStruct %3 + %39 = OpTypePointer Uniform %11 + %20 = OpVariable %39 Uniform + %1 = OpFunction %22 None %23 + %40 = OpLabel + %41 = OpLoad %27 %2 + %42 = OpCompositeExtract %24 %41 0 + %43 = OpAccessChain %31 %16 %33 %42 + %44 = OpAccessChain %31 %17 %33 %42 + %45 = OpAccessChain %31 %18 %33 %42 + %46 = OpAccessChain %31 %19 %33 %42 + %47 = OpAccessChain %31 %20 %33 %42 + %48 = OpAccessChain %31 %15 %33 %42 + %49 = OpLoad %26 %43 + %50 = OpLoad %26 %44 + %51 = OpLoad %26 %45 + %52 = OpLoad %26 %46 + %53 = OpLoad %26 %47 + %54 = OpFAdd %26 %49 %50 + %55 = OpFAdd %26 %54 %51 + %56 = OpFAdd %26 %55 %52 + %57 = OpFAdd %26 %56 %53 + OpStore %48 %57 + OpReturn + OpFunctionEnd diff --git 
a/3rdparty/spirv-cross/shaders/asm/comp/global-parameter-name-alias.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/global-parameter-name-alias.asm.comp new file mode 100644 index 000000000..78b1dc74e --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/global-parameter-name-alias.asm.comp @@ -0,0 +1,102 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 61 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %id_1 + OpExecutionMode %main LocalSize 1 1 1 + OpSource HLSL 500 + OpName %main "main" + OpName %Load_u1_ "Load(u1;" + OpName %size "size" + OpName %_main_vu3_ "@main(vu3;" + OpName %id "id" + OpName %data "data" + OpName %byteAddrTemp "byteAddrTemp" + OpName %ssbo "ssbo" + OpMemberName %ssbo 0 "@data" + OpName %ssbo_0 "ssbo" + OpName %param "param" + OpName %id_0 "id" + OpName %id_1 "id" + OpName %param_0 "param" + OpDecorate %_runtimearr_uint ArrayStride 4 + OpMemberDecorate %ssbo 0 NonWritable + OpMemberDecorate %ssbo 0 Offset 0 + OpDecorate %ssbo BufferBlock + OpDecorate %ssbo_0 DescriptorSet 0 + OpDecorate %ssbo_0 Binding 1 + OpDecorate %id_1 BuiltIn GlobalInvocationId + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %8 = OpTypeFunction %void %_ptr_Function_uint + %v3uint = OpTypeVector %uint 3 +%_ptr_Function_v3uint = OpTypePointer Function %v3uint + %14 = OpTypeFunction %void %_ptr_Function_v3uint + %v4uint = OpTypeVector %uint 4 +%_ptr_Function_v4uint = OpTypePointer Function %v4uint + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_2 = OpConstant %int 2 +%_runtimearr_uint = OpTypeRuntimeArray %uint + %ssbo = OpTypeStruct %_runtimearr_uint +%_ptr_Uniform_ssbo = OpTypePointer Uniform %ssbo + %ssbo_0 = OpVariable %_ptr_Uniform_ssbo Uniform + %int_0 = OpConstant %int 0 +%_ptr_Uniform_uint = OpTypePointer Uniform %uint + %int_1 = OpConstant %int 1 + %int_3 = OpConstant %int 3 + %uint_4 = OpConstant %uint 4 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint + %id_1 = OpVariable %_ptr_Input_v3uint Input + %main = OpFunction %void None %3 + %5 = OpLabel + %id_0 = OpVariable %_ptr_Function_v3uint Function + %param_0 = OpVariable %_ptr_Function_v3uint Function + %57 = OpLoad %v3uint %id_1 + OpStore %id_0 %57 + %59 = OpLoad %v3uint %id_0 + OpStore %param_0 %59 + %60 = OpFunctionCall %void %_main_vu3_ %param_0 + OpReturn + OpFunctionEnd + %Load_u1_ = OpFunction %void None %8 + %size = OpFunctionParameter %_ptr_Function_uint + %11 = OpLabel + %data = OpVariable %_ptr_Function_v4uint Function +%byteAddrTemp = OpVariable %_ptr_Function_int Function + %24 = OpLoad %uint %size + %26 = OpShiftRightLogical %int %24 %int_2 + OpStore %byteAddrTemp %26 + %32 = OpLoad %int %byteAddrTemp + %34 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %32 + %35 = OpLoad %uint %34 + %36 = OpLoad %int %byteAddrTemp + %38 = OpIAdd %int %36 %int_1 + %39 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %38 + %40 = OpLoad %uint %39 + %41 = OpLoad %int %byteAddrTemp + %42 = OpIAdd %int %41 %int_2 + %43 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %42 + %44 = OpLoad %uint %43 + %45 = OpLoad %int %byteAddrTemp + %47 = OpIAdd %int %45 %int_3 + %48 = OpAccessChain %_ptr_Uniform_uint %ssbo_0 %int_0 %47 + %49 = OpLoad %uint %48 + %50 = OpCompositeConstruct %v4uint %35 %40 %44 %49 + OpStore %data %50 + OpReturn + OpFunctionEnd + %_main_vu3_ = OpFunction %void None %14 + %id 
= OpFunctionParameter %_ptr_Function_v3uint + %17 = OpLabel + %param = OpVariable %_ptr_Function_uint Function + OpStore %param %uint_4 + %53 = OpFunctionCall %void %Load_u1_ %param + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/hlsl-functionality.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/hlsl-functionality.asm.comp new file mode 100644 index 000000000..dfdcb4540 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/hlsl-functionality.asm.comp @@ -0,0 +1,63 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 31 +; Schema: 0 + OpCapability Shader + OpExtension "SPV_GOOGLE_hlsl_functionality1" + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 1 1 + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %Buf "Buf" + OpMemberName %Buf 0 "@data" + OpName %Buf_0 "Buf" + OpName %Buf_count "Buf@count" + OpMemberName %Buf_count 0 "@count" + OpName %Buf_count_0 "Buf@count" + OpDecorate %_runtimearr_v4float ArrayStride 16 + OpMemberDecorate %Buf 0 Offset 0 + OpDecorate %Buf BufferBlock + OpDecorate %Buf_0 DescriptorSet 0 + OpDecorate %Buf_0 Binding 0 + OpMemberDecorate %Buf_count 0 Offset 0 + OpDecorate %Buf_count BufferBlock + OpDecorate %Buf_count_0 DescriptorSet 0 + OpDecorateId %Buf_0 HlslCounterBufferGOOGLE %Buf_count_0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_runtimearr_v4float = OpTypeRuntimeArray %v4float + %Buf = OpTypeStruct %_runtimearr_v4float +%_ptr_Uniform_Buf = OpTypePointer Uniform %Buf + %Buf_0 = OpVariable %_ptr_Uniform_Buf Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %Buf_count = OpTypeStruct %int +%_ptr_Uniform_Buf_count = OpTypePointer Uniform %Buf_count +%Buf_count_0 = OpVariable %_ptr_Uniform_Buf_count Uniform +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_1 = OpConstant %int 1 + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 + %uint_0 = OpConstant %uint 0 + %float_1 = OpConstant %float 1 + %27 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %30 = OpFunctionCall %void %_main_ + OpReturn + OpFunctionEnd + %_main_ = OpFunction %void None %3 + %7 = OpLabel + %20 = OpAccessChain %_ptr_Uniform_int %Buf_count_0 %int_0 + %25 = OpAtomicIAdd %int %20 %uint_1 %uint_0 %int_1 + %29 = OpAccessChain %_ptr_Uniform_v4float %Buf_0 %int_0 %25 + OpStore %29 %27 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/logical.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/logical.asm.comp new file mode 100644 index 000000000..4174e77f3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/logical.asm.comp @@ -0,0 +1,191 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 152 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 1 1 + OpSource ESSL 310 + OpName %main "main" + OpName %and_b1_b1_ "and(b1;b1;" + OpName %a "a" + OpName %b "b" + OpName %and_vb2_vb2_ "and(vb2;vb2;" + OpName %a_0 "a" + OpName %b_0 "b" + OpName %and_vb3_vb3_ "and(vb3;vb3;" + OpName %a_1 "a" + OpName %b_1 "b" + OpName %and_vb4_vb4_ "and(vb4;vb4;" + OpName %a_2 "a" + OpName %b_2 "b" + OpName %b0 "b0" + OpName %SSBO0 
"SSBO0" + OpMemberName %SSBO0 0 "a" + OpMemberName %SSBO0 1 "b" + OpMemberName %SSBO0 2 "c" + OpMemberName %SSBO0 3 "d" + OpName %s0 "s0" + OpName %SSBO1 "SSBO1" + OpMemberName %SSBO1 0 "a" + OpMemberName %SSBO1 1 "b" + OpMemberName %SSBO1 2 "c" + OpMemberName %SSBO1 3 "d" + OpName %s1 "s1" + OpName %param "param" + OpName %param_0 "param" + OpName %b1 "b1" + OpName %param_1 "param" + OpName %param_2 "param" + OpName %b2 "b2" + OpName %param_3 "param" + OpName %param_4 "param" + OpName %b3 "b3" + OpName %param_5 "param" + OpName %param_6 "param" + OpMemberDecorate %SSBO0 0 Offset 0 + OpMemberDecorate %SSBO0 1 Offset 8 + OpMemberDecorate %SSBO0 2 Offset 16 + OpMemberDecorate %SSBO0 3 Offset 32 + OpDecorate %SSBO0 BufferBlock + OpDecorate %s0 DescriptorSet 0 + OpDecorate %s0 Binding 0 + OpMemberDecorate %SSBO1 0 Offset 0 + OpMemberDecorate %SSBO1 1 Offset 8 + OpMemberDecorate %SSBO1 2 Offset 16 + OpMemberDecorate %SSBO1 3 Offset 32 + OpDecorate %SSBO1 BufferBlock + OpDecorate %s1 DescriptorSet 0 + OpDecorate %s1 Binding 1 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %bool = OpTypeBool +%_ptr_Function_bool = OpTypePointer Function %bool + %8 = OpTypeFunction %bool %_ptr_Function_bool %_ptr_Function_bool + %v2bool = OpTypeVector %bool 2 +%_ptr_Function_v2bool = OpTypePointer Function %v2bool + %15 = OpTypeFunction %v2bool %_ptr_Function_v2bool %_ptr_Function_v2bool + %v3bool = OpTypeVector %bool 3 +%_ptr_Function_v3bool = OpTypePointer Function %v3bool + %22 = OpTypeFunction %v3bool %_ptr_Function_v3bool %_ptr_Function_v3bool + %v4bool = OpTypeVector %bool 4 +%_ptr_Function_v4bool = OpTypePointer Function %v4bool + %29 = OpTypeFunction %v4bool %_ptr_Function_v4bool %_ptr_Function_v4bool + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %v3float = OpTypeVector %float 3 + %v4float = OpTypeVector %float 4 + %SSBO0 = OpTypeStruct %float %v2float %v3float %v4float +%_ptr_Uniform_SSBO0 = OpTypePointer Uniform %SSBO0 + %s0 = OpVariable %_ptr_Uniform_SSBO0 Uniform + %int = OpTypeInt 32 1 + %102 = OpConstant %int 0 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %SSBO1 = OpTypeStruct %float %v2float %v3float %v4float +%_ptr_Uniform_SSBO1 = OpTypePointer Uniform %SSBO1 + %s1 = OpVariable %_ptr_Uniform_SSBO1 Uniform + %117 = OpConstant %int 1 +%_ptr_Uniform_v2float = OpTypePointer Uniform %v2float + %129 = OpConstant %int 2 +%_ptr_Uniform_v3float = OpTypePointer Uniform %v3float + %141 = OpConstant %int 3 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %b0 = OpVariable %_ptr_Function_bool Function + %param = OpVariable %_ptr_Function_bool Function + %param_0 = OpVariable %_ptr_Function_bool Function + %b1 = OpVariable %_ptr_Function_v2bool Function + %param_1 = OpVariable %_ptr_Function_v2bool Function + %param_2 = OpVariable %_ptr_Function_v2bool Function + %b2 = OpVariable %_ptr_Function_v3bool Function + %param_3 = OpVariable %_ptr_Function_v3bool Function + %param_4 = OpVariable %_ptr_Function_v3bool Function + %b3 = OpVariable %_ptr_Function_v4bool Function + %param_5 = OpVariable %_ptr_Function_v4bool Function + %param_6 = OpVariable %_ptr_Function_v4bool Function + %104 = OpAccessChain %_ptr_Uniform_float %s0 %102 + %105 = OpLoad %float %104 + %106 = OpIsInf %bool %105 + %110 = OpAccessChain %_ptr_Uniform_float %s1 %102 + %111 = OpLoad %float %110 + %112 = OpIsNan %bool %111 + OpStore %param %106 + OpStore %param_0 %112 + %115 = OpFunctionCall %bool %and_b1_b1_ %param %param_0 + OpStore %b0 %115 + %119 = 
OpAccessChain %_ptr_Uniform_v2float %s0 %117 + %120 = OpLoad %v2float %119 + %121 = OpIsInf %v2bool %120 + %122 = OpAccessChain %_ptr_Uniform_v2float %s1 %117 + %123 = OpLoad %v2float %122 + %124 = OpIsNan %v2bool %123 + OpStore %param_1 %121 + OpStore %param_2 %124 + %127 = OpFunctionCall %v2bool %and_vb2_vb2_ %param_1 %param_2 + OpStore %b1 %127 + %131 = OpAccessChain %_ptr_Uniform_v3float %s0 %129 + %132 = OpLoad %v3float %131 + %133 = OpIsInf %v3bool %132 + %134 = OpAccessChain %_ptr_Uniform_v3float %s1 %129 + %135 = OpLoad %v3float %134 + %136 = OpIsNan %v3bool %135 + OpStore %param_3 %133 + OpStore %param_4 %136 + %139 = OpFunctionCall %v3bool %and_vb3_vb3_ %param_3 %param_4 + OpStore %b2 %139 + %143 = OpAccessChain %_ptr_Uniform_v4float %s0 %141 + %144 = OpLoad %v4float %143 + %145 = OpIsInf %v4bool %144 + %146 = OpAccessChain %_ptr_Uniform_v4float %s1 %141 + %147 = OpLoad %v4float %146 + %148 = OpIsNan %v4bool %147 + OpStore %param_5 %145 + OpStore %param_6 %148 + %151 = OpFunctionCall %v4bool %and_vb4_vb4_ %param_5 %param_6 + OpStore %b3 %151 + OpReturn + OpFunctionEnd + %and_b1_b1_ = OpFunction %bool None %8 + %a = OpFunctionParameter %_ptr_Function_bool + %b = OpFunctionParameter %_ptr_Function_bool + %12 = OpLabel + %34 = OpLoad %bool %a + %35 = OpLoad %bool %b + %36 = OpLogicalAnd %bool %34 %35 + %37 = OpLogicalOr %bool %36 %35 + %38 = OpLogicalNot %bool %37 + OpReturnValue %38 + OpFunctionEnd +%and_vb2_vb2_ = OpFunction %v2bool None %15 + %a_0 = OpFunctionParameter %_ptr_Function_v2bool + %b_0 = OpFunctionParameter %_ptr_Function_v2bool + %19 = OpLabel + %39 = OpLoad %v2bool %a_0 + %41 = OpLoad %v2bool %b_0 + %48 = OpLogicalAnd %v2bool %39 %41 + %49 = OpLogicalOr %v2bool %48 %41 + %50 = OpLogicalNot %v2bool %49 + OpReturnValue %50 + OpFunctionEnd +%and_vb3_vb3_ = OpFunction %v3bool None %22 + %a_1 = OpFunctionParameter %_ptr_Function_v3bool + %b_1 = OpFunctionParameter %_ptr_Function_v3bool + %26 = OpLabel + %52 = OpLoad %v3bool %a_1 + %54 = OpLoad %v3bool %b_1 + %66 = OpLogicalAnd %v3bool %52 %54 + OpReturnValue %66 + OpFunctionEnd +%and_vb4_vb4_ = OpFunction %v4bool None %29 + %a_2 = OpFunctionParameter %_ptr_Function_v4bool + %b_2 = OpFunctionParameter %_ptr_Function_v4bool + %33 = OpLabel + %70 = OpLoad %v4bool %a_2 + %72 = OpLoad %v4bool %b_2 + %74 = OpLogicalAnd %v4bool %70 %72 + OpReturnValue %74 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/multiple-entry.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/multiple-entry.asm.comp new file mode 100644 index 000000000..0cfb5543d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/multiple-entry.asm.comp @@ -0,0 +1,97 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 30 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %func_alt "main2" %frag_in %frag_out + OpEntryPoint GLCompute %func "main" + OpExecutionMode %func LocalSize 1 1 1 + OpSource ESSL 310 + OpSourceExtension "GL_GOOGLE_cpp_style_line_directive" + OpSourceExtension "GL_GOOGLE_include_directive" + OpMemberDecorate %input_struct 0 Offset 0 + OpMemberDecorate %input_struct 1 Offset 16 + OpMemberDecorate %output_struct 0 Offset 0 + OpMemberDecorate %output_struct 1 Offset 16 + OpDecorate %input_struct BufferBlock + OpDecorate %inputs DescriptorSet 0 + OpDecorate %inputs Binding 0 + OpDecorate %inputs Restrict + OpDecorate %output_struct BufferBlock + OpDecorate %outputs DescriptorSet 0 + OpDecorate %outputs Binding 1 + 
OpDecorate %outputs Restrict + OpDecorate %frag_in Location 0 + OpDecorate %frag_out Location 0 + + %void = OpTypeVoid + %main_func = OpTypeFunction %void + + %uint = OpTypeInt 32 0 + %uvec4 = OpTypeVector %uint 4 + + %int = OpTypeInt 32 1 + %ivec4 = OpTypeVector %int 4 + + %ivec4_ptr = OpTypePointer Uniform %ivec4 + %uvec4_ptr = OpTypePointer Uniform %uvec4 + + %float = OpTypeFloat 32 + %vec4 = OpTypeVector %float 4 + %vec4_input_ptr = OpTypePointer Input %vec4 + %vec4_output_ptr = OpTypePointer Output %vec4 + + %zero = OpConstant %int 0 + %one = OpConstant %int 1 + + %input_struct = OpTypeStruct %ivec4 %uvec4 + %input_struct_ptr = OpTypePointer Uniform %input_struct + %inputs = OpVariable %input_struct_ptr Uniform + %output_struct = OpTypeStruct %uvec4 %ivec4 + %output_struct_ptr = OpTypePointer Uniform %output_struct + %outputs = OpVariable %output_struct_ptr Uniform + + %frag_in = OpVariable %vec4_input_ptr Input + %frag_out = OpVariable %vec4_output_ptr Output + + %func = OpFunction %void None %main_func + %block = OpLabel + + %input1_ptr = OpAccessChain %ivec4_ptr %inputs %zero + %input0_ptr = OpAccessChain %uvec4_ptr %inputs %one + %input1 = OpLoad %ivec4 %input1_ptr + %input0 = OpLoad %uvec4 %input0_ptr + + %output_ptr_uvec4 = OpAccessChain %uvec4_ptr %outputs %zero + %output_ptr_ivec4 = OpAccessChain %ivec4_ptr %outputs %one + +; Test all variants of IAdd + %result_iadd_0 = OpIAdd %uvec4 %input0 %input1 + %result_iadd_1 = OpIAdd %uvec4 %input1 %input0 + %result_iadd_2 = OpIAdd %uvec4 %input0 %input0 + %result_iadd_3 = OpIAdd %uvec4 %input1 %input1 + %result_iadd_4 = OpIAdd %ivec4 %input0 %input0 + %result_iadd_5 = OpIAdd %ivec4 %input1 %input1 + %result_iadd_6 = OpIAdd %ivec4 %input0 %input1 + %result_iadd_7 = OpIAdd %ivec4 %input1 %input0 + OpStore %output_ptr_uvec4 %result_iadd_0 + OpStore %output_ptr_uvec4 %result_iadd_1 + OpStore %output_ptr_uvec4 %result_iadd_2 + OpStore %output_ptr_uvec4 %result_iadd_3 + OpStore %output_ptr_ivec4 %result_iadd_4 + OpStore %output_ptr_ivec4 %result_iadd_5 + OpStore %output_ptr_ivec4 %result_iadd_6 + OpStore %output_ptr_ivec4 %result_iadd_7 + + OpReturn + OpFunctionEnd + + %func_alt = OpFunction %void None %main_func + %block_alt = OpLabel + %frag_input_value = OpLoad %vec4 %frag_in + OpStore %frag_out %frag_input_value + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/op-phi-swap.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/op-phi-swap.asm.comp new file mode 100644 index 000000000..dc18d6972 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/op-phi-swap.asm.comp @@ -0,0 +1,63 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos SPIR-V Tools Assembler; 0 +; Bound: 39 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" %gl_GlobalInvocationID + OpExecutionMode %main LocalSize 1 1 1 + OpName %main "main" + OpName %gl_GlobalInvocationID "gl_GlobalInvocationID" + OpDecorate %gl_GlobalInvocationID BuiltIn GlobalInvocationId + OpDecorate %_struct_3 BufferBlock + OpDecorate %4 DescriptorSet 0 + OpDecorate %4 Binding 0 + OpDecorate %5 DescriptorSet 0 + OpDecorate %5 Binding 1 + OpDecorate %_runtimearr_float ArrayStride 4 + OpMemberDecorate %_struct_3 0 Offset 0 + %bool = OpTypeBool + %void = OpTypeVoid + %9 = OpTypeFunction %void + %uint = OpTypeInt 32 0 + %int = OpTypeInt 32 1 + %float = OpTypeFloat 32 + %v3uint = OpTypeVector %uint 3 + %v3float = OpTypeVector %float 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%_ptr_Uniform_int = OpTypePointer Uniform 
%int +%_ptr_Uniform_float = OpTypePointer Uniform %float +%_runtimearr_int = OpTypeRuntimeArray %int +%_runtimearr_float = OpTypeRuntimeArray %float + %_struct_3 = OpTypeStruct %_runtimearr_float +%_ptr_Uniform__struct_3 = OpTypePointer Uniform %_struct_3 + %4 = OpVariable %_ptr_Uniform__struct_3 Uniform + %5 = OpVariable %_ptr_Uniform__struct_3 Uniform +%_ptr_Function_float = OpTypePointer Function %float +%gl_GlobalInvocationID = OpVariable %_ptr_Input_v3uint Input + %true = OpConstantTrue %bool + %false = OpConstantFalse %bool + %int_0 = OpConstant %int 0 + %float_8_5 = OpConstant %float 8.5 + %main = OpFunction %void None %9 + %25 = OpLabel + %26 = OpVariable %_ptr_Function_float Function %float_8_5 + %27 = OpLoad %v3uint %gl_GlobalInvocationID + %28 = OpCompositeExtract %uint %27 0 + %29 = OpAccessChain %_ptr_Uniform_float %4 %int_0 %28 + %30 = OpAccessChain %_ptr_Uniform_float %5 %int_0 %28 + %31 = OpLoad %float %29 + %32 = OpLoad %float %26 + OpBranch %33 + %33 = OpLabel + %34 = OpPhi %bool %true %25 %false %33 + %35 = OpPhi %float %31 %25 %36 %33 + %36 = OpPhi %float %32 %25 %35 %33 + OpLoopMerge %37 %33 None + OpBranchConditional %34 %33 %37 + %37 = OpLabel + %38 = OpFSub %float %35 %36 + OpStore %30 %38 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/quantize.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/quantize.asm.comp new file mode 100644 index 000000000..f5afc6570 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/quantize.asm.comp @@ -0,0 +1,67 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 38 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %4 "main" + OpExecutionMode %4 LocalSize 1 1 1 + OpSource ESSL 310 + OpName %4 "main" + OpName %10 "SSBO0" + OpMemberName %10 0 "scalar" + OpMemberName %10 1 "vec2_val" + OpMemberName %10 2 "vec3_val" + OpMemberName %10 3 "vec4_val" + OpName %12 "" + OpMemberDecorate %10 0 Offset 0 + OpMemberDecorate %10 1 Offset 8 + OpMemberDecorate %10 2 Offset 16 + OpMemberDecorate %10 3 Offset 32 + OpDecorate %10 BufferBlock + OpDecorate %12 DescriptorSet 0 + OpDecorate %12 Binding 0 + %2 = OpTypeVoid + %3 = OpTypeFunction %2 + %6 = OpTypeFloat 32 + %7 = OpTypeVector %6 2 + %8 = OpTypeVector %6 3 + %9 = OpTypeVector %6 4 + %10 = OpTypeStruct %6 %7 %8 %9 + %11 = OpTypePointer Uniform %10 + %12 = OpVariable %11 Uniform + %13 = OpTypeInt 32 1 + %14 = OpConstant %13 0 + %15 = OpTypePointer Uniform %6 + %20 = OpConstant %13 1 + %21 = OpTypePointer Uniform %7 + %26 = OpConstant %13 2 + %27 = OpTypePointer Uniform %8 + %32 = OpConstant %13 3 + %33 = OpTypePointer Uniform %9 + %4 = OpFunction %2 None %3 + %5 = OpLabel + %16 = OpAccessChain %15 %12 %14 + %17 = OpLoad %6 %16 + %18 = OpQuantizeToF16 %6 %17 + %19 = OpAccessChain %15 %12 %14 + OpStore %19 %18 + %22 = OpAccessChain %21 %12 %20 + %23 = OpLoad %7 %22 + %24 = OpQuantizeToF16 %7 %23 + %25 = OpAccessChain %21 %12 %20 + OpStore %25 %24 + %28 = OpAccessChain %27 %12 %26 + %29 = OpLoad %8 %28 + %30 = OpQuantizeToF16 %8 %29 + %31 = OpAccessChain %27 %12 %26 + OpStore %31 %30 + %34 = OpAccessChain %33 %12 %32 + %35 = OpLoad %9 %34 + %36 = OpQuantizeToF16 %9 %35 + %37 = OpAccessChain %33 %12 %32 + OpStore %37 %36 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/specialization-constant-workgroup.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/specialization-constant-workgroup.asm.comp new file mode 100644 index 
000000000..188e3fec3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/specialization-constant-workgroup.asm.comp @@ -0,0 +1,47 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 24 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 20 1 + OpSource ESSL 310 + OpName %main "main" + OpName %SSBO "SSBO" + OpMemberName %SSBO 0 "a" + OpName %_ "" + OpMemberDecorate %SSBO 0 Offset 0 + OpDecorate %SSBO BufferBlock + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %19 SpecId 10 + OpDecorate %21 SpecId 12 + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %SSBO = OpTypeStruct %float +%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO + %_ = OpVariable %_ptr_Uniform_SSBO Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %uint = OpTypeInt 32 0 + %19 = OpSpecConstant %uint 9 + %uint_20 = OpConstant %uint 20 + %21 = OpSpecConstant %uint 4 + %v3uint = OpTypeVector %uint 3 +%gl_WorkGroupSize = OpSpecConstantComposite %v3uint %19 %uint_20 %21 + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + %15 = OpLoad %float %14 + %16 = OpFAdd %float %15 %float_1 + %17 = OpAccessChain %_ptr_Uniform_float %_ %int_0 + OpStore %17 %16 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp new file mode 100644 index 000000000..edb1a05e5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/storage-buffer-basic.invalid.asm.comp @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Codeplay; 0 +; Bound: 31 +; Schema: 0 + OpCapability Shader + OpCapability VariablePointers + OpExtension "SPV_KHR_storage_buffer_storage_class" + OpExtension "SPV_KHR_variable_pointers" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %22 "main" %gl_WorkGroupID + OpSource OpenCL_C 120 + OpDecorate %15 SpecId 0 + ;OpDecorate %16 SpecId 1 + OpDecorate %17 SpecId 2 + OpDecorate %_runtimearr_float ArrayStride 4 + OpMemberDecorate %_struct_4 0 Offset 0 + OpDecorate %_struct_4 Block + OpDecorate %gl_WorkGroupID BuiltIn WorkgroupId + OpDecorate %gl_WorkGroupSize BuiltIn WorkgroupSize + OpDecorate %20 DescriptorSet 0 + OpDecorate %20 Binding 0 + OpDecorate %21 DescriptorSet 0 + OpDecorate %21 Binding 1 + %float = OpTypeFloat 32 +%_ptr_StorageBuffer_float = OpTypePointer StorageBuffer %float +%_runtimearr_float = OpTypeRuntimeArray %float + %_struct_4 = OpTypeStruct %_runtimearr_float +%_ptr_StorageBuffer__struct_4 = OpTypePointer StorageBuffer %_struct_4 + %uint = OpTypeInt 32 0 + %void = OpTypeVoid + %8 = OpTypeFunction %void + %v3uint = OpTypeVector %uint 3 +%_ptr_Input_v3uint = OpTypePointer Input %v3uint +%_ptr_Input_uint = OpTypePointer Input %uint +%_ptr_Private_v3uint = OpTypePointer Private %v3uint + %uint_0 = OpConstant %uint 0 +%gl_WorkGroupID = OpVariable %_ptr_Input_v3uint Input + %15 = OpSpecConstant %uint 1 + %16 = OpConstant %uint 2 + %17 = OpSpecConstant %uint 3 +%gl_WorkGroupSize = OpSpecConstantComposite %v3uint %15 %16 %17 + %19 = OpVariable %_ptr_Private_v3uint Private %gl_WorkGroupSize + %20 = OpVariable %_ptr_StorageBuffer__struct_4 StorageBuffer + %21 = 
OpVariable %_ptr_StorageBuffer__struct_4 StorageBuffer + %22 = OpFunction %void None %8 + %23 = OpLabel + %24 = OpAccessChain %_ptr_Input_uint %gl_WorkGroupID %uint_0 + %25 = OpLoad %uint %24 + %26 = OpAccessChain %_ptr_StorageBuffer_float %21 %uint_0 %25 + %27 = OpLoad %float %26 + %28 = OpAccessChain %_ptr_StorageBuffer_float %20 %uint_0 %25 + %29 = OpLoad %float %28 + %30 = OpFAdd %float %27 %29 + OpStore %28 %30 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/comp/switch-break-ladder.asm.comp b/3rdparty/spirv-cross/shaders/asm/comp/switch-break-ladder.asm.comp new file mode 100644 index 000000000..a32c9ef98 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/comp/switch-break-ladder.asm.comp @@ -0,0 +1,92 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 50 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint GLCompute %main "main" + OpExecutionMode %main LocalSize 1 1 1 + OpSource GLSL 450 + OpName %main "main" + OpName %c "c" + OpName %BUF "BUF" + OpMemberName %BUF 0 "a" + OpMemberName %BUF 1 "b" + OpMemberName %BUF 2 "d" + OpName %o "o" + OpName %a "a" + OpMemberDecorate %BUF 0 Offset 0 + OpMemberDecorate %BUF 1 Offset 4 + OpMemberDecorate %BUF 2 Offset 8 + OpDecorate %BUF BufferBlock + OpDecorate %o DescriptorSet 0 + OpDecorate %o Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %BUF = OpTypeStruct %int %int %int +%_ptr_Uniform_BUF = OpTypePointer Uniform %BUF + %o = OpVariable %_ptr_Uniform_BUF Uniform + %int_0 = OpConstant %int 0 +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_2 = OpConstant %int 2 + %int_1 = OpConstant %int 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %c = OpVariable %_ptr_Function_int Function + %a = OpVariable %_ptr_Function_int Function + %14 = OpAccessChain %_ptr_Uniform_int %o %int_0 + %15 = OpLoad %int %14 + OpStore %c %15 + OpBranch %16 + %16 = OpLabel + OpLoopMerge %18 %19 None + OpBranch %17 + %17 = OpLabel + %20 = OpLoad %int %c + OpSelectionMerge %23 None + OpSwitch %20 %23 5 %21 1 %22 2 %22 3 %22 + %21 = OpLabel + OpBranch %24 + %24 = OpLabel + OpLoopMerge %26 %27 None + OpBranch %25 + %25 = OpLabel + %29 = OpAccessChain %_ptr_Uniform_int %o %int_2 + %30 = OpLoad %int %29 + OpSelectionMerge %33 None + OpSwitch %30 %32 10 %31 20 %31 + %32 = OpLabel + OpBranch %27 + %31 = OpLabel + %34 = OpLoad %int %c + %35 = OpLoad %int %c + %36 = OpIAdd %int %35 %34 + OpStore %c %36 + OpBranch %26 + %33 = OpLabel + OpUnreachable + %27 = OpLabel + OpBranch %24 + %26 = OpLabel + OpBranch %23 + %22 = OpLabel + %42 = OpLoad %int %c + OpStore %a %42 + OpBranch %18 + %23 = OpLabel + %45 = OpLoad %int %c + %47 = OpIAdd %int %45 %int_1 + OpStore %c %47 + OpBranch %19 + %19 = OpLabel + OpBranch %16 + %18 = OpLabel + %48 = OpLoad %int %a + %49 = OpAccessChain %_ptr_Uniform_int %o %int_1 + OpStore %49 %48 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag new file mode 100644 index 000000000..ba2f95b23 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/combined-sampler-reuse.vk.asm.frag @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + 
OpEntryPoint Fragment %main "main" %FragColor %vUV + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uTex "uTex" + OpName %uSampler "uSampler" + OpName %vUV "vUV" + OpDecorate %FragColor Location 0 + OpDecorate %uTex DescriptorSet 0 + OpDecorate %uTex Binding 1 + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 0 + OpDecorate %vUV Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_10 = OpTypePointer UniformConstant %10 + %uTex = OpVariable %_ptr_UniformConstant_10 UniformConstant + %14 = OpTypeSampler +%_ptr_UniformConstant_14 = OpTypePointer UniformConstant %14 + %uSampler = OpVariable %_ptr_UniformConstant_14 UniformConstant + %18 = OpTypeSampledImage %10 + %v2float = OpTypeVector %float 2 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %vUV = OpVariable %_ptr_Input_v2float Input + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 + %int_1 = OpConstant %int 1 + %32 = OpConstantComposite %v2int %int_1 %int_1 + %main = OpFunction %void None %3 + %5 = OpLabel + %13 = OpLoad %10 %uTex + %17 = OpLoad %14 %uSampler + %19 = OpSampledImage %18 %13 %17 + %23 = OpLoad %v2float %vUV + %24 = OpImageSampleImplicitLod %v4float %19 %23 + OpStore %FragColor %24 + %28 = OpLoad %v2float %vUV + %33 = OpImageSampleImplicitLod %v4float %19 %28 ConstOffset %32 + %34 = OpLoad %v4float %FragColor + %35 = OpFAdd %v4float %34 %33 + OpStore %FragColor %35 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/complex-name-workarounds.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/complex-name-workarounds.asm.frag new file mode 100644 index 000000000..59a67730a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/complex-name-workarounds.asm.frag @@ -0,0 +1,81 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 47 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %a %b %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %func__vf4_ "fu__nc_" + OpName %a_ "a_" + OpName %func_2_vf4_ "fu__nc_" + OpName %a_2 "___" + OpName %c0 "___" + OpName %a "__" + OpName %b "a" + OpName %param "b" + OpName %c1 "b" + OpName %param_0 "b" + OpName %FragColor "b" + OpDecorate %a Location 0 + OpDecorate %b Location 1 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %9 = OpTypeFunction %v4float %_ptr_Function_v4float +%_ptr_Input_v4float = OpTypePointer Input %v4float + %a = OpVariable %_ptr_Input_v4float Input + %b = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %c0 = OpVariable %_ptr_Function_v4float Function + %param = OpVariable %_ptr_Function_v4float Function + %c1 = OpVariable %_ptr_Function_v4float Function + %param_0 = OpVariable %_ptr_Function_v4float Function + %25 = OpLoad %v4float %a + %27 = OpLoad %v4float %b + %28 = OpFAdd %v4float %25 %27 + %30 = OpLoad %v4float %a + 
OpStore %param %30 + %31 = OpFunctionCall %v4float %func__vf4_ %param + %32 = OpFAdd %v4float %28 %31 + OpStore %c0 %32 + %34 = OpLoad %v4float %a + %35 = OpLoad %v4float %b + %36 = OpFSub %v4float %34 %35 + %38 = OpLoad %v4float %b + OpStore %param_0 %38 + %39 = OpFunctionCall %v4float %func_2_vf4_ %param_0 + %40 = OpFAdd %v4float %36 %39 + OpStore %c1 %40 + %43 = OpLoad %v4float %c0 + OpStore %FragColor %43 + %44 = OpLoad %v4float %c1 + OpStore %FragColor %44 + %45 = OpLoad %v4float %c0 + OpStore %FragColor %45 + %46 = OpLoad %v4float %c1 + OpStore %FragColor %46 + OpReturn + OpFunctionEnd + %func__vf4_ = OpFunction %v4float None %9 + %a_ = OpFunctionParameter %_ptr_Function_v4float + %12 = OpLabel + %16 = OpLoad %v4float %a_ + OpReturnValue %16 + OpFunctionEnd +%func_2_vf4_ = OpFunction %v4float None %9 + %a_2 = OpFunctionParameter %_ptr_Function_v4float + %15 = OpLabel + %19 = OpLoad %v4float %a_2 + OpReturnValue %19 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag new file mode 100644 index 000000000..f33c48617 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/composite-construct-struct-no-swizzle.asm.frag @@ -0,0 +1,51 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 39 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %foo %FooOut + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %foo "foo" + OpName %SwizzleTest "SwizzleTest" + OpMemberName %SwizzleTest 0 "a" + OpMemberName %SwizzleTest 1 "b" + OpName %FooOut "FooOut" + OpDecorate %foo RelaxedPrecision + OpDecorate %foo Location 0 + OpDecorate %12 RelaxedPrecision + OpMemberDecorate %SwizzleTest 0 RelaxedPrecision + OpMemberDecorate %SwizzleTest 1 RelaxedPrecision + OpDecorate %FooOut RelaxedPrecision + OpDecorate %FooOut Location 0 + OpDecorate %34 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float +%_ptr_Input_v2float = OpTypePointer Input %v2float + %foo = OpVariable %_ptr_Input_v2float Input +%SwizzleTest = OpTypeStruct %float %float +%_ptr_Function_SwizzleTest = OpTypePointer Function %SwizzleTest + %uint = OpTypeInt 32 0 +%_ptr_Function_float = OpTypePointer Function %float +%_ptr_Output_float = OpTypePointer Output %float + %FooOut = OpVariable %_ptr_Output_float Output + %int = OpTypeInt 32 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %12 = OpLoad %v2float %foo + %36 = OpCompositeExtract %float %12 0 + %38 = OpCompositeExtract %float %12 1 + %test0 = OpCompositeConstruct %SwizzleTest %36 %38 + %new0 = OpCompositeExtract %float %test0 0 + %new1 = OpCompositeExtract %float %test0 1 + %34 = OpFAdd %float %new0 %new1 + OpStore %FooOut %34 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/default-member-names.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/default-member-names.asm.frag new file mode 100644 index 000000000..4d616fe49 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/default-member-names.asm.frag @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 43 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + 
OpEntryPoint Fragment %2 "main" %3 + OpExecutionMode %2 OriginLowerLeft + OpDecorate %3 Location 0 + %void = OpTypeVoid + %9 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %12 = OpTypeFunction %v4float + %_struct_5 = OpTypeStruct %float + %_struct_6 = OpTypeStruct %float %float %float %float %float %float %float %float %float %float %float %float %_struct_5 +%_ptr_Function__struct_6 = OpTypePointer Function %_struct_6 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Function_float = OpTypePointer Function %float + %int_1 = OpConstant %int 1 + %int_2 = OpConstant %int 2 + %int_3 = OpConstant %int 3 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %3 = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %2 = OpFunction %void None %9 + %22 = OpLabel + %23 = OpVariable %_ptr_Function__struct_6 Function + %24 = OpAccessChain %_ptr_Function_float %23 %int_0 + %25 = OpLoad %float %24 + %26 = OpAccessChain %_ptr_Function_float %23 %int_1 + %27 = OpLoad %float %26 + %28 = OpAccessChain %_ptr_Function_float %23 %int_2 + %29 = OpLoad %float %28 + %30 = OpAccessChain %_ptr_Function_float %23 %int_3 + %31 = OpLoad %float %30 + %32 = OpCompositeConstruct %v4float %25 %27 %29 %31 + OpStore %3 %32 + OpReturn + OpFunctionEnd + %4 = OpFunction %v4float None %12 + %33 = OpLabel + %7 = OpVariable %_ptr_Function__struct_6 Function + %34 = OpAccessChain %_ptr_Function_float %7 %int_0 + %35 = OpLoad %float %34 + %36 = OpAccessChain %_ptr_Function_float %7 %int_1 + %37 = OpLoad %float %36 + %38 = OpAccessChain %_ptr_Function_float %7 %int_2 + %39 = OpLoad %float %38 + %40 = OpAccessChain %_ptr_Function_float %7 %int_3 + %41 = OpLoad %float %40 + %42 = OpCompositeConstruct %v4float %35 %37 %39 %41 + OpReturnValue %42 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/empty-struct.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/empty-struct.asm.frag new file mode 100644 index 000000000..0efd3158c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/empty-struct.asm.frag @@ -0,0 +1,55 @@ +; SPIR-V +; Version: 1.2 +; Generator: Khronos; 0 +; Bound: 43 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %EntryPoint_Main "main" + OpExecutionMode %EntryPoint_Main OriginUpperLeft + OpSource Unknown 100 + OpName %EmptyStructTest "EmptyStructTest" + OpName %GetValue "GetValue" + OpName %GetValue2 "GetValue" + OpName %self "self" + OpName %self2 "self" + OpName %emptyStruct "emptyStruct" + OpName %value "value" + OpName %EntryPoint_Main "EntryPoint_Main" + +%EmptyStructTest = OpTypeStruct +%_ptr_Function_EmptyStructTest = OpTypePointer Function %EmptyStructTest + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %5 = OpTypeFunction %float %_ptr_Function_EmptyStructTest + %6 = OpTypeFunction %float %EmptyStructTest + %void = OpTypeVoid +%_ptr_Function_void = OpTypePointer Function %void + %8 = OpTypeFunction %void %_ptr_Function_EmptyStructTest + %9 = OpTypeFunction %void + %float_0 = OpConstant %float 0 + + %GetValue = OpFunction %float None %5 + %self = OpFunctionParameter %_ptr_Function_EmptyStructTest + %13 = OpLabel + OpReturnValue %float_0 + OpFunctionEnd + + %GetValue2 = OpFunction %float None %6 + %self2 = OpFunctionParameter %EmptyStructTest + %14 = OpLabel + OpReturnValue %float_0 + OpFunctionEnd + +%EntryPoint_Main = OpFunction %void None %9 + %37 = OpLabel + %emptyStruct = OpVariable %_ptr_Function_EmptyStructTest 
Function + %18 = OpVariable %_ptr_Function_EmptyStructTest Function + %value = OpVariable %_ptr_Function_float Function + %value2 = OpCompositeConstruct %EmptyStructTest + %22 = OpFunctionCall %float %GetValue %emptyStruct + %23 = OpFunctionCall %float %GetValue2 %value2 + OpStore %value %22 + OpStore %value %23 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/for-loop-phi-only-continue.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/for-loop-phi-only-continue.asm.frag new file mode 100644 index 000000000..ae84b30b4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/for-loop-phi-only-continue.asm.frag @@ -0,0 +1,48 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 51 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %float_0 = OpConstant %float 0 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %int_16 = OpConstant %int 16 + %bool = OpTypeBool + %float_1 = OpConstant %float 1 + %int_1 = OpConstant %int 1 + %float_2 = OpConstant %float 2 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + OpBranch %14 + %14 = OpLabel + %50 = OpPhi %float %float_0 %5 %25 %15 + %47 = OpPhi %int %int_0 %5 %28 %15 + %22 = OpSLessThan %bool %47 %int_16 + OpLoopMerge %16 %15 None + OpBranchConditional %22 %body1 %16 + %body1 = OpLabel + %25 = OpFAdd %float %50 %float_1 + %28 = OpIAdd %int %47 %int_1 + OpBranch %15 + %15 = OpLabel + OpBranch %14 + %16 = OpLabel + %46 = OpCompositeConstruct %v4float %50 %50 %50 %50 + OpStore %FragColor %46 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/frem.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/frem.asm.frag new file mode 100644 index 000000000..8350c75c0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/frem.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 16 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vA %vB + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vA "vA" + OpName %vB "vB" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %vA RelaxedPrecision + OpDecorate %vA Location 0 + OpDecorate %12 RelaxedPrecision + OpDecorate %vB RelaxedPrecision + OpDecorate %vB Location 1 + OpDecorate %14 RelaxedPrecision + OpDecorate %15 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output +%_ptr_Input_v4float = OpTypePointer Input %v4float + %vA = OpVariable %_ptr_Input_v4float Input + %vB = OpVariable %_ptr_Input_v4float Input + %main = OpFunction %void None %3 + %5 = OpLabel + %12 = OpLoad %v4float %vA + %14 = OpLoad %v4float %vB + %15 = OpFRem %v4float %12 %14 + OpStore %FragColor %15 + OpReturn + OpFunctionEnd diff --git 
a/3rdparty/spirv-cross/shaders/asm/frag/function-overload-alias.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/function-overload-alias.asm.frag new file mode 100644 index 000000000..397aa98ce --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/function-overload-alias.asm.frag @@ -0,0 +1,153 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 76 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %foobar_vf4_ "foo" + OpName %a "foo" + OpName %foobar_vf3_ "foo" + OpName %a_0 "foo" + OpName %foobaz_vf4_ "foo" + OpName %a_1 "foo" + OpName %foobaz_vf2_ "foo" + OpName %a_2 "foo" + OpName %a_3 "foo" + OpName %param "foo" + OpName %b "foo" + OpName %param_0 "foo" + OpName %c "foo" + OpName %param_1 "foo" + OpName %d "foo" + OpName %param_2 "foo" + OpName %FragColor "FragColor" + OpDecorate %foobar_vf4_ RelaxedPrecision + OpDecorate %a RelaxedPrecision + OpDecorate %foobar_vf3_ RelaxedPrecision + OpDecorate %a_0 RelaxedPrecision + OpDecorate %foobaz_vf4_ RelaxedPrecision + OpDecorate %a_1 RelaxedPrecision + OpDecorate %foobaz_vf2_ RelaxedPrecision + OpDecorate %a_2 RelaxedPrecision + OpDecorate %28 RelaxedPrecision + OpDecorate %30 RelaxedPrecision + OpDecorate %31 RelaxedPrecision + OpDecorate %34 RelaxedPrecision + OpDecorate %35 RelaxedPrecision + OpDecorate %36 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %40 RelaxedPrecision + OpDecorate %42 RelaxedPrecision + OpDecorate %43 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %48 RelaxedPrecision + OpDecorate %49 RelaxedPrecision + OpDecorate %a_3 RelaxedPrecision + OpDecorate %55 RelaxedPrecision + OpDecorate %b RelaxedPrecision + OpDecorate %59 RelaxedPrecision + OpDecorate %c RelaxedPrecision + OpDecorate %62 RelaxedPrecision + OpDecorate %d RelaxedPrecision + OpDecorate %66 RelaxedPrecision + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %69 RelaxedPrecision + OpDecorate %70 RelaxedPrecision + OpDecorate %71 RelaxedPrecision + OpDecorate %72 RelaxedPrecision + OpDecorate %73 RelaxedPrecision + OpDecorate %74 RelaxedPrecision + OpDecorate %75 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %9 = OpTypeFunction %v4float %_ptr_Function_v4float + %v3float = OpTypeVector %float 3 +%_ptr_Function_v3float = OpTypePointer Function %v3float + %15 = OpTypeFunction %v4float %_ptr_Function_v3float + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %24 = OpTypeFunction %v4float %_ptr_Function_v2float + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 + %53 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %57 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %64 = OpConstantComposite %v2float %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %a_3 = OpVariable %_ptr_Function_v4float Function + %param = OpVariable %_ptr_Function_v4float Function + %b = OpVariable %_ptr_Function_v4float Function + %param_0 = OpVariable %_ptr_Function_v3float Function + %c = OpVariable 
%_ptr_Function_v4float Function + %param_1 = OpVariable %_ptr_Function_v4float Function + %d = OpVariable %_ptr_Function_v4float Function + %param_2 = OpVariable %_ptr_Function_v2float Function + OpStore %param %53 + %55 = OpFunctionCall %v4float %foobar_vf4_ %param + OpStore %a_3 %55 + OpStore %param_0 %57 + %59 = OpFunctionCall %v4float %foobar_vf3_ %param_0 + OpStore %b %59 + OpStore %param_1 %53 + %62 = OpFunctionCall %v4float %foobaz_vf4_ %param_1 + OpStore %c %62 + OpStore %param_2 %64 + %66 = OpFunctionCall %v4float %foobaz_vf2_ %param_2 + OpStore %d %66 + %69 = OpLoad %v4float %a_3 + %70 = OpLoad %v4float %b + %71 = OpFAdd %v4float %69 %70 + %72 = OpLoad %v4float %c + %73 = OpFAdd %v4float %71 %72 + %74 = OpLoad %v4float %d + %75 = OpFAdd %v4float %73 %74 + OpStore %FragColor %75 + OpReturn + OpFunctionEnd +%foobar_vf4_ = OpFunction %v4float None %9 + %a = OpFunctionParameter %_ptr_Function_v4float + %12 = OpLabel + %28 = OpLoad %v4float %a + %30 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1 + %31 = OpFAdd %v4float %28 %30 + OpReturnValue %31 + OpFunctionEnd +%foobar_vf3_ = OpFunction %v4float None %15 + %a_0 = OpFunctionParameter %_ptr_Function_v3float + %18 = OpLabel + %34 = OpLoad %v3float %a_0 + %35 = OpVectorShuffle %v4float %34 %34 0 1 2 2 + %36 = OpCompositeConstruct %v4float %float_1 %float_1 %float_1 %float_1 + %37 = OpFAdd %v4float %35 %36 + OpReturnValue %37 + OpFunctionEnd +%foobaz_vf4_ = OpFunction %v4float None %9 + %a_1 = OpFunctionParameter %_ptr_Function_v4float + %21 = OpLabel + %40 = OpLoad %v4float %a_1 + %42 = OpCompositeConstruct %v4float %float_2 %float_2 %float_2 %float_2 + %43 = OpFAdd %v4float %40 %42 + OpReturnValue %43 + OpFunctionEnd +%foobaz_vf2_ = OpFunction %v4float None %24 + %a_2 = OpFunctionParameter %_ptr_Function_v2float + %27 = OpLabel + %46 = OpLoad %v2float %a_2 + %47 = OpVectorShuffle %v4float %46 %46 0 1 0 1 + %48 = OpCompositeConstruct %v4float %float_2 %float_2 %float_2 %float_2 + %49 = OpFAdd %v4float %47 %48 + OpReturnValue %49 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag new file mode 100644 index 000000000..75ce80bfd --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/hlsl-sample-cmp-level-zero-cube.asm.frag @@ -0,0 +1,60 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 38 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %pointLightShadowMap "pointLightShadowMap" + OpName %shadowSamplerPCF "shadowSamplerPCF" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %pointLightShadowMap DescriptorSet 0 + OpDecorate %shadowSamplerPCF DescriptorSet 0 + OpDecorate %pointLightShadowMap Binding 0 + OpDecorate %shadowSamplerPCF Binding 1 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %7 = OpTypeFunction %float + %10 = OpTypeImage %float Cube 0 0 0 1 Unknown +%_ptr_UniformConstant_10 = OpTypePointer UniformConstant %10 +%pointLightShadowMap = OpVariable %_ptr_UniformConstant_10 UniformConstant + %14 = OpTypeSampler +%_ptr_UniformConstant_14 = OpTypePointer UniformConstant %14 +%shadowSamplerPCF = OpVariable %_ptr_UniformConstant_14 
UniformConstant + %18 = OpTypeImage %float Cube 1 0 0 1 Unknown + %19 = OpTypeSampledImage %18 + %v3float = OpTypeVector %float 3 + %float_0_1 = OpConstant %float 0.1 + %23 = OpConstantComposite %v3float %float_0_1 %float_0_1 %float_0_1 + %float_0_5 = OpConstant %float 0.5 + %v4float = OpTypeVector %float 4 + %float_0 = OpConstant %float 0 +%_ptr_Output_float = OpTypePointer Output %float +%_entryPointOutput = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %37 = OpFunctionCall %float %_main_ + OpStore %_entryPointOutput %37 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %float None %7 + %9 = OpLabel + %13 = OpLoad %10 %pointLightShadowMap + %17 = OpLoad %14 %shadowSamplerPCF + %20 = OpSampledImage %19 %13 %17 + %26 = OpCompositeExtract %float %23 0 + %27 = OpCompositeExtract %float %23 1 + %28 = OpCompositeExtract %float %23 2 + %29 = OpCompositeConstruct %v4float %26 %27 %28 %float_0_5 + %31 = OpCompositeExtract %float %29 3 + %32 = OpImageSampleDrefExplicitLod %float %20 %29 %31 Lod %float_0 + OpReturnValue %32 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag new file mode 100644 index 000000000..bb0a1ea31 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/hlsl-sample-cmp-level-zero.asm.frag @@ -0,0 +1,115 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 70 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %texCoords_1 %cascadeIndex_1 %fragDepth_1 %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %_main_vf2_f1_f1_ "@main(vf2;f1;f1;" + OpName %texCoords "texCoords" + OpName %cascadeIndex "cascadeIndex" + OpName %fragDepth "fragDepth" + OpName %c "c" + OpName %ShadowMap "ShadowMap" + OpName %ShadowSamplerPCF "ShadowSamplerPCF" + OpName %texCoords_0 "texCoords" + OpName %texCoords_1 "texCoords" + OpName %cascadeIndex_0 "cascadeIndex" + OpName %cascadeIndex_1 "cascadeIndex" + OpName %fragDepth_0 "fragDepth" + OpName %fragDepth_1 "fragDepth" + OpName %_entryPointOutput "@entryPointOutput" + OpName %param "param" + OpName %param_0 "param" + OpName %param_1 "param" + OpDecorate %ShadowMap DescriptorSet 0 + OpDecorate %ShadowSamplerPCF DescriptorSet 0 + OpDecorate %ShadowMap Binding 0 + OpDecorate %ShadowSamplerPCF Binding 1 + OpDecorate %texCoords_1 Location 0 + OpDecorate %cascadeIndex_1 Location 1 + OpDecorate %fragDepth_1 Location 2 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float +%_ptr_Function_float = OpTypePointer Function %float + %v4float = OpTypeVector %float 4 + %11 = OpTypeFunction %v4float %_ptr_Function_v2float %_ptr_Function_float %_ptr_Function_float + %18 = OpTypeImage %float 2D 0 1 0 1 Unknown +%_ptr_UniformConstant_18 = OpTypePointer UniformConstant %18 + %ShadowMap = OpVariable %_ptr_UniformConstant_18 UniformConstant + %22 = OpTypeSampler +%_ptr_UniformConstant_22 = OpTypePointer UniformConstant %22 +%ShadowSamplerPCF = OpVariable %_ptr_UniformConstant_22 UniformConstant + %26 = OpTypeImage %float 2D 1 1 0 1 Unknown + %27 = OpTypeSampledImage %26 + %v3float = OpTypeVector %float 3 + %float_0 = OpConstant %float 0 +%_ptr_Input_v2float = 
OpTypePointer Input %v2float +%texCoords_1 = OpVariable %_ptr_Input_v2float Input +%_ptr_Input_float = OpTypePointer Input %float +%cascadeIndex_1 = OpVariable %_ptr_Input_float Input +%fragDepth_1 = OpVariable %_ptr_Input_float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel +%texCoords_0 = OpVariable %_ptr_Function_v2float Function +%cascadeIndex_0 = OpVariable %_ptr_Function_float Function +%fragDepth_0 = OpVariable %_ptr_Function_float Function + %param = OpVariable %_ptr_Function_v2float Function + %param_0 = OpVariable %_ptr_Function_float Function + %param_1 = OpVariable %_ptr_Function_float Function + %53 = OpLoad %v2float %texCoords_1 + OpStore %texCoords_0 %53 + %57 = OpLoad %float %cascadeIndex_1 + OpStore %cascadeIndex_0 %57 + %60 = OpLoad %float %fragDepth_1 + OpStore %fragDepth_0 %60 + %64 = OpLoad %v2float %texCoords_0 + OpStore %param %64 + %66 = OpLoad %float %cascadeIndex_0 + OpStore %param_0 %66 + %68 = OpLoad %float %fragDepth_0 + OpStore %param_1 %68 + %69 = OpFunctionCall %v4float %_main_vf2_f1_f1_ %param %param_0 %param_1 + OpStore %_entryPointOutput %69 + OpReturn + OpFunctionEnd +%_main_vf2_f1_f1_ = OpFunction %v4float None %11 + %texCoords = OpFunctionParameter %_ptr_Function_v2float +%cascadeIndex = OpFunctionParameter %_ptr_Function_float + %fragDepth = OpFunctionParameter %_ptr_Function_float + %16 = OpLabel + %c = OpVariable %_ptr_Function_float Function + %21 = OpLoad %18 %ShadowMap + %25 = OpLoad %22 %ShadowSamplerPCF + %28 = OpSampledImage %27 %21 %25 + %29 = OpLoad %v2float %texCoords + %30 = OpLoad %float %cascadeIndex + %32 = OpCompositeExtract %float %29 0 + %33 = OpCompositeExtract %float %29 1 + %34 = OpCompositeConstruct %v3float %32 %33 %30 + %35 = OpLoad %float %fragDepth + %36 = OpCompositeExtract %float %34 0 + %37 = OpCompositeExtract %float %34 1 + %38 = OpCompositeExtract %float %34 2 + %39 = OpCompositeConstruct %v4float %36 %37 %38 %35 + %41 = OpCompositeExtract %float %39 3 + %42 = OpImageSampleDrefExplicitLod %float %28 %39 %41 Lod %float_0 + OpStore %c %42 + %43 = OpLoad %float %c + %44 = OpLoad %float %c + %45 = OpLoad %float %c + %46 = OpLoad %float %c + %47 = OpCompositeConstruct %v4float %43 %44 %45 %46 + OpReturnValue %47 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/image-extract-reuse.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/image-extract-reuse.asm.frag new file mode 100644 index 000000000..63c8ab57a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/image-extract-reuse.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 19 +; Schema: 0 + OpCapability Shader + OpCapability ImageQuery + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %Size + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %Size "Size" + OpName %uTexture "uTexture" + OpDecorate %Size Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 +%_ptr_Output_v2int = OpTypePointer Output %v2int + %Size = OpVariable %_ptr_Output_v2int Output + %float = OpTypeFloat 32 + %11 = OpTypeImage %float 2D 0 0 0 1 Unknown + %12 = OpTypeSampledImage %11 +%_ptr_UniformConstant_12 = OpTypePointer UniformConstant %12 + %uTexture = OpVariable 
%_ptr_UniformConstant_12 UniformConstant + %int_0 = OpConstant %int 0 + %int_1 = OpConstant %int 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %15 = OpLoad %12 %uTexture + %17 = OpImage %11 %15 + %18 = OpImageQuerySizeLod %v2int %17 %int_0 + %19 = OpImageQuerySizeLod %v2int %17 %int_1 + %20 = OpIAdd %v2int %18 %19 + OpStore %Size %20 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag b/3rdparty/spirv-cross/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag new file mode 100644 index 000000000..a3d64c09d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/image-fetch-no-sampler.asm.vk.frag @@ -0,0 +1,163 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 113 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %xIn_1 %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %sample_fetch_t21_vi3_ "sample_fetch(t21;vi3;" + OpName %tex "tex" + OpName %UV "UV" + OpName %sample_sampler_t21_vf2_ "sample_sampler(t21;vf2;" + OpName %tex_0 "tex" + OpName %UV_0 "UV" + OpName %_main_vf4_ "@main(vf4;" + OpName %xIn "xIn" + OpName %Sampler "Sampler" + OpName %coord "coord" + OpName %value "value" + OpName %SampledImage "SampledImage" + OpName %param "param" + OpName %param_0 "param" + OpName %param_1 "param" + OpName %param_2 "param" + OpName %xIn_0 "xIn" + OpName %xIn_1 "xIn" + OpName %_entryPointOutput "@entryPointOutput" + OpName %param_3 "param" + OpDecorate %Sampler DescriptorSet 0 + OpDecorate %Sampler Binding 0 + OpDecorate %SampledImage DescriptorSet 0 + OpDecorate %SampledImage Binding 0 + OpDecorate %xIn_1 BuiltIn FragCoord + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %7 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_Function_7 = OpTypePointer Function %7 + %int = OpTypeInt 32 1 + %v3int = OpTypeVector %int 3 +%_ptr_Function_v3int = OpTypePointer Function %v3int + %v4float = OpTypeVector %float 4 + %13 = OpTypeFunction %v4float %_ptr_Function_7 %_ptr_Function_v3int + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %20 = OpTypeFunction %v4float %_ptr_Function_7 %_ptr_Function_v2float +%_ptr_Function_v4float = OpTypePointer Function %v4float + %26 = OpTypeFunction %v4float %_ptr_Function_v4float + %v2int = OpTypeVector %int 2 + %uint = OpTypeInt 32 0 + %uint_2 = OpConstant %uint 2 +%_ptr_Function_int = OpTypePointer Function %int + %43 = OpTypeSampler +%_ptr_UniformConstant_43 = OpTypePointer UniformConstant %43 + %Sampler = OpVariable %_ptr_UniformConstant_43 UniformConstant + %47 = OpTypeSampledImage %7 + %uint_0 = OpConstant %uint 0 +%_ptr_Function_float = OpTypePointer Function %float + %float_1280 = OpConstant %float 1280 + %uint_1 = OpConstant %uint 1 + %float_720 = OpConstant %float 720 + %int_0 = OpConstant %int 0 +%_ptr_UniformConstant_7 = OpTypePointer UniformConstant %7 +%SampledImage = OpVariable %_ptr_UniformConstant_7 UniformConstant +%_ptr_Input_v4float = OpTypePointer Input %v4float + %xIn_1 = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %xIn_0 = OpVariable %_ptr_Function_v4float Function + %param_3 = OpVariable %_ptr_Function_v4float Function + %107 
= OpLoad %v4float %xIn_1 + OpStore %xIn_0 %107 + %111 = OpLoad %v4float %xIn_0 + OpStore %param_3 %111 + %112 = OpFunctionCall %v4float %_main_vf4_ %param_3 + OpStore %_entryPointOutput %112 + OpReturn + OpFunctionEnd +%sample_fetch_t21_vi3_ = OpFunction %v4float None %13 + %tex = OpFunctionParameter %_ptr_Function_7 + %UV = OpFunctionParameter %_ptr_Function_v3int + %17 = OpLabel + %30 = OpLoad %7 %tex + %32 = OpLoad %v3int %UV + %33 = OpVectorShuffle %v2int %32 %32 0 1 + %37 = OpAccessChain %_ptr_Function_int %UV %uint_2 + %38 = OpLoad %int %37 + %39 = OpImageFetch %v4float %30 %33 Lod %38 + OpReturnValue %39 + OpFunctionEnd +%sample_sampler_t21_vf2_ = OpFunction %v4float None %20 + %tex_0 = OpFunctionParameter %_ptr_Function_7 + %UV_0 = OpFunctionParameter %_ptr_Function_v2float + %24 = OpLabel + %42 = OpLoad %7 %tex_0 + %46 = OpLoad %43 %Sampler + %48 = OpSampledImage %47 %42 %46 + %49 = OpLoad %v2float %UV_0 + %50 = OpImageSampleImplicitLod %v4float %48 %49 + OpReturnValue %50 + OpFunctionEnd + %_main_vf4_ = OpFunction %v4float None %26 + %xIn = OpFunctionParameter %_ptr_Function_v4float + %29 = OpLabel + %coord = OpVariable %_ptr_Function_v3int Function + %value = OpVariable %_ptr_Function_v4float Function + %param = OpVariable %_ptr_Function_7 Function + %param_0 = OpVariable %_ptr_Function_v3int Function + %param_1 = OpVariable %_ptr_Function_7 Function + %param_2 = OpVariable %_ptr_Function_v2float Function + %56 = OpAccessChain %_ptr_Function_float %xIn %uint_0 + %57 = OpLoad %float %56 + %59 = OpFMul %float %57 %float_1280 + %60 = OpConvertFToS %int %59 + %62 = OpAccessChain %_ptr_Function_float %xIn %uint_1 + %63 = OpLoad %float %62 + %65 = OpFMul %float %63 %float_720 + %66 = OpConvertFToS %int %65 + %68 = OpCompositeConstruct %v3int %60 %66 %int_0 + OpStore %coord %68 + %73 = OpLoad %7 %SampledImage + OpStore %param %73 + %75 = OpLoad %v3int %coord + OpStore %param_0 %75 + %76 = OpFunctionCall %v4float %sample_fetch_t21_vi3_ %param %param_0 + OpStore %value %76 + %77 = OpLoad %7 %SampledImage + %78 = OpLoad %v3int %coord + %79 = OpVectorShuffle %v2int %78 %78 0 1 + %80 = OpAccessChain %_ptr_Function_int %coord %uint_2 + %81 = OpLoad %int %80 + %82 = OpImageFetch %v4float %77 %79 Lod %81 + %83 = OpLoad %v4float %value + %84 = OpFAdd %v4float %83 %82 + OpStore %value %84 + %86 = OpLoad %7 %SampledImage + OpStore %param_1 %86 + %88 = OpLoad %v4float %xIn + %89 = OpVectorShuffle %v2float %88 %88 0 1 + OpStore %param_2 %89 + %90 = OpFunctionCall %v4float %sample_sampler_t21_vf2_ %param_1 %param_2 + %91 = OpLoad %v4float %value + %92 = OpFAdd %v4float %91 %90 + OpStore %value %92 + %93 = OpLoad %7 %SampledImage + %94 = OpLoad %43 %Sampler + %95 = OpSampledImage %47 %93 %94 + %96 = OpLoad %v4float %xIn + %97 = OpVectorShuffle %v2float %96 %96 0 1 + %98 = OpImageSampleImplicitLod %v4float %95 %97 + %99 = OpLoad %v4float %value + %100 = OpFAdd %v4float %99 %98 + OpStore %value %100 + %101 = OpLoad %v4float %value + OpReturnValue %101 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/image-query-no-sampler.vk.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/image-query-no-sampler.vk.asm.frag new file mode 100644 index 000000000..a232bd489 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/image-query-no-sampler.vk.asm.frag @@ -0,0 +1,57 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + OpCapability ImageQuery + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + 
OpEntryPoint Fragment %main "main" + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %b "b" + OpName %uSampler2D "uSampler2D" + OpName %c "c" + OpName %uSampler2DMS "uSampler2DMS" + OpName %l1 "l1" + OpName %s0 "s0" + OpDecorate %uSampler2D DescriptorSet 0 + OpDecorate %uSampler2D Binding 0 + OpDecorate %uSampler2DMS DescriptorSet 0 + OpDecorate %uSampler2DMS Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 +%_ptr_Function_v2int = OpTypePointer Function %v2int + %float = OpTypeFloat 32 + %11 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_12 = OpTypePointer UniformConstant %11 + %uSampler2D = OpVariable %_ptr_UniformConstant_12 UniformConstant + %int_0 = OpConstant %int 0 + %20 = OpTypeImage %float 2D 0 0 1 1 Unknown +%_ptr_UniformConstant_21 = OpTypePointer UniformConstant %20 +%uSampler2DMS = OpVariable %_ptr_UniformConstant_21 UniformConstant +%_ptr_Function_int = OpTypePointer Function %int + %main = OpFunction %void None %3 + %5 = OpLabel + %b = OpVariable %_ptr_Function_v2int Function + %c = OpVariable %_ptr_Function_v2int Function + %l1 = OpVariable %_ptr_Function_int Function + %s0 = OpVariable %_ptr_Function_int Function + %15 = OpLoad %11 %uSampler2D + %18 = OpImageQuerySizeLod %v2int %15 %int_0 + OpStore %b %18 + %24 = OpLoad %20 %uSampler2DMS + %26 = OpImageQuerySize %v2int %24 + OpStore %c %26 + %29 = OpLoad %11 %uSampler2D + %31 = OpImageQueryLevels %int %29 + OpStore %l1 %31 + %33 = OpLoad %20 %uSampler2DMS + %35 = OpImageQuerySamples %int %33 + OpStore %s0 %35 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/implicit-read-dep-phi.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/implicit-read-dep-phi.asm.frag new file mode 100644 index 000000000..ccdfeef58 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/implicit-read-dep-phi.asm.frag @@ -0,0 +1,81 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 60 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %v0 %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %phi "phi" + OpName %i "i" + OpName %v0 "v0" + OpName %FragColor "FragColor" + OpName %uImage "uImage" + OpDecorate %v0 Location 0 + OpDecorate %FragColor Location 0 + OpDecorate %uImage DescriptorSet 0 + OpDecorate %uImage Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %float_1 = OpConstant %float 1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool + %v4float = OpTypeVector %float 4 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %v0 = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_float = OpTypePointer Input %float + %float_0 = OpConstant %float 0 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %36 = OpTypeImage %float 2D 0 0 0 1 Unknown + %37 = OpTypeSampledImage %36 +%_ptr_UniformConstant_37 = OpTypePointer UniformConstant %37 + %uImage = OpVariable %_ptr_UniformConstant_37 UniformConstant + %v2float = OpTypeVector %float 2 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 + %float_2 = OpConstant %float 2 + %int_1 = OpConstant %int 1 + %float_1_vec = 
OpConstantComposite %v4float %float_1 %float_2 %float_1 %float_2 + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + OpStore %i %int_0 + OpBranch %loop_header + %loop_header = OpLabel + %phi = OpPhi %float %float_1 %5 %phi_plus_2 %continue_block + %tex_phi = OpPhi %v4float %float_1_vec %5 %texture_load_result %continue_block + OpLoopMerge %merge_block %continue_block None + OpBranch %loop_body + %loop_body = OpLabel + OpStore %FragColor %tex_phi + %19 = OpLoad %int %i + %22 = OpSLessThan %bool %19 %int_4 + OpBranchConditional %22 %15 %merge_block + %15 = OpLabel + %26 = OpLoad %int %i + %28 = OpAccessChain %_ptr_Input_float %v0 %26 + %29 = OpLoad %float %28 + %31 = OpFOrdGreaterThan %bool %29 %float_0 + OpBranchConditional %31 %continue_block %merge_block + %continue_block = OpLabel + %40 = OpLoad %37 %uImage + %43 = OpCompositeConstruct %v2float %phi %phi + %texture_load_result = OpImageSampleExplicitLod %v4float %40 %43 Lod %float_0 + %phi_plus_2 = OpFAdd %float %phi %float_2 + %54 = OpLoad %int %i + %56 = OpIAdd %int %54 %int_1 + OpStore %i %56 + OpBranch %loop_header + %merge_block = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/inf-nan-constant-double.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/inf-nan-constant-double.asm.frag new file mode 100644 index 000000000..2d0c18a9d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/inf-nan-constant-double.asm.frag @@ -0,0 +1,41 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 22 +; Schema: 0 + OpCapability Shader + OpCapability Float64 + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vTmp + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vTmp "vTmp" + OpDecorate %FragColor Location 0 + OpDecorate %vTmp Flat + OpDecorate %vTmp Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v3float = OpTypeVector %float 3 +%_ptr_Output_v3float = OpTypePointer Output %v3float + %FragColor = OpVariable %_ptr_Output_v3float Output + %double = OpTypeFloat 64 + %v3double = OpTypeVector %double 3 +%double_0x1p_1024 = OpConstant %double 0x1p+1024 +%double_n0x1p_1024 = OpConstant %double -0x1p+1024 +%double_0x1_8p_1024 = OpConstant %double 0x1.8p+1024 + %15 = OpConstantComposite %v3double %double_0x1p_1024 %double_n0x1p_1024 %double_0x1_8p_1024 +%_ptr_Input_double = OpTypePointer Input %double + %vTmp = OpVariable %_ptr_Input_double Input + %main = OpFunction %void None %3 + %5 = OpLabel + %18 = OpLoad %double %vTmp + %19 = OpCompositeConstruct %v3double %18 %18 %18 + %20 = OpFAdd %v3double %15 %19 + %21 = OpFConvert %v3float %20 + OpStore %FragColor %21 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/inf-nan-constant.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/inf-nan-constant.asm.frag new file mode 100644 index 000000000..40e5d3a89 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/inf-nan-constant.asm.frag @@ -0,0 +1,29 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 14 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpDecorate %FragColor 
Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v3float = OpTypeVector %float 3 +%_ptr_Output_v3float = OpTypePointer Output %v3float + %FragColor = OpVariable %_ptr_Output_v3float Output +%float_0x1p_128 = OpConstant %float 0x1p+128 +%float_n0x1p_128 = OpConstant %float -0x1p+128 +%float_0x1_8p_128 = OpConstant %float 0x1.8p+128 + %13 = OpConstantComposite %v3float %float_0x1p_128 %float_n0x1p_128 %float_0x1_8p_128 + %main = OpFunction %void None %3 + %5 = OpLabel + OpStore %FragColor %13 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/invalidation.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/invalidation.asm.frag new file mode 100644 index 000000000..8e753d50f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/invalidation.asm.frag @@ -0,0 +1,46 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 28 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %4 "main" %v0 %v1 %FragColor + OpExecutionMode %4 OriginUpperLeft + OpSource GLSL 450 + OpName %4 "main" + OpName %a "a" + OpName %v0 "v0" + OpName %b "b" + OpName %v1 "v1" + OpName %FragColor "FragColor" + OpDecorate %v0 Location 0 + OpDecorate %v1 Location 1 + OpDecorate %FragColor Location 0 + %2 = OpTypeVoid + %3 = OpTypeFunction %2 + %float = OpTypeFloat 32 + %pfloat = OpTypePointer Function %float + %9 = OpTypePointer Input %float + %v0 = OpVariable %9 Input + %v1 = OpVariable %9 Input + %25 = OpTypePointer Output %float + %FragColor = OpVariable %25 Output + %4 = OpFunction %2 None %3 + %5 = OpLabel + %a = OpVariable %pfloat Function + %b = OpVariable %pfloat Function + %v0_tmp = OpLoad %float %v0 + %v1_tmp = OpLoad %float %v1 + OpStore %a %v0_tmp + OpStore %b %v1_tmp + + %a_tmp = OpLoad %float %a + %b_tmp = OpLoad %float %b + %res = OpFAdd %float %a_tmp %b_tmp + %res1 = OpFMul %float %res %b_tmp + OpStore %a %v1_tmp + OpStore %FragColor %res1 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/locations-components.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/locations-components.asm.frag new file mode 100644 index 000000000..bf8c6a690 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/locations-components.asm.frag @@ -0,0 +1,102 @@ +; SPIR-V +; Version: 1.0 +; Generator: Wine VKD3D Shader Compiler; 0 +; Bound: 67 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %8 %16 %22 %28 %33 %o0 + OpName %main "main" + OpName %v1 "v1" + OpName %v2 "v2" + OpName %o0 "o0" + OpName %r0 "r0" + OpDecorate %8 Location 1 + OpDecorate %16 Location 1 + OpDecorate %16 Component 2 + OpDecorate %22 Location 2 + OpDecorate %22 Flat + OpDecorate %28 Location 2 + OpDecorate %28 Component 1 + OpDecorate %28 Flat + OpDecorate %33 Location 2 + OpDecorate %33 Component 2 + OpDecorate %33 Flat + OpDecorate %o0 Location 0 + %void = OpTypeVoid + %2 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %8 = OpVariable %_ptr_Input_v2float Input + %v4float = OpTypeVector %float 4 +%_ptr_Private_v4float = OpTypePointer Private %v4float + %v1 = OpVariable %_ptr_Private_v4float Private +%_ptr_Input_float = OpTypePointer Input %float + %16 = OpVariable %_ptr_Input_float Input +%_ptr_Private_float = OpTypePointer Private %float + %uint = OpTypeInt 32 0 + %uint_2 = OpConstant %uint 2 + %22 = OpVariable 
%_ptr_Input_float Input + %v2 = OpVariable %_ptr_Private_v4float Private + %uint_0 = OpConstant %uint 0 +%_ptr_Input_uint = OpTypePointer Input %uint + %28 = OpVariable %_ptr_Input_uint Input + %uint_1 = OpConstant %uint 1 + %33 = OpVariable %_ptr_Input_uint Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %o0 = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %int = OpTypeInt 32 1 +%_ptr_Function_float = OpTypePointer Function %float +%_ptr_Output_float = OpTypePointer Output %float + %main = OpFunction %void None %2 + %4 = OpLabel + %r0 = OpVariable %_ptr_Function_v4float Function + %12 = OpLoad %v2float %8 + %13 = OpLoad %v4float %v1 + %14 = OpVectorShuffle %v4float %13 %12 4 5 2 3 + OpStore %v1 %14 + %17 = OpLoad %float %16 + %21 = OpInBoundsAccessChain %_ptr_Private_float %v1 %uint_2 + OpStore %21 %17 + %24 = OpLoad %float %22 + %26 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_0 + OpStore %26 %24 + %29 = OpLoad %uint %28 + %30 = OpBitcast %float %29 + %32 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_1 + OpStore %32 %30 + %34 = OpLoad %uint %33 + %35 = OpBitcast %float %34 + %36 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_2 + OpStore %36 %35 + %42 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_1 + %43 = OpLoad %float %42 + %44 = OpBitcast %int %43 + %45 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_2 + %46 = OpLoad %float %45 + %47 = OpBitcast %int %46 + %48 = OpIAdd %int %44 %47 + %49 = OpBitcast %float %48 + %51 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_0 + OpStore %51 %49 + %52 = OpInBoundsAccessChain %_ptr_Function_float %r0 %uint_0 + %53 = OpLoad %float %52 + %54 = OpBitcast %uint %53 + %55 = OpConvertUToF %float %54 + %57 = OpInBoundsAccessChain %_ptr_Output_float %o0 %uint_1 + OpStore %57 %55 + %58 = OpInBoundsAccessChain %_ptr_Private_float %v1 %uint_1 + %59 = OpLoad %float %58 + %60 = OpInBoundsAccessChain %_ptr_Private_float %v2 %uint_0 + %61 = OpLoad %float %60 + %62 = OpFAdd %float %59 %61 + %63 = OpInBoundsAccessChain %_ptr_Output_float %o0 %uint_0 + OpStore %63 %62 + %64 = OpLoad %v4float %v1 + %65 = OpLoad %v4float %o0 + %66 = OpVectorShuffle %v4float %65 %64 0 1 6 4 + OpStore %o0 %66 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag new file mode 100644 index 000000000..fa53940b1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/loop-body-dominator-continue-access.asm.frag @@ -0,0 +1,190 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 131 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %fragWorld_1 %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %GetClip2TexMatrix_ "GetClip2TexMatrix(" + OpName %GetCascade_vf3_ "GetCascade(vf3;" + OpName %fragWorldPosition "fragWorldPosition" + OpName %_main_vf3_ "@main(vf3;" + OpName %fragWorld "fragWorld" + OpName %Foo "Foo" + OpMemberName %Foo 0 "lightVP" + OpMemberName %Foo 1 "shadowCascadesNum" + OpMemberName %Foo 2 "test" + OpName %_ "" + OpName %cascadeIndex "cascadeIndex" + OpName %worldToShadowMap "worldToShadowMap" + OpName %fragShadowMapPos "fragShadowMapPos" + OpName %param "param" + OpName %fragWorld_0 "fragWorld" + OpName %fragWorld_1 "fragWorld" 
+ OpName %_entryPointOutput "@entryPointOutput" + OpName %param_0 "param" + OpDecorate %_arr_mat4v4float_uint_64 ArrayStride 64 + OpMemberDecorate %Foo 0 RowMajor + OpMemberDecorate %Foo 0 Offset 0 + OpMemberDecorate %Foo 0 MatrixStride 16 + OpMemberDecorate %Foo 1 Offset 4096 + OpMemberDecorate %Foo 2 Offset 4100 + OpDecorate %Foo Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %fragWorld_1 Location 0 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%mat4v4float = OpTypeMatrix %v4float 4 + %9 = OpTypeFunction %mat4v4float + %v3float = OpTypeVector %float 3 +%_ptr_Function_v3float = OpTypePointer Function %v3float + %int = OpTypeInt 32 1 + %15 = OpTypeFunction %int %_ptr_Function_v3float + %uint = OpTypeInt 32 0 + %uint_64 = OpConstant %uint 64 +%_arr_mat4v4float_uint_64 = OpTypeArray %mat4v4float %uint_64 + %Foo = OpTypeStruct %_arr_mat4v4float_uint_64 %uint %int +%_ptr_Uniform_Foo = OpTypePointer Uniform %Foo + %_ = OpVariable %_ptr_Uniform_Foo Uniform + %int_2 = OpConstant %int 2 +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_0 = OpConstant %int 0 + %bool = OpTypeBool + %float_0_5 = OpConstant %float 0.5 + %float_0 = OpConstant %float 0 + %39 = OpConstantComposite %v4float %float_0_5 %float_0 %float_0 %float_0 + %40 = OpConstantComposite %v4float %float_0 %float_0_5 %float_0 %float_0 + %41 = OpConstantComposite %v4float %float_0 %float_0 %float_0_5 %float_0 + %float_1 = OpConstant %float 1 + %43 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_1 + %44 = OpConstantComposite %mat4v4float %39 %40 %41 %43 + %46 = OpConstantComposite %v4float %float_1 %float_0 %float_0 %float_0 + %47 = OpConstantComposite %v4float %float_0 %float_1 %float_0 %float_0 + %48 = OpConstantComposite %v4float %float_0 %float_0 %float_1 %float_0 + %49 = OpConstantComposite %mat4v4float %46 %47 %48 %43 +%_ptr_Function_uint = OpTypePointer Function %uint + %uint_0 = OpConstant %uint 0 + %int_1 = OpConstant %int 1 +%_ptr_Uniform_uint = OpTypePointer Uniform %uint +%_ptr_Function_mat4v4float = OpTypePointer Function %mat4v4float +%_ptr_Uniform_mat4v4float = OpTypePointer Uniform %mat4v4float +%_ptr_Function_v4float = OpTypePointer Function %v4float + %uint_2 = OpConstant %uint 2 +%_ptr_Function_float = OpTypePointer Function %float + %uint_1 = OpConstant %uint 1 + %int_n1 = OpConstant %int -1 +%_ptr_Input_v3float = OpTypePointer Input %v3float +%fragWorld_1 = OpVariable %_ptr_Input_v3float Input +%_ptr_Output_int = OpTypePointer Output %int +%_entryPointOutput = OpVariable %_ptr_Output_int Output + %main = OpFunction %void None %3 + %5 = OpLabel +%fragWorld_0 = OpVariable %_ptr_Function_v3float Function + %param_0 = OpVariable %_ptr_Function_v3float Function + %125 = OpLoad %v3float %fragWorld_1 + OpStore %fragWorld_0 %125 + %129 = OpLoad %v3float %fragWorld_0 + OpStore %param_0 %129 + %130 = OpFunctionCall %int %_main_vf3_ %param_0 + OpStore %_entryPointOutput %130 + OpReturn + OpFunctionEnd +%GetClip2TexMatrix_ = OpFunction %mat4v4float None %9 + %11 = OpLabel + %30 = OpAccessChain %_ptr_Uniform_int %_ %int_2 + %31 = OpLoad %int %30 + %34 = OpIEqual %bool %31 %int_0 + OpSelectionMerge %36 None + OpBranchConditional %34 %35 %36 + %35 = OpLabel + OpReturnValue %44 + %36 = OpLabel + OpReturnValue %49 + OpFunctionEnd +%GetCascade_vf3_ = OpFunction %int None %15 +%fragWorldPosition = OpFunctionParameter %_ptr_Function_v3float + %18 = OpLabel +%cascadeIndex = 
OpVariable %_ptr_Function_uint Function +%worldToShadowMap = OpVariable %_ptr_Function_mat4v4float Function +%fragShadowMapPos = OpVariable %_ptr_Function_v4float Function + OpStore %cascadeIndex %uint_0 + OpBranch %55 + %55 = OpLabel + OpLoopMerge %57 %58 Unroll + OpBranch %59 + %59 = OpLabel + %60 = OpLoad %uint %cascadeIndex + %63 = OpAccessChain %_ptr_Uniform_uint %_ %int_1 + %64 = OpLoad %uint %63 + %65 = OpULessThan %bool %60 %64 + OpBranchConditional %65 %56 %57 + %56 = OpLabel + %68 = OpFunctionCall %mat4v4float %GetClip2TexMatrix_ + %69 = OpLoad %uint %cascadeIndex + %71 = OpAccessChain %_ptr_Uniform_mat4v4float %_ %int_0 %69 + %72 = OpLoad %mat4v4float %71 + %73 = OpMatrixTimesMatrix %mat4v4float %68 %72 + OpStore %worldToShadowMap %73 + %76 = OpLoad %mat4v4float %worldToShadowMap + %77 = OpLoad %v3float %fragWorldPosition + %78 = OpCompositeExtract %float %77 0 + %79 = OpCompositeExtract %float %77 1 + %80 = OpCompositeExtract %float %77 2 + %81 = OpCompositeConstruct %v4float %78 %79 %80 %float_1 + %82 = OpMatrixTimesVector %v4float %76 %81 + OpStore %fragShadowMapPos %82 + %85 = OpAccessChain %_ptr_Function_float %fragShadowMapPos %uint_2 + %86 = OpLoad %float %85 + %87 = OpFOrdGreaterThanEqual %bool %86 %float_0 + %88 = OpAccessChain %_ptr_Function_float %fragShadowMapPos %uint_2 + %89 = OpLoad %float %88 + %90 = OpFOrdLessThanEqual %bool %89 %float_1 + %91 = OpLogicalAnd %bool %87 %90 + %92 = OpAccessChain %_ptr_Function_float %fragShadowMapPos %uint_0 + %93 = OpLoad %float %92 + %95 = OpAccessChain %_ptr_Function_float %fragShadowMapPos %uint_1 + %96 = OpLoad %float %95 + %97 = OpExtInst %float %1 FMax %93 %96 + %98 = OpFOrdLessThanEqual %bool %97 %float_1 + %99 = OpLogicalAnd %bool %91 %98 + %100 = OpAccessChain %_ptr_Function_float %fragShadowMapPos %uint_0 + %101 = OpLoad %float %100 + %102 = OpAccessChain %_ptr_Function_float %fragShadowMapPos %uint_1 + %103 = OpLoad %float %102 + %104 = OpExtInst %float %1 FMin %101 %103 + %105 = OpFOrdGreaterThanEqual %bool %104 %float_0 + %106 = OpLogicalAnd %bool %99 %105 + OpSelectionMerge %108 None + OpBranchConditional %106 %107 %108 + %107 = OpLabel + %109 = OpLoad %uint %cascadeIndex + %110 = OpBitcast %int %109 + OpReturnValue %110 + %108 = OpLabel + OpBranch %58 + %58 = OpLabel + %112 = OpLoad %uint %cascadeIndex + %113 = OpIAdd %uint %112 %int_1 + OpStore %cascadeIndex %113 + OpBranch %55 + %57 = OpLabel + OpReturnValue %int_n1 + OpFunctionEnd + %_main_vf3_ = OpFunction %int None %15 + %fragWorld = OpFunctionParameter %_ptr_Function_v3float + %21 = OpLabel + %param = OpVariable %_ptr_Function_v3float Function + %118 = OpLoad %v3float %fragWorld + OpStore %param %118 + %119 = OpFunctionCall %int %GetCascade_vf3_ %param + OpReturnValue %119 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/loop-header-to-continue.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/loop-header-to-continue.asm.frag new file mode 100644 index 000000000..54807d911 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/loop-header-to-continue.asm.frag @@ -0,0 +1,132 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 279 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %IN_p %IN_uv %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %Params "Params" + OpMemberName %Params 0 "TextureSize" + OpMemberName %Params 1 "Params1" + OpMemberName %Params 
2 "Params2" + OpMemberName %Params 3 "Params3" + OpMemberName %Params 4 "Params4" + OpMemberName %Params 5 "Bloom" + OpName %CB1 "CB1" + OpMemberName %CB1 0 "CB1" + OpName %_ "" + OpName %mapSampler "mapSampler" + OpName %mapTexture "mapTexture" + OpName %IN_p "IN.p" + OpName %IN_uv "IN.uv" + OpName %_entryPointOutput "@entryPointOutput" + OpMemberDecorate %Params 0 Offset 0 + OpMemberDecorate %Params 1 Offset 16 + OpMemberDecorate %Params 2 Offset 32 + OpMemberDecorate %Params 3 Offset 48 + OpMemberDecorate %Params 4 Offset 64 + OpMemberDecorate %Params 5 Offset 80 + OpMemberDecorate %CB1 0 Offset 0 + OpDecorate %CB1 Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 1 + OpDecorate %mapSampler DescriptorSet 1 + OpDecorate %mapSampler Binding 2 + OpDecorate %mapTexture DescriptorSet 1 + OpDecorate %mapTexture Binding 2 + OpDecorate %IN_p BuiltIn FragCoord + OpDecorate %IN_uv Location 0 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %9 = OpTypeSampler + %11 = OpTypeImage %float 2D 0 0 0 1 Unknown + %v4float = OpTypeVector %float 4 +%float_0_222222 = OpConstant %float 0.222222 + %33 = OpTypeSampledImage %11 + %uint = OpTypeInt 32 0 + %float_80 = OpConstant %float 80 +%float_0_0008 = OpConstant %float 0.0008 +%float_8en05 = OpConstant %float 8e-05 +%float_0_008 = OpConstant %float 0.008 + %float_0 = OpConstant %float 0 + %int = OpTypeInt 32 1 + %int_n3 = OpConstant %int -3 + %int_3 = OpConstant %int 3 + %bool = OpTypeBool + %float_1 = OpConstant %float 1 + %int_1 = OpConstant %int 1 + %Params = OpTypeStruct %v4float %v4float %v4float %v4float %v4float %v4float + %CB1 = OpTypeStruct %Params +%_ptr_Uniform_CB1 = OpTypePointer Uniform %CB1 + %_ = OpVariable %_ptr_Uniform_CB1 Uniform + %int_0 = OpConstant %int 0 + %uint_3 = OpConstant %uint 3 +%_ptr_Uniform_float = OpTypePointer Uniform %float +%_ptr_UniformConstant_9 = OpTypePointer UniformConstant %9 + %mapSampler = OpVariable %_ptr_UniformConstant_9 UniformConstant +%_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 + %mapTexture = OpVariable %_ptr_UniformConstant_11 UniformConstant +%_ptr_Input_v4float = OpTypePointer Input %v4float + %IN_p = OpVariable %_ptr_Input_v4float Input +%_ptr_Input_v2float = OpTypePointer Input %v2float + %IN_uv = OpVariable %_ptr_Input_v2float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %158 = OpLoad %v2float %IN_uv + %178 = OpAccessChain %_ptr_Uniform_float %_ %int_0 %int_0 %uint_3 + %179 = OpLoad %float %178 + %180 = OpCompositeConstruct %v2float %float_0 %179 + %184 = OpLoad %9 %mapSampler + %185 = OpLoad %11 %mapTexture + %204 = OpSampledImage %33 %185 %184 + %206 = OpImageSampleImplicitLod %v4float %204 %158 + %207 = OpCompositeExtract %float %206 1 + %209 = OpFMul %float %207 %float_80 + %210 = OpFMul %float %209 %float_0_0008 + %211 = OpExtInst %float %1 FClamp %210 %float_8en05 %float_0_008 + OpBranch %212 + %212 = OpLabel + %276 = OpPhi %float %float_0 %5 %252 %218 + %277 = OpPhi %float %float_0 %5 %255 %218 + %278 = OpPhi %int %int_n3 %5 %257 %218 + %217 = OpSLessThanEqual %bool %278 %int_3 + OpLoopMerge %213 %218 None + OpBranchConditional %217 %218 %213 + %218 = OpLabel + %220 = OpConvertSToF %float %278 + %222 = OpFNegate %float %220 + %224 = OpFMul %float %222 %220 + %226 = OpFMul %float %224 %float_0_222222 + %227 = OpExtInst %float %1 Exp %226 + 
%230 = OpSampledImage %33 %185 %184 + %234 = OpVectorTimesScalar %v2float %180 %220 + %235 = OpFAdd %v2float %158 %234 + %236 = OpImageSampleImplicitLod %v4float %230 %235 + %273 = OpCompositeExtract %float %236 1 + %241 = OpFSub %float %273 %207 + %242 = OpExtInst %float %1 FAbs %241 + %244 = OpFOrdLessThan %bool %242 %211 + %245 = OpSelect %float %244 %float_1 %float_0 + %246 = OpFMul %float %227 %245 + %275 = OpCompositeExtract %float %236 0 + %250 = OpFMul %float %275 %246 + %252 = OpFAdd %float %276 %250 + %255 = OpFAdd %float %277 %246 + %257 = OpIAdd %int %278 %int_1 + OpBranch %212 + %213 = OpLabel + %260 = OpFDiv %float %276 %277 + %190 = OpCompositeConstruct %v4float %260 %207 %float_0 %float_1 + OpStore %_entryPointOutput %190 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/loop-merge-to-continue.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/loop-merge-to-continue.asm.frag new file mode 100644 index 000000000..f2acc4360 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/loop-merge-to-continue.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 51 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %v0 + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %i "i" + OpName %j "j" + OpName %v0 "v0" + OpDecorate %FragColor Location 0 + OpDecorate %v0 Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %float_1 = OpConstant %float 1 + %11 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool +%_ptr_Input_v4float = OpTypePointer Input %v4float + %v0 = OpVariable %_ptr_Input_v4float Input + %int_3 = OpConstant %int 3 +%_ptr_Input_float = OpTypePointer Input %float + %int_1 = OpConstant %int 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + %j = OpVariable %_ptr_Function_int Function + OpStore %FragColor %11 + OpStore %i %int_0 + OpBranch %16 + %16 = OpLabel + OpLoopMerge %18 %19 None + OpBranch %20 + %20 = OpLabel + %21 = OpLoad %int %i + %24 = OpSLessThan %bool %21 %int_4 + OpBranchConditional %24 %17 %18 + %17 = OpLabel + OpStore %j %int_0 + OpBranch %26 + %26 = OpLabel + OpLoopMerge %19 %29 None + OpBranch %30 + %30 = OpLabel + %31 = OpLoad %int %j + %32 = OpSLessThan %bool %31 %int_4 + OpBranchConditional %32 %27 %19 + %27 = OpLabel + %35 = OpLoad %int %i + %36 = OpLoad %int %j + %37 = OpIAdd %int %35 %36 + %39 = OpBitwiseAnd %int %37 %int_3 + %41 = OpAccessChain %_ptr_Input_float %v0 %39 + %42 = OpLoad %float %41 + %43 = OpLoad %v4float %FragColor + %44 = OpCompositeConstruct %v4float %42 %42 %42 %42 + %45 = OpFAdd %v4float %43 %44 + OpStore %FragColor %45 + OpBranch %29 + %29 = OpLabel + %46 = OpLoad %int %j + %48 = OpIAdd %int %46 %int_1 + OpStore %j %48 + OpBranch %26 + %19 = OpLabel + %49 = OpLoad %int %i + %50 = OpIAdd %int %49 %int_1 + OpStore %i %50 + OpBranch %16 + %18 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/lut-promotion-initializer.asm.frag 
b/3rdparty/spirv-cross/shaders/asm/frag/lut-promotion-initializer.asm.frag new file mode 100644 index 000000000..320e5ebfb --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/lut-promotion-initializer.asm.frag @@ -0,0 +1,195 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 111 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %index + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %index "index" + OpName %indexable "indexable" + OpName %indexable_0 "indexable" + OpName %indexable_1 "indexable" + OpName %foo "foo" + OpName %foobar "foobar" + OpName %baz "baz" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %index RelaxedPrecision + OpDecorate %index Flat + OpDecorate %index Location 0 + OpDecorate %20 RelaxedPrecision + OpDecorate %25 RelaxedPrecision + OpDecorate %26 RelaxedPrecision + OpDecorate %32 RelaxedPrecision + OpDecorate %34 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %38 RelaxedPrecision + OpDecorate %39 RelaxedPrecision + OpDecorate %41 RelaxedPrecision + OpDecorate %42 RelaxedPrecision + OpDecorate %45 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %foo RelaxedPrecision + OpDecorate %61 RelaxedPrecision + OpDecorate %66 RelaxedPrecision + OpDecorate %68 RelaxedPrecision + OpDecorate %71 RelaxedPrecision + OpDecorate %72 RelaxedPrecision + OpDecorate %73 RelaxedPrecision + OpDecorate %75 RelaxedPrecision + OpDecorate %76 RelaxedPrecision + OpDecorate %79 RelaxedPrecision + OpDecorate %80 RelaxedPrecision + OpDecorate %81 RelaxedPrecision + OpDecorate %foobar RelaxedPrecision + OpDecorate %83 RelaxedPrecision + OpDecorate %90 RelaxedPrecision + OpDecorate %91 RelaxedPrecision + OpDecorate %93 RelaxedPrecision + OpDecorate %94 RelaxedPrecision + OpDecorate %95 RelaxedPrecision + OpDecorate %baz RelaxedPrecision + OpDecorate %105 RelaxedPrecision + OpDecorate %106 RelaxedPrecision + OpDecorate %108 RelaxedPrecision + OpDecorate %109 RelaxedPrecision + OpDecorate %110 RelaxedPrecision + OpDecorate %16 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %uint = OpTypeInt 32 0 + %uint_16 = OpConstant %uint 16 +%_arr_float_uint_16 = OpTypeArray %float %uint_16 + %float_1 = OpConstant %float 1 + %float_2 = OpConstant %float 2 + %float_3 = OpConstant %float 3 + %float_4 = OpConstant %float 4 + %16 = OpConstantComposite %_arr_float_uint_16 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 %float_1 %float_2 %float_3 %float_4 + %int = OpTypeInt 32 1 +%_ptr_Input_int = OpTypePointer Input %int + %index = OpVariable %_ptr_Input_int Input +%_ptr_Function__arr_float_uint_16 = OpTypePointer Function %_arr_float_uint_16 +%_ptr_Function_float = OpTypePointer Function %float + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %int_1 = OpConstant %int 1 + %v4float = OpTypeVector %float 4 + %uint_4 = OpConstant %uint 4 +%_arr_v4float_uint_4 = OpTypeArray %v4float %uint_4 +%_ptr_Function__arr_v4float_uint_4 = OpTypePointer Function %_arr_v4float_uint_4 + %float_0 = OpConstant %float 0 + %54 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 + %55 = 
OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %float_8 = OpConstant %float 8 + %57 = OpConstantComposite %v4float %float_8 %float_8 %float_8 %float_8 + %float_5 = OpConstant %float 5 + %59 = OpConstantComposite %v4float %float_5 %float_5 %float_5 %float_5 + %60 = OpConstantComposite %_arr_v4float_uint_4 %54 %55 %57 %59 + %int_30 = OpConstant %int 30 + %int_3 = OpConstant %int 3 + %uint_1 = OpConstant %uint 1 + %uint_0 = OpConstant %uint 0 + %float_20 = OpConstant %float 20 + %uint_2 = OpConstant %uint 2 + %97 = OpConstantComposite %v4float %float_20 %float_20 %float_20 %float_20 + %float_30 = OpConstant %float 30 + %99 = OpConstantComposite %v4float %float_30 %float_30 %float_30 %float_30 + %float_50 = OpConstant %float 50 + %101 = OpConstantComposite %v4float %float_50 %float_50 %float_50 %float_50 + %float_60 = OpConstant %float 60 + %103 = OpConstantComposite %v4float %float_60 %float_60 %float_60 %float_60 + %104 = OpConstantComposite %_arr_v4float_uint_4 %97 %99 %101 %103 + %main = OpFunction %void None %3 + %5 = OpLabel + %indexable = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 +%indexable_0 = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 +%indexable_1 = OpVariable %_ptr_Function__arr_float_uint_16 Function %16 + %foo = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %foobar = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %baz = OpVariable %_ptr_Function__arr_v4float_uint_4 Function %60 + %20 = OpLoad %int %index + %24 = OpAccessChain %_ptr_Function_float %indexable %20 + %25 = OpLoad %float %24 + OpStore %FragColor %25 + %26 = OpLoad %int %index + %29 = OpSLessThan %bool %26 %int_10 + OpSelectionMerge %31 None + OpBranchConditional %29 %30 %40 + %30 = OpLabel + %32 = OpLoad %int %index + %34 = OpBitwiseXor %int %32 %int_1 + %36 = OpAccessChain %_ptr_Function_float %indexable_0 %34 + %37 = OpLoad %float %36 + %38 = OpLoad %float %FragColor + %39 = OpFAdd %float %38 %37 + OpStore %FragColor %39 + OpBranch %31 + %40 = OpLabel + %41 = OpLoad %int %index + %42 = OpBitwiseAnd %int %41 %int_1 + %44 = OpAccessChain %_ptr_Function_float %indexable_1 %42 + %45 = OpLoad %float %44 + %46 = OpLoad %float %FragColor + %47 = OpFAdd %float %46 %45 + OpStore %FragColor %47 + OpBranch %31 + %31 = OpLabel + %61 = OpLoad %int %index + %63 = OpSGreaterThan %bool %61 %int_30 + OpSelectionMerge %65 None + OpBranchConditional %63 %64 %74 + %64 = OpLabel + %66 = OpLoad %int %index + %68 = OpBitwiseAnd %int %66 %int_3 + %70 = OpAccessChain %_ptr_Function_float %foo %68 %uint_1 + %71 = OpLoad %float %70 + %72 = OpLoad %float %FragColor + %73 = OpFAdd %float %72 %71 + OpStore %FragColor %73 + OpBranch %65 + %74 = OpLabel + %75 = OpLoad %int %index + %76 = OpBitwiseAnd %int %75 %int_1 + %78 = OpAccessChain %_ptr_Function_float %foo %76 %uint_0 + %79 = OpLoad %float %78 + %80 = OpLoad %float %FragColor + %81 = OpFAdd %float %80 %79 + OpStore %FragColor %81 + OpBranch %65 + %65 = OpLabel + %83 = OpLoad %int %index + %84 = OpSGreaterThan %bool %83 %int_30 + OpSelectionMerge %86 None + OpBranchConditional %84 %85 %86 + %85 = OpLabel + %89 = OpAccessChain %_ptr_Function_float %foobar %int_1 %uint_2 + OpStore %89 %float_20 + OpBranch %86 + %86 = OpLabel + %90 = OpLoad %int %index + %91 = OpBitwiseAnd %int %90 %int_3 + %92 = OpAccessChain %_ptr_Function_float %foobar %91 %uint_2 + %93 = OpLoad %float %92 + %94 = OpLoad %float %FragColor + %95 = OpFAdd %float %94 %93 + OpStore %FragColor %95 + OpStore %baz %104 + %105 = OpLoad %int %index + %106 
= OpBitwiseAnd %int %105 %int_3 + %107 = OpAccessChain %_ptr_Function_float %baz %106 %uint_2 + %108 = OpLoad %float %107 + %109 = OpLoad %float %FragColor + %110 = OpFAdd %float %109 %108 + OpStore %FragColor %110 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/multi-for-loop-init.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/multi-for-loop-init.asm.frag new file mode 100644 index 000000000..d74f7ce56 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/multi-for-loop-init.asm.frag @@ -0,0 +1,111 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 52 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %counter %ucounter + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %i "i" + OpName %j "j" + OpName %counter "counter" + OpName %ucounter "ucounter" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %i RelaxedPrecision + OpDecorate %j RelaxedPrecision + OpDecorate %23 RelaxedPrecision + OpDecorate %27 RelaxedPrecision + OpDecorate %31 RelaxedPrecision + OpDecorate %32 RelaxedPrecision + OpDecorate %33 RelaxedPrecision + OpDecorate %34 RelaxedPrecision + OpDecorate %35 RelaxedPrecision + OpDecorate %36 RelaxedPrecision + OpDecorate %37 RelaxedPrecision + OpDecorate %38 RelaxedPrecision + OpDecorate %39 RelaxedPrecision + OpDecorate %40 RelaxedPrecision + OpDecorate %counter RelaxedPrecision + OpDecorate %counter Flat + OpDecorate %counter Location 0 + OpDecorate %43 RelaxedPrecision + OpDecorate %44 RelaxedPrecision + OpDecorate %45 RelaxedPrecision + OpDecorate %46 RelaxedPrecision + OpDecorate %47 RelaxedPrecision + OpDecorate %48 RelaxedPrecision + OpDecorate %ucounter RelaxedPrecision + OpDecorate %ucounter Flat + OpDecorate %ucounter Location 1 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %float_0 = OpConstant %float 0 + %11 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %int_0 = OpConstant %int 0 + %int_1 = OpConstant %uint 1 + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %int_20 = OpConstant %uint 20 +%_ptr_Input_int = OpTypePointer Input %int + %counter = OpVariable %_ptr_Input_int Input +%_ptr_Input_uint = OpTypePointer Input %uint + %ucounter = OpVariable %_ptr_Input_uint Input + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + %j = OpVariable %_ptr_Function_uint Function + OpStore %FragColor %11 + OpStore %i %int_0 + OpStore %j %int_1 + OpBranch %18 + %18 = OpLabel + OpLoopMerge %20 %21 None + OpBranch %22 + %22 = OpLabel + %23 = OpLoad %int %i + %26 = OpSLessThan %bool %23 %int_10 + %27 = OpLoad %uint %j + %29 = OpSLessThan %bool %27 %int_20 + %30 = OpLogicalAnd %bool %26 %29 + OpBranchConditional %30 %19 %20 + %19 = OpLabel + %31 = OpLoad %int %i + %32 = OpConvertSToF %float %31 + %33 = OpCompositeConstruct %v4float %32 %32 %32 %32 + %34 = OpLoad %v4float %FragColor + %35 = OpFAdd %v4float %34 %33 + OpStore %FragColor %35 + %36 = OpLoad %uint %j + %37 = OpConvertUToF %float %36 + %38 = 
OpCompositeConstruct %v4float %37 %37 %37 %37 + %39 = OpLoad %v4float %FragColor + %40 = OpFAdd %v4float %39 %38 + OpStore %FragColor %40 + OpBranch %21 + %21 = OpLabel + %43 = OpLoad %int %counter + %44 = OpLoad %int %i + %45 = OpIAdd %int %44 %43 + OpStore %i %45 + %46 = OpLoad %int %counter + %47 = OpLoad %uint %j + %48 = OpIAdd %uint %47 %46 + OpStore %j %48 + OpBranch %18 + %20 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/op-constant-null.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/op-constant-null.asm.frag new file mode 100644 index 000000000..61d2e579c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/op-constant-null.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 45 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %a "a" + OpName %b "b" + OpName %c "c" + OpName %D "D" + OpMemberName %D 0 "a" + OpMemberName %D 1 "b" + OpName %d "d" + OpName %e "e" + OpName %FragColor "FragColor" + OpDecorate %a RelaxedPrecision + OpDecorate %b RelaxedPrecision + OpDecorate %c RelaxedPrecision + OpMemberDecorate %D 0 RelaxedPrecision + OpMemberDecorate %D 1 RelaxedPrecision + OpDecorate %e RelaxedPrecision + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %44 RelaxedPrecision + OpDecorate %float_1 RelaxedPrecision + OpDecorate %14 RelaxedPrecision + OpDecorate %23 RelaxedPrecision + OpDecorate %41 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %float_1 = OpConstantNull %float + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %float_2 = OpConstantNull %float + %14 = OpConstantNull %v4float + %v3float = OpTypeVector %float 3 +%mat2v3float = OpTypeMatrix %v3float 2 +%_ptr_Function_mat2v3float = OpTypePointer Function %mat2v3float + %float_4 = OpConstantNull %float + %20 = OpConstantNull %v3float + %float_5 = OpConstantNull %float + %22 = OpConstantNull %v3float + %23 = OpConstantNull %mat2v3float + %D = OpTypeStruct %v4float %float +%_ptr_Function_D = OpTypePointer Function %D + %27 = OpConstantNull %D + %uint = OpTypeInt 32 0 + %uint_4 = OpConstant %uint 4 +%_arr_v4float_uint_4 = OpTypeArray %v4float %uint_4 +%_ptr_Function__arr_v4float_uint_4 = OpTypePointer Function %_arr_v4float_uint_4 + %float_10 = OpConstantNull %float + %34 = OpConstantNull %v4float + %float_11 = OpConstantNull %float + %36 = OpConstantNull %v4float + %float_12 = OpConstantNull %float + %38 = OpConstantNull %v4float + %float_13 = OpConstantNull %float + %40 = OpConstantNull %v4float + %41 = OpConstantNull %_arr_v4float_uint_4 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %a = OpVariable %_ptr_Function_float Function + %b = OpVariable %_ptr_Function_v4float Function + %c = OpVariable %_ptr_Function_mat2v3float Function + %d = OpVariable %_ptr_Function_D Function + %e = OpVariable %_ptr_Function__arr_v4float_uint_4 Function + OpStore %a %float_1 + OpStore %b %14 + OpStore %c %23 + OpStore %d %27 + OpStore %e %41 + %44 = OpLoad %float %a + OpStore %FragColor %44 + OpReturn + OpFunctionEnd diff --git 
a/3rdparty/spirv-cross/shaders/asm/frag/op-phi-swap-continue-block.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/op-phi-swap-continue-block.asm.frag new file mode 100644 index 000000000..afae32519 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/op-phi-swap-continue-block.asm.frag @@ -0,0 +1,69 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 7 +; Bound: 55 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %UBO "UBO" + OpMemberName %UBO 0 "uCount" + OpMemberName %UBO 1 "uJ" + OpMemberName %UBO 2 "uK" + OpName %_ "" + OpName %FragColor "FragColor" + OpMemberDecorate %UBO 0 Offset 0 + OpMemberDecorate %UBO 1 Offset 4 + OpMemberDecorate %UBO 2 Offset 8 + OpDecorate %UBO Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_ Binding 0 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %int = OpTypeInt 32 1 + %UBO = OpTypeStruct %int %int %int +%_ptr_Uniform_UBO = OpTypePointer Uniform %UBO + %_ = OpVariable %_ptr_Uniform_UBO Uniform + %int_1 = OpConstant %int 1 +%_ptr_Uniform_int = OpTypePointer Uniform %int + %int_2 = OpConstant %int 2 + %int_0 = OpConstant %int 0 + %bool = OpTypeBool + %float = OpTypeFloat 32 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpAccessChain %_ptr_Uniform_int %_ %int_1 + %15 = OpLoad %int %14 + %18 = OpAccessChain %_ptr_Uniform_int %_ %int_2 + %19 = OpLoad %int %18 + OpBranch %22 + %22 = OpLabel + %54 = OpPhi %int %19 %5 %53 %23 + %53 = OpPhi %int %15 %5 %54 %23 + %52 = OpPhi %int %int_0 %5 %37 %23 + %28 = OpAccessChain %_ptr_Uniform_int %_ %int_0 + %29 = OpLoad %int %28 + %31 = OpSLessThan %bool %52 %29 + OpLoopMerge %24 %23 None + OpBranchConditional %31 %inbetween %24 + %inbetween = OpLabel + OpBranch %23 + %23 = OpLabel + %37 = OpIAdd %int %52 %int_1 + OpBranch %22 + %24 = OpLabel + %43 = OpISub %int %53 %54 + %44 = OpConvertSToF %float %43 + %49 = OpIMul %int %15 %19 + %50 = OpConvertSToF %float %49 + %51 = OpFMul %float %44 %50 + OpStore %FragColor %51 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/pass-by-value.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/pass-by-value.asm.frag new file mode 100644 index 000000000..083c85d9b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/pass-by-value.asm.frag @@ -0,0 +1,51 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 32 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %add_value_f1_f1_ "add_value(f1;f1;" + OpName %v "v" + OpName %w "w" + OpName %FragColor "FragColor" + OpName %Registers "Registers" + OpMemberName %Registers 0 "foo" + OpName %registers "registers" + OpDecorate %FragColor Location 0 + OpMemberDecorate %Registers 0 Offset 0 + OpDecorate %Registers Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 +%_ptr_Function_float = OpTypePointer Function %float + %8 = OpTypeFunction %float %float %float +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %float_10 = OpConstant 
%float 10 + %Registers = OpTypeStruct %float +%_ptr_PushConstant_Registers = OpTypePointer PushConstant %Registers + %registers = OpVariable %_ptr_PushConstant_Registers PushConstant + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_PushConstant_float = OpTypePointer PushConstant %float + %main = OpFunction %void None %3 + %5 = OpLabel + %29 = OpAccessChain %_ptr_PushConstant_float %registers %int_0 + %30 = OpLoad %float %29 + %31 = OpFunctionCall %float %add_value_f1_f1_ %float_10 %30 + OpStore %FragColor %31 + OpReturn + OpFunctionEnd +%add_value_f1_f1_ = OpFunction %float None %8 + %v = OpFunctionParameter %float + %w = OpFunctionParameter %float + %12 = OpLabel + %15 = OpFAdd %float %v %w + OpReturnValue %15 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/phi-loop-variable.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/phi-loop-variable.asm.frag new file mode 100644 index 000000000..74c46b4af --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/phi-loop-variable.asm.frag @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 59 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %4 "main" + OpExecutionMode %4 OriginUpperLeft + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 +%mat2v2float = OpTypeMatrix %v2float 2 +%_ptr_Function_mat2v2float = OpTypePointer Function %mat2v2float + %v3float = OpTypeVector %float 3 + %11 = OpTypeFunction %v3float %_ptr_Function_mat2v2float +%_ptr_Function_v3float = OpTypePointer Function %v3float + %float_1 = OpConstant %float 1 + %18 = OpConstantComposite %v3float %float_1 %float_1 %float_1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_35 = OpConstant %int 35 + %int_0 = OpConstant %int 0 + %bool = OpTypeBool + %int_1 = OpConstant %int 1 + %4 = OpFunction %void None %3 + %5 = OpLabel + OpBranch %48 + %48 = OpLabel + %58 = OpPhi %int %int_35 %5 %56 %50 + OpLoopMerge %49 %50 None + OpBranch %51 + %51 = OpLabel + %53 = OpSGreaterThanEqual %bool %58 %int_0 + OpBranchConditional %53 %54 %49 + %54 = OpLabel + OpBranch %50 + %50 = OpLabel + %56 = OpISub %int %58 %int_1 + OpBranch %48 + %49 = OpLabel + OpReturn + OpFunctionEnd + %13 = OpFunction %v3float None %11 + %12 = OpFunctionParameter %_ptr_Function_mat2v2float + %14 = OpLabel + %16 = OpVariable %_ptr_Function_v3float Function + %21 = OpVariable %_ptr_Function_int Function + OpStore %16 %18 + OpStore %21 %int_35 + OpBranch %23 + %23 = OpLabel + OpLoopMerge %25 %26 None + OpBranch %27 + %27 = OpLabel + %28 = OpLoad %int %21 + %31 = OpSGreaterThanEqual %bool %28 %int_0 + OpBranchConditional %31 %24 %25 + %24 = OpLabel + OpBranch %26 + %26 = OpLabel + %32 = OpLoad %int %21 + %34 = OpISub %int %32 %int_1 + OpStore %21 %34 + OpBranch %23 + %25 = OpLabel + %35 = OpLoad %v3float %16 + OpReturnValue %35 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag new file mode 100644 index 000000000..0c3833e7e --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/sampler-buffer-array-without-sampler.asm.frag @@ -0,0 +1,86 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 63 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + 
OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %sample_from_func_s21_4__ "sample_from_func(s21[4];" + OpName %uSampler "uSampler" + OpName %sample_one_from_func_s21_ "sample_one_from_func(s21;" + OpName %uSampler_0 "uSampler" + OpName %Registers "Registers" + OpMemberName %Registers 0 "index" + OpName %registers "registers" + OpName %FragColor "FragColor" + OpName %uSampler_1 "uSampler" + OpMemberDecorate %Registers 0 Offset 0 + OpDecorate %Registers Block + OpDecorate %FragColor Location 0 + OpDecorate %uSampler_1 DescriptorSet 0 + OpDecorate %uSampler_1 Binding 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %7 = OpTypeImage %float 2D 0 0 0 1 Unknown + %uint = OpTypeInt 32 0 + %uint_4 = OpConstant %uint 4 +%_arr_8_uint_4 = OpTypeArray %7 %uint_4 +%_ptr_UniformConstant__arr_8_uint_4 = OpTypePointer UniformConstant %_arr_8_uint_4 + %v4float = OpTypeVector %float 4 + %14 = OpTypeFunction %v4float %_ptr_UniformConstant__arr_8_uint_4 +%_ptr_UniformConstant_8 = OpTypePointer UniformConstant %7 + %19 = OpTypeFunction %v4float %_ptr_UniformConstant_8 + %int = OpTypeInt 32 1 + %Registers = OpTypeStruct %int +%_ptr_PushConstant_Registers = OpTypePointer PushConstant %Registers + %registers = OpVariable %_ptr_PushConstant_Registers PushConstant + %int_0 = OpConstant %int 0 +%_ptr_PushConstant_int = OpTypePointer PushConstant %int + %v2int = OpTypeVector %int 2 + %int_4 = OpConstant %int 4 + %35 = OpConstantComposite %v2int %int_4 %int_4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %uSampler_1 = OpVariable %_ptr_UniformConstant__arr_8_uint_4 UniformConstant + %int_10 = OpConstant %int 10 + %53 = OpConstantComposite %v2int %int_10 %int_10 + %main = OpFunction %void None %3 + %5 = OpLabel + %48 = OpAccessChain %_ptr_PushConstant_int %registers %int_0 + %49 = OpLoad %int %48 + %50 = OpAccessChain %_ptr_UniformConstant_8 %uSampler_1 %49 + %51 = OpLoad %7 %50 + %55 = OpImageFetch %v4float %51 %53 Lod %int_0 + %56 = OpFunctionCall %v4float %sample_from_func_s21_4__ %uSampler_1 + %57 = OpFAdd %v4float %55 %56 + %58 = OpAccessChain %_ptr_PushConstant_int %registers %int_0 + %59 = OpLoad %int %58 + %60 = OpAccessChain %_ptr_UniformConstant_8 %uSampler_1 %59 + %61 = OpFunctionCall %v4float %sample_one_from_func_s21_ %60 + %62 = OpFAdd %v4float %57 %61 + OpStore %FragColor %62 + OpReturn + OpFunctionEnd +%sample_from_func_s21_4__ = OpFunction %v4float None %14 + %uSampler = OpFunctionParameter %_ptr_UniformConstant__arr_8_uint_4 + %17 = OpLabel + %29 = OpAccessChain %_ptr_PushConstant_int %registers %int_0 + %30 = OpLoad %int %29 + %31 = OpAccessChain %_ptr_UniformConstant_8 %uSampler %30 + %32 = OpLoad %7 %31 + %37 = OpImageFetch %v4float %32 %35 Lod %int_0 + OpReturnValue %37 + OpFunctionEnd +%sample_one_from_func_s21_ = OpFunction %v4float None %19 + %uSampler_0 = OpFunctionParameter %_ptr_UniformConstant_8 + %22 = OpLabel + %40 = OpLoad %7 %uSampler_0 + %42 = OpImageFetch %v4float %40 %35 Lod %int_0 + OpReturnValue %42 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag new file mode 100644 index 000000000..e6776eaf5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/sampler-buffer-without-sampler.asm.frag @@ -0,0 +1,62 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang 
Reference Front End; 1 +; Bound: 36 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpName %main "main" + OpName %_main_ "@main(" + OpName %storeTemp "storeTemp" + OpName %RWTex "RWTex" + OpName %Tex "Tex" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %RWTex DescriptorSet 0 + OpDecorate %Tex DescriptorSet 0 + OpDecorate %RWTex Binding 0 + OpDecorate %Tex Binding 1 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float +%_ptr_Function_v4float = OpTypePointer Function %v4float + %13 = OpConstant %float 1 + %14 = OpConstant %float 2 + %15 = OpConstant %float 3 + %16 = OpConstant %float 4 + %17 = OpConstantComposite %v4float %13 %14 %15 %16 + %18 = OpTypeImage %float Buffer 0 0 0 2 Rgba32f +%_ptr_UniformConstant_18 = OpTypePointer UniformConstant %18 + %RWTex = OpVariable %_ptr_UniformConstant_18 UniformConstant + %int = OpTypeInt 32 1 + %23 = OpConstant %int 20 + %25 = OpTypeImage %float Buffer 0 0 0 1 Rgba32f +%_ptr_UniformConstant_25 = OpTypePointer UniformConstant %25 + %Tex = OpVariable %_ptr_UniformConstant_25 UniformConstant + %29 = OpConstant %int 10 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %35 = OpFunctionCall %v4float %_main_ + OpStore %_entryPointOutput %35 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v4float None %8 + %10 = OpLabel + %storeTemp = OpVariable %_ptr_Function_v4float Function + OpStore %storeTemp %17 + %21 = OpLoad %18 %RWTex + %24 = OpLoad %v4float %storeTemp + OpImageWrite %21 %23 %24 + %28 = OpLoad %25 %Tex + %30 = OpImageFetch %v4float %28 %29 + OpReturnValue %30 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/selection-merge-to-continue.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/selection-merge-to-continue.asm.frag new file mode 100644 index 000000000..ecc491594 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/selection-merge-to-continue.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 55 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %v0 + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %i "i" + OpName %v0 "v0" + OpDecorate %FragColor Location 0 + OpDecorate %v0 Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %float_1 = OpConstant %float 1 + %11 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool +%_ptr_Input_v4float = OpTypePointer Input %v4float + %v0 = OpVariable %_ptr_Input_v4float Input + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Input_float = OpTypePointer Input %float + %float_20 = OpConstant %float 20 + %int_3 = OpConstant %int 3 + %int_1 = 
OpConstant %int 1 + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + OpStore %FragColor %11 + OpStore %i %int_0 + OpBranch %16 + %16 = OpLabel + OpLoopMerge %18 %19 None + OpBranch %20 + %20 = OpLabel + %21 = OpLoad %int %i + %24 = OpSLessThan %bool %21 %int_4 + OpBranchConditional %24 %17 %18 + %17 = OpLabel + %30 = OpAccessChain %_ptr_Input_float %v0 %uint_0 + %31 = OpLoad %float %30 + %33 = OpFOrdEqual %bool %31 %float_20 + OpSelectionMerge %19 None + OpBranchConditional %33 %34 %44 + %34 = OpLabel + %36 = OpLoad %int %i + %38 = OpBitwiseAnd %int %36 %int_3 + %39 = OpAccessChain %_ptr_Input_float %v0 %38 + %40 = OpLoad %float %39 + %41 = OpLoad %v4float %FragColor + %42 = OpCompositeConstruct %v4float %40 %40 %40 %40 + %43 = OpFAdd %v4float %41 %42 + OpStore %FragColor %43 + OpBranch %19 + %44 = OpLabel + %45 = OpLoad %int %i + %47 = OpBitwiseAnd %int %45 %int_1 + %48 = OpAccessChain %_ptr_Input_float %v0 %47 + %49 = OpLoad %float %48 + %50 = OpLoad %v4float %FragColor + %51 = OpCompositeConstruct %v4float %49 %49 %49 %49 + %52 = OpFAdd %v4float %50 %51 + OpStore %FragColor %52 + OpBranch %19 + %19 = OpLabel + %53 = OpLoad %int %i + %54 = OpIAdd %int %53 %int_1 + OpStore %i %54 + OpBranch %16 + %18 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/srem.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/srem.asm.frag new file mode 100644 index 000000000..c6f8e27cb --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/srem.asm.frag @@ -0,0 +1,43 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 23 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %vA %vB + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %vA "vA" + OpName %vB "vB" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %vA Flat + OpDecorate %vA Location 0 + OpDecorate %vB Flat + OpDecorate %vB Location 1 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %int = OpTypeInt 32 1 + %v4int = OpTypeVector %int 4 +%_ptr_Input_v4int = OpTypePointer Input %v4int + %vA = OpVariable %_ptr_Input_v4int Input + %vB = OpVariable %_ptr_Input_v4int Input + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpLoad %v4int %vA + %16 = OpLoad %v4int %vB + %17 = OpLoad %v4int %vA + %18 = OpLoad %v4int %vB + %19 = OpSRem %v4int %17 %18 + %20 = OpConvertSToF %v4float %19 + OpStore %FragColor %20 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag new file mode 100644 index 000000000..33bd1c916 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/struct-composite-extract-swizzle.asm.frag @@ -0,0 +1,55 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 34 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uSampler "uSampler" + 
OpName %Foo "Foo" + OpMemberName %Foo 0 "var1" + OpMemberName %Foo 1 "var2" + OpName %foo "foo" + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + OpDecorate %uSampler RelaxedPrecision + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 0 + OpDecorate %14 RelaxedPrecision + OpMemberDecorate %Foo 0 RelaxedPrecision + OpMemberDecorate %Foo 1 RelaxedPrecision + OpDecorate %27 RelaxedPrecision + OpDecorate %28 RelaxedPrecision + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown + %11 = OpTypeSampledImage %10 +%_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 + %uSampler = OpVariable %_ptr_UniformConstant_11 UniformConstant + %Foo = OpTypeStruct %float %float +%_ptr_Function_Foo = OpTypePointer Function %Foo + %int = OpTypeInt 32 1 +%_ptr_Function_float = OpTypePointer Function %float + %v2float = OpTypeVector %float 2 + %33 = OpUndef %Foo + %main = OpFunction %void None %3 + %5 = OpLabel + %foo = OpVariable %_ptr_Function_Foo Function + %14 = OpLoad %11 %uSampler + %30 = OpCompositeExtract %float %33 0 + %32 = OpCompositeExtract %float %33 1 + %27 = OpCompositeConstruct %v2float %30 %32 + %28 = OpImageSampleImplicitLod %v4float %14 %27 + OpStore %FragColor %28 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/switch-label-shared-block.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/switch-label-shared-block.asm.frag new file mode 100644 index 000000000..8f55bcf53 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/switch-label-shared-block.asm.frag @@ -0,0 +1,45 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 28 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %vIndex %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource ESSL 310 + OpName %main "main" + OpName %vIndex "vIndex" + OpName %FragColor "FragColor" + OpDecorate %vIndex RelaxedPrecision + OpDecorate %vIndex Flat + OpDecorate %vIndex Location 0 + OpDecorate %13 RelaxedPrecision + OpDecorate %FragColor RelaxedPrecision + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %float_8 = OpConstant %float 8 + %int = OpTypeInt 32 1 +%_ptr_Input_int = OpTypePointer Input %int + %vIndex = OpVariable %_ptr_Input_int Input + %float_1 = OpConstant %float 1 + %float_3 = OpConstant %float 3 +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %13 = OpLoad %int %vIndex + OpSelectionMerge %17 None + OpSwitch %13 %15 0 %14 2 %14 1 %15 8 %17 + %15 = OpLabel + OpBranch %17 + %14 = OpLabel + OpBranch %17 + %17 = OpLabel + %27 = OpPhi %float %float_3 %15 %float_1 %14 %float_8 %5 + OpStore %FragColor %27 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/switch-merge-to-continue.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/switch-merge-to-continue.asm.frag new file mode 100644 index 000000000..94ef5f538 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/switch-merge-to-continue.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 57 +; Schema: 0 + OpCapability 
Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %v0 + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %i "i" + OpName %v0 "v0" + OpDecorate %FragColor Location 0 + OpDecorate %v0 Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %float_1 = OpConstant %float 1 + %11 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %int = OpTypeInt 32 1 +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Output_float = OpTypePointer Output %float + %float_3 = OpConstant %float 3 + %uint_1 = OpConstant %uint 1 + %uint_2 = OpConstant %uint 2 + %int_1 = OpConstant %int 1 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %v0 = OpVariable %_ptr_Input_v4float Input + %main = OpFunction %void None %3 + %5 = OpLabel + %i = OpVariable %_ptr_Function_int Function + OpStore %FragColor %11 + OpStore %i %int_0 + OpBranch %16 + %16 = OpLabel + OpLoopMerge %18 %19 None + OpBranch %20 + %20 = OpLabel + %21 = OpLoad %int %i + %24 = OpSLessThan %bool %21 %int_4 + OpBranchConditional %24 %17 %18 + %17 = OpLabel + %25 = OpLoad %int %i + OpSelectionMerge %19 None + OpSwitch %25 %28 0 %26 1 %27 + %28 = OpLabel + %46 = OpAccessChain %_ptr_Output_float %FragColor %uint_2 + %47 = OpLoad %float %46 + %48 = OpFAdd %float %47 %float_3 + %49 = OpAccessChain %_ptr_Output_float %FragColor %uint_2 + OpStore %49 %48 + OpBranch %19 + %26 = OpLabel + %33 = OpAccessChain %_ptr_Output_float %FragColor %uint_0 + %34 = OpLoad %float %33 + %35 = OpFAdd %float %34 %float_1 + %36 = OpAccessChain %_ptr_Output_float %FragColor %uint_0 + OpStore %36 %35 + OpBranch %19 + %27 = OpLabel + %40 = OpAccessChain %_ptr_Output_float %FragColor %uint_1 + %41 = OpLoad %float %40 + %42 = OpFAdd %float %41 %float_3 + %43 = OpAccessChain %_ptr_Output_float %FragColor %uint_1 + OpStore %43 %42 + OpBranch %19 + %19 = OpLabel + %52 = OpLoad %int %i + %54 = OpIAdd %int %52 %int_1 + OpStore %i %54 + OpBranch %16 + %18 = OpLabel + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/temporary-name-alias.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/temporary-name-alias.asm.frag new file mode 100644 index 000000000..6ea359b24 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/temporary-name-alias.asm.frag @@ -0,0 +1,47 @@ +; SPIR-V +; Version: 1.2 +; Generator: Khronos; 0 +; Bound: 51 +; Schema: 0 + OpCapability Shader + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %EntryPoint_Main "main" + OpExecutionMode %EntryPoint_Main OriginUpperLeft + OpSource Unknown 100 + OpName %mat3 "mat3" + OpName %constituent "constituent" + OpName %constituent_0 "constituent" + OpName %constituent_1 "constituent" + OpName %constituent_2 "constituent" + OpName %constituent_3 "constituent" + OpName %constituent_4 "constituent" + OpName %constituent_5 "constituent" + OpName %constituent_6 "constituent" + OpName %EntryPoint_Main "EntryPoint_Main" + %void = OpTypeVoid +%_ptr_Function_void = OpTypePointer Function %void + %float = OpTypeFloat 32 + %int = OpTypeInt 32 1 + %v3float = OpTypeVector %float 3 +%mat3v3float = OpTypeMatrix %v3float 3 +%_ptr_Function_mat3v3float = 
OpTypePointer Function %mat3v3float + %14 = OpTypeFunction %void + %int_0 = OpConstant %int 0 + %int_1 = OpConstant %int 1 +%EntryPoint_Main = OpFunction %void None %14 + %45 = OpLabel + %mat3 = OpVariable %_ptr_Function_mat3v3float Function +%constituent = OpConvertSToF %float %int_0 +%constituent_0 = OpCompositeConstruct %v3float %constituent %constituent %constituent +%constituent_1 = OpCompositeConstruct %v3float %constituent %constituent %constituent +%constituent_2 = OpCompositeConstruct %v3float %constituent %constituent %constituent + %25 = OpCompositeConstruct %mat3v3float %constituent_0 %constituent_1 %constituent_2 + OpStore %mat3 %25 +%constituent_3 = OpConvertSToF %float %int_1 +%constituent_4 = OpCompositeConstruct %v3float %constituent_3 %constituent_3 %constituent_3 +%constituent_5 = OpCompositeConstruct %v3float %constituent_3 %constituent_3 %constituent_3 +%constituent_6 = OpCompositeConstruct %v3float %constituent_3 %constituent_3 %constituent_3 + %30 = OpCompositeConstruct %mat3v3float %constituent_4 %constituent_5 %constituent_6 + OpStore %mat3 %30 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/temporary-phi-hoisting.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/temporary-phi-hoisting.asm.frag new file mode 100644 index 000000000..7cedcd581 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/temporary-phi-hoisting.asm.frag @@ -0,0 +1,75 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 87 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %MyStruct "MyStruct" + OpMemberName %MyStruct 0 "color" + OpName %MyStruct_CB "MyStruct_CB" + OpMemberName %MyStruct_CB 0 "g_MyStruct" + OpName %_ "" + OpName %_entryPointOutput "@entryPointOutput" + OpMemberDecorate %MyStruct 0 Offset 0 + OpDecorate %_arr_MyStruct_uint_4 ArrayStride 16 + OpMemberDecorate %MyStruct_CB 0 Offset 0 + OpDecorate %MyStruct_CB Block + OpDecorate %_ DescriptorSet 0 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %v3float = OpTypeVector %float 3 + %float_0 = OpConstant %float 0 + %15 = OpConstantComposite %v3float %float_0 %float_0 %float_0 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %int_4 = OpConstant %int 4 + %bool = OpTypeBool + %MyStruct = OpTypeStruct %v4float + %uint = OpTypeInt 32 0 + %uint_4 = OpConstant %uint 4 +%_arr_MyStruct_uint_4 = OpTypeArray %MyStruct %uint_4 +%MyStruct_CB = OpTypeStruct %_arr_MyStruct_uint_4 +%_ptr_Uniform_MyStruct_CB = OpTypePointer Uniform %MyStruct_CB + %_ = OpVariable %_ptr_Uniform_MyStruct_CB Uniform +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %int_1 = OpConstant %int 1 + %float_1 = OpConstant %float 1 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + OpBranch %64 + %64 = OpLabel + %85 = OpPhi %v3float %15 %5 %77 %66 + %86 = OpPhi %int %int_0 %5 %79 %66 + OpLoopMerge %65 %66 None + OpBranch %67 + %67 = OpLabel + %69 = OpSLessThan %bool %86 %int_4 + OpBranchConditional %69 %70 %65 + %70 = OpLabel + %72 = OpAccessChain %_ptr_Uniform_v4float %_ %int_0 %86 %int_0 + %73 = OpLoad %v4float %72 + %74 = OpVectorShuffle %v3float %73 %73 0 1 2 + %77 = OpFAdd 
%v3float %85 %74 + OpBranch %66 + %66 = OpLabel + %79 = OpIAdd %int %86 %int_1 + OpBranch %64 + %65 = OpLabel + %81 = OpCompositeExtract %float %85 0 + %82 = OpCompositeExtract %float %85 1 + %83 = OpCompositeExtract %float %85 2 + %84 = OpCompositeConstruct %v4float %81 %82 %83 %float_1 + OpStore %_entryPointOutput %84 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/texel-fetch-no-lod.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/texel-fetch-no-lod.asm.frag new file mode 100644 index 000000000..53dc63809 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/texel-fetch-no-lod.asm.frag @@ -0,0 +1,46 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 26 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %FragColor %gl_FragCoord + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %FragColor "FragColor" + OpName %uTexture "uTexture" + OpName %gl_FragCoord "gl_FragCoord" + OpDecorate %FragColor Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 0 + OpDecorate %gl_FragCoord BuiltIn FragCoord + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output + %10 = OpTypeImage %float 2D 0 0 0 1 Unknown + %11 = OpTypeSampledImage %10 +%_ptr_UniformConstant_11 = OpTypePointer UniformConstant %11 + %uTexture = OpVariable %_ptr_UniformConstant_11 UniformConstant +%_ptr_Input_v4float = OpTypePointer Input %v4float +%gl_FragCoord = OpVariable %_ptr_Input_v4float Input + %v2float = OpTypeVector %float 2 + %int = OpTypeInt 32 1 + %v2int = OpTypeVector %int 2 + %int_0 = OpConstant %int 0 + %main = OpFunction %void None %3 + %5 = OpLabel + %14 = OpLoad %11 %uTexture + %18 = OpLoad %v4float %gl_FragCoord + %19 = OpVectorShuffle %v2float %18 %18 0 1 + %22 = OpConvertFToS %v2int %19 + %24 = OpImage %10 %14 + %25 = OpImageFetch %v4float %24 %22 + OpStore %FragColor %25 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/undef-variable-store.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/undef-variable-store.asm.frag new file mode 100644 index 000000000..966c2d9d5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/undef-variable-store.asm.frag @@ -0,0 +1,85 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 50 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %fragmentProgram "main" %_entryPointOutput + OpExecutionMode %fragmentProgram OriginUpperLeft + OpSource HLSL 500 + OpName %fragmentProgram "fragmentProgram" + OpName %_fragmentProgram_ "@fragmentProgram(" + OpName %uv "uv" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %v2float = OpTypeVector %float 2 +%_ptr_Function_v2float = OpTypePointer Function %v2float + %float_0 = OpConstant %float 0 + %15 = OpConstantComposite %v2float %float_0 %float_0 + %uint = OpTypeInt 32 0 + %uint_0 = OpConstant %uint 0 +%_ptr_Function_float = OpTypePointer Function %float + %bool = OpTypeBool + %float_1 = OpConstant %float 1 + %26 = OpConstantComposite 
%v4float %float_1 %float_0 %float_0 %float_1 + %29 = OpConstantComposite %v4float %float_1 %float_1 %float_0 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %false = OpConstantFalse %bool +%fragmentProgram = OpFunction %void None %3 + %5 = OpLabel + %35 = OpVariable %_ptr_Function_v2float Function + %37 = OpVariable %_ptr_Function_v4float Function + OpBranch %38 + %38 = OpLabel + OpLoopMerge %39 %40 None + OpBranch %41 + %41 = OpLabel + OpStore %35 %15 + %42 = OpAccessChain %_ptr_Function_float %35 %uint_0 + %43 = OpLoad %float %42 + %44 = OpFOrdNotEqual %bool %43 %float_0 + OpSelectionMerge %45 None + OpBranchConditional %44 %46 %47 + %46 = OpLabel + OpStore %37 %26 + OpBranch %39 + %47 = OpLabel + OpStore %37 %29 + OpBranch %39 + %45 = OpLabel + %48 = OpUndef %v4float + OpStore %37 %48 + OpBranch %39 + %40 = OpLabel + OpBranchConditional %false %38 %39 + %39 = OpLabel + %34 = OpLoad %v4float %37 + OpStore %_entryPointOutput %34 + OpReturn + OpFunctionEnd +%_fragmentProgram_ = OpFunction %v4float None %8 + %10 = OpLabel + %uv = OpVariable %_ptr_Function_v2float Function + OpStore %uv %15 + %19 = OpAccessChain %_ptr_Function_float %uv %uint_0 + %20 = OpLoad %float %19 + %22 = OpFOrdNotEqual %bool %20 %float_0 + OpSelectionMerge %24 None + OpBranchConditional %22 %23 %28 + %23 = OpLabel + OpReturnValue %26 + %28 = OpLabel + OpReturnValue %29 + %24 = OpLabel + %31 = OpUndef %v4float + OpReturnValue %31 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/unknown-depth-state.asm.vk.frag b/3rdparty/spirv-cross/shaders/asm/frag/unknown-depth-state.asm.vk.frag new file mode 100644 index 000000000..89036f0eb --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/unknown-depth-state.asm.vk.frag @@ -0,0 +1,71 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 44 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %vUV %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %sample_combined_ "sample_combined(" + OpName %sample_separate_ "sample_separate(" + OpName %uShadow "uShadow" + OpName %vUV "vUV" + OpName %uTexture "uTexture" + OpName %uSampler "uSampler" + OpName %FragColor "FragColor" + OpDecorate %uShadow DescriptorSet 0 + OpDecorate %uShadow Binding 0 + OpDecorate %vUV Location 0 + OpDecorate %uTexture DescriptorSet 0 + OpDecorate %uTexture Binding 1 + OpDecorate %uSampler DescriptorSet 0 + OpDecorate %uSampler Binding 2 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %7 = OpTypeFunction %float + %12 = OpTypeImage %float 2D 2 0 0 1 Unknown + %13 = OpTypeSampledImage %12 +%_ptr_UniformConstant_13 = OpTypePointer UniformConstant %13 + %uShadow = OpVariable %_ptr_UniformConstant_13 UniformConstant + %v3float = OpTypeVector %float 3 +%_ptr_Input_v3float = OpTypePointer Input %v3float + %vUV = OpVariable %_ptr_Input_v3float Input +%_ptr_UniformConstant_25 = OpTypePointer UniformConstant %12 + %uTexture = OpVariable %_ptr_UniformConstant_25 UniformConstant + %29 = OpTypeSampler +%_ptr_UniformConstant_29 = OpTypePointer UniformConstant %29 + %uSampler = OpVariable %_ptr_UniformConstant_29 UniformConstant +%_ptr_Output_float = OpTypePointer Output %float + %FragColor = OpVariable %_ptr_Output_float Output + 
%main = OpFunction %void None %3 + %5 = OpLabel + %41 = OpFunctionCall %float %sample_combined_ + %42 = OpFunctionCall %float %sample_separate_ + %43 = OpFAdd %float %41 %42 + OpStore %FragColor %43 + OpReturn + OpFunctionEnd +%sample_combined_ = OpFunction %float None %7 + %9 = OpLabel + %16 = OpLoad %13 %uShadow + %20 = OpLoad %v3float %vUV + %21 = OpCompositeExtract %float %20 2 + %22 = OpImageSampleDrefImplicitLod %float %16 %20 %21 + OpReturnValue %22 + OpFunctionEnd +%sample_separate_ = OpFunction %float None %7 + %11 = OpLabel + %28 = OpLoad %12 %uTexture + %32 = OpLoad %29 %uSampler + %33 = OpSampledImage %13 %28 %32 + %34 = OpLoad %v3float %vUV + %35 = OpCompositeExtract %float %34 2 + %36 = OpImageSampleDrefImplicitLod %float %33 %34 %35 + OpReturnValue %36 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/unreachable.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/unreachable.asm.frag new file mode 100644 index 000000000..e2ce2eb56 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/unreachable.asm.frag @@ -0,0 +1,61 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 47 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %counter %FragColor + OpExecutionMode %main OriginUpperLeft + OpSource GLSL 450 + OpName %main "main" + OpName %counter "counter" + OpName %FragColor "FragColor" + OpDecorate %counter Flat + OpDecorate %counter Location 0 + OpDecorate %FragColor Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %int = OpTypeInt 32 1 +%_ptr_Input_int = OpTypePointer Input %int + %counter = OpVariable %_ptr_Input_int Input + %int_10 = OpConstant %int 10 + %bool = OpTypeBool + %float_10 = OpConstant %float 10 + %21 = OpConstantComposite %v4float %float_10 %float_10 %float_10 %float_10 + %float_30 = OpConstant %float 30 + %25 = OpConstantComposite %v4float %float_30 %float_30 %float_30 %float_30 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %FragColor = OpVariable %_ptr_Output_v4float Output +%_ptr_Function_v4float = OpTypePointer Function %v4float + %false = OpConstantFalse %bool + %44 = OpUndef %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + OpBranch %33 + %33 = OpLabel + %45 = OpPhi %v4float %44 %5 %44 %35 + OpLoopMerge %34 %35 None + OpBranch %36 + %36 = OpLabel + %37 = OpLoad %int %counter + %38 = OpIEqual %bool %37 %int_10 + OpSelectionMerge %39 None + OpBranchConditional %38 %40 %41 + %40 = OpLabel + OpBranch %34 + %41 = OpLabel + OpBranch %34 + %39 = OpLabel + OpUnreachable + %35 = OpLabel + OpBranchConditional %false %33 %34 + %34 = OpLabel + %46 = OpPhi %v4float %21 %40 %25 %41 %44 %35 + OpStore %FragColor %46 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/frag/vector-shuffle-oom.asm.frag b/3rdparty/spirv-cross/shaders/asm/frag/vector-shuffle-oom.asm.frag new file mode 100644 index 000000000..d60c6f52d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/frag/vector-shuffle-oom.asm.frag @@ -0,0 +1,886 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 25007 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %5663 "main" %5800 %gl_FragCoord %4317 + OpExecutionMode %5663 OriginUpperLeft + OpMemberDecorate %_struct_1116 0 Offset 0 + OpMemberDecorate 
%_struct_1116 1 Offset 16 + OpMemberDecorate %_struct_1116 2 Offset 32 + OpDecorate %_struct_1116 Block + OpDecorate %22044 DescriptorSet 0 + OpDecorate %22044 Binding 0 + OpDecorate %5785 DescriptorSet 0 + OpDecorate %5785 Binding 140 + OpDecorate %5688 DescriptorSet 0 + OpDecorate %5688 Binding 60 + OpMemberDecorate %_struct_994 0 Offset 0 + OpMemberDecorate %_struct_994 1 Offset 16 + OpMemberDecorate %_struct_994 2 Offset 28 + OpMemberDecorate %_struct_994 3 Offset 32 + OpMemberDecorate %_struct_994 4 Offset 44 + OpMemberDecorate %_struct_994 5 Offset 48 + OpMemberDecorate %_struct_994 6 Offset 60 + OpMemberDecorate %_struct_994 7 Offset 64 + OpMemberDecorate %_struct_994 8 Offset 76 + OpMemberDecorate %_struct_994 9 Offset 80 + OpMemberDecorate %_struct_994 10 Offset 92 + OpMemberDecorate %_struct_994 11 Offset 96 + OpMemberDecorate %_struct_994 12 Offset 108 + OpMemberDecorate %_struct_994 13 Offset 112 + OpMemberDecorate %_struct_994 14 Offset 120 + OpMemberDecorate %_struct_994 15 Offset 128 + OpMemberDecorate %_struct_994 16 Offset 140 + OpMemberDecorate %_struct_994 17 Offset 144 + OpMemberDecorate %_struct_994 18 Offset 148 + OpMemberDecorate %_struct_994 19 Offset 152 + OpMemberDecorate %_struct_994 20 Offset 156 + OpMemberDecorate %_struct_994 21 Offset 160 + OpMemberDecorate %_struct_994 22 Offset 176 + OpMemberDecorate %_struct_994 23 RowMajor + OpMemberDecorate %_struct_994 23 Offset 192 + OpMemberDecorate %_struct_994 23 MatrixStride 16 + OpMemberDecorate %_struct_994 24 Offset 256 + OpDecorate %_struct_994 Block + OpDecorate %12348 DescriptorSet 0 + OpDecorate %12348 Binding 2 + OpDecorate %3312 DescriptorSet 0 + OpDecorate %3312 Binding 142 + OpDecorate %4646 DescriptorSet 0 + OpDecorate %4646 Binding 62 + OpDecorate %4862 DescriptorSet 0 + OpDecorate %4862 Binding 141 + OpDecorate %3594 DescriptorSet 0 + OpDecorate %3594 Binding 61 + OpDecorate %_arr_mat4v4float_uint_2 ArrayStride 64 + OpDecorate %_arr_v4float_uint_2 ArrayStride 16 + OpMemberDecorate %_struct_408 0 RowMajor + OpMemberDecorate %_struct_408 0 Offset 0 + OpMemberDecorate %_struct_408 0 MatrixStride 16 + OpMemberDecorate %_struct_408 1 RowMajor + OpMemberDecorate %_struct_408 1 Offset 64 + OpMemberDecorate %_struct_408 1 MatrixStride 16 + OpMemberDecorate %_struct_408 2 RowMajor + OpMemberDecorate %_struct_408 2 Offset 128 + OpMemberDecorate %_struct_408 2 MatrixStride 16 + OpMemberDecorate %_struct_408 3 RowMajor + OpMemberDecorate %_struct_408 3 Offset 192 + OpMemberDecorate %_struct_408 3 MatrixStride 16 + OpMemberDecorate %_struct_408 4 Offset 256 + OpMemberDecorate %_struct_408 5 Offset 272 + OpMemberDecorate %_struct_408 6 Offset 288 + OpMemberDecorate %_struct_408 7 Offset 292 + OpMemberDecorate %_struct_408 8 Offset 296 + OpMemberDecorate %_struct_408 9 Offset 300 + OpMemberDecorate %_struct_408 10 Offset 304 + OpMemberDecorate %_struct_408 11 Offset 316 + OpMemberDecorate %_struct_408 12 Offset 320 + OpMemberDecorate %_struct_408 13 Offset 332 + OpMemberDecorate %_struct_408 14 Offset 336 + OpMemberDecorate %_struct_408 15 Offset 348 + OpMemberDecorate %_struct_408 16 Offset 352 + OpMemberDecorate %_struct_408 17 Offset 364 + OpMemberDecorate %_struct_408 18 Offset 368 + OpMemberDecorate %_struct_408 19 Offset 372 + OpMemberDecorate %_struct_408 20 Offset 376 + OpMemberDecorate %_struct_408 21 Offset 384 + OpMemberDecorate %_struct_408 22 Offset 392 + OpMemberDecorate %_struct_408 23 Offset 400 + OpMemberDecorate %_struct_408 24 Offset 416 + OpMemberDecorate %_struct_408 25 Offset 424 + 
OpMemberDecorate %_struct_408 26 Offset 432 + OpMemberDecorate %_struct_408 27 Offset 448 + OpMemberDecorate %_struct_408 28 Offset 460 + OpMemberDecorate %_struct_408 29 Offset 464 + OpMemberDecorate %_struct_408 30 Offset 468 + OpMemberDecorate %_struct_408 31 Offset 472 + OpMemberDecorate %_struct_408 32 Offset 476 + OpMemberDecorate %_struct_408 33 Offset 480 + OpMemberDecorate %_struct_408 34 Offset 488 + OpMemberDecorate %_struct_408 35 Offset 492 + OpMemberDecorate %_struct_408 36 Offset 496 + OpMemberDecorate %_struct_408 37 RowMajor + OpMemberDecorate %_struct_408 37 Offset 512 + OpMemberDecorate %_struct_408 37 MatrixStride 16 + OpMemberDecorate %_struct_408 38 Offset 640 + OpDecorate %_struct_408 Block + OpDecorate %15259 DescriptorSet 0 + OpDecorate %15259 Binding 1 + OpDecorate %5800 Location 0 + OpDecorate %gl_FragCoord BuiltIn FragCoord + OpDecorate %4317 Location 0 + OpMemberDecorate %_struct_1395 0 Offset 0 + OpMemberDecorate %_struct_1395 1 Offset 16 + OpMemberDecorate %_struct_1395 2 Offset 32 + OpMemberDecorate %_struct_1395 3 Offset 40 + OpMemberDecorate %_struct_1395 4 Offset 48 + OpMemberDecorate %_struct_1395 5 Offset 60 + OpMemberDecorate %_struct_1395 6 Offset 64 + OpMemberDecorate %_struct_1395 7 Offset 76 + OpMemberDecorate %_struct_1395 8 Offset 80 + OpMemberDecorate %_struct_1395 9 Offset 96 + OpMemberDecorate %_struct_1395 10 Offset 112 + OpMemberDecorate %_struct_1395 11 Offset 128 + OpMemberDecorate %_struct_1395 12 Offset 140 + OpMemberDecorate %_struct_1395 13 Offset 144 + OpMemberDecorate %_struct_1395 14 Offset 156 + OpMemberDecorate %_struct_1395 15 Offset 160 + OpMemberDecorate %_struct_1395 16 Offset 176 + OpMemberDecorate %_struct_1395 17 Offset 192 + OpMemberDecorate %_struct_1395 18 Offset 204 + OpMemberDecorate %_struct_1395 19 Offset 208 + OpMemberDecorate %_struct_1395 20 Offset 224 + OpDecorate %_struct_1395 Block + OpMemberDecorate %_struct_1018 0 Offset 0 + OpDecorate %_struct_1018 Block + %void = OpTypeVoid + %1282 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v2float = OpTypeVector %float 2 + %v4float = OpTypeVector %float 4 + %v3float = OpTypeVector %float 3 +%_struct_1017 = OpTypeStruct %v4float +%_struct_1116 = OpTypeStruct %v4float %float %v4float +%_ptr_Uniform__struct_1116 = OpTypePointer Uniform %_struct_1116 + %22044 = OpVariable %_ptr_Uniform__struct_1116 Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %150 = OpTypeImage %float 2D 0 0 0 1 Unknown +%_ptr_UniformConstant_150 = OpTypePointer UniformConstant %150 + %5785 = OpVariable %_ptr_UniformConstant_150 UniformConstant + %508 = OpTypeSampler +%_ptr_UniformConstant_508 = OpTypePointer UniformConstant %508 + %5688 = OpVariable %_ptr_UniformConstant_508 UniformConstant + %510 = OpTypeSampledImage %150 + %float_0 = OpConstant %float 0 + %uint = OpTypeInt 32 0 + %int_1 = OpConstant %int 1 +%_ptr_Uniform_float = OpTypePointer Uniform %float + %float_1 = OpConstant %float 1 +%mat4v4float = OpTypeMatrix %v4float 4 +%_struct_994 = OpTypeStruct %v3float %v3float %float %v3float %float %v3float %float %v3float %float %v3float %float %v3float %float %v2float %v2float %v3float %float %float %float %float %float %v4float %v4float %mat4v4float %v4float +%_ptr_Uniform__struct_994 = OpTypePointer Uniform %_struct_994 + %12348 = OpVariable %_ptr_Uniform__struct_994 Uniform + %int_5 = OpConstant %int 5 +%_ptr_Uniform_v3float = OpTypePointer Uniform %v3float + %3312 = OpVariable %_ptr_UniformConstant_150 UniformConstant + 
%4646 = OpVariable %_ptr_UniformConstant_508 UniformConstant + %bool = OpTypeBool + %4862 = OpVariable %_ptr_UniformConstant_150 UniformConstant + %3594 = OpVariable %_ptr_UniformConstant_508 UniformConstant + %uint_2 = OpConstant %uint 2 + %2938 = OpConstantComposite %v4float %float_0 %float_0 %float_0 %float_0 +%_arr_mat4v4float_uint_2 = OpTypeArray %mat4v4float %uint_2 +%_arr_v4float_uint_2 = OpTypeArray %v4float %uint_2 +%_struct_408 = OpTypeStruct %mat4v4float %mat4v4float %mat4v4float %mat4v4float %v4float %v4float %float %float %float %float %v3float %float %v3float %float %v3float %float %v3float %float %float %float %v2float %v2float %v2float %v4float %v2float %v2float %v2float %v3float %float %float %float %float %float %v2float %float %float %v3float %_arr_mat4v4float_uint_2 %_arr_v4float_uint_2 +%_ptr_Uniform__struct_408 = OpTypePointer Uniform %_struct_408 + %15259 = OpVariable %_ptr_Uniform__struct_408 Uniform + %int_23 = OpConstant %int 23 + %int_2 = OpConstant %int 2 + %float_n2 = OpConstant %float -2 + %float_0_5 = OpConstant %float 0.5 + %1196 = OpConstantComposite %v3float %float_0 %float_n2 %float_0_5 + %float_n1 = OpConstant %float -1 + %836 = OpConstantComposite %v3float %float_n1 %float_n1 %float_0_5 + %float_0_75 = OpConstant %float 0.75 + %1367 = OpConstantComposite %v3float %float_0 %float_n1 %float_0_75 + %141 = OpConstantComposite %v3float %float_1 %float_n1 %float_0_5 + %38 = OpConstantComposite %v3float %float_n2 %float_0 %float_0_5 + %95 = OpConstantComposite %v3float %float_n1 %float_0 %float_0_75 + %626 = OpConstantComposite %v3float %float_0 %float_0 %float_1 + %2411 = OpConstantComposite %v3float %float_1 %float_0 %float_0_75 + %float_2 = OpConstant %float 2 + %2354 = OpConstantComposite %v3float %float_2 %float_0 %float_0_5 + %837 = OpConstantComposite %v3float %float_n1 %float_1 %float_0_5 + %1368 = OpConstantComposite %v3float %float_0 %float_1 %float_0_75 + %142 = OpConstantComposite %v3float %float_1 %float_1 %float_0_5 + %1197 = OpConstantComposite %v3float %float_0 %float_2 %float_0_5 +%_ptr_Input_v2float = OpTypePointer Input %v2float + %5800 = OpVariable %_ptr_Input_v2float Input +%_ptr_Input_v4float = OpTypePointer Input %v4float +%gl_FragCoord = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float + %4317 = OpVariable %_ptr_Output_v4float Output +%_struct_1395 = OpTypeStruct %v4float %v4float %v2float %v2float %v3float %float %v3float %float %v4float %v4float %v4float %v3float %float %v3float %float %v3float %v4float %v3float %float %v3float %v2float +%_struct_1018 = OpTypeStruct %v4float + %10264 = OpUndef %_struct_1017 + %5663 = OpFunction %void None %1282 + %25006 = OpLabel + %17463 = OpLoad %v4float %gl_FragCoord + %13863 = OpCompositeInsert %_struct_1017 %2938 %10264 0 + %22969 = OpVectorShuffle %v2float %17463 %17463 0 1 + %13206 = OpAccessChain %_ptr_Uniform_v4float %15259 %int_23 + %10343 = OpLoad %v4float %13206 + %7422 = OpVectorShuffle %v2float %10343 %10343 0 1 + %19927 = OpFMul %v2float %22969 %7422 + %18174 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_2 + %16206 = OpLoad %v4float %18174 + %20420 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %21354 = OpLoad %v4float %20420 + %7688 = OpVectorShuffle %v4float %21354 %21354 0 1 0 1 + %17581 = OpFMul %v4float %16206 %7688 + %10673 = OpVectorShuffle %v2float %1196 %1196 0 1 + %18824 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10344 = OpLoad %v4float %18824 + %8638 = OpVectorShuffle %v2float %10344 %10344 0 1 + %9197 = OpFMul 
%v2float %10673 %8638 + %18505 = OpFAdd %v2float %19927 %9197 + %7011 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21058 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13149 = OpExtInst %v2float %1 FClamp %18505 %7011 %21058 + %23584 = OpLoad %150 %5785 + %10339 = OpLoad %508 %5688 + %12147 = OpSampledImage %510 %23584 %10339 + %15371 = OpImageSampleExplicitLod %v4float %12147 %13149 Lod %float_0 + %15266 = OpCompositeExtract %float %15371 3 + %12116 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12972 = OpLoad %float %12116 + %15710 = OpFMul %float %15266 %12972 + %15279 = OpExtInst %float %1 FClamp %15710 %float_0 %float_1 + %22213 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11756 = OpLoad %v3float %22213 + %12103 = OpVectorTimesScalar %v3float %11756 %15279 + %15516 = OpLoad %150 %3312 + %24569 = OpLoad %508 %4646 + %12148 = OpSampledImage %510 %15516 %24569 + %17670 = OpImageSampleExplicitLod %v4float %12148 %13149 Lod %float_0 + %16938 = OpCompositeExtract %float %17670 1 + %14185 = OpFOrdGreaterThan %bool %16938 %float_0 + OpSelectionMerge %22307 DontFlatten + OpBranchConditional %14185 %12821 %22307 + %12821 = OpLabel + %13239 = OpLoad %150 %4862 + %19960 = OpLoad %508 %3594 + %12149 = OpSampledImage %510 %13239 %19960 + %15675 = OpImageSampleExplicitLod %v4float %12149 %13149 Lod %float_0 + %13866 = OpCompositeExtract %float %17670 1 + %12427 = OpCompositeExtract %float %17670 2 + %23300 = OpFMul %float %13866 %12427 + %17612 = OpExtInst %float %1 FClamp %23300 %float_0 %float_1 + %20291 = OpVectorShuffle %v3float %15675 %15675 0 1 2 + %11186 = OpVectorTimesScalar %v3float %20291 %17612 + %15293 = OpFAdd %v3float %12103 %11186 + OpBranch %22307 + %22307 = OpLabel + %7719 = OpPhi %v3float %12103 %25006 %15293 %12821 + %23399 = OpVectorTimesScalar %v3float %7719 %float_0_5 + %9339 = OpFAdd %float %float_0 %float_0_5 + %16235 = OpVectorShuffle %v3float %2938 %2938 0 1 2 + %22177 = OpFAdd %v3float %16235 %23399 + %15527 = OpVectorShuffle %v4float %2938 %22177 4 5 6 3 + %6434 = OpCompositeInsert %_struct_1017 %15527 %13863 0 + %24572 = OpVectorShuffle %v2float %836 %836 0 1 + %13207 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10345 = OpLoad %v4float %13207 + %8639 = OpVectorShuffle %v2float %10345 %10345 0 1 + %9198 = OpFMul %v2float %24572 %8639 + %18506 = OpFAdd %v2float %19927 %9198 + %7012 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21059 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13150 = OpExtInst %v2float %1 FClamp %18506 %7012 %21059 + %23585 = OpLoad %150 %5785 + %10340 = OpLoad %508 %5688 + %12150 = OpSampledImage %510 %23585 %10340 + %15372 = OpImageSampleExplicitLod %v4float %12150 %13150 Lod %float_0 + %15267 = OpCompositeExtract %float %15372 3 + %12117 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12973 = OpLoad %float %12117 + %15711 = OpFMul %float %15267 %12973 + %15280 = OpExtInst %float %1 FClamp %15711 %float_0 %float_1 + %22214 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11757 = OpLoad %v3float %22214 + %12104 = OpVectorTimesScalar %v3float %11757 %15280 + %15517 = OpLoad %150 %3312 + %24570 = OpLoad %508 %4646 + %12151 = OpSampledImage %510 %15517 %24570 + %17671 = OpImageSampleExplicitLod %v4float %12151 %13150 Lod %float_0 + %16939 = OpCompositeExtract %float %17671 1 + %14186 = OpFOrdGreaterThan %bool %16939 %float_0 + OpSelectionMerge %22308 DontFlatten + OpBranchConditional %14186 %12822 %22308 + %12822 = OpLabel + %13240 = OpLoad %150 %4862 + %19961 = OpLoad %508 %3594 + %12152 = OpSampledImage %510 
%13240 %19961 + %15676 = OpImageSampleExplicitLod %v4float %12152 %13150 Lod %float_0 + %13867 = OpCompositeExtract %float %17671 1 + %12428 = OpCompositeExtract %float %17671 2 + %23301 = OpFMul %float %13867 %12428 + %17613 = OpExtInst %float %1 FClamp %23301 %float_0 %float_1 + %20292 = OpVectorShuffle %v3float %15676 %15676 0 1 2 + %11187 = OpVectorTimesScalar %v3float %20292 %17613 + %15294 = OpFAdd %v3float %12104 %11187 + OpBranch %22308 + %22308 = OpLabel + %7720 = OpPhi %v3float %12104 %22307 %15294 %12822 + %23400 = OpVectorTimesScalar %v3float %7720 %float_0_5 + %9340 = OpFAdd %float %9339 %float_0_5 + %16236 = OpVectorShuffle %v3float %15527 %15527 0 1 2 + %22178 = OpFAdd %v3float %16236 %23400 + %15528 = OpVectorShuffle %v4float %15527 %22178 4 5 6 3 + %6435 = OpCompositeInsert %_struct_1017 %15528 %6434 0 + %24573 = OpVectorShuffle %v2float %1367 %1367 0 1 + %13208 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10346 = OpLoad %v4float %13208 + %8640 = OpVectorShuffle %v2float %10346 %10346 0 1 + %9199 = OpFMul %v2float %24573 %8640 + %18507 = OpFAdd %v2float %19927 %9199 + %7013 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21060 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13151 = OpExtInst %v2float %1 FClamp %18507 %7013 %21060 + %23586 = OpLoad %150 %5785 + %10341 = OpLoad %508 %5688 + %12153 = OpSampledImage %510 %23586 %10341 + %15373 = OpImageSampleExplicitLod %v4float %12153 %13151 Lod %float_0 + %15268 = OpCompositeExtract %float %15373 3 + %12118 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12974 = OpLoad %float %12118 + %15712 = OpFMul %float %15268 %12974 + %15281 = OpExtInst %float %1 FClamp %15712 %float_0 %float_1 + %22215 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11758 = OpLoad %v3float %22215 + %12105 = OpVectorTimesScalar %v3float %11758 %15281 + %15518 = OpLoad %150 %3312 + %24571 = OpLoad %508 %4646 + %12154 = OpSampledImage %510 %15518 %24571 + %17672 = OpImageSampleExplicitLod %v4float %12154 %13151 Lod %float_0 + %16940 = OpCompositeExtract %float %17672 1 + %14187 = OpFOrdGreaterThan %bool %16940 %float_0 + OpSelectionMerge %22309 DontFlatten + OpBranchConditional %14187 %12823 %22309 + %12823 = OpLabel + %13241 = OpLoad %150 %4862 + %19962 = OpLoad %508 %3594 + %12155 = OpSampledImage %510 %13241 %19962 + %15677 = OpImageSampleExplicitLod %v4float %12155 %13151 Lod %float_0 + %13868 = OpCompositeExtract %float %17672 1 + %12429 = OpCompositeExtract %float %17672 2 + %23302 = OpFMul %float %13868 %12429 + %17614 = OpExtInst %float %1 FClamp %23302 %float_0 %float_1 + %20293 = OpVectorShuffle %v3float %15677 %15677 0 1 2 + %11188 = OpVectorTimesScalar %v3float %20293 %17614 + %15295 = OpFAdd %v3float %12105 %11188 + OpBranch %22309 + %22309 = OpLabel + %7721 = OpPhi %v3float %12105 %22308 %15295 %12823 + %23401 = OpVectorTimesScalar %v3float %7721 %float_0_75 + %9341 = OpFAdd %float %9340 %float_0_75 + %16237 = OpVectorShuffle %v3float %15528 %15528 0 1 2 + %22179 = OpFAdd %v3float %16237 %23401 + %15529 = OpVectorShuffle %v4float %15528 %22179 4 5 6 3 + %6436 = OpCompositeInsert %_struct_1017 %15529 %6435 0 + %24574 = OpVectorShuffle %v2float %141 %141 0 1 + %13209 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10347 = OpLoad %v4float %13209 + %8641 = OpVectorShuffle %v2float %10347 %10347 0 1 + %9200 = OpFMul %v2float %24574 %8641 + %18508 = OpFAdd %v2float %19927 %9200 + %7014 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21061 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13152 = OpExtInst %v2float %1 
FClamp %18508 %7014 %21061 + %23587 = OpLoad %150 %5785 + %10342 = OpLoad %508 %5688 + %12156 = OpSampledImage %510 %23587 %10342 + %15374 = OpImageSampleExplicitLod %v4float %12156 %13152 Lod %float_0 + %15269 = OpCompositeExtract %float %15374 3 + %12119 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12975 = OpLoad %float %12119 + %15713 = OpFMul %float %15269 %12975 + %15282 = OpExtInst %float %1 FClamp %15713 %float_0 %float_1 + %22216 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11759 = OpLoad %v3float %22216 + %12106 = OpVectorTimesScalar %v3float %11759 %15282 + %15519 = OpLoad %150 %3312 + %24575 = OpLoad %508 %4646 + %12157 = OpSampledImage %510 %15519 %24575 + %17673 = OpImageSampleExplicitLod %v4float %12157 %13152 Lod %float_0 + %16941 = OpCompositeExtract %float %17673 1 + %14188 = OpFOrdGreaterThan %bool %16941 %float_0 + OpSelectionMerge %22310 DontFlatten + OpBranchConditional %14188 %12824 %22310 + %12824 = OpLabel + %13242 = OpLoad %150 %4862 + %19963 = OpLoad %508 %3594 + %12158 = OpSampledImage %510 %13242 %19963 + %15678 = OpImageSampleExplicitLod %v4float %12158 %13152 Lod %float_0 + %13869 = OpCompositeExtract %float %17673 1 + %12430 = OpCompositeExtract %float %17673 2 + %23303 = OpFMul %float %13869 %12430 + %17615 = OpExtInst %float %1 FClamp %23303 %float_0 %float_1 + %20294 = OpVectorShuffle %v3float %15678 %15678 0 1 2 + %11189 = OpVectorTimesScalar %v3float %20294 %17615 + %15296 = OpFAdd %v3float %12106 %11189 + OpBranch %22310 + %22310 = OpLabel + %7722 = OpPhi %v3float %12106 %22309 %15296 %12824 + %23402 = OpVectorTimesScalar %v3float %7722 %float_0_5 + %9342 = OpFAdd %float %9341 %float_0_5 + %16238 = OpVectorShuffle %v3float %15529 %15529 0 1 2 + %22180 = OpFAdd %v3float %16238 %23402 + %15530 = OpVectorShuffle %v4float %15529 %22180 4 5 6 3 + %6437 = OpCompositeInsert %_struct_1017 %15530 %6436 0 + %24576 = OpVectorShuffle %v2float %38 %38 0 1 + %13210 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10348 = OpLoad %v4float %13210 + %8642 = OpVectorShuffle %v2float %10348 %10348 0 1 + %9201 = OpFMul %v2float %24576 %8642 + %18509 = OpFAdd %v2float %19927 %9201 + %7015 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21062 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13153 = OpExtInst %v2float %1 FClamp %18509 %7015 %21062 + %23588 = OpLoad %150 %5785 + %10349 = OpLoad %508 %5688 + %12159 = OpSampledImage %510 %23588 %10349 + %15375 = OpImageSampleExplicitLod %v4float %12159 %13153 Lod %float_0 + %15270 = OpCompositeExtract %float %15375 3 + %12120 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12976 = OpLoad %float %12120 + %15714 = OpFMul %float %15270 %12976 + %15283 = OpExtInst %float %1 FClamp %15714 %float_0 %float_1 + %22217 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11760 = OpLoad %v3float %22217 + %12107 = OpVectorTimesScalar %v3float %11760 %15283 + %15520 = OpLoad %150 %3312 + %24577 = OpLoad %508 %4646 + %12160 = OpSampledImage %510 %15520 %24577 + %17674 = OpImageSampleExplicitLod %v4float %12160 %13153 Lod %float_0 + %16942 = OpCompositeExtract %float %17674 1 + %14189 = OpFOrdGreaterThan %bool %16942 %float_0 + OpSelectionMerge %22311 DontFlatten + OpBranchConditional %14189 %12825 %22311 + %12825 = OpLabel + %13243 = OpLoad %150 %4862 + %19964 = OpLoad %508 %3594 + %12161 = OpSampledImage %510 %13243 %19964 + %15679 = OpImageSampleExplicitLod %v4float %12161 %13153 Lod %float_0 + %13870 = OpCompositeExtract %float %17674 1 + %12431 = OpCompositeExtract %float %17674 2 + %23304 = OpFMul %float 
%13870 %12431 + %17616 = OpExtInst %float %1 FClamp %23304 %float_0 %float_1 + %20295 = OpVectorShuffle %v3float %15679 %15679 0 1 2 + %11190 = OpVectorTimesScalar %v3float %20295 %17616 + %15297 = OpFAdd %v3float %12107 %11190 + OpBranch %22311 + %22311 = OpLabel + %7723 = OpPhi %v3float %12107 %22310 %15297 %12825 + %23403 = OpVectorTimesScalar %v3float %7723 %float_0_5 + %9343 = OpFAdd %float %9342 %float_0_5 + %16239 = OpVectorShuffle %v3float %15530 %15530 0 1 2 + %22181 = OpFAdd %v3float %16239 %23403 + %15531 = OpVectorShuffle %v4float %15530 %22181 4 5 6 3 + %6438 = OpCompositeInsert %_struct_1017 %15531 %6437 0 + %24578 = OpVectorShuffle %v2float %95 %95 0 1 + %13211 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10350 = OpLoad %v4float %13211 + %8643 = OpVectorShuffle %v2float %10350 %10350 0 1 + %9202 = OpFMul %v2float %24578 %8643 + %18510 = OpFAdd %v2float %19927 %9202 + %7016 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21063 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13154 = OpExtInst %v2float %1 FClamp %18510 %7016 %21063 + %23589 = OpLoad %150 %5785 + %10351 = OpLoad %508 %5688 + %12162 = OpSampledImage %510 %23589 %10351 + %15376 = OpImageSampleExplicitLod %v4float %12162 %13154 Lod %float_0 + %15271 = OpCompositeExtract %float %15376 3 + %12121 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12977 = OpLoad %float %12121 + %15715 = OpFMul %float %15271 %12977 + %15284 = OpExtInst %float %1 FClamp %15715 %float_0 %float_1 + %22218 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11761 = OpLoad %v3float %22218 + %12108 = OpVectorTimesScalar %v3float %11761 %15284 + %15521 = OpLoad %150 %3312 + %24579 = OpLoad %508 %4646 + %12163 = OpSampledImage %510 %15521 %24579 + %17675 = OpImageSampleExplicitLod %v4float %12163 %13154 Lod %float_0 + %16943 = OpCompositeExtract %float %17675 1 + %14190 = OpFOrdGreaterThan %bool %16943 %float_0 + OpSelectionMerge %22312 DontFlatten + OpBranchConditional %14190 %12826 %22312 + %12826 = OpLabel + %13244 = OpLoad %150 %4862 + %19965 = OpLoad %508 %3594 + %12164 = OpSampledImage %510 %13244 %19965 + %15680 = OpImageSampleExplicitLod %v4float %12164 %13154 Lod %float_0 + %13871 = OpCompositeExtract %float %17675 1 + %12432 = OpCompositeExtract %float %17675 2 + %23305 = OpFMul %float %13871 %12432 + %17617 = OpExtInst %float %1 FClamp %23305 %float_0 %float_1 + %20296 = OpVectorShuffle %v3float %15680 %15680 0 1 2 + %11191 = OpVectorTimesScalar %v3float %20296 %17617 + %15298 = OpFAdd %v3float %12108 %11191 + OpBranch %22312 + %22312 = OpLabel + %7724 = OpPhi %v3float %12108 %22311 %15298 %12826 + %23404 = OpVectorTimesScalar %v3float %7724 %float_0_75 + %9344 = OpFAdd %float %9343 %float_0_75 + %16240 = OpVectorShuffle %v3float %15531 %15531 0 1 2 + %22182 = OpFAdd %v3float %16240 %23404 + %15532 = OpVectorShuffle %v4float %15531 %22182 4 5 6 3 + %6439 = OpCompositeInsert %_struct_1017 %15532 %6438 0 + %24580 = OpVectorShuffle %v2float %626 %626 0 1 + %13212 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10352 = OpLoad %v4float %13212 + %8644 = OpVectorShuffle %v2float %10352 %10352 0 1 + %9203 = OpFMul %v2float %24580 %8644 + %18511 = OpFAdd %v2float %19927 %9203 + %7017 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21064 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13155 = OpExtInst %v2float %1 FClamp %18511 %7017 %21064 + %23590 = OpLoad %150 %5785 + %10353 = OpLoad %508 %5688 + %12165 = OpSampledImage %510 %23590 %10353 + %15377 = OpImageSampleExplicitLod %v4float %12165 %13155 Lod %float_0 + %15272 
= OpCompositeExtract %float %15377 3 + %12122 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12978 = OpLoad %float %12122 + %15716 = OpFMul %float %15272 %12978 + %15285 = OpExtInst %float %1 FClamp %15716 %float_0 %float_1 + %22219 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11762 = OpLoad %v3float %22219 + %12109 = OpVectorTimesScalar %v3float %11762 %15285 + %15522 = OpLoad %150 %3312 + %24581 = OpLoad %508 %4646 + %12166 = OpSampledImage %510 %15522 %24581 + %17676 = OpImageSampleExplicitLod %v4float %12166 %13155 Lod %float_0 + %16944 = OpCompositeExtract %float %17676 1 + %14191 = OpFOrdGreaterThan %bool %16944 %float_0 + OpSelectionMerge %22313 DontFlatten + OpBranchConditional %14191 %12827 %22313 + %12827 = OpLabel + %13245 = OpLoad %150 %4862 + %19966 = OpLoad %508 %3594 + %12167 = OpSampledImage %510 %13245 %19966 + %15681 = OpImageSampleExplicitLod %v4float %12167 %13155 Lod %float_0 + %13872 = OpCompositeExtract %float %17676 1 + %12433 = OpCompositeExtract %float %17676 2 + %23306 = OpFMul %float %13872 %12433 + %17618 = OpExtInst %float %1 FClamp %23306 %float_0 %float_1 + %20297 = OpVectorShuffle %v3float %15681 %15681 0 1 2 + %11192 = OpVectorTimesScalar %v3float %20297 %17618 + %15299 = OpFAdd %v3float %12109 %11192 + OpBranch %22313 + %22313 = OpLabel + %7725 = OpPhi %v3float %12109 %22312 %15299 %12827 + %23405 = OpVectorTimesScalar %v3float %7725 %float_1 + %9345 = OpFAdd %float %9344 %float_1 + %16241 = OpVectorShuffle %v3float %15532 %15532 0 1 2 + %22183 = OpFAdd %v3float %16241 %23405 + %15533 = OpVectorShuffle %v4float %15532 %22183 4 5 6 3 + %6440 = OpCompositeInsert %_struct_1017 %15533 %6439 0 + %24582 = OpVectorShuffle %v2float %2411 %2411 0 1 + %13213 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10354 = OpLoad %v4float %13213 + %8645 = OpVectorShuffle %v2float %10354 %10354 0 1 + %9204 = OpFMul %v2float %24582 %8645 + %18512 = OpFAdd %v2float %19927 %9204 + %7018 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21065 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13156 = OpExtInst %v2float %1 FClamp %18512 %7018 %21065 + %23591 = OpLoad %150 %5785 + %10355 = OpLoad %508 %5688 + %12168 = OpSampledImage %510 %23591 %10355 + %15378 = OpImageSampleExplicitLod %v4float %12168 %13156 Lod %float_0 + %15273 = OpCompositeExtract %float %15378 3 + %12123 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12979 = OpLoad %float %12123 + %15717 = OpFMul %float %15273 %12979 + %15286 = OpExtInst %float %1 FClamp %15717 %float_0 %float_1 + %22220 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11763 = OpLoad %v3float %22220 + %12110 = OpVectorTimesScalar %v3float %11763 %15286 + %15523 = OpLoad %150 %3312 + %24583 = OpLoad %508 %4646 + %12169 = OpSampledImage %510 %15523 %24583 + %17677 = OpImageSampleExplicitLod %v4float %12169 %13156 Lod %float_0 + %16945 = OpCompositeExtract %float %17677 1 + %14192 = OpFOrdGreaterThan %bool %16945 %float_0 + OpSelectionMerge %22314 DontFlatten + OpBranchConditional %14192 %12828 %22314 + %12828 = OpLabel + %13246 = OpLoad %150 %4862 + %19967 = OpLoad %508 %3594 + %12170 = OpSampledImage %510 %13246 %19967 + %15682 = OpImageSampleExplicitLod %v4float %12170 %13156 Lod %float_0 + %13873 = OpCompositeExtract %float %17677 1 + %12434 = OpCompositeExtract %float %17677 2 + %23307 = OpFMul %float %13873 %12434 + %17619 = OpExtInst %float %1 FClamp %23307 %float_0 %float_1 + %20298 = OpVectorShuffle %v3float %15682 %15682 0 1 2 + %11193 = OpVectorTimesScalar %v3float %20298 %17619 + %15300 = OpFAdd %v3float 
%12110 %11193 + OpBranch %22314 + %22314 = OpLabel + %7726 = OpPhi %v3float %12110 %22313 %15300 %12828 + %23406 = OpVectorTimesScalar %v3float %7726 %float_0_75 + %9346 = OpFAdd %float %9345 %float_0_75 + %16242 = OpVectorShuffle %v3float %15533 %15533 0 1 2 + %22184 = OpFAdd %v3float %16242 %23406 + %15534 = OpVectorShuffle %v4float %15533 %22184 4 5 6 3 + %6441 = OpCompositeInsert %_struct_1017 %15534 %6440 0 + %24584 = OpVectorShuffle %v2float %2354 %2354 0 1 + %13214 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10356 = OpLoad %v4float %13214 + %8646 = OpVectorShuffle %v2float %10356 %10356 0 1 + %9205 = OpFMul %v2float %24584 %8646 + %18513 = OpFAdd %v2float %19927 %9205 + %7019 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21066 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13157 = OpExtInst %v2float %1 FClamp %18513 %7019 %21066 + %23592 = OpLoad %150 %5785 + %10357 = OpLoad %508 %5688 + %12171 = OpSampledImage %510 %23592 %10357 + %15379 = OpImageSampleExplicitLod %v4float %12171 %13157 Lod %float_0 + %15274 = OpCompositeExtract %float %15379 3 + %12124 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12980 = OpLoad %float %12124 + %15718 = OpFMul %float %15274 %12980 + %15287 = OpExtInst %float %1 FClamp %15718 %float_0 %float_1 + %22221 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11764 = OpLoad %v3float %22221 + %12111 = OpVectorTimesScalar %v3float %11764 %15287 + %15524 = OpLoad %150 %3312 + %24585 = OpLoad %508 %4646 + %12172 = OpSampledImage %510 %15524 %24585 + %17678 = OpImageSampleExplicitLod %v4float %12172 %13157 Lod %float_0 + %16946 = OpCompositeExtract %float %17678 1 + %14193 = OpFOrdGreaterThan %bool %16946 %float_0 + OpSelectionMerge %22315 DontFlatten + OpBranchConditional %14193 %12829 %22315 + %12829 = OpLabel + %13247 = OpLoad %150 %4862 + %19968 = OpLoad %508 %3594 + %12173 = OpSampledImage %510 %13247 %19968 + %15683 = OpImageSampleExplicitLod %v4float %12173 %13157 Lod %float_0 + %13874 = OpCompositeExtract %float %17678 1 + %12435 = OpCompositeExtract %float %17678 2 + %23308 = OpFMul %float %13874 %12435 + %17620 = OpExtInst %float %1 FClamp %23308 %float_0 %float_1 + %20299 = OpVectorShuffle %v3float %15683 %15683 0 1 2 + %11194 = OpVectorTimesScalar %v3float %20299 %17620 + %15301 = OpFAdd %v3float %12111 %11194 + OpBranch %22315 + %22315 = OpLabel + %7727 = OpPhi %v3float %12111 %22314 %15301 %12829 + %23407 = OpVectorTimesScalar %v3float %7727 %float_0_5 + %9347 = OpFAdd %float %9346 %float_0_5 + %16243 = OpVectorShuffle %v3float %15534 %15534 0 1 2 + %22185 = OpFAdd %v3float %16243 %23407 + %15535 = OpVectorShuffle %v4float %15534 %22185 4 5 6 3 + %6442 = OpCompositeInsert %_struct_1017 %15535 %6441 0 + %24586 = OpVectorShuffle %v2float %837 %837 0 1 + %13215 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10358 = OpLoad %v4float %13215 + %8647 = OpVectorShuffle %v2float %10358 %10358 0 1 + %9206 = OpFMul %v2float %24586 %8647 + %18514 = OpFAdd %v2float %19927 %9206 + %7020 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21067 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13158 = OpExtInst %v2float %1 FClamp %18514 %7020 %21067 + %23593 = OpLoad %150 %5785 + %10359 = OpLoad %508 %5688 + %12174 = OpSampledImage %510 %23593 %10359 + %15380 = OpImageSampleExplicitLod %v4float %12174 %13158 Lod %float_0 + %15275 = OpCompositeExtract %float %15380 3 + %12125 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12981 = OpLoad %float %12125 + %15719 = OpFMul %float %15275 %12981 + %15288 = OpExtInst %float %1 FClamp 
%15719 %float_0 %float_1 + %22222 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11765 = OpLoad %v3float %22222 + %12112 = OpVectorTimesScalar %v3float %11765 %15288 + %15525 = OpLoad %150 %3312 + %24587 = OpLoad %508 %4646 + %12175 = OpSampledImage %510 %15525 %24587 + %17679 = OpImageSampleExplicitLod %v4float %12175 %13158 Lod %float_0 + %16947 = OpCompositeExtract %float %17679 1 + %14194 = OpFOrdGreaterThan %bool %16947 %float_0 + OpSelectionMerge %22316 DontFlatten + OpBranchConditional %14194 %12830 %22316 + %12830 = OpLabel + %13248 = OpLoad %150 %4862 + %19969 = OpLoad %508 %3594 + %12176 = OpSampledImage %510 %13248 %19969 + %15684 = OpImageSampleExplicitLod %v4float %12176 %13158 Lod %float_0 + %13875 = OpCompositeExtract %float %17679 1 + %12436 = OpCompositeExtract %float %17679 2 + %23309 = OpFMul %float %13875 %12436 + %17621 = OpExtInst %float %1 FClamp %23309 %float_0 %float_1 + %20300 = OpVectorShuffle %v3float %15684 %15684 0 1 2 + %11195 = OpVectorTimesScalar %v3float %20300 %17621 + %15302 = OpFAdd %v3float %12112 %11195 + OpBranch %22316 + %22316 = OpLabel + %7728 = OpPhi %v3float %12112 %22315 %15302 %12830 + %23408 = OpVectorTimesScalar %v3float %7728 %float_0_5 + %9348 = OpFAdd %float %9347 %float_0_5 + %16244 = OpVectorShuffle %v3float %15535 %15535 0 1 2 + %22186 = OpFAdd %v3float %16244 %23408 + %15536 = OpVectorShuffle %v4float %15535 %22186 4 5 6 3 + %6443 = OpCompositeInsert %_struct_1017 %15536 %6442 0 + %24588 = OpVectorShuffle %v2float %1368 %1368 0 1 + %13216 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10360 = OpLoad %v4float %13216 + %8648 = OpVectorShuffle %v2float %10360 %10360 0 1 + %9207 = OpFMul %v2float %24588 %8648 + %18515 = OpFAdd %v2float %19927 %9207 + %7021 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21068 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13159 = OpExtInst %v2float %1 FClamp %18515 %7021 %21068 + %23594 = OpLoad %150 %5785 + %10361 = OpLoad %508 %5688 + %12177 = OpSampledImage %510 %23594 %10361 + %15381 = OpImageSampleExplicitLod %v4float %12177 %13159 Lod %float_0 + %15276 = OpCompositeExtract %float %15381 3 + %12126 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12982 = OpLoad %float %12126 + %15720 = OpFMul %float %15276 %12982 + %15289 = OpExtInst %float %1 FClamp %15720 %float_0 %float_1 + %22223 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11766 = OpLoad %v3float %22223 + %12113 = OpVectorTimesScalar %v3float %11766 %15289 + %15526 = OpLoad %150 %3312 + %24589 = OpLoad %508 %4646 + %12178 = OpSampledImage %510 %15526 %24589 + %17680 = OpImageSampleExplicitLod %v4float %12178 %13159 Lod %float_0 + %16948 = OpCompositeExtract %float %17680 1 + %14195 = OpFOrdGreaterThan %bool %16948 %float_0 + OpSelectionMerge %22317 DontFlatten + OpBranchConditional %14195 %12831 %22317 + %12831 = OpLabel + %13249 = OpLoad %150 %4862 + %19970 = OpLoad %508 %3594 + %12179 = OpSampledImage %510 %13249 %19970 + %15685 = OpImageSampleExplicitLod %v4float %12179 %13159 Lod %float_0 + %13876 = OpCompositeExtract %float %17680 1 + %12437 = OpCompositeExtract %float %17680 2 + %23310 = OpFMul %float %13876 %12437 + %17622 = OpExtInst %float %1 FClamp %23310 %float_0 %float_1 + %20301 = OpVectorShuffle %v3float %15685 %15685 0 1 2 + %11196 = OpVectorTimesScalar %v3float %20301 %17622 + %15303 = OpFAdd %v3float %12113 %11196 + OpBranch %22317 + %22317 = OpLabel + %7729 = OpPhi %v3float %12113 %22316 %15303 %12831 + %23409 = OpVectorTimesScalar %v3float %7729 %float_0_75 + %9349 = OpFAdd %float %9348 
%float_0_75 + %16245 = OpVectorShuffle %v3float %15536 %15536 0 1 2 + %22187 = OpFAdd %v3float %16245 %23409 + %15537 = OpVectorShuffle %v4float %15536 %22187 4 5 6 3 + %6444 = OpCompositeInsert %_struct_1017 %15537 %6443 0 + %24590 = OpVectorShuffle %v2float %142 %142 0 1 + %13217 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10362 = OpLoad %v4float %13217 + %8649 = OpVectorShuffle %v2float %10362 %10362 0 1 + %9208 = OpFMul %v2float %24590 %8649 + %18516 = OpFAdd %v2float %19927 %9208 + %7022 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21069 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13160 = OpExtInst %v2float %1 FClamp %18516 %7022 %21069 + %23595 = OpLoad %150 %5785 + %10363 = OpLoad %508 %5688 + %12180 = OpSampledImage %510 %23595 %10363 + %15382 = OpImageSampleExplicitLod %v4float %12180 %13160 Lod %float_0 + %15277 = OpCompositeExtract %float %15382 3 + %12127 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12983 = OpLoad %float %12127 + %15721 = OpFMul %float %15277 %12983 + %15290 = OpExtInst %float %1 FClamp %15721 %float_0 %float_1 + %22224 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11767 = OpLoad %v3float %22224 + %12114 = OpVectorTimesScalar %v3float %11767 %15290 + %15538 = OpLoad %150 %3312 + %24591 = OpLoad %508 %4646 + %12181 = OpSampledImage %510 %15538 %24591 + %17681 = OpImageSampleExplicitLod %v4float %12181 %13160 Lod %float_0 + %16949 = OpCompositeExtract %float %17681 1 + %14196 = OpFOrdGreaterThan %bool %16949 %float_0 + OpSelectionMerge %22318 DontFlatten + OpBranchConditional %14196 %12832 %22318 + %12832 = OpLabel + %13250 = OpLoad %150 %4862 + %19971 = OpLoad %508 %3594 + %12182 = OpSampledImage %510 %13250 %19971 + %15686 = OpImageSampleExplicitLod %v4float %12182 %13160 Lod %float_0 + %13877 = OpCompositeExtract %float %17681 1 + %12438 = OpCompositeExtract %float %17681 2 + %23311 = OpFMul %float %13877 %12438 + %17623 = OpExtInst %float %1 FClamp %23311 %float_0 %float_1 + %20302 = OpVectorShuffle %v3float %15686 %15686 0 1 2 + %11197 = OpVectorTimesScalar %v3float %20302 %17623 + %15304 = OpFAdd %v3float %12114 %11197 + OpBranch %22318 + %22318 = OpLabel + %7730 = OpPhi %v3float %12114 %22317 %15304 %12832 + %23410 = OpVectorTimesScalar %v3float %7730 %float_0_5 + %9350 = OpFAdd %float %9349 %float_0_5 + %16246 = OpVectorShuffle %v3float %15537 %15537 0 1 2 + %22188 = OpFAdd %v3float %16246 %23410 + %15539 = OpVectorShuffle %v4float %15537 %22188 4 5 6 3 + %6445 = OpCompositeInsert %_struct_1017 %15539 %6444 0 + %24592 = OpVectorShuffle %v2float %1197 %1197 0 1 + %13218 = OpAccessChain %_ptr_Uniform_v4float %22044 %int_0 + %10364 = OpLoad %v4float %13218 + %8650 = OpVectorShuffle %v2float %10364 %10364 0 1 + %9209 = OpFMul %v2float %24592 %8650 + %18517 = OpFAdd %v2float %19927 %9209 + %7023 = OpVectorShuffle %v2float %17581 %17581 0 1 + %21070 = OpVectorShuffle %v2float %17581 %17581 2 3 + %13161 = OpExtInst %v2float %1 FClamp %18517 %7023 %21070 + %23596 = OpLoad %150 %5785 + %10365 = OpLoad %508 %5688 + %12183 = OpSampledImage %510 %23596 %10365 + %15383 = OpImageSampleExplicitLod %v4float %12183 %13161 Lod %float_0 + %15278 = OpCompositeExtract %float %15383 3 + %12128 = OpAccessChain %_ptr_Uniform_float %22044 %int_1 + %12984 = OpLoad %float %12128 + %15722 = OpFMul %float %15278 %12984 + %15291 = OpExtInst %float %1 FClamp %15722 %float_0 %float_1 + %22225 = OpAccessChain %_ptr_Uniform_v3float %12348 %int_5 + %11768 = OpLoad %v3float %22225 + %12115 = OpVectorTimesScalar %v3float %11768 %15291 + %15540 = OpLoad %150 
%3312 + %24593 = OpLoad %508 %4646 + %12184 = OpSampledImage %510 %15540 %24593 + %17682 = OpImageSampleExplicitLod %v4float %12184 %13161 Lod %float_0 + %16950 = OpCompositeExtract %float %17682 1 + %14197 = OpFOrdGreaterThan %bool %16950 %float_0 + OpSelectionMerge %22319 DontFlatten + OpBranchConditional %14197 %12833 %22319 + %12833 = OpLabel + %13251 = OpLoad %150 %4862 + %19972 = OpLoad %508 %3594 + %12185 = OpSampledImage %510 %13251 %19972 + %15687 = OpImageSampleExplicitLod %v4float %12185 %13161 Lod %float_0 + %13878 = OpCompositeExtract %float %17682 1 + %12439 = OpCompositeExtract %float %17682 2 + %23312 = OpFMul %float %13878 %12439 + %17624 = OpExtInst %float %1 FClamp %23312 %float_0 %float_1 + %20303 = OpVectorShuffle %v3float %15687 %15687 0 1 2 + %11198 = OpVectorTimesScalar %v3float %20303 %17624 + %15305 = OpFAdd %v3float %12115 %11198 + OpBranch %22319 + %22319 = OpLabel + %7731 = OpPhi %v3float %12115 %22318 %15305 %12833 + %23411 = OpVectorTimesScalar %v3float %7731 %float_0_5 + %9351 = OpFAdd %float %9350 %float_0_5 + %16247 = OpVectorShuffle %v3float %15539 %15539 0 1 2 + %22189 = OpFAdd %v3float %16247 %23411 + %15541 = OpVectorShuffle %v4float %15539 %22189 4 5 6 3 + %6719 = OpCompositeInsert %_struct_1017 %15541 %6445 0 + %23412 = OpVectorShuffle %v3float %15541 %15541 0 1 2 + %10833 = OpCompositeConstruct %v3float %9351 %9351 %9351 + %13750 = OpFDiv %v3float %23412 %10833 + %24033 = OpVectorShuffle %v4float %15541 %13750 4 5 6 3 + %8636 = OpCompositeInsert %_struct_1017 %24033 %6719 0 + %16315 = OpCompositeInsert %_struct_1017 %float_1 %8636 0 3 + %11544 = OpCompositeExtract %v4float %16315 0 + OpStore %4317 %11544 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/geom/block-name-namespace.asm.geom b/3rdparty/spirv-cross/shaders/asm/geom/block-name-namespace.asm.geom new file mode 100644 index 000000000..b3744a303 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/geom/block-name-namespace.asm.geom @@ -0,0 +1,103 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 48 +; Schema: 0 + OpCapability Geometry + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Geometry %main "main" %_ %VertexOutput %vin + OpExecutionMode %main Triangles + OpExecutionMode %main Invocations 1 + OpExecutionMode %main OutputTriangleStrip + OpExecutionMode %main OutputVertices 4 + OpSource GLSL 450 + OpName %main "main" + OpName %VertexInput3 "VertexInput" + OpName %gl_PerVertex "gl_PerVertex" + OpMemberName %gl_PerVertex 0 "gl_Position" + OpMemberName %gl_PerVertex 1 "gl_PointSize" + OpMemberName %gl_PerVertex 2 "gl_ClipDistance" + OpMemberName %gl_PerVertex 3 "gl_CullDistance" + OpName %_ "" + OpName %VertexInput "VertexInput" + OpMemberName %VertexInput 0 "a" + OpName %VertexInput4 "VertexInput" + OpName %VertexInput_0 "VertexInput" + OpMemberName %VertexInput_0 0 "b" + OpName %VertexInput2 "VertexInput" + OpName %VertexInput_1 "VertexInput" + OpMemberName %VertexInput_1 0 "vColor" + OpName %VertexOutput "VertexInput" + OpName %VertexInput_2 "VertexInput" + OpMemberName %VertexInput_2 0 "vColor" + OpName %vin "vin" + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance + OpDecorate %gl_PerVertex Block + OpMemberDecorate %VertexInput 0 Offset 0 + OpDecorate %VertexInput Block + OpDecorate %VertexInput4 DescriptorSet 0 + OpDecorate 
%VertexInput4 Binding 0 + OpMemberDecorate %VertexInput_0 0 Offset 0 + OpDecorate %VertexInput_0 BufferBlock + OpDecorate %VertexInput2 DescriptorSet 0 + OpDecorate %VertexInput2 Binding 0 + OpDecorate %VertexInput_1 Block + OpDecorate %VertexOutput Location 0 + OpDecorate %VertexInput_2 Block + OpDecorate %vin Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %float_1 = OpConstant %float 1 + %11 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex + %_ = OpVariable %_ptr_Output_gl_PerVertex Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%VertexInput = OpTypeStruct %v4float +%_ptr_Uniform_VertexInput = OpTypePointer Uniform %VertexInput +%VertexInput4 = OpVariable %_ptr_Uniform_VertexInput Uniform +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float +%VertexInput_0 = OpTypeStruct %v4float +%_ptr_Uniform_VertexInput_0 = OpTypePointer Uniform %VertexInput_0 +%VertexInput2 = OpVariable %_ptr_Uniform_VertexInput_0 Uniform +%_ptr_Output_v4float = OpTypePointer Output %v4float +%VertexInput_1 = OpTypeStruct %v4float +%_ptr_Output_VertexInput_1 = OpTypePointer Output %VertexInput_1 +%VertexOutput = OpVariable %_ptr_Output_VertexInput_1 Output +%VertexInput_2 = OpTypeStruct %v4float + %uint_3 = OpConstant %uint 3 +%_arr_VertexInput_2_uint_3 = OpTypeArray %VertexInput_2 %uint_3 +%_ptr_Input__arr_VertexInput_2_uint_3 = OpTypePointer Input %_arr_VertexInput_2_uint_3 + %vin = OpVariable %_ptr_Input__arr_VertexInput_2_uint_3 Input +%_ptr_Input_v4float = OpTypePointer Input %v4float + %main = OpFunction %void None %3 + %5 = OpLabel +%VertexInput3 = OpVariable %_ptr_Function_v4float Function + OpStore %VertexInput3 %11 + %20 = OpLoad %v4float %VertexInput3 + %25 = OpAccessChain %_ptr_Uniform_v4float %VertexInput4 %int_0 + %26 = OpLoad %v4float %25 + %27 = OpFAdd %v4float %20 %26 + %31 = OpAccessChain %_ptr_Uniform_v4float %VertexInput2 %int_0 + %32 = OpLoad %v4float %31 + %33 = OpFAdd %v4float %27 %32 + %35 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %35 %33 + %45 = OpAccessChain %_ptr_Input_v4float %vin %int_0 %int_0 + %46 = OpLoad %v4float %45 + %47 = OpAccessChain %_ptr_Output_v4float %VertexOutput %int_0 + OpStore %47 %46 + OpEmitVertex + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/geom/inout-split-access-chain-handle.asm.geom b/3rdparty/spirv-cross/shaders/asm/geom/inout-split-access-chain-handle.asm.geom new file mode 100644 index 000000000..d011cc696 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/geom/inout-split-access-chain-handle.asm.geom @@ -0,0 +1,90 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 42 +; Schema: 0 + OpCapability Geometry + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Geometry %main "main" %gl_in %_ + OpExecutionMode %main Triangles + OpExecutionMode %main Invocations 1 + OpExecutionMode %main OutputTriangleStrip + OpExecutionMode %main OutputVertices 5 + OpSource GLSL 440 + OpName %main "main" + OpName %Data "Data" + OpMemberName %Data 0 "ApiPerspectivePosition" + OpName %Copy_struct_Data_vf41_3__ "Copy(struct-Data-vf41[3];" + OpName 
%inputStream "inputStream" + OpName %gl_PerVertex "gl_PerVertex" + OpMemberName %gl_PerVertex 0 "gl_Position" + OpMemberName %gl_PerVertex 1 "gl_PointSize" + OpMemberName %gl_PerVertex 2 "gl_ClipDistance" + OpName %gl_in "gl_in" + OpName %inputStream_0 "inputStream" + OpName %param "param" + OpName %gl_PerVertex_0 "gl_PerVertex" + OpMemberName %gl_PerVertex_0 0 "gl_Position" + OpMemberName %gl_PerVertex_0 1 "gl_PointSize" + OpMemberName %gl_PerVertex_0 2 "gl_ClipDistance" + OpName %_ "" + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpDecorate %gl_PerVertex Block + OpMemberDecorate %gl_PerVertex_0 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex_0 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex_0 2 BuiltIn ClipDistance + OpDecorate %gl_PerVertex_0 Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %Data = OpTypeStruct %v4float + %uint = OpTypeInt 32 0 + %uint_3 = OpConstant %uint 3 +%_arr_Data_uint_3 = OpTypeArray %Data %uint_3 +%_ptr_Function__Data = OpTypePointer Function %Data +%_ptr_Function__arr_Data_uint_3 = OpTypePointer Function %_arr_Data_uint_3 + %13 = OpTypeFunction %void %_ptr_Function__arr_Data_uint_3 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 +%_arr_gl_PerVertex_uint_3 = OpTypeArray %gl_PerVertex %uint_3 +%_ptr_Input__arr_gl_PerVertex_uint_3 = OpTypePointer Input %_arr_gl_PerVertex_uint_3 + %gl_in = OpVariable %_ptr_Input__arr_gl_PerVertex_uint_3 Input +%_ptr_Input_v4float = OpTypePointer Input %v4float +%_ptr_Function_v4float = OpTypePointer Function %v4float +%gl_PerVertex_0 = OpTypeStruct %v4float %float %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex_0 = OpTypePointer Output %gl_PerVertex_0 + %_ = OpVariable %_ptr_Output_gl_PerVertex_0 Output +%_ptr_Output_v4float = OpTypePointer Output %v4float + %main = OpFunction %void None %3 + %5 = OpLabel +%inputStream_0 = OpVariable %_ptr_Function__arr_Data_uint_3 Function + %param = OpVariable %_ptr_Function__arr_Data_uint_3 Function + %32 = OpLoad %_arr_Data_uint_3 %inputStream_0 + OpStore %param %32 + %33 = OpFunctionCall %void %Copy_struct_Data_vf41_3__ %param + %34 = OpLoad %_arr_Data_uint_3 %param + OpStore %inputStream_0 %34 + %59 = OpAccessChain %_ptr_Function__Data %inputStream_0 %int_0 + %38 = OpAccessChain %_ptr_Function_v4float %59 %int_0 + %39 = OpLoad %v4float %38 + %41 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %41 %39 + OpReturn + OpFunctionEnd +%Copy_struct_Data_vf41_3__ = OpFunction %void None %13 +%inputStream = OpFunctionParameter %_ptr_Function__arr_Data_uint_3 + %16 = OpLabel + %26 = OpAccessChain %_ptr_Input_v4float %gl_in %int_0 %int_0 + %27 = OpLoad %v4float %26 + %28 = OpAccessChain %_ptr_Function__Data %inputStream %int_0 + %29 = OpAccessChain %_ptr_Function_v4float %28 %int_0 + OpStore %29 %27 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/geom/split-access-chain-input.asm.geom b/3rdparty/spirv-cross/shaders/asm/geom/split-access-chain-input.asm.geom new file mode 100644 index 000000000..5e477fa33 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/geom/split-access-chain-input.asm.geom @@ -0,0 +1,52 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 23 +; Schema: 0 + OpCapability 
Geometry + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Geometry %main "main" %gl_in %position + OpExecutionMode %main Triangles + OpExecutionMode %main Invocations 1 + OpExecutionMode %main OutputTriangleStrip + OpExecutionMode %main OutputVertices 3 + OpSource GLSL 440 + OpName %main "main" + OpName %position "position" + OpName %gl_PerVertex "gl_PerVertex" + OpMemberName %gl_PerVertex 0 "gl_Position" + OpMemberName %gl_PerVertex 1 "gl_PointSize" + OpMemberName %gl_PerVertex 2 "gl_ClipDistance" + OpName %gl_in "gl_in" + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpDecorate %gl_PerVertex Block + OpDecorate %position BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%_ptr_Function_v4float = OpTypePointer Output %v4float + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 + %uint_3 = OpConstant %uint 3 +%_arr_gl_PerVertex_uint_3 = OpTypeArray %gl_PerVertex %uint_3 +%ptr_Input_gl_PerVertex = OpTypePointer Input %gl_PerVertex +%_ptr_Input__arr_gl_PerVertex_uint_3 = OpTypePointer Input %_arr_gl_PerVertex_uint_3 + %gl_in = OpVariable %_ptr_Input__arr_gl_PerVertex_uint_3 Input + %position = OpVariable %_ptr_Function_v4float Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Input_v4float = OpTypePointer Input %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %21 = OpAccessChain %ptr_Input_gl_PerVertex %gl_in %int_0 + %22 = OpAccessChain %_ptr_Input_v4float %21 %int_0 + %23 = OpLoad %v4float %22 + OpStore %position %23 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/geom/store-uint-layer.invalid.asm.geom b/3rdparty/spirv-cross/shaders/asm/geom/store-uint-layer.invalid.asm.geom new file mode 100644 index 000000000..550fc4e99 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/geom/store-uint-layer.invalid.asm.geom @@ -0,0 +1,130 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 74 +; Schema: 0 + OpCapability Geometry + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Geometry %main "main" %stream_pos %stream_layer %input_pos + OpExecutionMode %main Triangles + OpExecutionMode %main Invocations 1 + OpExecutionMode %main OutputTriangleStrip + OpExecutionMode %main OutputVertices 3 + OpSource HLSL 500 + OpName %main "main" + OpName %VertexOutput "VertexOutput" + OpMemberName %VertexOutput 0 "pos" + OpName %GeometryOutput "GeometryOutput" + OpMemberName %GeometryOutput 0 "pos" + OpMemberName %GeometryOutput 1 "layer" + OpName %_main_struct_VertexOutput_vf41_3__struct_GeometryOutput_vf4_u11_ "@main(struct-VertexOutput-vf41[3];struct-GeometryOutput-vf4-u11;" + OpName %input "input" + OpName %stream "stream" + OpName %output "output" + OpName %v "v" + OpName %stream_pos "stream.pos" + OpName %stream_layer "stream.layer" + OpName %input_0 "input" + OpName %input_pos "input.pos" + OpName %stream_0 "stream" + OpName %param "param" + OpName %param_0 "param" + OpDecorate %stream_pos BuiltIn Position + OpDecorate %stream_layer BuiltIn Layer + OpDecorate %input_pos BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 +%VertexOutput = OpTypeStruct %v4float + %uint = 
OpTypeInt 32 0 + %uint_3 = OpConstant %uint 3 +%_arr_VertexOutput_uint_3 = OpTypeArray %VertexOutput %uint_3 +%_ptr_Function__arr_VertexOutput_uint_3 = OpTypePointer Function %_arr_VertexOutput_uint_3 +%GeometryOutput = OpTypeStruct %v4float %uint +%_ptr_Function_GeometryOutput = OpTypePointer Function %GeometryOutput + %15 = OpTypeFunction %void %_ptr_Function__arr_VertexOutput_uint_3 %_ptr_Function_GeometryOutput + %int = OpTypeInt 32 1 + %int_1 = OpConstant %int 1 + %uint_1 = OpConstant %uint 1 +%_ptr_Function_uint = OpTypePointer Function %uint +%_ptr_Function_int = OpTypePointer Function %int + %int_0 = OpConstant %int 0 + %int_3 = OpConstant %int 3 + %bool = OpTypeBool +%_ptr_Function_v4float = OpTypePointer Function %v4float +%_ptr_Output_v4float = OpTypePointer Output %v4float + %stream_pos = OpVariable %_ptr_Output_v4float Output +%_ptr_Output_uint = OpTypePointer Output %uint +%stream_layer = OpVariable %_ptr_Output_uint Output +%_arr_v4float_uint_3 = OpTypeArray %v4float %uint_3 +%_ptr_Input__arr_v4float_uint_3 = OpTypePointer Input %_arr_v4float_uint_3 + %input_pos = OpVariable %_ptr_Input__arr_v4float_uint_3 Input +%_ptr_Input_v4float = OpTypePointer Input %v4float + %int_2 = OpConstant %int 2 + %main = OpFunction %void None %3 + %5 = OpLabel + %input_0 = OpVariable %_ptr_Function__arr_VertexOutput_uint_3 Function + %stream_0 = OpVariable %_ptr_Function_GeometryOutput Function + %param = OpVariable %_ptr_Function__arr_VertexOutput_uint_3 Function + %param_0 = OpVariable %_ptr_Function_GeometryOutput Function + %58 = OpAccessChain %_ptr_Input_v4float %input_pos %int_0 + %59 = OpLoad %v4float %58 + %60 = OpAccessChain %_ptr_Function_v4float %input_0 %int_0 %int_0 + OpStore %60 %59 + %61 = OpAccessChain %_ptr_Input_v4float %input_pos %int_1 + %62 = OpLoad %v4float %61 + %63 = OpAccessChain %_ptr_Function_v4float %input_0 %int_1 %int_0 + OpStore %63 %62 + %65 = OpAccessChain %_ptr_Input_v4float %input_pos %int_2 + %66 = OpLoad %v4float %65 + %67 = OpAccessChain %_ptr_Function_v4float %input_0 %int_2 %int_0 + OpStore %67 %66 + %70 = OpLoad %_arr_VertexOutput_uint_3 %input_0 + OpStore %param %70 + %72 = OpFunctionCall %void %_main_struct_VertexOutput_vf41_3__struct_GeometryOutput_vf4_u11_ %param %param_0 + %73 = OpLoad %GeometryOutput %param_0 + OpStore %stream_0 %73 + OpReturn + OpFunctionEnd +%_main_struct_VertexOutput_vf41_3__struct_GeometryOutput_vf4_u11_ = OpFunction %void None %15 + %input = OpFunctionParameter %_ptr_Function__arr_VertexOutput_uint_3 + %stream = OpFunctionParameter %_ptr_Function_GeometryOutput + %19 = OpLabel + %output = OpVariable %_ptr_Function_GeometryOutput Function + %v = OpVariable %_ptr_Function_int Function + %25 = OpAccessChain %_ptr_Function_uint %output %int_1 + OpStore %25 %uint_1 + OpStore %v %int_0 + OpBranch %29 + %29 = OpLabel + OpLoopMerge %31 %32 None + OpBranch %33 + %33 = OpLabel + %34 = OpLoad %int %v + %37 = OpSLessThan %bool %34 %int_3 + OpBranchConditional %37 %30 %31 + %30 = OpLabel + %38 = OpLoad %int %v + %40 = OpAccessChain %_ptr_Function_v4float %input %38 %int_0 + %41 = OpLoad %v4float %40 + %42 = OpAccessChain %_ptr_Function_v4float %output %int_0 + OpStore %42 %41 + %45 = OpAccessChain %_ptr_Function_v4float %output %int_0 + %46 = OpLoad %v4float %45 + OpStore %stream_pos %46 + %49 = OpAccessChain %_ptr_Function_uint %output %int_1 + %50 = OpLoad %uint %49 + OpStore %stream_layer %50 + OpEmitVertex + OpBranch %32 + %32 = OpLabel + %51 = OpLoad %int %v + %52 = OpIAdd %int %51 %int_1 + OpStore %v %52 + OpBranch %29 + %31 = 
OpLabel + OpEndPrimitive + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc b/3rdparty/spirv-cross/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc new file mode 100644 index 000000000..0fd4dce25 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/tesc/tess-fixed-input-array-builtin-array.invalid.asm.tesc @@ -0,0 +1,248 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 162 +; Schema: 0 + OpCapability Tessellation + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint TessellationControl %hs_main "main" %p_pos %p_1 %i_1 %_entryPointOutput_pos %_entryPointOutput %_patchConstantOutput_EdgeTess %_patchConstantOutput_InsideTess + OpExecutionMode %hs_main OutputVertices 3 + OpExecutionMode %hs_main Triangles + OpExecutionMode %hs_main SpacingFractionalOdd + OpExecutionMode %hs_main VertexOrderCw + OpSource HLSL 500 + OpName %hs_main "hs_main" + OpName %VertexOutput "VertexOutput" + OpMemberName %VertexOutput 0 "pos" + OpMemberName %VertexOutput 1 "uv" + OpName %HSOut "HSOut" + OpMemberName %HSOut 0 "pos" + OpMemberName %HSOut 1 "uv" + OpName %_hs_main_struct_VertexOutput_vf4_vf21_3__u1_ "@hs_main(struct-VertexOutput-vf4-vf21[3];u1;" + OpName %p "p" + OpName %i "i" + OpName %HSConstantOut "HSConstantOut" + OpMemberName %HSConstantOut 0 "EdgeTess" + OpMemberName %HSConstantOut 1 "InsideTess" + OpName %PatchHS_struct_VertexOutput_vf4_vf21_3__ "PatchHS(struct-VertexOutput-vf4-vf21[3];" + OpName %patch "patch" + OpName %output "output" + OpName %p_0 "p" + OpName %p_pos "p.pos" + OpName %VertexOutput_0 "VertexOutput" + OpMemberName %VertexOutput_0 0 "uv" + OpName %p_1 "p" + OpName %i_0 "i" + OpName %i_1 "i" + OpName %flattenTemp "flattenTemp" + OpName %param "param" + OpName %param_0 "param" + OpName %_entryPointOutput_pos "@entryPointOutput.pos" + OpName %HSOut_0 "HSOut" + OpMemberName %HSOut_0 0 "uv" + OpName %_entryPointOutput "@entryPointOutput" + OpName %_patchConstantResult "@patchConstantResult" + OpName %param_1 "param" + OpName %_patchConstantOutput_EdgeTess "@patchConstantOutput.EdgeTess" + OpName %_patchConstantOutput_InsideTess "@patchConstantOutput.InsideTess" + OpName %output_0 "output" + OpDecorate %p_pos BuiltIn Position + OpDecorate %p_1 Location 0 + OpDecorate %i_1 BuiltIn InvocationId + OpDecorate %_entryPointOutput_pos BuiltIn Position + OpDecorate %_entryPointOutput Location 0 + OpDecorate %_patchConstantOutput_EdgeTess Patch + OpDecorate %_patchConstantOutput_EdgeTess BuiltIn TessLevelOuter + OpDecorate %_patchConstantOutput_InsideTess Patch + OpDecorate %_patchConstantOutput_InsideTess BuiltIn TessLevelInner + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %v2float = OpTypeVector %float 2 +%VertexOutput = OpTypeStruct %v4float %v2float + %uint = OpTypeInt 32 0 + %uint_3 = OpConstant %uint 3 +%_arr_VertexOutput_uint_3 = OpTypeArray %VertexOutput %uint_3 +%_ptr_Function__arr_VertexOutput_uint_3 = OpTypePointer Function %_arr_VertexOutput_uint_3 +%_ptr_Function_uint = OpTypePointer Function %uint + %HSOut = OpTypeStruct %v4float %v2float + %16 = OpTypeFunction %HSOut %_ptr_Function__arr_VertexOutput_uint_3 %_ptr_Function_uint +%_arr_float_uint_3 = OpTypeArray %float %uint_3 +%HSConstantOut = OpTypeStruct %_arr_float_uint_3 %float + %23 = OpTypeFunction %HSConstantOut %_ptr_Function__arr_VertexOutput_uint_3 +%_ptr_Function_HSOut = OpTypePointer 
Function %HSOut + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %int_1 = OpConstant %int 1 +%_ptr_Function_v2float = OpTypePointer Function %v2float +%_arr_v4float_uint_3 = OpTypeArray %v4float %uint_3 +%_ptr_Input__arr_v4float_uint_3 = OpTypePointer Input %_arr_v4float_uint_3 + %p_pos = OpVariable %_ptr_Input__arr_v4float_uint_3 Input +%_ptr_Input_v4float = OpTypePointer Input %v4float +%VertexOutput_0 = OpTypeStruct %v2float +%_arr_VertexOutput_0_uint_3 = OpTypeArray %VertexOutput_0 %uint_3 +%_ptr_Input__arr_VertexOutput_0_uint_3 = OpTypePointer Input %_arr_VertexOutput_0_uint_3 + %p_1 = OpVariable %_ptr_Input__arr_VertexOutput_0_uint_3 Input +%_ptr_Input_v2float = OpTypePointer Input %v2float + %int_2 = OpConstant %int 2 +%_ptr_Input_uint = OpTypePointer Input %uint + %i_1 = OpVariable %_ptr_Input_uint Input +%_ptr_Output__arr_v4float_uint_3 = OpTypePointer Output %_arr_v4float_uint_3 +%_entryPointOutput_pos = OpVariable %_ptr_Output__arr_v4float_uint_3 Output +%_ptr_Output_v4float = OpTypePointer Output %v4float + %HSOut_0 = OpTypeStruct %v2float +%_arr_HSOut_0_uint_3 = OpTypeArray %HSOut_0 %uint_3 +%_ptr_Output__arr_HSOut_0_uint_3 = OpTypePointer Output %_arr_HSOut_0_uint_3 +%_entryPointOutput = OpVariable %_ptr_Output__arr_HSOut_0_uint_3 Output +%_ptr_Output_v2float = OpTypePointer Output %v2float + %uint_2 = OpConstant %uint 2 + %uint_1 = OpConstant %uint 1 + %uint_0 = OpConstant %uint 0 + %bool = OpTypeBool +%_ptr_Function_HSConstantOut = OpTypePointer Function %HSConstantOut + %uint_4 = OpConstant %uint 4 +%_arr_float_uint_4 = OpTypeArray %float %uint_4 +%_ptr_Output__arr_float_uint_4 = OpTypePointer Output %_arr_float_uint_4 +%_patchConstantOutput_EdgeTess = OpVariable %_ptr_Output__arr_float_uint_4 Output +%_ptr_Function_float = OpTypePointer Function %float +%_ptr_Output_float = OpTypePointer Output %float +%_arr_float_uint_2 = OpTypeArray %float %uint_2 +%_ptr_Output__arr_float_uint_2 = OpTypePointer Output %_arr_float_uint_2 +%_patchConstantOutput_InsideTess = OpVariable %_ptr_Output__arr_float_uint_2 Output + %float_1 = OpConstant %float 1 + %hs_main = OpFunction %void None %3 + %5 = OpLabel + %p_0 = OpVariable %_ptr_Function__arr_VertexOutput_uint_3 Function + %i_0 = OpVariable %_ptr_Function_uint Function +%flattenTemp = OpVariable %_ptr_Function_HSOut Function + %param = OpVariable %_ptr_Function__arr_VertexOutput_uint_3 Function + %param_0 = OpVariable %_ptr_Function_uint Function +%_patchConstantResult = OpVariable %_ptr_Function_HSConstantOut Function + %param_1 = OpVariable %_ptr_Function__arr_VertexOutput_uint_3 Function + %50 = OpAccessChain %_ptr_Input_v4float %p_pos %int_0 + %51 = OpLoad %v4float %50 + %52 = OpAccessChain %_ptr_Function_v4float %p_0 %int_0 %int_0 + OpStore %52 %51 + %58 = OpAccessChain %_ptr_Input_v2float %p_1 %int_0 %int_0 + %59 = OpLoad %v2float %58 + %60 = OpAccessChain %_ptr_Function_v2float %p_0 %int_0 %int_1 + OpStore %60 %59 + %61 = OpAccessChain %_ptr_Input_v4float %p_pos %int_1 + %62 = OpLoad %v4float %61 + %63 = OpAccessChain %_ptr_Function_v4float %p_0 %int_1 %int_0 + OpStore %63 %62 + %64 = OpAccessChain %_ptr_Input_v2float %p_1 %int_1 %int_0 + %65 = OpLoad %v2float %64 + %66 = OpAccessChain %_ptr_Function_v2float %p_0 %int_1 %int_1 + OpStore %66 %65 + %68 = OpAccessChain %_ptr_Input_v4float %p_pos %int_2 + %69 = OpLoad %v4float %68 + %70 = OpAccessChain %_ptr_Function_v4float %p_0 %int_2 %int_0 + OpStore %70 %69 + %71 = OpAccessChain %_ptr_Input_v2float %p_1 %int_2 
%int_0 + %72 = OpLoad %v2float %71 + %73 = OpAccessChain %_ptr_Function_v2float %p_0 %int_2 %int_1 + OpStore %73 %72 + %77 = OpLoad %uint %i_1 + OpStore %i_0 %77 + %80 = OpLoad %_arr_VertexOutput_uint_3 %p_0 + OpStore %param %80 + %82 = OpLoad %uint %i_0 + OpStore %param_0 %82 + %83 = OpFunctionCall %HSOut %_hs_main_struct_VertexOutput_vf4_vf21_3__u1_ %param %param_0 + OpStore %flattenTemp %83 + %86 = OpAccessChain %_ptr_Function_v4float %flattenTemp %int_0 + %87 = OpLoad %v4float %86 + %94 = OpLoad %uint %i_1 + %89 = OpAccessChain %_ptr_Output_v4float %_entryPointOutput_pos %94 + OpStore %89 %87 + %95 = OpAccessChain %_ptr_Function_v2float %flattenTemp %int_1 + %96 = OpLoad %v2float %95 + %98 = OpAccessChain %_ptr_Output_v2float %_entryPointOutput %94 %int_0 + OpStore %98 %96 + OpControlBarrier %uint_2 %uint_1 %uint_0 + %102 = OpLoad %uint %i_1 + %104 = OpIEqual %bool %102 %int_0 + OpSelectionMerge %106 None + OpBranchConditional %104 %105 %106 + %105 = OpLabel + %110 = OpLoad %_arr_VertexOutput_uint_3 %p_0 + OpStore %param_1 %110 + %111 = OpFunctionCall %HSConstantOut %PatchHS_struct_VertexOutput_vf4_vf21_3__ %param_1 + OpStore %_patchConstantResult %111 + %117 = OpAccessChain %_ptr_Function_float %_patchConstantResult %int_0 %int_0 + %118 = OpLoad %float %117 + %120 = OpAccessChain %_ptr_Output_float %_patchConstantOutput_EdgeTess %int_0 + OpStore %120 %118 + %121 = OpAccessChain %_ptr_Function_float %_patchConstantResult %int_0 %int_1 + %122 = OpLoad %float %121 + %123 = OpAccessChain %_ptr_Output_float %_patchConstantOutput_EdgeTess %int_1 + OpStore %123 %122 + %124 = OpAccessChain %_ptr_Function_float %_patchConstantResult %int_0 %int_2 + %125 = OpLoad %float %124 + %126 = OpAccessChain %_ptr_Output_float %_patchConstantOutput_EdgeTess %int_2 + OpStore %126 %125 + %130 = OpAccessChain %_ptr_Function_float %_patchConstantResult %int_1 + %131 = OpLoad %float %130 + %132 = OpAccessChain %_ptr_Output_float %_patchConstantOutput_InsideTess %int_0 + OpStore %132 %131 + OpBranch %106 + %106 = OpLabel + OpReturn + OpFunctionEnd +%_hs_main_struct_VertexOutput_vf4_vf21_3__u1_ = OpFunction %HSOut None %16 + %p = OpFunctionParameter %_ptr_Function__arr_VertexOutput_uint_3 + %i = OpFunctionParameter %_ptr_Function_uint + %20 = OpLabel + %output = OpVariable %_ptr_Function_HSOut Function + %31 = OpLoad %uint %i + %33 = OpAccessChain %_ptr_Function_v4float %p %31 %int_0 + %34 = OpLoad %v4float %33 + %35 = OpAccessChain %_ptr_Function_v4float %output %int_0 + OpStore %35 %34 + %37 = OpLoad %uint %i + %39 = OpAccessChain %_ptr_Function_v2float %p %37 %int_1 + %40 = OpLoad %v2float %39 + %41 = OpAccessChain %_ptr_Function_v2float %output %int_1 + OpStore %41 %40 + %42 = OpLoad %HSOut %output + OpReturnValue %42 + OpFunctionEnd +%PatchHS_struct_VertexOutput_vf4_vf21_3__ = OpFunction %HSConstantOut None %23 + %patch = OpFunctionParameter %_ptr_Function__arr_VertexOutput_uint_3 + %26 = OpLabel + %output_0 = OpVariable %_ptr_Function_HSConstantOut Function + %135 = OpAccessChain %_ptr_Function_v2float %patch %int_0 %int_1 + %136 = OpLoad %v2float %135 + %137 = OpCompositeConstruct %v2float %float_1 %float_1 + %138 = OpFAdd %v2float %137 %136 + %139 = OpCompositeExtract %float %138 0 + %140 = OpAccessChain %_ptr_Function_float %output_0 %int_0 %int_0 + OpStore %140 %139 + %141 = OpAccessChain %_ptr_Function_v2float %patch %int_0 %int_1 + %142 = OpLoad %v2float %141 + %143 = OpCompositeConstruct %v2float %float_1 %float_1 + %144 = OpFAdd %v2float %143 %142 + %145 = OpCompositeExtract %float %144 0 + %146 = 
OpAccessChain %_ptr_Function_float %output_0 %int_0 %int_1 + OpStore %146 %145 + %147 = OpAccessChain %_ptr_Function_v2float %patch %int_0 %int_1 + %148 = OpLoad %v2float %147 + %149 = OpCompositeConstruct %v2float %float_1 %float_1 + %150 = OpFAdd %v2float %149 %148 + %151 = OpCompositeExtract %float %150 0 + %152 = OpAccessChain %_ptr_Function_float %output_0 %int_0 %int_2 + OpStore %152 %151 + %153 = OpAccessChain %_ptr_Function_v2float %patch %int_0 %int_1 + %154 = OpLoad %v2float %153 + %155 = OpCompositeConstruct %v2float %float_1 %float_1 + %156 = OpFAdd %v2float %155 %154 + %157 = OpCompositeExtract %float %156 0 + %158 = OpAccessChain %_ptr_Function_float %output_0 %int_1 + OpStore %158 %157 + %159 = OpLoad %HSConstantOut %output_0 + OpReturnValue %159 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/empty-io.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/empty-io.asm.vert new file mode 100644 index 000000000..0ba6cb796 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/empty-io.asm.vert @@ -0,0 +1,70 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 40 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %position %_entryPointOutput_position %_entryPointOutput + OpName %main "main" + OpName %VSInput "VSInput" + OpMemberName %VSInput 0 "position" + OpName %VSOutput "VSOutput" + OpMemberName %VSOutput 0 "position" + OpName %_main_struct_VSInput_vf41_ "@main(struct-VSInput-vf41;" + OpName %_input "_input" + OpName %_out "_out" + OpName %_input_0 "_input" + OpName %position "position" + OpName %_entryPointOutput_position "@entryPointOutput_position" + OpName %param "param" + OpName %VSOutput_0 "VSOutput" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %position Location 0 + OpDecorate %_entryPointOutput_position BuiltIn Position + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %VSInput = OpTypeStruct %v4float +%_ptr_Function_VSInput = OpTypePointer Function %VSInput + %VSOutput = OpTypeStruct %v4float + %11 = OpTypeFunction %VSOutput %_ptr_Function_VSInput +%_ptr_Function_VSOutput = OpTypePointer Function %VSOutput + %int = OpTypeInt 32 1 + %18 = OpConstant %int 0 +%_ptr_Function_v4float = OpTypePointer Function %v4float +%_ptr_Input_v4float = OpTypePointer Input %v4float + %position = OpVariable %_ptr_Input_v4float Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput_position = OpVariable %_ptr_Output_v4float Output + %VSOutput_0 = OpTypeStruct +%_ptr_Output_VSOutput_0 = OpTypePointer Output %VSOutput_0 +%_entryPointOutput = OpVariable %_ptr_Output_VSOutput_0 Output + %main = OpFunction %void None %3 + %5 = OpLabel + %_input_0 = OpVariable %_ptr_Function_VSInput Function + %param = OpVariable %_ptr_Function_VSInput Function + %29 = OpLoad %v4float %position + %30 = OpAccessChain %_ptr_Function_v4float %_input_0 %18 + OpStore %30 %29 + %34 = OpLoad %VSInput %_input_0 + OpStore %param %34 + %35 = OpFunctionCall %VSOutput %_main_struct_VSInput_vf41_ %param + %36 = OpCompositeExtract %v4float %35 0 + OpStore %_entryPointOutput_position %36 + OpReturn + OpFunctionEnd +%_main_struct_VSInput_vf41_ = OpFunction %VSOutput None %11 + %_input = OpFunctionParameter %_ptr_Function_VSInput + %14 = OpLabel + %_out = OpVariable %_ptr_Function_VSOutput Function + %20 = OpAccessChain 
%_ptr_Function_v4float %_input %18 + %21 = OpLoad %v4float %20 + %22 = OpAccessChain %_ptr_Function_v4float %_out %18 + OpStore %22 %21 + %23 = OpLoad %VSOutput %_out + OpReturnValue %23 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/global-builtin.sso.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/global-builtin.sso.asm.vert new file mode 100644 index 000000000..d7306deb2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/global-builtin.sso.asm.vert @@ -0,0 +1,68 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 40 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_entryPointOutput %_entryPointOutput_pos + OpSource HLSL 500 + OpName %main "main" + OpName %VSOut "VSOut" + OpMemberName %VSOut 0 "a" + OpMemberName %VSOut 1 "pos" + OpName %_main_ "@main(" + OpName %vout "vout" + OpName %flattenTemp "flattenTemp" + OpName %VSOut_0 "VSOut" + OpMemberName %VSOut_0 0 "a" + OpName %_entryPointOutput "@entryPointOutput" + OpName %_entryPointOutput_pos "@entryPointOutput_pos" + OpDecorate %_entryPointOutput Location 0 + OpDecorate %_entryPointOutput_pos BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %VSOut = OpTypeStruct %float %v4float + %9 = OpTypeFunction %VSOut +%_ptr_Function_VSOut = OpTypePointer Function %VSOut + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_40 = OpConstant %float 40 +%_ptr_Function_float = OpTypePointer Function %float + %int_1 = OpConstant %int 1 + %float_1 = OpConstant %float 1 + %21 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Function_v4float = OpTypePointer Function %v4float + %VSOut_0 = OpTypeStruct %float +%_ptr_Output_VSOut_0 = OpTypePointer Output %VSOut_0 +%_entryPointOutput = OpVariable %_ptr_Output_VSOut_0 Output +%_ptr_Output_float = OpTypePointer Output %float +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput_pos = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel +%flattenTemp = OpVariable %_ptr_Function_VSOut Function + %28 = OpFunctionCall %VSOut %_main_ + OpStore %flattenTemp %28 + %32 = OpAccessChain %_ptr_Function_float %flattenTemp %int_0 + %33 = OpLoad %float %32 + %35 = OpAccessChain %_ptr_Output_float %_entryPointOutput %int_0 + OpStore %35 %33 + %38 = OpAccessChain %_ptr_Function_v4float %flattenTemp %int_1 + %39 = OpLoad %v4float %38 + OpStore %_entryPointOutput_pos %39 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %VSOut None %9 + %11 = OpLabel + %vout = OpVariable %_ptr_Function_VSOut Function + %18 = OpAccessChain %_ptr_Function_float %vout %int_0 + OpStore %18 %float_40 + %23 = OpAccessChain %_ptr_Function_v4float %vout %int_1 + OpStore %23 %21 + %24 = OpLoad %VSOut %vout + OpReturnValue %24 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/invariant-block.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/invariant-block.asm.vert new file mode 100644 index 000000000..5984935c7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/invariant-block.asm.vert @@ -0,0 +1,44 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 20 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_ + OpSource GLSL 450 + OpName %main "main" + OpName %gl_PerVertex 
"gl_PerVertex" + OpMemberName %gl_PerVertex 0 "gl_Position" + OpMemberName %gl_PerVertex 1 "gl_PointSize" + OpMemberName %gl_PerVertex 2 "gl_ClipDistance" + OpMemberName %gl_PerVertex 3 "gl_CullDistance" + OpName %_ "" + OpMemberDecorate %gl_PerVertex 0 Invariant + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance + OpDecorate %gl_PerVertex Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex + %_ = OpVariable %_ptr_Output_gl_PerVertex Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 + %17 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %19 %17 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/invariant-block.sso.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/invariant-block.sso.asm.vert new file mode 100644 index 000000000..5984935c7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/invariant-block.sso.asm.vert @@ -0,0 +1,44 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 20 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_ + OpSource GLSL 450 + OpName %main "main" + OpName %gl_PerVertex "gl_PerVertex" + OpMemberName %gl_PerVertex 0 "gl_Position" + OpMemberName %gl_PerVertex 1 "gl_PointSize" + OpMemberName %gl_PerVertex 2 "gl_ClipDistance" + OpMemberName %gl_PerVertex 3 "gl_CullDistance" + OpName %_ "" + OpMemberDecorate %gl_PerVertex 0 Invariant + OpMemberDecorate %gl_PerVertex 0 BuiltIn Position + OpMemberDecorate %gl_PerVertex 1 BuiltIn PointSize + OpMemberDecorate %gl_PerVertex 2 BuiltIn ClipDistance + OpMemberDecorate %gl_PerVertex 3 BuiltIn CullDistance + OpDecorate %gl_PerVertex Block + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %uint = OpTypeInt 32 0 + %uint_1 = OpConstant %uint 1 +%_arr_float_uint_1 = OpTypeArray %float %uint_1 +%gl_PerVertex = OpTypeStruct %v4float %float %_arr_float_uint_1 %_arr_float_uint_1 +%_ptr_Output_gl_PerVertex = OpTypePointer Output %gl_PerVertex + %_ = OpVariable %_ptr_Output_gl_PerVertex Output + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %float_1 = OpConstant %float 1 + %17 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float + %main = OpFunction %void None %3 + %5 = OpLabel + %19 = OpAccessChain %_ptr_Output_v4float %_ %int_0 + OpStore %19 %17 + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/invariant.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/invariant.asm.vert new file mode 100644 index 000000000..c0d381ee2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/invariant.asm.vert @@ -0,0 +1,34 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 18 +; Schema: 0 
+ OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_entryPointOutput + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %_entryPointOutput Invariant + OpDecorate %_entryPointOutput BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %float_1 = OpConstant %float 1 + %12 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %17 = OpFunctionCall %v4float %_main_ + OpStore %_entryPointOutput %17 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v4float None %8 + %10 = OpLabel + OpReturnValue %12 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/invariant.sso.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/invariant.sso.asm.vert new file mode 100644 index 000000000..c0d381ee2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/invariant.sso.asm.vert @@ -0,0 +1,34 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 3 +; Bound: 18 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %_entryPointOutput + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %_entryPointOutput Invariant + OpDecorate %_entryPointOutput BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float + %float_1 = OpConstant %float 1 + %12 = OpConstantComposite %v4float %float_1 %float_1 %float_1 %float_1 +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %17 = OpFunctionCall %v4float %_main_ + OpStore %_entryPointOutput %17 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v4float None %8 + %10 = OpLabel + OpReturnValue %12 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert b/3rdparty/spirv-cross/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert new file mode 100644 index 000000000..b566a3d1a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/spec-constant-op-composite.asm.vk.vert @@ -0,0 +1,98 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 1 +; Bound: 58 +; Schema: 0 + OpCapability Shader + OpCapability ClipDistance + OpCapability CullDistance + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %4 "main" %52 %output + OpSource GLSL 450 + OpName %4 "main" + OpName %9 "pos" + OpName %50 "gl_PerVertex" + OpMemberName %50 0 "gl_Position" + OpMemberName %50 1 "gl_PointSize" + OpMemberName %50 2 "gl_ClipDistance" + OpMemberName %50 3 "gl_CullDistance" + OpName %52 "" + OpDecorate %13 SpecId 201 + OpDecorate %24 SpecId 202 + OpMemberDecorate %50 0 BuiltIn Position + OpMemberDecorate %50 1 BuiltIn PointSize + OpMemberDecorate %50 2 BuiltIn ClipDistance + OpMemberDecorate %50 3 BuiltIn CullDistance + OpDecorate %50 Block + OpDecorate %57 SpecId 200 + OpDecorate %output Flat + OpDecorate %output Location 0 + %2 = OpTypeVoid + %3 = 
OpTypeFunction %2 + %6 = OpTypeFloat 32 + %7 = OpTypeVector %6 4 + %8 = OpTypePointer Function %7 + %10 = OpConstant %6 0 + %11 = OpConstantComposite %7 %10 %10 %10 %10 + %12 = OpTypeInt 32 1 + %int_ptr = OpTypePointer Output %12 + %13 = OpSpecConstant %12 -10 + %14 = OpConstant %12 2 + %15 = OpSpecConstantOp %12 IAdd %13 %14 + %17 = OpTypeInt 32 0 + %18 = OpConstant %17 1 + %19 = OpTypePointer Function %6 + %24 = OpSpecConstant %17 100 + %25 = OpConstant %17 5 + %26 = OpSpecConstantOp %17 UMod %24 %25 + %28 = OpConstant %17 2 + %33 = OpConstant %12 20 + %34 = OpConstant %12 30 + %35 = OpTypeVector %12 4 + %36 = OpSpecConstantComposite %35 %33 %34 %15 %15 + %40 = OpTypeVector %12 2 + %41 = OpSpecConstantOp %40 VectorShuffle %36 %36 1 0 + %foo = OpSpecConstantOp %12 CompositeExtract %36 1 + %42 = OpTypeVector %6 2 + %49 = OpTypeArray %6 %18 + %50 = OpTypeStruct %7 %6 %49 %49 + %51 = OpTypePointer Output %50 + %52 = OpVariable %51 Output + %output = OpVariable %int_ptr Output + %53 = OpConstant %12 0 + %55 = OpTypePointer Output %7 + %57 = OpSpecConstant %6 3.14159 + %4 = OpFunction %2 None %3 + %5 = OpLabel + %9 = OpVariable %8 Function + OpStore %9 %11 + %16 = OpConvertSToF %6 %15 + %20 = OpAccessChain %19 %9 %18 + %21 = OpLoad %6 %20 + %22 = OpFAdd %6 %21 %16 + %23 = OpAccessChain %19 %9 %18 + OpStore %23 %22 + %27 = OpConvertUToF %6 %26 + %29 = OpAccessChain %19 %9 %28 + %30 = OpLoad %6 %29 + %31 = OpFAdd %6 %30 %27 + %32 = OpAccessChain %19 %9 %28 + OpStore %32 %31 + %37 = OpConvertSToF %7 %36 + %38 = OpLoad %7 %9 + %39 = OpFAdd %7 %38 %37 + OpStore %9 %39 + %43 = OpConvertSToF %42 %41 + %44 = OpLoad %7 %9 + %45 = OpVectorShuffle %42 %44 %44 0 1 + %46 = OpFAdd %42 %45 %43 + %47 = OpLoad %7 %9 + %48 = OpVectorShuffle %7 %47 %46 4 5 2 3 + OpStore %9 %48 + %54 = OpLoad %7 %9 + %56 = OpAccessChain %55 %52 %53 + OpStore %56 %54 + OpStore %output %foo + OpReturn + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert b/3rdparty/spirv-cross/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert new file mode 100644 index 000000000..29b0076a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/asm/vert/uint-vertex-id-instance-id.asm.vert @@ -0,0 +1,65 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 6 +; Bound: 36 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Vertex %main "main" %vid_1 %iid_1 %_entryPointOutput + OpSource HLSL 500 + OpName %main "main" + OpName %_main_u1_u1_ "@main(u1;u1;" + OpName %vid "vid" + OpName %iid "iid" + OpName %vid_0 "vid" + OpName %vid_1 "vid" + OpName %iid_0 "iid" + OpName %iid_1 "iid" + OpName %_entryPointOutput "@entryPointOutput" + OpName %param "param" + OpName %param_0 "param" + OpDecorate %vid_1 BuiltIn VertexIndex + OpDecorate %iid_1 BuiltIn InstanceIndex + OpDecorate %_entryPointOutput BuiltIn Position + %void = OpTypeVoid + %3 = OpTypeFunction %void + %uint = OpTypeInt 32 0 +%_ptr_Function_uint = OpTypePointer Function %uint + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %10 = OpTypeFunction %v4float %_ptr_Function_uint %_ptr_Function_uint +%_ptr_Input_uint = OpTypePointer Input %uint + %vid_1 = OpVariable %_ptr_Input_uint Input + %iid_1 = OpVariable %_ptr_Input_uint Input +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %vid_0 = OpVariable %_ptr_Function_uint Function + %iid_0 = 
OpVariable %_ptr_Function_uint Function + %param = OpVariable %_ptr_Function_uint Function + %param_0 = OpVariable %_ptr_Function_uint Function + %25 = OpLoad %uint %vid_1 + OpStore %vid_0 %25 + %28 = OpLoad %uint %iid_1 + OpStore %iid_0 %28 + %32 = OpLoad %uint %vid_0 + OpStore %param %32 + %34 = OpLoad %uint %iid_0 + OpStore %param_0 %34 + %35 = OpFunctionCall %v4float %_main_u1_u1_ %param %param_0 + OpStore %_entryPointOutput %35 + OpReturn + OpFunctionEnd +%_main_u1_u1_ = OpFunction %v4float None %10 + %vid = OpFunctionParameter %_ptr_Function_uint + %iid = OpFunctionParameter %_ptr_Function_uint + %14 = OpLabel + %15 = OpLoad %uint %vid + %16 = OpLoad %uint %iid + %17 = OpIAdd %uint %15 %16 + %18 = OpConvertUToF %float %17 + %19 = OpCompositeConstruct %v4float %18 %18 %18 %18 + OpReturnValue %19 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/comp/atomic.comp b/3rdparty/spirv-cross/shaders/comp/atomic.comp new file mode 100644 index 000000000..703256d87 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/atomic.comp @@ -0,0 +1,56 @@ +#version 310 es +#extension GL_OES_shader_image_atomic : require +layout(local_size_x = 1) in; + +layout(r32ui, binding = 0) uniform highp uimage2D uImage; +layout(r32i, binding = 1) uniform highp iimage2D iImage; +layout(binding = 2, std430) buffer SSBO +{ + uint u32; + int i32; +} ssbo; + +void main() +{ + imageAtomicAdd(uImage, ivec2(1, 5), 1u); + + // Test that we do not invalidate OpImage variables which are loaded from UniformConstant + // address space. + imageStore(iImage, ivec2(1, 6), ivec4(imageAtomicAdd(uImage, ivec2(1, 5), 1u))); + + imageAtomicOr(uImage, ivec2(1, 5), 1u); + imageAtomicXor(uImage, ivec2(1, 5), 1u); + imageAtomicAnd(uImage, ivec2(1, 5), 1u); + imageAtomicMin(uImage, ivec2(1, 5), 1u); + imageAtomicMax(uImage, ivec2(1, 5), 1u); + //imageAtomicExchange(uImage, ivec2(1, 5), 1u); + imageAtomicCompSwap(uImage, ivec2(1, 5), 10u, 2u); + + imageAtomicAdd(iImage, ivec2(1, 6), 1); + imageAtomicOr(iImage, ivec2(1, 6), 1); + imageAtomicXor(iImage, ivec2(1, 6), 1); + imageAtomicAnd(iImage, ivec2(1, 6), 1); + imageAtomicMin(iImage, ivec2(1, 6), 1); + imageAtomicMax(iImage, ivec2(1, 6), 1); + //imageAtomicExchange(iImage, ivec2(1, 5), 1u); + imageAtomicCompSwap(iImage, ivec2(1, 5), 10, 2); + + atomicAdd(ssbo.u32, 1u); + atomicOr(ssbo.u32, 1u); + atomicXor(ssbo.u32, 1u); + atomicAnd(ssbo.u32, 1u); + atomicMin(ssbo.u32, 1u); + atomicMax(ssbo.u32, 1u); + atomicExchange(ssbo.u32, 1u); + atomicCompSwap(ssbo.u32, 10u, 2u); + + atomicAdd(ssbo.i32, 1); + atomicOr(ssbo.i32, 1); + atomicXor(ssbo.i32, 1); + atomicAnd(ssbo.i32, 1); + atomicMin(ssbo.i32, 1); + atomicMax(ssbo.i32, 1); + atomicExchange(ssbo.i32, 1); + atomicCompSwap(ssbo.i32, 10, 2); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/bake_gradient.comp b/3rdparty/spirv-cross/shaders/comp/bake_gradient.comp new file mode 100644 index 000000000..4885ff00b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/bake_gradient.comp @@ -0,0 +1,55 @@ +#version 310 es + +layout(local_size_x = 8, local_size_y = 8) in; + +layout(binding = 0) uniform sampler2D uHeight; +layout(binding = 1) uniform sampler2D uDisplacement; +layout(rgba16f, binding = 2) uniform writeonly mediump image2D iHeightDisplacement; +layout(rgba16f, binding = 3) uniform writeonly mediump image2D iGradJacobian; + +layout(binding = 4) uniform UBO +{ + vec4 uInvSize; + vec4 uScale; +}; + +mediump float jacobian(mediump vec2 dDdx, mediump vec2 dDdy) +{ + return (1.0 + dDdx.x) * (1.0 + dDdy.y) - dDdx.y * dDdy.x; +} +#define 
LAMBDA 1.2 + +void main() +{ + vec4 uv = (vec2(gl_GlobalInvocationID.xy) * uInvSize.xy).xyxy + 0.5 * uInvSize; + + float h = textureLod(uHeight, uv.xy, 0.0).x; + + // Compute the heightmap gradient by simple differentiation. + float x0 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(-1, 0)).x; + float x1 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(+1, 0)).x; + float y0 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(0, -1)).x; + float y1 = textureLodOffset(uHeight, uv.xy, 0.0, ivec2(0, +1)).x; + vec2 grad = uScale.xy * 0.5 * vec2(x1 - x0, y1 - y0); + + // Displacement map must be sampled with a different offset since it's a smaller texture. + vec2 displacement = LAMBDA * textureLod(uDisplacement, uv.zw, 0.0).xy; + + // Compute jacobian. + vec2 dDdx = 0.5 * LAMBDA * ( + textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(+1, 0)).xy - + textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(-1, 0)).xy); + vec2 dDdy = 0.5 * LAMBDA * ( + textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(0, +1)).xy - + textureLodOffset(uDisplacement, uv.zw, 0.0, ivec2(0, -1)).xy); + float j = jacobian(dDdx * uScale.z, dDdy * uScale.z); + + displacement = vec2(0.0); + + // Read by vertex shader/tess shader. + imageStore(iHeightDisplacement, ivec2(gl_GlobalInvocationID.xy), vec4(h, displacement, 0.0)); + + // Read by fragment shader. + imageStore(iGradJacobian, ivec2(gl_GlobalInvocationID.xy), vec4(grad, j, 0.0)); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/barriers.comp b/3rdparty/spirv-cross/shaders/comp/barriers.comp new file mode 100644 index 000000000..7e0ea42d4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/barriers.comp @@ -0,0 +1,79 @@ +#version 310 es +layout(local_size_x = 4) in; + +void barrier_shared() +{ + memoryBarrierShared(); +} + +void full_barrier() +{ + memoryBarrier(); +} + +void image_barrier() +{ + memoryBarrierImage(); +} + +void buffer_barrier() +{ + memoryBarrierBuffer(); +} + +void group_barrier() +{ + groupMemoryBarrier(); +} + +void barrier_shared_exec() +{ + memoryBarrierShared(); + barrier(); +} + +void full_barrier_exec() +{ + memoryBarrier(); + barrier(); +} + +void image_barrier_exec() +{ + memoryBarrierImage(); + barrier(); +} + +void buffer_barrier_exec() +{ + memoryBarrierBuffer(); + barrier(); +} + +void group_barrier_exec() +{ + groupMemoryBarrier(); + barrier(); +} + +void exec_barrier() +{ + barrier(); +} + +void main() +{ + barrier_shared(); + full_barrier(); + image_barrier(); + buffer_barrier(); + group_barrier(); + + barrier_shared_exec(); + full_barrier_exec(); + image_barrier_exec(); + buffer_barrier_exec(); + group_barrier_exec(); + + exec_barrier(); +} diff --git a/3rdparty/spirv-cross/shaders/comp/basic.comp b/3rdparty/spirv-cross/shaders/comp/basic.comp new file mode 100644 index 000000000..f9bf55670 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/basic.comp @@ -0,0 +1,28 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +layout(std430, binding = 2) buffer SSBO3 +{ + uint counter; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idata = in_data[ident]; + if (dot(idata, vec4(1.0, 5.0, 6.0, 2.0)) > 8.2) + { + out_data[atomicAdd(counter, 1u)] = idata; + } +} + diff --git a/3rdparty/spirv-cross/shaders/comp/bitcast-16bit-1.invalid.comp b/3rdparty/spirv-cross/shaders/comp/bitcast-16bit-1.invalid.comp new file mode 100644 index 000000000..0c21cda30 --- /dev/null +++ 
b/3rdparty/spirv-cross/shaders/comp/bitcast-16bit-1.invalid.comp @@ -0,0 +1,23 @@ +#version 450 core +#extension GL_AMD_gpu_shader_half_float : require +#extension GL_AMD_gpu_shader_int16 : require +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + i16vec4 inputs[]; +}; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + f16vec2 a = int16BitsToFloat16(inputs[ident].xy); + outputs[ident].x = int(packFloat2x16(a + f16vec2(1, 1))); + outputs[ident].y = packInt2x16(inputs[ident].zw); + outputs[ident].z = int(packUint2x16(u16vec2(inputs[ident].xy))); +} diff --git a/3rdparty/spirv-cross/shaders/comp/bitcast-16bit-2.invalid.comp b/3rdparty/spirv-cross/shaders/comp/bitcast-16bit-2.invalid.comp new file mode 100644 index 000000000..6bb662412 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/bitcast-16bit-2.invalid.comp @@ -0,0 +1,26 @@ +#version 450 core +#extension GL_AMD_gpu_shader_half_float : require +#extension GL_AMD_gpu_shader_int16 : require +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + ivec4 inputs[]; +}; + +layout(binding = 1, std430) buffer SSBO1 +{ + i16vec4 outputs[]; +}; + +layout(binding = 2) uniform UBO +{ + f16vec4 const0; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + outputs[ident].xy = unpackInt2x16(inputs[ident].x) + float16BitsToInt16(const0.xy); + outputs[ident].zw = i16vec2(unpackUint2x16(uint(inputs[ident].y)) - float16BitsToUint16(const0.zw)); +} diff --git a/3rdparty/spirv-cross/shaders/comp/casts.comp b/3rdparty/spirv-cross/shaders/comp/casts.comp new file mode 100644 index 000000000..6be539d7b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/casts.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO0 +{ + ivec4 inputs[]; +}; + +layout(binding = 1, std430) buffer SSBO1 +{ + ivec4 outputs[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + outputs[ident] = ivec4(bvec4(inputs[ident] & 0x3)); +} diff --git a/3rdparty/spirv-cross/shaders/comp/cfg-preserve-parameter.comp b/3rdparty/spirv-cross/shaders/comp/cfg-preserve-parameter.comp new file mode 100644 index 000000000..9ef909200 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/cfg-preserve-parameter.comp @@ -0,0 +1,54 @@ +#version 310 es + +// We write in all paths (and no reads), so should just be out. +void out_test_0(int cond, inout int i) +{ + if (cond == 0) + i = 40; + else + i = 60; +} + +// We write in all paths (and no reads), so should just be out. +void out_test_1(int cond, inout int i) +{ + switch (cond) + { + case 40: + i = 40; + break; + + default: + i = 70; + break; + } +} + +// We don't write in all paths, so should be inout. +void inout_test_0(int cond, inout int i) +{ + if (cond == 0) + i = 40; +} + +void inout_test_1(int cond, inout int i) +{ + switch (cond) + { + case 40: + i = 40; + break; + } +} + + +void main() +{ + int cond = 40; + int i = 50; + + out_test_0(cond, i); + out_test_1(cond, i); + inout_test_0(cond, i); + inout_test_1(cond, i); +} diff --git a/3rdparty/spirv-cross/shaders/comp/cfg.comp b/3rdparty/spirv-cross/shaders/comp/cfg.comp new file mode 100644 index 000000000..4f4e6c0ea --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/cfg.comp @@ -0,0 +1,91 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + float data; +}; + +void test() +{ + // Test that variables local to a scope stay local. 
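+	// Each branch below declares its own 'tmp'; the expectation is that neither
+	// declaration is hoisted out of its branch into a shared temporary.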
+ if (data != 0.0) + { + float tmp = 10.0; + data = tmp; + } + else + { + float tmp = 15.0; + data = tmp; + } + + // Test that variable access propagates up to dominator + if (data != 0.0) + { + float e; + if (data != 5.0) + { + if (data != 6.0) + e = 10.0; + } + else + e = 20.0; + } + + // Test that variables local to a switch block stay local. + switch (int(data)) + { + case 0: + { + float tmp = 20.0; + data = tmp; + break; + } + + case 1: + { + float tmp = 30.0; + data = tmp; + break; + } + } + + // Check that multibranches propagate up to dominator. + float f; + switch (int(data)) + { + case 0: + { + f = 30.0; + break; + } + + case 1: + { + f = 40.0; + break; + } + } + + // Check that loops work. + // Interesting case here is propagating variable access from the continue block. + float h; + for (int i = 0; i < 20; i++, h += 10.0) + ; + data = h; + + // Do the same with do-while, gotta test all the hard cases. + float m; + do + { + } while (m != 20.0); + data = m; +} + +void main() +{ + // Test that we do the CFG analysis for all functions. + test(); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/coherent-block.comp b/3rdparty/spirv-cross/shaders/comp/coherent-block.comp new file mode 100644 index 000000000..0a174e8ef --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/coherent-block.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 1) coherent restrict writeonly buffer SSBO +{ + vec4 value; +}; + +void main() +{ + value = vec4(20.0); +} diff --git a/3rdparty/spirv-cross/shaders/comp/coherent-image.comp b/3rdparty/spirv-cross/shaders/comp/coherent-image.comp new file mode 100644 index 000000000..fd6e28018 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/coherent-image.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 1) coherent restrict writeonly buffer SSBO +{ + ivec4 value; +}; + +layout(r32i, binding = 3) coherent readonly restrict uniform mediump iimage2D uImage; + +void main() +{ + value = imageLoad(uImage, ivec2(10)); +} diff --git a/3rdparty/spirv-cross/shaders/comp/composite-array-initialization.comp b/3rdparty/spirv-cross/shaders/comp/composite-array-initialization.comp new file mode 100644 index 000000000..fa9b61148 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/composite-array-initialization.comp @@ -0,0 +1,29 @@ +#version 310 es +#extension GL_EXT_shader_non_constant_global_initializers : require +layout(local_size_x = 2) in; + +struct Data +{ + float a; + float b; +}; + +layout(std430, binding = 0) buffer SSBO +{ + Data outdata[]; +}; + +layout(constant_id = 0) const float X = 4.0; + +Data data[2] = Data[](Data(1.0, 2.0), Data(3.0, 4.0)); +Data data2[2] = Data[](Data(X, 2.0), Data(3.0, 5.0)); + +Data combine(Data a, Data b) +{ + return Data(a.a + b.a, a.b + b.b); +} + +void main() +{ + outdata[gl_WorkGroupID.x] = combine(data[gl_LocalInvocationID.x], data2[gl_LocalInvocationID.x]); +} diff --git a/3rdparty/spirv-cross/shaders/comp/composite-construct.comp b/3rdparty/spirv-cross/shaders/comp/composite-construct.comp new file mode 100644 index 000000000..859c56f51 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/composite-construct.comp @@ -0,0 +1,40 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO0 +{ + vec4 as[]; +}; + +layout(std430, binding = 1) buffer SSBO1 +{ + vec4 bs[]; +}; + +vec4 summe(vec4 values[3][2]) +{ + return values[0][0] + values[2][1] + values[0][1] + values[1][0]; +} + +struct Composite +{ + vec4 a[2]; + vec4 
b[2]; +}; + +void main() +{ + vec4 values[2] = vec4[](as[gl_GlobalInvocationID.x], bs[gl_GlobalInvocationID.x]); + vec4 const_values[2] = vec4[](vec4(10.0), vec4(30.0)); + vec4 copy_values[2]; + copy_values = const_values; + vec4 copy_values2[2] = values; + as[gl_GlobalInvocationID.x] = summe(vec4[][](values, copy_values, copy_values2)); + + Composite c = Composite(values, copy_values); + + float arrayofarray[2][3] = float[][](float[](1.0, 1.0, 1.0), float[](2.0, 2.0, 2.0)); + + float b = 10.0; + float values_scalar[4] = float[](b, b, b, b); +} diff --git a/3rdparty/spirv-cross/shaders/comp/culling.comp b/3rdparty/spirv-cross/shaders/comp/culling.comp new file mode 100644 index 000000000..9f8331b10 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/culling.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 4) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + float in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + float out_data[]; +}; + +layout(std430, binding = 2) buffer SSBO3 +{ + uint count; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = in_data[ident]; + if (idata > 12.0) + out_data[atomicAdd(count, 1u)] = idata; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/defer-parens.comp b/3rdparty/spirv-cross/shaders/comp/defer-parens.comp new file mode 100644 index 000000000..4e8ea6b39 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/defer-parens.comp @@ -0,0 +1,30 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBO +{ + vec4 data; + int index; +}; + +void main() +{ + // Tests defer-parens behavior where a binary expression is OpCompositeExtracted chained together + // with an OpCompositeConstruct optimization. + vec4 d = data; + data = vec4(d.x, d.yz + 10.0, d.w); + + // Verify binary ops. + data = d + d + d; + + // Verify swizzles. 
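+	// The parentheses around the sum must be preserved so that .xxyy applies to
+	// the whole (d.yz + 10.0) expression.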
+ data = (d.yz + 10.0).xxyy; + + // OpCompositeExtract + float t = (d.yz + 10.0).y; + data = vec4(t); + + // OpVectorExtractDynamic + t = (d.zw + 10.0)[index]; + data = vec4(t); +} diff --git a/3rdparty/spirv-cross/shaders/comp/dowhile.comp b/3rdparty/spirv-cross/shaders/comp/dowhile.comp new file mode 100644 index 000000000..709db75a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/dowhile.comp @@ -0,0 +1,31 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +int i; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + + i = 0; + vec4 idat = in_data[ident]; + do + { + idat = mvp * idat; + i++; + } while(i < 16); + + out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/generate_height.comp b/3rdparty/spirv-cross/shaders/comp/generate_height.comp new file mode 100644 index 000000000..16cef4de7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/generate_height.comp @@ -0,0 +1,97 @@ +#version 310 es + +layout(local_size_x = 64) in; + +layout(std430, binding = 0) readonly buffer Distribution +{ + vec2 distribution[]; +}; + +layout(std430, binding = 1) writeonly buffer HeightmapFFT +{ + uint heights[]; +}; + +layout(binding = 2, std140) uniform UBO +{ + vec4 uModTime; +}; + +vec2 alias(vec2 i, vec2 N) +{ + return mix(i, i - N, greaterThan(i, 0.5 * N)); +} + +vec4 cmul(vec4 a, vec4 b) +{ + vec4 r3 = a.yxwz; + vec4 r1 = b.xxzz; + vec4 R0 = a * r1; + vec4 r2 = b.yyww; + vec4 R1 = r2 * r3; + return R0 + vec4(-R1.x, R1.y, -R1.z, R1.w); +} + +vec2 cmul(vec2 a, vec2 b) +{ + vec2 r3 = a.yx; + vec2 r1 = b.xx; + vec2 R0 = a * r1; + vec2 r2 = b.yy; + vec2 R1 = r2 * r3; + return R0 + vec2(-R1.x, R1.y); +} + +uint pack2(vec2 v) +{ + return packHalf2x16(v); +} + +uvec2 pack4(vec4 v) +{ + return uvec2(packHalf2x16(v.xy), packHalf2x16(v.zw)); +} + +uvec2 workaround_mix(uvec2 a, uvec2 b, bvec2 sel) +{ + return uvec2(sel.x ? b.x : a.x, sel.y ? b.y : a.y); +} + +void generate_heightmap() +{ + uvec2 N = gl_WorkGroupSize.xy * gl_NumWorkGroups.xy; + uvec2 i = gl_GlobalInvocationID.xy; + // Pick out the negative frequency variant. + uvec2 wi = workaround_mix(N - i, uvec2(0u), equal(i, uvec2(0u))); + + // Pick out positive and negative travelling waves. + vec2 a = distribution[i.y * N.x + i.x]; + vec2 b = distribution[wi.y * N.x + wi.x]; + + vec2 k = uModTime.xy * alias(vec2(i), vec2(N)); + float k_len = length(k); + + const float G = 9.81; + + // If this sample runs for hours on end, the cosines of very large numbers will eventually become unstable. + // It is fairly easy to fix this by wrapping uTime, + // and quantizing w such that wrapping uTime does not change the result. + // See Tessendorf's paper for how to do it. + // The sqrt(G * k_len) factor represents how fast ocean waves at different frequencies propagate. + float w = sqrt(G * k_len) * uModTime.z; + float cw = cos(w); + float sw = sin(w); + + // Complex multiply to rotate our frequency samples. + a = cmul(a, vec2(cw, sw)); + b = cmul(b, vec2(cw, sw)); + b = vec2(b.x, -b.y); // Complex conjugate since we picked a frequency with the opposite direction. + vec2 res = a + b; // Sum up forward and backwards travelling waves. 
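+	// The combined wave is packed into two half floats (packHalf2x16) before
+	// being written to the heightmap FFT buffer.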
+ heights[i.y * N.x + i.x] = pack2(res); +} + +void main() +{ + generate_heightmap(); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/image.comp b/3rdparty/spirv-cross/shaders/comp/image.comp new file mode 100644 index 000000000..e375534a5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/image.comp @@ -0,0 +1,12 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(rgba8, binding = 0) uniform readonly mediump image2D uImageIn; +layout(rgba8, binding = 1) uniform writeonly mediump image2D uImageOut; + +void main() +{ + vec4 v = imageLoad(uImageIn, ivec2(gl_GlobalInvocationID.xy) + imageSize(uImageIn)); + imageStore(uImageOut, ivec2(gl_GlobalInvocationID.xy), v); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/inout-struct.invalid.comp b/3rdparty/spirv-cross/shaders/comp/inout-struct.invalid.comp new file mode 100644 index 000000000..c1de95974 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/inout-struct.invalid.comp @@ -0,0 +1,55 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) writeonly buffer SSBO +{ + vec4 data[]; +} outdata; + +layout(std430, binding = 1) readonly buffer SSBO2 +{ + vec4 data[]; +} indata; + +struct Foo +{ + vec4 a; + vec4 b; + vec4 c; + vec4 d; +}; + +layout(std430, binding = 2) readonly buffer SSBO3 +{ + Foo foos[]; +} foobar; + +vec4 bar(Foo foo) +{ + return foo.a + foo.b + foo.c + foo.d; +} + +void baz(out Foo foo) +{ + uint ident = gl_GlobalInvocationID.x; + foo.a = indata.data[4u * ident + 0u]; + foo.b = indata.data[4u * ident + 1u]; + foo.c = indata.data[4u * ident + 2u]; + foo.d = indata.data[4u * ident + 3u]; +} + +void meow(inout Foo foo) +{ + foo.a += 10.0; + foo.b += 20.0; + foo.c += 30.0; + foo.d += 40.0; +} + +void main() +{ + Foo foo; + baz(foo); + meow(foo); + outdata.data[gl_GlobalInvocationID.x] = bar(foo) + bar(foobar.foos[gl_GlobalInvocationID.x]); +} diff --git a/3rdparty/spirv-cross/shaders/comp/insert.comp b/3rdparty/spirv-cross/shaders/comp/insert.comp new file mode 100644 index 000000000..07c1f8d7a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/insert.comp @@ -0,0 +1,18 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) writeonly buffer SSBO +{ + vec4 out_data[]; +}; + +void main() +{ + vec4 v; + v.x = 10.0; + v.y = 30.0; + v.z = 70.0; + v.w = 90.0; + out_data[gl_GlobalInvocationID.x] = v; + out_data[gl_GlobalInvocationID.x].y = 20.0; +} diff --git a/3rdparty/spirv-cross/shaders/comp/mat3.comp b/3rdparty/spirv-cross/shaders/comp/mat3.comp new file mode 100644 index 000000000..7c5bb1e4f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/mat3.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + mat3 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + out_data[ident] = mat3(vec3(10.0), vec3(20.0), vec3(40.0)); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/mod.comp b/3rdparty/spirv-cross/shaders/comp/mod.comp new file mode 100644 index 000000000..1631456e3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/mod.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 v = mod(in_data[ident], out_data[ident]); + out_data[ident] = v; + + uvec4 vu = floatBitsToUint(in_data[ident]) % floatBitsToUint(out_data[ident]); + 
out_data[ident] = uintBitsToFloat(vu); + + ivec4 vi = floatBitsToInt(in_data[ident]) % floatBitsToInt(out_data[ident]); + out_data[ident] = intBitsToFloat(vi); +} + diff --git a/3rdparty/spirv-cross/shaders/comp/modf.comp b/3rdparty/spirv-cross/shaders/comp/modf.comp new file mode 100644 index 000000000..edadefcf0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/modf.comp @@ -0,0 +1,23 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 i; + //vec4 v = frexp(in_data[ident], i); + //out_data[ident] = ldexp(v, i); + vec4 v = modf(in_data[ident], i); + out_data[ident] = v; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/read-write-only.comp b/3rdparty/spirv-cross/shaders/comp/read-write-only.comp new file mode 100644 index 000000000..b224b6f12 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/read-write-only.comp @@ -0,0 +1,26 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(binding = 0, std430) readonly buffer SSBO0 +{ + vec4 data0; + vec4 data1; +}; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + vec4 data2; + vec4 data3; +}; + +layout(binding = 2, std430) restrict writeonly buffer SSBO2 +{ + vec4 data4; + vec4 data5; +}; + +void main() +{ + data4 = data0 + data2; + data5 = data1 + data3; +} diff --git a/3rdparty/spirv-cross/shaders/comp/rmw-matrix.comp b/3rdparty/spirv-cross/shaders/comp/rmw-matrix.comp new file mode 100644 index 000000000..c158ab4dd --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/rmw-matrix.comp @@ -0,0 +1,20 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + float a; + vec4 b; + mat4 c; + + float a1; + vec4 b1; + mat4 c1; +}; + +void main() +{ + a *= a1; + b *= b1; + c *= c1; +} diff --git a/3rdparty/spirv-cross/shaders/comp/rmw-opt.comp b/3rdparty/spirv-cross/shaders/comp/rmw-opt.comp new file mode 100644 index 000000000..a6e1e7fe7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/rmw-opt.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + int a; +}; + +void main() +{ + a += 10; + a -= 10; + a *= 10; + a /= 10; + a <<= 2; + a >>= 3; + a &= 40; + a ^= 10; + a %= 40; + a |= 1; + + bool c = false; + bool d = true; + c = c && d; + d = d || c; + a = c && d ? 
1 : 0; +} diff --git a/3rdparty/spirv-cross/shaders/comp/shared.comp b/3rdparty/spirv-cross/shaders/comp/shared.comp new file mode 100644 index 000000000..4deff9359 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/shared.comp @@ -0,0 +1,27 @@ +#version 310 es +layout(local_size_x = 4) in; + +shared float sShared[gl_WorkGroupSize.x]; + +layout(std430, binding = 0) readonly buffer SSBO +{ + float in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + float out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + float idata = in_data[ident]; + + sShared[gl_LocalInvocationIndex] = idata; + memoryBarrierShared(); + barrier(); + + out_data[ident] = sShared[gl_WorkGroupSize.x - gl_LocalInvocationIndex - 1u]; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/ssbo-array.comp b/3rdparty/spirv-cross/shaders/comp/ssbo-array.comp new file mode 100644 index 000000000..da0eae088 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/ssbo-array.comp @@ -0,0 +1,14 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + vec4 data[]; +} ssbos[2]; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + ssbos[1].data[ident] = ssbos[0].data[ident]; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/struct-layout.comp b/3rdparty/spirv-cross/shaders/comp/struct-layout.comp new file mode 100644 index 000000000..5a2b7802d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/struct-layout.comp @@ -0,0 +1,24 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct Foo +{ + mat4 m; +}; + +layout(std430, binding = 0) readonly buffer SSBO +{ + Foo in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + Foo out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + out_data[ident].m = in_data[ident].m * in_data[ident].m; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/struct-packing.comp b/3rdparty/spirv-cross/shaders/comp/struct-packing.comp new file mode 100644 index 000000000..7a1be0478 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/struct-packing.comp @@ -0,0 +1,86 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct S0 +{ + vec2 a[1]; + float b; +}; + +struct S1 +{ + vec3 a; + float b; +}; + +struct S2 +{ + vec3 a[1]; + float b; +}; + +struct S3 +{ + vec2 a; + float b; +}; + +struct S4 +{ + vec2 c; +}; + +struct Content +{ + S0 m0s[1]; + S1 m1s[1]; + S2 m2s[1]; + S0 m0; + S1 m1; + S2 m2; + S3 m3; + float m4; + + S4 m3s[8]; +}; + +layout(binding = 1, std430) restrict buffer SSBO1 +{ + Content content; + Content content1[2]; + Content content2; + + layout(column_major) mat2 m0; + layout(column_major) mat2 m1; + layout(column_major) mat2x3 m2[4]; + layout(column_major) mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + float array[]; +} ssbo_430; + +layout(binding = 0, std140) restrict buffer SSBO0 +{ + Content content; + Content content1[2]; + Content content2; + + layout(column_major) mat2 m0; + layout(column_major) mat2 m1; + layout(column_major) mat2x3 m2[4]; + layout(column_major) mat3x2 m3; + layout(row_major) mat2 m4; + layout(row_major) mat2 m5[9]; + layout(row_major) mat2x3 m6[4][2]; + layout(row_major) mat3x2 m7; + + float array[]; +} ssbo_140; + +void main() +{ + ssbo_430.content = ssbo_140.content; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/torture-loop.comp b/3rdparty/spirv-cross/shaders/comp/torture-loop.comp new file mode 100644 index 000000000..54a1221a1 --- 
/dev/null +++ b/3rdparty/spirv-cross/shaders/comp/torture-loop.comp @@ -0,0 +1,40 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) readonly buffer SSBO +{ + mat4 mvp; + vec4 in_data[]; +}; + +layout(std430, binding = 1) writeonly buffer SSBO2 +{ + vec4 out_data[]; +}; + +void main() +{ + uint ident = gl_GlobalInvocationID.x; + vec4 idat = in_data[ident]; + + int k = 0; + + // Continue with side effects. + while (++k < 10) + { + idat *= 2.0; + k++; + } + + // Again used here ... + for (uint i = 0u; i < 16u; i++, k++) + for (uint j = 0u; j < 30u; j++) + idat = mvp * idat; + + do + { + k++; + } while (k > 10); + out_data[ident] = idat; +} + diff --git a/3rdparty/spirv-cross/shaders/comp/type-alias.comp b/3rdparty/spirv-cross/shaders/comp/type-alias.comp new file mode 100644 index 000000000..343d350a2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/type-alias.comp @@ -0,0 +1,45 @@ +#version 310 es +layout(local_size_x = 1) in; + +struct S0 +{ + vec4 a; +}; + +struct S1 +{ + vec4 a; +}; + +vec4 overload(S0 s0) +{ + return s0.a; +} + +vec4 overload(S1 s1) +{ + return s1.a; +} + +layout(std430, binding = 0) buffer SSBO0 +{ + S0 s0s[]; +}; + +layout(std430, binding = 1) buffer SSBO1 +{ + S1 s1s[]; +}; + +layout(std430, binding = 2) buffer SSBO2 +{ + vec4 outputs[]; +}; + + +void main() +{ + S0 s0 = s0s[gl_GlobalInvocationID.x]; + S1 s1 = s1s[gl_GlobalInvocationID.x]; + outputs[gl_GlobalInvocationID.x] = overload(s0) + overload(s1); +} diff --git a/3rdparty/spirv-cross/shaders/comp/udiv.comp b/3rdparty/spirv-cross/shaders/comp/udiv.comp new file mode 100644 index 000000000..33fe564f0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/comp/udiv.comp @@ -0,0 +1,17 @@ +#version 310 es +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + uint inputs[]; +}; + +layout(std430, binding = 0) buffer SSBO2 +{ + uint outputs[]; +}; + +void main() +{ + outputs[gl_GlobalInvocationID.x] = inputs[gl_GlobalInvocationID.x] / 29u; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/comp/enhanced-layouts.comp b/3rdparty/spirv-cross/shaders/desktop-only/comp/enhanced-layouts.comp new file mode 100644 index 000000000..470b73e9b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/comp/enhanced-layouts.comp @@ -0,0 +1,39 @@ +#version 450 + +struct Foo +{ + int a; + int b; + int c; +}; + +layout(std140, binding = 0) uniform UBO +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + layout(offset = 48) int c[8]; +} ubo; + +layout(std140, binding = 1) buffer SSBO1 +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + layout(offset = 48) int c[8]; +} ssbo1; + +layout(std430, binding = 2) buffer SSBO2 +{ + layout(offset = 4) int a; + layout(offset = 8) int b; + layout(offset = 16) Foo foo; + layout(offset = 48) int c[8]; +} ssbo2; + +void main() +{ + ssbo1.a = ssbo2.a; + ssbo1.b = ubo.b; +} + diff --git a/3rdparty/spirv-cross/shaders/desktop-only/comp/extended-arithmetic.desktop.comp b/3rdparty/spirv-cross/shaders/desktop-only/comp/extended-arithmetic.desktop.comp new file mode 100644 index 000000000..9623751b6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/comp/extended-arithmetic.desktop.comp @@ -0,0 +1,41 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(binding = 0, std430) buffer SSBOUint +{ + uint a, b, c, d; + uvec2 a2, b2, c2, d2; + uvec3 a3, b3, c3, d3; + uvec4 a4, b4, c4, d4; +} u; + +layout(binding = 1, std430) buffer SSBOInt +{ + int a, b, c, 
d; + ivec2 a2, b2, c2, d2; + ivec3 a3, b3, c3, d3; + ivec4 a4, b4, c4, d4; +} i; + +void main() +{ + u.c = uaddCarry(u.a, u.b, u.d); + u.c2 = uaddCarry(u.a2, u.b2, u.d2); + u.c3 = uaddCarry(u.a3, u.b3, u.d3); + u.c4 = uaddCarry(u.a4, u.b4, u.d4); + + u.c = usubBorrow(u.a, u.b, u.d); + u.c2 = usubBorrow(u.a2, u.b2, u.d2); + u.c3 = usubBorrow(u.a3, u.b3, u.d3); + u.c4 = usubBorrow(u.a4, u.b4, u.d4); + + umulExtended(u.a, u.b, u.c, u.d); + umulExtended(u.a2, u.b2, u.c2, u.d2); + umulExtended(u.a3, u.b3, u.c3, u.d3); + umulExtended(u.a4, u.b4, u.c4, u.d4); + + imulExtended(i.a, i.b, i.c, i.d); + imulExtended(i.a2, i.b2, i.c2, i.d2); + imulExtended(i.a3, i.b3, i.c3, i.d3); + imulExtended(i.a4, i.b4, i.c4, i.d4); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/comp/fp64.desktop.comp b/3rdparty/spirv-cross/shaders/desktop-only/comp/fp64.desktop.comp new file mode 100644 index 000000000..2c2d5018d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/comp/fp64.desktop.comp @@ -0,0 +1,91 @@ +#version 450 +layout(local_size_x = 1) in; + +struct M0 +{ + double v; + dvec2 b[2]; + dmat2x3 c; + dmat3x2 d; +}; + +// Test buffer layout handling. +layout(std430, binding = 0) buffer SSBO0 +{ + dvec4 a; + M0 m0; + dmat4 b; +} ssbo_0; + +layout(std430, binding = 1) buffer SSBO1 +{ + dmat4 a; + dvec4 b; + M0 m0; +} ssbo_1; + +layout(std430, binding = 2) buffer SSBO2 +{ + double a[4]; + dvec2 b[4]; +} ssbo_2; + +layout(std140, binding = 3) buffer SSBO3 +{ + double a[4]; + dvec2 b[4]; +} ssbo_3; + +void main() +{ + ssbo_0.a += dvec4(10, 20, 30, 40); + ssbo_0.a += 20; + + dvec4 a = ssbo_0.a; + dmat4 amat = ssbo_0.b; + + ssbo_0.a = abs(a); + ssbo_0.a = sign(a); + ssbo_0.a = floor(a); + ssbo_0.a = trunc(a); + ssbo_0.a = round(a); + ssbo_0.a = roundEven(a); + ssbo_0.a = ceil(a); + ssbo_0.a = fract(a); + ssbo_0.a = mod(a, 20.0); + ssbo_0.a = mod(a, a); + ssbo_0.a = min(a, a); + ssbo_0.a = max(a, a); + ssbo_0.a = clamp(a, a, a); + ssbo_0.a = mix(a, a, a); + ssbo_0.a = step(a, a); + ssbo_0.a = smoothstep(a, a, a); + bvec4 b = isnan(a); + bvec4 c = isinf(a); + + double f = packDouble2x32(uvec2(10, 40)); + uvec2 g = unpackDouble2x32(f); + + double d = length(a); + d = distance(a, a); + d = dot(a, a); + dvec3 e = cross(a.xyz, a.yzw); + a = faceforward(a, a, a); + a = reflect(a, a); + //a = refract(a, a, 1.45); + + dmat4 l = matrixCompMult(amat, amat); + l = outerProduct(a, a); + l = transpose(l); + double m = determinant(l); + l = inverse(l); + + bvec4 k = lessThan(a, a); + k = lessThanEqual(a, a); + k = greaterThan(a, a); + k = greaterThanEqual(a, a); + + ssbo_1.b.x += 1.0lf; + ssbo_2.b[0].x += 1.0lf; + ssbo_3.b[0].x += 1.0lf; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp b/3rdparty/spirv-cross/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp new file mode 100644 index 000000000..5a70623c8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/comp/image-formats.desktop.noeliminate.comp @@ -0,0 +1,48 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(rgba32f, binding = 0) uniform image2D uImg00; +layout(rgba16f, binding = 1) uniform image2D uImg01; +layout(rg32f, binding = 2) uniform image2D uImg02; +layout(rg16f, binding = 3) uniform image2D uImg03; +layout(r11f_g11f_b10f, binding = 4) uniform image2D uImg04; +layout(r32f, binding = 5) uniform image2D uImg05; +layout(r16f, binding = 6) uniform image2D uImg06; +layout(rgba16, binding = 7) uniform image2D uImg07; +layout(rgb10_a2, binding = 8) uniform image2D uImg08; 
+layout(rgba8, binding = 9) uniform image2D uImg09; +layout(rg16, binding = 10) uniform image2D uImg10; +layout(rg8, binding = 11) uniform image2D uImg11; +layout(r16, binding = 12) uniform image2D uImg12; +layout(r8, binding = 13) uniform image2D uImg13; +layout(rgba16_snorm, binding = 14) uniform image2D uImg14; +layout(rgba8_snorm, binding = 15) uniform image2D uImg15; +layout(rg16_snorm, binding = 16) uniform image2D uImg16; +layout(rg8_snorm, binding = 17) uniform image2D uImg17; +layout(r16_snorm, binding = 18) uniform image2D uImg18; +layout(r8_snorm, binding = 19) uniform image2D uImg19; + +layout(rgba32i, binding = 20) uniform iimage2D uImage20; +layout(rgba16i, binding = 21) uniform iimage2D uImage21; +layout(rgba8i, binding = 22) uniform iimage2D uImage22; +layout(rg32i, binding = 23) uniform iimage2D uImage23; +layout(rg16i, binding = 24) uniform iimage2D uImage24; +layout(rg8i, binding = 25) uniform iimage2D uImage25; +layout(r32i, binding = 26) uniform iimage2D uImage26; +layout(r16i, binding = 27) uniform iimage2D uImage27; +layout(r8i, binding = 28) uniform iimage2D uImage28; + +layout(rgba32ui, binding = 29) uniform uimage2D uImage29; +layout(rgba16ui, binding = 30) uniform uimage2D uImage30; +layout(rgb10_a2ui, binding = 31) uniform uimage2D uImage31; +layout(rgba8ui, binding = 32) uniform uimage2D uImage32; +layout(rg32ui, binding = 33) uniform uimage2D uImage33; +layout(rg16ui, binding = 34) uniform uimage2D uImage34; +layout(rg8ui, binding = 35) uniform uimage2D uImage35; +layout(r32ui, binding = 36) uniform uimage2D uImage36; +layout(r16ui, binding = 37) uniform uimage2D uImage37; +layout(r8ui, binding = 38) uniform uimage2D uImage38; + +void main() +{ +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/comp/int64.desktop.comp b/3rdparty/spirv-cross/shaders/desktop-only/comp/int64.desktop.comp new file mode 100644 index 000000000..81004d4ad --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/comp/int64.desktop.comp @@ -0,0 +1,55 @@ +#version 450 +#extension GL_ARB_gpu_shader_int64 : require +layout(local_size_x = 1) in; + +struct M0 +{ + int64_t v; + i64vec2 b[2]; + uint64_t c; + uint64_t d[5]; +}; + +// Test buffer layout handling. 
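+// SSBO0 through SSBO2 use std430, while SSBO3 uses std140, so 64-bit scalar and
+// vector array strides are exercised under both layouts.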
+layout(std430, binding = 0) buffer SSBO0 +{ + i64vec4 a; + M0 m0; +} ssbo_0; + +layout(std430, binding = 1) buffer SSBO1 +{ + u64vec4 b; + M0 m0; +} ssbo_1; + +layout(std430, binding = 2) buffer SSBO2 +{ + int64_t a[4]; + i64vec2 b[4]; +} ssbo_2; + +layout(std140, binding = 3) buffer SSBO3 +{ + int64_t a[4]; + i64vec2 b[4]; +} ssbo_3; + +void main() +{ + ssbo_0.a += i64vec4(10, 20, 30, 40); + ssbo_1.b += u64vec4(999999999999999999ul, 8888888888888888ul, 77777777777777777ul, 6666666666666666ul); + ssbo_0.a += 20; + ssbo_0.a = abs(ssbo_0.a + i64vec4(ssbo_1.b)); + + ssbo_0.a++; + ssbo_1.b++; + ssbo_0.a--; + ssbo_1.b--; + + ssbo_1.b = doubleBitsToUint64(int64BitsToDouble(ssbo_0.a)); + ssbo_0.a = doubleBitsToInt64(uint64BitsToDouble(ssbo_1.b)); + + ssbo_2.a[0] += 1l; + ssbo_3.a[0] += 2l; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/clip-cull-distance.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/clip-cull-distance.desktop.frag new file mode 100644 index 000000000..5212fd644 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/clip-cull-distance.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +in float gl_ClipDistance[4]; +in float gl_CullDistance[3]; + +layout(location = 0) out float FragColor; + +void main() +{ + FragColor = gl_ClipDistance[0] + gl_CullDistance[0]; +} + diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag new file mode 100644 index 000000000..7c75ffe1b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/control-dependent-in-branch.desktop.frag @@ -0,0 +1,36 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uSampler; +layout(location = 0) in vec4 vInput; + +void main() +{ + FragColor = vInput; + vec4 t = texture(uSampler, vInput.xy); + vec4 d0 = dFdx(vInput); + vec4 d1 = dFdy(vInput); + vec4 d2 = fwidth(vInput); + vec4 d3 = dFdxCoarse(vInput); + vec4 d4 = dFdyCoarse(vInput); + vec4 d5 = fwidthCoarse(vInput); + vec4 d6 = dFdxFine(vInput); + vec4 d7 = dFdyFine(vInput); + vec4 d8 = fwidthFine(vInput); + vec2 lod = textureQueryLod(uSampler, vInput.zw); + if (vInput.y > 10.0) + { + FragColor += t; + FragColor += d0; + FragColor += d1; + FragColor += d2; + FragColor += d3; + FragColor += d4; + FragColor += d5; + FragColor += d6; + FragColor += d7; + FragColor += d8; + FragColor += lod.xyxy; + } +} + diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/depth-greater-than.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/depth-greater-than.desktop.frag new file mode 100644 index 000000000..88f9a4214 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/depth-greater-than.desktop.frag @@ -0,0 +1,8 @@ +#version 450 +layout(early_fragment_tests) in; +layout(depth_greater) out float gl_FragDepth; + +void main() +{ + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/depth-less-than.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/depth-less-than.desktop.frag new file mode 100644 index 000000000..87fdd4620 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/depth-less-than.desktop.frag @@ -0,0 +1,8 @@ +#version 450 +layout(early_fragment_tests) in; +layout(depth_less) out float gl_FragDepth; + +void main() +{ + gl_FragDepth = 0.5; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/dual-source-blending.desktop.frag 
b/3rdparty/spirv-cross/shaders/desktop-only/frag/dual-source-blending.desktop.frag new file mode 100644 index 000000000..f322cf4c3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/dual-source-blending.desktop.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(location = 0, index = 0) out vec4 FragColor0; +layout(location = 0, index = 1) out vec4 FragColor1; + +void main() +{ + FragColor0 = vec4(1.0); + FragColor1 = vec4(2.0); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/fp16.invalid.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/fp16.invalid.desktop.frag new file mode 100644 index 000000000..f3517a92f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/fp16.invalid.desktop.frag @@ -0,0 +1,151 @@ +#version 450 +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0) in float16_t v1; +layout(location = 1) in f16vec2 v2; +layout(location = 2) in f16vec3 v3; +layout(location = 3) in f16vec4 v4; + +layout(location = 0) out float o1; +layout(location = 1) out vec2 o2; +layout(location = 2) out vec3 o3; +layout(location = 3) out vec4 o4; + +f16mat2 test_mat2(f16vec2 a, f16vec2 b, f16vec2 c, f16vec2 d) +{ + return f16mat2(a, b) * f16mat2(c, d); +} + +f16mat3 test_mat3(f16vec3 a, f16vec3 b, f16vec3 c, f16vec3 d, f16vec3 e, f16vec3 f) +{ + return f16mat3(a, b, c) * f16mat3(d, e, f); +} + +void test_constants() +{ + float16_t a = 1.0hf; + float16_t b = 1.5hf; + float16_t c = -1.5hf; // Negatives + float16_t d = (0.0hf / 0.0hf); // NaN + float16_t e = (1.0hf / 0.0hf); // +Inf + float16_t f = (-1.0hf / 0.0hf); // -Inf + float16_t g = 1014.0hf; // Large. + float16_t h = 0.000001hf; // Denormal +} + +float16_t test_result() +{ + return 1.0hf; +} + +void test_conversions() +{ + float16_t one = test_result(); + int a = int(one); + uint b = uint(one); + bool c = bool(one); + float d = float(one); + double e = double(one); + float16_t a2 = float16_t(a); + float16_t b2 = float16_t(b); + float16_t c2 = float16_t(c); + float16_t d2 = float16_t(d); + float16_t e2 = float16_t(e); +} + +void test_builtins() +{ + f16vec4 res; + res = radians(v4); + res = degrees(v4); + res = sin(v4); + res = cos(v4); + res = tan(v4); + res = asin(v4); + res = atan(v4, v3.xyzz); + res = atan(v4); + res = sinh(v4); + res = cosh(v4); + res = tanh(v4); + res = asinh(v4); + res = acosh(v4); + res = atanh(v4); + res = pow(v4, v4); + res = exp(v4); + res = log(v4); + res = exp2(v4); + res = log2(v4); + res = sqrt(v4); + res = inversesqrt(v4); + res = abs(v4); + res = sign(v4); + res = floor(v4); + res = trunc(v4); + res = round(v4); + res = roundEven(v4); + res = ceil(v4); + res = fract(v4); + res = mod(v4, v4); + f16vec4 tmp; + res = modf(v4, tmp); + res = min(v4, v4); + res = max(v4, v4); + res = clamp(v4, v4, v4); + res = mix(v4, v4, v4); + res = mix(v4, v4, lessThan(v4, v4)); + res = step(v4, v4); + res = smoothstep(v4, v4, v4); + + bvec4 btmp = isnan(v4); + btmp = isinf(v4); + res = fma(v4, v4, v4); + + ivec4 itmp; + res = frexp(v4, itmp); + res = ldexp(res, itmp); + + uint pack0 = packFloat2x16(v4.xy); + uint pack1 = packFloat2x16(v4.zw); + res = f16vec4(unpackFloat2x16(pack0), unpackFloat2x16(pack1)); + + float16_t t0 = length(v4); + t0 = distance(v4, v4); + t0 = dot(v4, v4); + f16vec3 res3 = cross(v3, v3); + res = normalize(v4); + res = faceforward(v4, v4, v4); + res = reflect(v4, v4); + res = refract(v4, v4, v1); + + btmp = lessThan(v4, v4); + btmp = lessThanEqual(v4, v4); + btmp = greaterThan(v4, v4); + btmp = greaterThanEqual(v4, v4); + btmp = 
equal(v4, v4); + btmp = notEqual(v4, v4); + + res = dFdx(v4); + res = dFdy(v4); + res = dFdxFine(v4); + res = dFdyFine(v4); + res = dFdxCoarse(v4); + res = dFdyCoarse(v4); + res = fwidth(v4); + res = fwidthFine(v4); + res = fwidthCoarse(v4); + + //res = interpolateAtCentroid(v4); + //res = interpolateAtSample(v4, 0); + //res = interpolateAtOffset(v4, f16vec2(0.1hf)); +} + +void main() +{ + // Basic matrix tests. + f16mat2 m0 = test_mat2(v2, v2, v3.xy, v3.xy); + f16mat3 m1 = test_mat3(v3, v3, v3, v4.xyz, v4.xyz, v4.yzw); + + test_constants(); + test_conversions(); + test_builtins(); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag new file mode 100644 index 000000000..1c6dd7b8b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/hlsl-uav-block-alias.asm.frag @@ -0,0 +1,56 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 29 +; Schema: 0 + OpCapability Shader + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %Foobar "Foobar" + OpMemberName %Foobar 0 "@data" + OpName %Foobar_0 "Foobar" + OpName %Foobaz "Foobaz" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %_runtimearr_v4float ArrayStride 16 + OpMemberDecorate %Foobar 0 Offset 0 + OpDecorate %Foobar BufferBlock + OpDecorate %Foobar_0 DescriptorSet 0 + OpDecorate %Foobar_0 Binding 0 + OpDecorate %Foobaz DescriptorSet 0 + OpDecorate %Foobaz Binding 1 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float +%_runtimearr_v4float = OpTypeRuntimeArray %v4float + %Foobar = OpTypeStruct %_runtimearr_v4float +%_ptr_Uniform_Foobar = OpTypePointer Uniform %Foobar + %Foobar_0 = OpVariable %_ptr_Uniform_Foobar Uniform + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 +%_ptr_Uniform_v4float = OpTypePointer Uniform %v4float + %Foobaz = OpVariable %_ptr_Uniform_Foobar Uniform +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %28 = OpFunctionCall %v4float %_main_ + OpStore %_entryPointOutput %28 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v4float None %8 + %10 = OpLabel + %18 = OpAccessChain %_ptr_Uniform_v4float %Foobar_0 %int_0 %int_0 + %19 = OpLoad %v4float %18 + %21 = OpAccessChain %_ptr_Uniform_v4float %Foobaz %int_0 %int_0 + %22 = OpLoad %v4float %21 + %23 = OpFAdd %v4float %19 %22 + OpReturnValue %23 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/image-ms.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/image-ms.desktop.frag new file mode 100644 index 000000000..d3acc3081 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/image-ms.desktop.frag @@ -0,0 +1,12 @@ +#version 450 + +layout(rgba8, binding = 0) uniform image2DMS uImage; +layout(rgba8, binding = 1) uniform image2DMSArray uImageArray; + +void main() +{ + vec4 a = imageLoad(uImage, ivec2(1, 2), 2); + vec4 b = imageLoad(uImageArray, ivec3(1, 2, 4), 3); + imageStore(uImage, ivec2(2, 3), 1, a); + imageStore(uImageArray, ivec3(2, 3, 7), 1, b); +} diff --git 
a/3rdparty/spirv-cross/shaders/desktop-only/frag/image-query.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/image-query.desktop.frag new file mode 100644 index 000000000..a5cbe011e --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/image-query.desktop.frag @@ -0,0 +1,56 @@ +#version 450 + +layout(binding = 0) uniform sampler1D uSampler1D; +layout(binding = 1) uniform sampler2D uSampler2D; +layout(binding = 2) uniform sampler2DArray uSampler2DArray; +layout(binding = 3) uniform sampler3D uSampler3D; +layout(binding = 4) uniform samplerCube uSamplerCube; +layout(binding = 5) uniform samplerCubeArray uSamplerCubeArray; +layout(binding = 6) uniform samplerBuffer uSamplerBuffer; +layout(binding = 7) uniform sampler2DMS uSamplerMS; +layout(binding = 8) uniform sampler2DMSArray uSamplerMSArray; + +layout(r32f, binding = 9) uniform image1D uImage1D; +layout(r32f, binding = 10) uniform image2D uImage2D; +layout(r32f, binding = 11) uniform image2DArray uImage2DArray; +layout(r32f, binding = 12) uniform image3D uImage3D; +layout(r32f, binding = 13) uniform imageCube uImageCube; +layout(r32f, binding = 14) uniform imageCubeArray uImageCubeArray; +layout(r32f, binding = 15) uniform imageBuffer uImageBuffer; +layout(r32f, binding = 16) uniform image2DMS uImageMS; +layout(r32f, binding = 17) uniform image2DMSArray uImageMSArray; + +void main() +{ + int a = textureSize(uSampler1D, 0); + ivec2 b = textureSize(uSampler2D, 0); + ivec3 c = textureSize(uSampler2DArray, 0); + ivec3 d = textureSize(uSampler3D, 0); + ivec2 e = textureSize(uSamplerCube, 0); + ivec3 f = textureSize(uSamplerCubeArray, 0); + int g = textureSize(uSamplerBuffer); + ivec2 h = textureSize(uSamplerMS); + ivec3 i = textureSize(uSamplerMSArray); + + int l0 = textureQueryLevels(uSampler1D); + int l1 = textureQueryLevels(uSampler2D); + int l2 = textureQueryLevels(uSampler2DArray); + int l3 = textureQueryLevels(uSampler3D); + int l4 = textureQueryLevels(uSamplerCube); + int l5 = textureQueryLevels(uSamplerCubeArray); + + a = imageSize(uImage1D); + b = imageSize(uImage2D); + c = imageSize(uImage2DArray); + d = imageSize(uImage3D); + e = imageSize(uImageCube); + f = imageSize(uImageCubeArray); + g = imageSize(uImageBuffer); + h = imageSize(uImageMS); + i = imageSize(uImageMSArray); + + int s0 = textureSamples(uSamplerMS); + int s1 = textureSamples(uSamplerMSArray); + int s2 = imageSamples(uImageMS); + int s3 = imageSamples(uImageMSArray); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/in-block-qualifiers.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/in-block-qualifiers.frag new file mode 100644 index 000000000..f22096e6d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/in-block-qualifiers.frag @@ -0,0 +1,20 @@ +#version 450 + +layout(location = 0) in VertexData { + flat float f; + centroid vec4 g; + flat int h; + float i; +} vin; + +layout(location = 4) in flat float f; +layout(location = 5) in centroid vec4 g; +layout(location = 6) in flat int h; +layout(location = 7) in sample float i; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vin.f + vin.g + float(vin.h) + vin.i + f + g + float(h) + i; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/layout-component.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/layout-component.desktop.frag new file mode 100644 index 000000000..fade67ba6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/layout-component.desktop.frag @@ -0,0 +1,14 @@ +#version 450 
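+// Note on this test: the 'component' layout qualifier packs several varyings
+// into one location slot; below, v0 takes components 0-1 of location 0,
+// v1 takes component 2, and the block member v3 uses component 2 of location 1.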
+layout(location = 0, component = 0) in vec2 v0; +layout(location = 0, component = 2) in float v1; +layout(location = 0) out vec2 FragColor; + +in Vertex +{ + layout(location = 1, component = 2) in float v3; +}; + +void main() +{ + FragColor = v0 + v1 + v3; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/query-levels.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/query-levels.desktop.frag new file mode 100644 index 000000000..3a6977611 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/query-levels.desktop.frag @@ -0,0 +1,9 @@ +#version 450 + +layout(binding = 0) uniform sampler2D uSampler; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(textureQueryLevels(uSampler)); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/query-lod.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/query-lod.desktop.frag new file mode 100644 index 000000000..0cb160402 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/query-lod.desktop.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(location = 0) in vec2 vTexCoord; +layout(binding = 0) uniform sampler2D uSampler; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureQueryLod(uSampler, vTexCoord).xyxy; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/sampler-ms-query.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/sampler-ms-query.desktop.frag new file mode 100644 index 000000000..f707ed5c4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/sampler-ms-query.desktop.frag @@ -0,0 +1,17 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2DMS uSampler; +layout(binding = 1) uniform sampler2DMSArray uSamplerArray; +layout(rgba8, binding = 2) uniform image2DMS uImage; +layout(rgba8, binding = 3) uniform image2DMSArray uImageArray; + +void main() +{ + FragColor = + vec4( + textureSamples(uSampler) + + textureSamples(uSamplerArray) + + imageSamples(uImage) + + imageSamples(uImageArray)); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag b/3rdparty/spirv-cross/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag new file mode 100644 index 000000000..0c4cf8f5a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/frag/texture-proj-shadow.desktop.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(binding = 0) uniform sampler1DShadow uShadow1D; +layout(binding = 1) uniform sampler2DShadow uShadow2D; +layout(binding = 2) uniform sampler1D uSampler1D; +layout(binding = 3) uniform sampler2D uSampler2D; +layout(binding = 4) uniform sampler3D uSampler3D; + +layout(location = 0) out float FragColor; +layout(location = 0) in vec3 vClip3; +layout(location = 1) in vec4 vClip4; +layout(location = 2) in vec2 vClip2; + +void main() +{ + FragColor = textureProj(uShadow1D, vClip4); + FragColor = textureProj(uShadow2D, vClip4); + FragColor = textureProj(uSampler1D, vClip2).x; + FragColor = textureProj(uSampler2D, vClip3).x; + FragColor = textureProj(uSampler3D, vClip4).x; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/geom/basic.desktop.sso.geom b/3rdparty/spirv-cross/shaders/desktop-only/geom/basic.desktop.sso.geom new file mode 100644 index 000000000..f3d331dd1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/geom/basic.desktop.sso.geom @@ -0,0 +1,37 @@ +#version 450 + +layout(triangles, invocations = 4) in; +layout(triangle_strip, max_vertices = 3) out; + +in gl_PerVertex +{ + vec4 
gl_Position; +} gl_in[]; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal + float(gl_InvocationID); + EmitVertex(); + + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal + 4.0 * float(gl_InvocationID); + EmitVertex(); + + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal + 2.0 * float(gl_InvocationID); + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/geom/viewport-index.desktop.geom b/3rdparty/spirv-cross/shaders/desktop-only/geom/viewport-index.desktop.geom new file mode 100644 index 000000000..e02e81daf --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/geom/viewport-index.desktop.geom @@ -0,0 +1,11 @@ +#version 450 + +layout(triangles) in; +layout(triangle_strip) out; +layout(max_vertices = 4) out; + +void main() +{ + gl_ViewportIndex = 1; +} + diff --git a/3rdparty/spirv-cross/shaders/desktop-only/tesc/basic.desktop.sso.tesc b/3rdparty/spirv-cross/shaders/desktop-only/tesc/basic.desktop.sso.tesc new file mode 100644 index 000000000..8ff739b0a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/tesc/basic.desktop.sso.tesc @@ -0,0 +1,28 @@ +#version 450 +layout(vertices = 1) out; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[gl_MaxPatchVertices]; + +out gl_PerVertex +{ + vec4 gl_Position; +} gl_out[1]; + +layout(location = 0) patch out vec3 vFoo; + + +void main() +{ + gl_TessLevelInner[0] = 8.9; + gl_TessLevelInner[1] = 6.9; + gl_TessLevelOuter[0] = 8.9; + gl_TessLevelOuter[1] = 6.9; + gl_TessLevelOuter[2] = 3.9; + gl_TessLevelOuter[3] = 4.9; + vFoo = vec3(1.0); + + gl_out[gl_InvocationID].gl_Position = gl_in[0].gl_Position + gl_in[1].gl_Position; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/tese/triangle.desktop.sso.tese b/3rdparty/spirv-cross/shaders/desktop-only/tese/triangle.desktop.sso.tese new file mode 100644 index 000000000..c964fbe26 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/tese/triangle.desktop.sso.tese @@ -0,0 +1,22 @@ +#version 450 + +layout(cw, triangles, fractional_even_spacing) in; + +in gl_PerVertex +{ + vec4 gl_Position; +} gl_in[gl_MaxPatchVertices]; + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = + gl_in[0].gl_Position * gl_TessCoord.x + + gl_in[1].gl_Position * gl_TessCoord.y + + gl_in[2].gl_Position * gl_TessCoord.z; +} + diff --git a/3rdparty/spirv-cross/shaders/desktop-only/vert/basic.desktop.sso.vert b/3rdparty/spirv-cross/shaders/desktop-only/vert/basic.desktop.sso.vert new file mode 100644 index 000000000..9ddab08cd --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/vert/basic.desktop.sso.vert @@ -0,0 +1,20 @@ +#version 450 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; +}; +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert b/3rdparty/spirv-cross/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert new file mode 100644 index 000000000..1489cc7a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/vert/clip-cull-distance.desktop.sso.vert @@ -0,0 +1,13 @@ +#version 450 +out float gl_ClipDistance[4]; +out float 
gl_CullDistance[3]; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_ClipDistance[2] = 0.0; + gl_ClipDistance[3] = 0.0; + gl_CullDistance[1] = 4.0; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/vert/clip-cull-distance.desktop.vert b/3rdparty/spirv-cross/shaders/desktop-only/vert/clip-cull-distance.desktop.vert new file mode 100644 index 000000000..1489cc7a1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/vert/clip-cull-distance.desktop.vert @@ -0,0 +1,13 @@ +#version 450 +out float gl_ClipDistance[4]; +out float gl_CullDistance[3]; + +void main() +{ + gl_Position = vec4(1.0); + gl_ClipDistance[0] = 0.0; + gl_ClipDistance[1] = 0.0; + gl_ClipDistance[2] = 0.0; + gl_ClipDistance[3] = 0.0; + gl_CullDistance[1] = 4.0; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/vert/out-block-qualifiers.vert b/3rdparty/spirv-cross/shaders/desktop-only/vert/out-block-qualifiers.vert new file mode 100644 index 000000000..c1e409fb4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/vert/out-block-qualifiers.vert @@ -0,0 +1,26 @@ +#version 450 + +layout(location = 0) out VertexData { + flat float f; + centroid vec4 g; + flat int h; + float i; +} vout; + +layout(location = 4) out flat float f; +layout(location = 5) out centroid vec4 g; +layout(location = 6) out flat int h; +layout(location = 7) out float i; + +void main() +{ + vout.f = 10.0; + vout.g = vec4(20.0); + vout.h = 20; + vout.i = 30.0; + + f = 10.0; + g = vec4(20.0); + h = 20; + i = 30.0; +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert b/3rdparty/spirv-cross/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert new file mode 100644 index 000000000..b5cde0270 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/vert/shader-draw-parameters-450.desktop.vert @@ -0,0 +1,12 @@ +#version 450 +#extension GL_ARB_shader_draw_parameters : enable + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = vec4(gl_BaseVertexARB, gl_BaseInstanceARB, gl_DrawIDARB, 1); +} diff --git a/3rdparty/spirv-cross/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert b/3rdparty/spirv-cross/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert new file mode 100644 index 000000000..997804ece --- /dev/null +++ b/3rdparty/spirv-cross/shaders/desktop-only/vert/shader-draw-parameters.desktop.vert @@ -0,0 +1,11 @@ +#version 460 + +out gl_PerVertex +{ + vec4 gl_Position; +}; + +void main() +{ + gl_Position = vec4(gl_BaseVertex, gl_BaseInstance, gl_DrawID, 1); +} diff --git a/3rdparty/spirv-cross/shaders/flatten/array.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/array.flatten.vert new file mode 100644 index 000000000..fa6da076c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/array.flatten.vert @@ -0,0 +1,19 @@ +#version 310 es + +layout(std140) uniform UBO +{ + vec4 A4[5][4][2]; + mat4 uMVP; + vec4 A1[2]; + vec4 A2[2][3]; + float A3[3]; + vec4 Offset; +}; +layout(location = 0) in vec4 aVertex; + +void main() +{ + vec4 a4 = A4[2][3][1]; // 2 * (4 * 2) + 3 * 2 + 1 = 16 + 6 + 1 = 23. 
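+ // In general, for vec4 A4[5][4][2] the element [i][j][k] has flat index
+ // i*(4*2) + j*2 + k (the innermost dimension varies fastest), which is the
+ // arithmetic spelled out above: 2*8 + 3*2 + 1 = 23.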
+ vec4 offset = A2[1][1] + A1[1] + A3[2]; + gl_Position = uMVP * aVertex + Offset + offset; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/basic.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/basic.flatten.vert new file mode 100644 index 000000000..e60a9067b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/basic.flatten.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(std140) uniform UBO +{ + mat4 uMVP; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/copy.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/copy.flatten.vert new file mode 100644 index 000000000..4f1b8805e --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/copy.flatten.vert @@ -0,0 +1,34 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + + vec4 Color; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; + + Light lights[4]; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec4 vColor; + +void main() +{ + gl_Position = uMVP * aVertex; + + vColor = vec4(0.0); + + for (int i = 0; i < 4; ++i) + { + Light light = lights[i]; + vec3 L = aVertex.xyz - light.Position; + vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / light.Radius, 0.0, 1.0) * lights[i].Color); + } +} diff --git a/3rdparty/spirv-cross/shaders/flatten/dynamic.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/dynamic.flatten.vert new file mode 100644 index 000000000..a341d4528 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/dynamic.flatten.vert @@ -0,0 +1,33 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + + vec4 Color; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; + + Light lights[4]; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec4 vColor; + +void main() +{ + gl_Position = uMVP * aVertex; + + vColor = vec4(0.0); + + for (int i = 0; i < 4; ++i) + { + vec3 L = aVertex.xyz - lights[i].Position; + vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / lights[i].Radius, 0.0, 1.0) * lights[i].Color); + } +} diff --git a/3rdparty/spirv-cross/shaders/flatten/matrix-conversion.flatten.frag b/3rdparty/spirv-cross/shaders/flatten/matrix-conversion.flatten.frag new file mode 100644 index 000000000..427825c34 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/matrix-conversion.flatten.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; +layout(location = 0) out vec3 FragColor; +layout(location = 0) flat in vec3 vNormal; + +layout(binding = 0, std140) uniform UBO +{ + mat4 m; +}; + +void main() +{ + FragColor = mat3(m) * vNormal; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/matrixindex.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/matrixindex.flatten.vert new file mode 100644 index 000000000..0ee783843 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/matrixindex.flatten.vert @@ -0,0 +1,25 @@ +#version 310 es + +layout(std140) uniform UBO +{ + layout(column_major) mat4 M1C; + layout(row_major) mat4 M1R; + layout(column_major) mat2x4 M2C; + layout(row_major) mat2x4 M2R; +}; + +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; + +void main() +{ + gl_Position = vec4(0.0); + oA = M1C[1]; + oB = 
M1R[1]; + oC = M2C[1]; + oD = M2R[0]; + oE = vec4(M1C[1][2], M1R[1][2], M2C[1][2], M2R[1][2]); +} diff --git a/3rdparty/spirv-cross/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag b/3rdparty/spirv-cross/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag new file mode 100644 index 000000000..24b2ff1d2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/multi-dimensional.desktop.invalid.flatten_dim.frag @@ -0,0 +1,18 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uTextures[2][3][1]; +layout(location = 0) flat in int vIndex; +layout(location = 1) in vec2 vUV; + +void main() +{ + vec4 values3[2][3][1]; + + for (int z = 0; z < 2; z++) + for (int y = 0; y < 3; y++) + for (int x = 0; x < 1; x++) + values3[z][y][x] = texture(uTextures[z][y][x], vUV); + + FragColor = values3[1][2][0] + values3[0][2][0] + values3[vIndex + 1][2][vIndex]; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/multiindex.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/multiindex.flatten.vert new file mode 100644 index 000000000..0b471d86e --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/multiindex.flatten.vert @@ -0,0 +1,13 @@ +#version 310 es + +layout(std140) uniform UBO +{ + vec4 Data[3][5]; +}; + +layout(location = 0) in ivec2 aIndex; + +void main() +{ + gl_Position = Data[aIndex.x][aIndex.y]; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/push-constant.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/push-constant.flatten.vert new file mode 100644 index 000000000..c7b1b42e1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/push-constant.flatten.vert @@ -0,0 +1,17 @@ +#version 310 es + +layout(push_constant, std430) uniform PushMe +{ + mat4 MVP; + mat2 Rot; // The MatrixStride will be 8 here. + float Arr[4]; +} registers; + +layout(location = 0) in vec2 Rot; +layout(location = 1) in vec4 Pos; +layout(location = 0) out vec2 vRot; +void main() +{ + gl_Position = registers.MVP * Pos; + vRot = registers.Rot * Rot + registers.Arr[2]; // Constant access should work even if array stride is just 4 here. 
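+ // Note: these strides follow from std430 packing: a float[] has an array
+ // stride of 4 and mat2 columns are packed as vec2 (stride 8), whereas std140
+ // would round both up to 16.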
+} diff --git a/3rdparty/spirv-cross/shaders/flatten/rowmajor.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/rowmajor.flatten.vert new file mode 100644 index 000000000..88c468c8f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/rowmajor.flatten.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(std140) uniform UBO +{ + layout(column_major) mat4 uMVPR; + layout(row_major) mat4 uMVPC; + layout(row_major) mat2x4 uMVP; +}; + +layout(location = 0) in vec4 aVertex; + +void main() +{ + vec2 v = aVertex * uMVP; + gl_Position = uMVPR * aVertex + uMVPC * aVertex; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/struct.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/struct.flatten.vert new file mode 100644 index 000000000..936bb41b8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/struct.flatten.vert @@ -0,0 +1,30 @@ +#version 310 es + +struct Light +{ + vec3 Position; + float Radius; + + vec4 Color; +}; + +layout(std140) uniform UBO +{ + mat4 uMVP; + + Light light; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec4 vColor; + +void main() +{ + gl_Position = uMVP * aVertex; + + vColor = vec4(0.0); + + vec3 L = aVertex.xyz - light.Position; + vColor += dot(aNormal, normalize(L)) * (clamp(1.0 - length(L) / light.Radius, 0.0, 1.0) * light.Color); +} diff --git a/3rdparty/spirv-cross/shaders/flatten/struct.rowmajor.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/struct.rowmajor.flatten.vert new file mode 100644 index 000000000..231389b8f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/struct.rowmajor.flatten.vert @@ -0,0 +1,26 @@ +#version 310 es + +struct Foo +{ + mat3x4 MVP0; + mat3x4 MVP1; +}; + +layout(std140, binding = 0) uniform UBO +{ + layout(row_major) Foo foo; +}; + +layout(location = 0) in vec4 v0; +layout(location = 1) in vec4 v1; +layout(location = 0) out vec3 V0; +layout(location = 1) out vec3 V1; + +void main() +{ + Foo f = foo; + vec3 a = v0 * f.MVP0; + vec3 b = v1 * f.MVP1; + V0 = a; + V1 = b; +} diff --git a/3rdparty/spirv-cross/shaders/flatten/swizzle.flatten.vert b/3rdparty/spirv-cross/shaders/flatten/swizzle.flatten.vert new file mode 100644 index 000000000..fafff7734 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/swizzle.flatten.vert @@ -0,0 +1,47 @@ +#version 310 es + +// comments note the 16b alignment boundaries (see GL spec 7.6.2.2 Standard Uniform Block Layout) +layout(std140, binding = 0) uniform UBO +{ + // 16b boundary + vec4 A; + // 16b boundary + vec2 B0; + vec2 B1; + // 16b boundary + float C0; + // 16b boundary (vec3 is aligned to 16b) + vec3 C1; + // 16b boundary + vec3 D0; + float D1; + // 16b boundary + float E0; + float E1; + float E2; + float E3; + // 16b boundary + float F0; + vec2 F1; + // 16b boundary (vec2 before us is aligned to 8b) + float F2; +}; + +layout(location = 0) out vec4 oA; +layout(location = 1) out vec4 oB; +layout(location = 2) out vec4 oC; +layout(location = 3) out vec4 oD; +layout(location = 4) out vec4 oE; +layout(location = 5) out vec4 oF; + +void main() +{ + gl_Position = vec4(0.0); + + oA = A; + oB = vec4(B0, B1); + oC = vec4(C0, C1); + oD = vec4(D0, D1); + oE = vec4(E0, E1, E2, E3); + oF = vec4(F0, F1, F2); +} diff --git a/3rdparty/spirv-cross/shaders/flatten/types.flatten.frag b/3rdparty/spirv-cross/shaders/flatten/types.flatten.frag new file mode 100644 index 000000000..faab5b7e0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/flatten/types.flatten.frag @@ -0,0 +1,27 @@ +#version 310 es +precision mediump float; + 
+layout(std140, binding = 0) uniform UBO0 +{ + vec4 a; + vec4 b; +}; + +layout(std140, binding = 0) uniform UBO1 +{ + ivec4 c; + ivec4 d; +}; + +layout(std140, binding = 0) uniform UBO2 +{ + uvec4 e; + uvec4 f; +}; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vec4(c) + vec4(d) + vec4(e) + vec4(f) + a + b; +} diff --git a/3rdparty/spirv-cross/shaders/frag/16bit-constants.frag b/3rdparty/spirv-cross/shaders/frag/16bit-constants.frag new file mode 100644 index 000000000..c53091b5b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/16bit-constants.frag @@ -0,0 +1,14 @@ +#version 450 core + +#extension GL_AMD_gpu_shader_int16 : require +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0) out float16_t foo; +layout(location = 1) out int16_t bar; +layout(location = 2) out uint16_t baz; + +void main() { + foo = 1.0hf; + bar = 2s; + baz = 3us; +} diff --git a/3rdparty/spirv-cross/shaders/frag/array-lut-no-loop-variable.frag b/3rdparty/spirv-cross/shaders/frag/array-lut-no-loop-variable.frag new file mode 100644 index 000000000..3493e0ccc --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/array-lut-no-loop-variable.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 v0; + +void main() +{ + float lut[5] = float[](1.0, 2.0, 3.0, 4.0, 5.0); + for (int i = 0; i < 4; i++, FragColor += lut[i]) + { + } +} diff --git a/3rdparty/spirv-cross/shaders/frag/basic.frag b/3rdparty/spirv-cross/shaders/frag/basic.frag new file mode 100644 index 000000000..dd9a8f850 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/basic.frag @@ -0,0 +1,13 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 1) in vec2 vTex; +layout(binding = 0) uniform sampler2D uTex; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vColor * texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/shaders/frag/complex-expression-in-access-chain.frag b/3rdparty/spirv-cross/shaders/frag/complex-expression-in-access-chain.frag new file mode 100644 index 000000000..47f93931c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/complex-expression-in-access-chain.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; + +struct Foo +{ + vec4 a; + vec4 b; +}; + +layout(binding = 0) buffer UBO +{ + vec4 results[1024]; +}; + +layout(binding = 1) uniform highp isampler2D Buf; +layout(location = 0) flat in int vIn; +layout(location = 1) flat in int vIn2; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec4 coords = texelFetch(Buf, ivec2(gl_FragCoord.xy), 0); + vec4 foo = results[coords.x % 16]; + + int c = vIn * vIn; + int d = vIn2 * vIn2; + FragColor = foo + foo + results[c + d]; +} diff --git a/3rdparty/spirv-cross/shaders/frag/composite-extract-forced-temporary.frag b/3rdparty/spirv-cross/shaders/frag/composite-extract-forced-temporary.frag new file mode 100644 index 000000000..35fdbe862 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/composite-extract-forced-temporary.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; +layout(binding = 0) uniform sampler2D Texture; +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTexCoord; + +void main() +{ + float f = texture(Texture, vTexCoord).x; + FragColor = vec4(f * f); +} diff --git a/3rdparty/spirv-cross/shaders/frag/constant-array.frag b/3rdparty/spirv-cross/shaders/frag/constant-array.frag new file mode 100644 index 
000000000..b862cb1db --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/constant-array.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; +layout(location = 0) out vec4 FragColor; + +layout(location = 0) flat in int index; + +struct Foobar { float a; float b; }; + +vec4 resolve(Foobar f) +{ + return vec4(f.a + f.b); +} + +void main() +{ + const vec4 foo[3] = vec4[](vec4(1.0), vec4(2.0), vec4(3.0)); + const vec4 foobars[2][2] = vec4[][](vec4[](vec4(1.0), vec4(2.0)), vec4[](vec4(8.0), vec4(10.0))); + const Foobar foos[2] = Foobar[](Foobar(10.0, 40.0), Foobar(90.0, 70.0)); + + FragColor = foo[index] + foobars[index][index + 1] + resolve(Foobar(10.0, 20.0)) + resolve(foos[index]); +} diff --git a/3rdparty/spirv-cross/shaders/frag/constant-composites.frag b/3rdparty/spirv-cross/shaders/frag/constant-composites.frag new file mode 100644 index 000000000..a12e22ff4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/constant-composites.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +float lut[4] = float[](1.0, 4.0, 3.0, 2.0); + +struct Foo +{ + float a; + float b; +}; +Foo foos[2] = Foo[](Foo(10.0, 20.0), Foo(30.0, 40.0)); + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in int line; + +void main() +{ + FragColor = vec4(lut[line]); + FragColor += foos[line].a * foos[1 - line].a; +} diff --git a/3rdparty/spirv-cross/shaders/frag/false-loop-init.frag b/3rdparty/spirv-cross/shaders/frag/false-loop-init.frag new file mode 100644 index 000000000..7ce5b52bd --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/false-loop-init.frag @@ -0,0 +1,19 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 accum; +layout(location = 0) out vec4 result; + +void main() +{ + result = vec4(0.0); + uint j; + for (int i = 0; i < 4; i += int(j)) + { + if (accum.y > 10.0) + j = 40u; + else + j = 30u; + result += accum; + } +} diff --git a/3rdparty/spirv-cross/shaders/frag/flush_params.frag b/3rdparty/spirv-cross/shaders/frag/flush_params.frag new file mode 100644 index 000000000..8a26ad3a2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/flush_params.frag @@ -0,0 +1,27 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; + +struct Structy +{ + vec4 c; +}; + +void foo2(out Structy f) +{ + f.c = vec4(10.0); +} + +Structy foo() +{ + Structy f; + foo2(f); + return f; +} + +void main() +{ + Structy s = foo(); + FragColor = s.c; +} diff --git a/3rdparty/spirv-cross/shaders/frag/for-loop-init.frag b/3rdparty/spirv-cross/shaders/frag/for-loop-init.frag new file mode 100644 index 000000000..0cde26765 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/for-loop-init.frag @@ -0,0 +1,52 @@ +#version 310 es +precision mediump float; +layout(location = 0) out int FragColor; + +void main() +{ + FragColor = 16; + + // Basic loop variable. + for (int i = 0; i < 25; i++) + FragColor += 10; + + // Multiple loop variables. + for (int i = 1, j = 4; i < 30; i++, j += 4) + FragColor += 11; + + // A potential loop variables, but we access it outside the loop, + // so cannot be one. + int k = 0; + for (; k < 20; k++) + FragColor += 12; + k += 3; + FragColor += k; + + // Potential loop variables, but the dominator is not trivial. 
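+ // Here 'l' is written both as the for-loop counter in the if-branch and
+ // separately in the else-branch, so its declaration has to stay outside the
+ // branch instead of being folded into the loop initializer.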
+ int l; + if (k == 40) + { + for (l = 0; l < 40; l++) + FragColor += 13; + return; + } + else + { + l = k; + FragColor += l; + } + + // Vectors cannot be loop variables + for (ivec2 i = ivec2(0); i.x < 10; i.x += 4) + { + FragColor += i.y; + } + + // Check that static expressions can be used before the loop header. + int m = 0; + m = k; + int o = m; + for (; m < 40; m++) + FragColor += m; + FragColor += o; +} diff --git a/3rdparty/spirv-cross/shaders/frag/frexp-modf.frag b/3rdparty/spirv-cross/shaders/frag/frexp-modf.frag new file mode 100644 index 000000000..6a26a4175 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/frexp-modf.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out float FragColor; +layout(location = 0) in float v0; +layout(location = 1) in vec2 v1; + +void main() +{ + int e0; + float f0 = frexp(v0, e0); + f0 = frexp(v0 + 1.0, e0); + + ivec2 e1; + vec2 f1 = frexp(v1, e1); + + float r0; + float m0 = modf(v0, r0); + vec2 r1; + vec2 m1 = modf(v1, r1); + + FragColor = f0 + f1.x + f1.y + m0 + m1.x + m1.y; +} + diff --git a/3rdparty/spirv-cross/shaders/frag/front-facing.frag b/3rdparty/spirv-cross/shaders/frag/front-facing.frag new file mode 100644 index 000000000..90ca1abf4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/front-facing.frag @@ -0,0 +1,14 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vA; +layout(location = 1) in vec4 vB; + +void main() +{ + if (gl_FrontFacing) + FragColor = vA; + else + FragColor = vB; +} diff --git a/3rdparty/spirv-cross/shaders/frag/gather-dref.frag b/3rdparty/spirv-cross/shaders/frag/gather-dref.frag new file mode 100644 index 000000000..a8aac56cb --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/gather-dref.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(binding = 0) uniform mediump sampler2DShadow uT; +layout(location = 0) in vec3 vUV; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureGather(uT, vUV.xy, vUV.z); +} diff --git a/3rdparty/spirv-cross/shaders/frag/ground.frag b/3rdparty/spirv-cross/shaders/frag/ground.frag new file mode 100755 index 000000000..d1fcfd490 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/ground.frag @@ -0,0 +1,162 @@ +#version 310 es +precision mediump float; + +#define DEBUG_NONE 0 +#define DEBUG_DIFFUSE 1 +#define DEBUG_SPECULAR 2 +#define DEBUG_LIGHTING 3 +#define DEBUG_FOG 4 +#define DEBUG DEBUG_NONE + +#define FORWARD 0 +#define DEFERRED 1 +#define DEFERRED_VTEX 2 + +float saturate(float x) { return clamp(x, 0.0, 1.0); } + +layout(std140, binding = 4) uniform GlobalPSData +{ + vec4 g_CamPos; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_ResolutionParams; + vec4 g_TimeParams; + vec4 g_FogColor_Distance; +}; + +vec4 ComputeFogFactor(vec3 WorldPos) +{ + vec4 FogData; + vec3 vEye = WorldPos - g_CamPos.xyz; + vec3 nEye = normalize(vEye); + FogData.w = exp(-dot(vEye, vEye) * g_FogColor_Distance.w * 0.75); + + float fog_sun_factor = pow(saturate(dot(nEye, g_SunDir.xyz)), 8.0); + FogData.xyz = mix(vec3(1.0, 1.0, 1.0), vec3(0.6, 0.6, 0.9), nEye.y * 0.5 + 0.5); + FogData.xyz = mix(FogData.xyz, vec3(0.95, 0.87, 0.78), fog_sun_factor); + return FogData; +} + +void ApplyFog(inout vec3 Color, vec4 FogData) +{ + Color = mix(FogData.xyz, Color, FogData.w); +} + +void ApplyLighting(inout mediump vec3 Color, mediump float DiffuseFactor) +{ + mediump vec3 DiffuseLight = g_SunColor.xyz * DiffuseFactor; + mediump vec3 AmbientLight = vec3(0.2, 
0.35, 0.55) * 0.5; + mediump vec3 Lighting = DiffuseLight + AmbientLight; +#if DEBUG == DEBUG_LIGHTING + Color = Lighting; +#else + Color *= Lighting; +#endif +} + +#define SPECULAR 0 +#define GLOSSMAP 0 + +void ApplySpecular(inout mediump vec3 Color, mediump vec3 EyeVec, mediump vec3 Normal, mediump vec3 SpecularColor, mediump float Shininess, mediump float FresnelAmount) +{ + mediump vec3 HalfAngle = normalize(-EyeVec + g_SunDir.xyz); + + mediump float v_dot_h = saturate(dot(HalfAngle, -EyeVec)); + mediump float n_dot_l = saturate(dot(Normal, g_SunDir.xyz)); + mediump float n_dot_h = saturate(dot(Normal, HalfAngle)); + mediump float n_dot_v = saturate(dot(-EyeVec, Normal)); + mediump float h_dot_l = saturate(dot(g_SunDir.xyz, HalfAngle)); + + const mediump float roughness_value = 0.25; + + mediump float r_sq = roughness_value * roughness_value; + mediump float n_dot_h_sq = n_dot_h * n_dot_h; + mediump float roughness_a = 1.0 / (4.0 * r_sq * n_dot_h_sq * n_dot_h_sq); + mediump float roughness_b = n_dot_h_sq - 1.0; + mediump float roughness_c = r_sq * n_dot_h_sq; + mediump float roughness = saturate(roughness_a * exp(roughness_b / roughness_c)); + + FresnelAmount = 0.5; + mediump float fresnel_term = pow(1.0 - n_dot_v, 5.0) * (1.0 - FresnelAmount) + FresnelAmount; + + mediump float geo_numerator = 2.0 * n_dot_h; + mediump float geo_denominator = 1.0 / v_dot_h; + mediump float geo_term = min(1.0, min(n_dot_v, n_dot_l) * geo_numerator * geo_denominator); + +#if SPECULAR || GLOSSMAP + Color += SpecularColor * g_SunColor.xyz * fresnel_term * roughness * n_dot_l * geo_term / (n_dot_v * n_dot_l + 0.0001); +#endif + + //Color = vec3(0.025 * 1.0 / (n_dot_v * n_dot_l)); +} +layout(location = 0) in vec2 TexCoord; +layout(location = 1) in vec3 EyeVec; + +layout(binding = 2) uniform sampler2D TexNormalmap; +//layout(binding = 3) uniform sampler2D TexScatteringLUT; + +#define DIFFUSE_ONLY 0 +#define GLOBAL_RENDERER DEFERRED +#define OUTPUT_FEEDBACK_TEXTURE 0 + +#if DIFFUSE_ONLY +layout(location = 0) out vec4 ColorOut; +layout(location = 1) out vec4 NormalOut; +#else +layout(location = 0) out vec4 AlbedoOut; +layout(location = 1) out vec4 SpecularOut; +layout(location = 2) out vec4 NormalOut; +layout(location = 3) out vec4 LightingOut; +#endif + +void Resolve(vec3 Albedo, vec3 Normal, float Roughness, float Metallic) +{ +#if (GLOBAL_RENDERER == FORWARD) || OUTPUT_FEEDBACK_TEXTURE + float Lighting = saturate(dot(Normal, normalize(vec3(1.0, 0.5, 1.0)))); + ColorOut.xyz = Albedo * Lighting; + ColorOut.w = 1.0; +#elif DIFFUSE_ONLY + ColorOut = vec4(Albedo, 0.0); + NormalOut.xyz = Normal * 0.5 + 0.5; + NormalOut.w = 1.0; + + // linearize and map to 0..255 range + ColorOut.w = -0.003921569 / (gl_FragCoord.z - 1.003921569); + ColorOut.w = log2(1.0 + saturate(length(EyeVec.xyz) / 200.0)); + ColorOut.w -= 1.0 / 255.0; +#else + LightingOut = vec4(0.0); + NormalOut = vec4(Normal * 0.5 + 0.5, 0.0); + SpecularOut = vec4(Roughness, Metallic, 0.0, 0.0); + AlbedoOut = vec4(Albedo, 1.0); +#endif +} + +void main() +{ + vec3 Normal = texture(TexNormalmap, TexCoord).xyz * 2.0 - 1.0; + Normal = normalize(Normal); + + vec2 scatter_uv; + scatter_uv.x = saturate(length(EyeVec) / 1000.0); + + vec3 nEye = normalize(EyeVec); + scatter_uv.y = 0.0; //nEye.x * 0.5 + 0.5; + + vec3 Color = vec3(0.1, 0.3, 0.1); + vec3 grass = vec3(0.1, 0.3, 0.1); + vec3 dirt = vec3(0.1, 0.1, 0.1); + vec3 snow = vec3(0.8, 0.8, 0.8); + + float grass_snow = smoothstep(0.0, 0.15, (g_CamPos.y + EyeVec.y) / 200.0); + vec3 base = mix(grass, snow, grass_snow); 
+ + float edge = smoothstep(0.7, 0.75, Normal.y); + Color = mix(dirt, base, edge); + Color *= Color; + + float Roughness = 1.0 - edge * grass_snow; + + Resolve(Color, Normal, Roughness, 0.0); +} + diff --git a/3rdparty/spirv-cross/shaders/frag/helper-invocation.frag b/3rdparty/spirv-cross/shaders/frag/helper-invocation.frag new file mode 100644 index 000000000..1da8c5763 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/helper-invocation.frag @@ -0,0 +1,21 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vUV; +layout(binding = 0) uniform sampler2D uSampler; + +vec4 foo() +{ + vec4 color; + if (!gl_HelperInvocation) + color = textureLod(uSampler, vUV, 0.0); + else + color = vec4(1.0); + return color; +} + +void main() +{ + FragColor = foo(); +} diff --git a/3rdparty/spirv-cross/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag b/3rdparty/spirv-cross/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag new file mode 100644 index 000000000..cc8a64835 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/hoisted-temporary-use-continue-block-as-value.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) flat in int vA; +layout(location = 1) flat in int vB; + +void main() +{ + FragColor = vec4(0.0); + + int k = 0; + int j; + for (int i = 0; i < vA; i += j) + { + if ((vA + i) == 20) + k = 50; + else if ((vB + i) == 40) + k = 60; + + j = k + 10; + FragColor += 1.0; + } +} diff --git a/3rdparty/spirv-cross/shaders/frag/image-load-store-uint-coord.asm.frag b/3rdparty/spirv-cross/shaders/frag/image-load-store-uint-coord.asm.frag new file mode 100644 index 000000000..a9bf1a749 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/image-load-store-uint-coord.asm.frag @@ -0,0 +1,103 @@ +; SPIR-V +; Version: 1.0 +; Generator: Khronos Glslang Reference Front End; 2 +; Bound: 63 +; Schema: 0 + OpCapability Shader + OpCapability SampledBuffer + OpCapability ImageBuffer + %1 = OpExtInstImport "GLSL.std.450" + OpMemoryModel Logical GLSL450 + OpEntryPoint Fragment %main "main" %_entryPointOutput + OpExecutionMode %main OriginUpperLeft + OpSource HLSL 500 + OpName %main "main" + OpName %_main_ "@main(" + OpName %storeTemp "storeTemp" + OpName %RWIm "RWIm" + OpName %v "v" + OpName %RWBuf "RWBuf" + OpName %ROIm "ROIm" + OpName %ROBuf "ROBuf" + OpName %_entryPointOutput "@entryPointOutput" + OpDecorate %RWIm DescriptorSet 0 + OpDecorate %RWIm Binding 1 + OpDecorate %RWBuf DescriptorSet 0 + OpDecorate %RWBuf Binding 0 + OpDecorate %ROIm DescriptorSet 0 + OpDecorate %ROIm Binding 1 + OpDecorate %ROBuf DescriptorSet 0 + OpDecorate %ROBuf Binding 0 + OpDecorate %_entryPointOutput Location 0 + %void = OpTypeVoid + %3 = OpTypeFunction %void + %float = OpTypeFloat 32 + %v4float = OpTypeVector %float 4 + %8 = OpTypeFunction %v4float +%_ptr_Function_v4float = OpTypePointer Function %v4float + %float_10 = OpConstant %float 10 + %float_0_5 = OpConstant %float 0.5 + %float_8 = OpConstant %float 8 + %float_2 = OpConstant %float 2 + %17 = OpConstantComposite %v4float %float_10 %float_0_5 %float_8 %float_2 + %18 = OpTypeImage %float 2D 0 0 0 2 Rgba32f +%_ptr_UniformConstant_18 = OpTypePointer UniformConstant %18 + %RWIm = OpVariable %_ptr_UniformConstant_18 UniformConstant + %uint = OpTypeInt 32 0 + %v2uint = OpTypeVector %uint 2 + %uint_10 = OpConstant %uint 10 + %25 = OpConstantComposite %v2uint %uint_10 %uint_10 + %uint_30 = OpConstant %uint 30 + %30 = 
OpConstantComposite %v2uint %uint_30 %uint_30 + %32 = OpTypeImage %float Buffer 0 0 0 2 Rgba32f +%_ptr_UniformConstant_32 = OpTypePointer UniformConstant %32 + %RWBuf = OpVariable %_ptr_UniformConstant_32 UniformConstant + %uint_80 = OpConstant %uint 80 + %38 = OpTypeImage %float 2D 0 0 0 1 Unknown + %SampledImage = OpTypeSampledImage %38 +%_ptr_UniformConstant_38 = OpTypePointer UniformConstant %SampledImage + %ROIm = OpVariable %_ptr_UniformConstant_38 UniformConstant + %uint_50 = OpConstant %uint 50 + %uint_60 = OpConstant %uint 60 + %44 = OpConstantComposite %v2uint %uint_50 %uint_60 + %int = OpTypeInt 32 1 + %int_0 = OpConstant %int 0 + %50 = OpTypeImage %float Buffer 0 0 0 1 Rgba32f +%_ptr_UniformConstant_50 = OpTypePointer UniformConstant %50 + %ROBuf = OpVariable %_ptr_UniformConstant_50 UniformConstant +%_ptr_Output_v4float = OpTypePointer Output %v4float +%_entryPointOutput = OpVariable %_ptr_Output_v4float Output + %main = OpFunction %void None %3 + %5 = OpLabel + %62 = OpFunctionCall %v4float %_main_ + OpStore %_entryPointOutput %62 + OpReturn + OpFunctionEnd + %_main_ = OpFunction %v4float None %8 + %10 = OpLabel + %storeTemp = OpVariable %_ptr_Function_v4float Function + %v = OpVariable %_ptr_Function_v4float Function + OpStore %storeTemp %17 + %21 = OpLoad %18 %RWIm + %26 = OpLoad %v4float %storeTemp + OpImageWrite %21 %25 %26 + %28 = OpLoad %18 %RWIm + %31 = OpImageRead %v4float %28 %30 + OpStore %v %31 + %35 = OpLoad %32 %RWBuf + %37 = OpLoad %v4float %v + OpImageWrite %35 %uint_80 %37 + %41 = OpLoad %SampledImage %ROIm + %ROImage = OpImage %38 %41 + %47 = OpImageFetch %v4float %ROImage %44 Lod %int_0 + %48 = OpLoad %v4float %v + %49 = OpFAdd %v4float %48 %47 + OpStore %v %49 + %53 = OpLoad %50 %ROBuf + %54 = OpImageFetch %v4float %53 %uint_80 + %55 = OpLoad %v4float %v + %56 = OpFAdd %v4float %55 %54 + OpStore %v %56 + %57 = OpLoad %v4float %v + OpReturnValue %57 + OpFunctionEnd diff --git a/3rdparty/spirv-cross/shaders/frag/loop-dominator-and-switch-default.frag b/3rdparty/spirv-cross/shaders/frag/loop-dominator-and-switch-default.frag new file mode 100644 index 000000000..344d895bf --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/loop-dominator-and-switch-default.frag @@ -0,0 +1,34 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 fragColor; + +void main() +{ + vec4 f4; + int c = int(f4.x); + + for (int j = 0; j < c; j++) + { + switch (c) + { + case 0: + f4.y = 0.0; + break; + case 1: + f4.y = 1.0; + break; + default: + { + int i = 0; + while (i++ < c) { + f4.y += 0.5; + } + continue; + } + } + f4.y += 0.5; + } + + fragColor = f4; +} diff --git a/3rdparty/spirv-cross/shaders/frag/lut-promotion.frag b/3rdparty/spirv-cross/shaders/frag/lut-promotion.frag new file mode 100644 index 000000000..0cdc8148f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/lut-promotion.frag @@ -0,0 +1,44 @@ +#version 310 es +precision mediump float; +layout(location = 0) out float FragColor; +layout(location = 0) flat in int index; + +const float LUT[16] = float[]( + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0, + 1.0, 2.0, 3.0, 4.0); + +void main() +{ + // Try reading LUTs, both in branches and not branch. + FragColor = LUT[index]; + if (index < 10) + FragColor += LUT[index ^ 1]; + else + FragColor += LUT[index & 1]; + + // Not declared as a LUT, but can be promoted to one. 
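+ // 'foo' below is initialized purely with constants and is only ever read,
+ // so it can still be treated as a constant LUT even though it is not
+ // declared const; the 'foobar' and 'baz' cases that follow are written to
+ // after initialization, which rules that out.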
+ vec4 foo[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + if (index > 30) + { + FragColor += foo[index & 3].y; + } + else + { + FragColor += foo[index & 1].x; + } + + // Not declared as a LUT, but this cannot be promoted, because we have a partial write. + vec4 foobar[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + if (index > 30) + { + foobar[1].z = 20.0; + } + FragColor += foobar[index & 3].z; + + // Not declared as a LUT, but this cannot be promoted, because we have two complete writes. + vec4 baz[4] = vec4[](vec4(0.0), vec4(1.0), vec4(8.0), vec4(5.0)); + baz = vec4[](vec4(20.0), vec4(30.0), vec4(50.0), vec4(60.0)); + FragColor += baz[index & 3].z; +} diff --git a/3rdparty/spirv-cross/shaders/frag/mix.frag b/3rdparty/spirv-cross/shaders/frag/mix.frag new file mode 100644 index 000000000..a5d589dd0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/mix.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vIn0; +layout(location = 1) in vec4 vIn1; +layout(location = 2) in float vIn2; +layout(location = 3) in float vIn3; +layout(location = 0) out vec4 FragColor; + +void main() +{ + bvec4 l = bvec4(false, true, false, false); + FragColor = mix(vIn0, vIn1, l); + + bool f = true; + FragColor = vec4(mix(vIn2, vIn3, f)); + + FragColor = f ? vIn0 : vIn1; + FragColor = vec4(f ? vIn2 : vIn3); +} diff --git a/3rdparty/spirv-cross/shaders/frag/partial-write-preserve.frag b/3rdparty/spirv-cross/shaders/frag/partial-write-preserve.frag new file mode 100644 index 000000000..227df9508 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/partial-write-preserve.frag @@ -0,0 +1,95 @@ +#version 310 es +precision mediump float; + +layout(std140, binding = 0) uniform UBO +{ + int some_value; +}; + +struct B +{ + float a; + float b; +}; + +void partial_inout(inout vec4 x) +{ + x.x = 10.0; +} + +void partial_inout(inout B b) +{ + b.b = 40.0; +} + +// Make a complete write, but only conditionally ... 
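+// GLSL inout parameters are copy-in/copy-out: components the callee never
+// writes must return to the caller unchanged, so when the conditional full
+// write below is not taken, x/z/w keep their incoming values.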
+void branchy_inout(inout vec4 v) +{ + v.y = 20.0; + if (some_value == 20) + { + v = vec4(50.0); + } +} + +void branchy_inout(inout B b) +{ + b.b = 20.0; + if (some_value == 20) + { + b = B(10.0, 40.0); + } +} + +void branchy_inout_2(out vec4 v) +{ + if (some_value == 20) + { + v = vec4(50.0); + } + else + { + v = vec4(70.0); + } + v.y = 20.0; +} + +void branchy_inout_2(out B b) +{ + if (some_value == 20) + { + b = B(10.0, 40.0); + } + else + { + b = B(70.0, 70.0); + } + b.b = 20.0; +} + + +void complete_inout(out vec4 x) +{ + x = vec4(50.0); +} + +void complete_inout(out B b) +{ + b = B(100.0, 200.0); +} + +void main() +{ + vec4 a = vec4(10.0); + partial_inout(a); + complete_inout(a); + branchy_inout(a); + branchy_inout_2(a); + + B b = B(10.0, 20.0); + partial_inout(b); + complete_inout(b); + branchy_inout(b); + branchy_inout_2(b); +} + diff --git a/3rdparty/spirv-cross/shaders/frag/pls.frag b/3rdparty/spirv-cross/shaders/frag/pls.frag new file mode 100644 index 000000000..e3863e4e0 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/pls.frag @@ -0,0 +1,20 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 PLSIn0; +layout(location = 1) in vec4 PLSIn1; +layout(location = 2) in vec4 PLSIn2; +layout(location = 3) in vec4 PLSIn3; + +layout(location = 0) out vec4 PLSOut0; +layout(location = 1) out vec4 PLSOut1; +layout(location = 2) out vec4 PLSOut2; +layout(location = 3) out vec4 PLSOut3; + +void main() +{ + PLSOut0 = 2.0 * PLSIn0; + PLSOut1 = 6.0 * PLSIn1; + PLSOut2 = 7.0 * PLSIn2; + PLSOut3 = 4.0 * PLSIn3; +} diff --git a/3rdparty/spirv-cross/shaders/frag/sample-parameter.frag b/3rdparty/spirv-cross/shaders/frag/sample-parameter.frag new file mode 100644 index 000000000..8470bfd67 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/sample-parameter.frag @@ -0,0 +1,11 @@ +#version 310 es +#extension GL_OES_sample_variables : require +precision mediump float; + +layout(location = 0) out vec2 FragColor; + +void main() +{ + FragColor = gl_SamplePosition + vec2(gl_SampleMaskIn[0]) + float(gl_SampleID); + gl_SampleMask[0] = 1; +} diff --git a/3rdparty/spirv-cross/shaders/frag/sampler-ms.frag b/3rdparty/spirv-cross/shaders/frag/sampler-ms.frag new file mode 100644 index 000000000..659392827 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/sampler-ms.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; +precision highp int; + +layout(binding = 0) uniform mediump sampler2DMS uSampler; +layout(location = 0) out vec4 FragColor; + +void main() +{ + ivec2 coord = ivec2(gl_FragCoord.xy); + FragColor = + texelFetch(uSampler, coord, 0) + + texelFetch(uSampler, coord, 1) + + texelFetch(uSampler, coord, 2) + + texelFetch(uSampler, coord, 3); +} diff --git a/3rdparty/spirv-cross/shaders/frag/sampler-proj.frag b/3rdparty/spirv-cross/shaders/frag/sampler-proj.frag new file mode 100644 index 000000000..21fa5c026 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/sampler-proj.frag @@ -0,0 +1,12 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vTex; +layout(binding = 0) uniform sampler2D uTex; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureProj(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/shaders/frag/sampler.frag b/3rdparty/spirv-cross/shaders/frag/sampler.frag new file mode 100644 index 000000000..e38f76886 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/sampler.frag @@ -0,0 +1,18 @@ +#version 310 es +precision mediump float; + +layout(location = 0) in vec4 vColor; +layout(location = 1) 
in vec2 vTex; +layout(binding = 0) uniform sampler2D uTex; +layout(location = 0) out vec4 FragColor; + +vec4 sample_texture(sampler2D tex, vec2 uv) +{ + return texture(tex, uv); +} + +void main() +{ + FragColor = vColor * sample_texture(uTex, vTex); +} + diff --git a/3rdparty/spirv-cross/shaders/frag/switch-unsigned-case.frag b/3rdparty/spirv-cross/shaders/frag/switch-unsigned-case.frag new file mode 100644 index 000000000..d8aee43a6 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/switch-unsigned-case.frag @@ -0,0 +1,26 @@ +#version 310 es +precision mediump float; + +#define ENUM_0 0u +#define ENUM_1 1u + +layout(set = 0, binding = 0) uniform Buff +{ + uint TestVal; +}; + +layout(location = 0) out vec4 fsout_Color; + +void main() +{ + fsout_Color = vec4(1.0); + switch (TestVal) + { + case ENUM_0: + fsout_Color = vec4(0.1); + break; + case ENUM_1: + fsout_Color = vec4(0.2); + break; + } +} diff --git a/3rdparty/spirv-cross/shaders/frag/swizzle.frag b/3rdparty/spirv-cross/shaders/frag/swizzle.frag new file mode 100644 index 000000000..af22dd655 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/swizzle.frag @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; + +layout(binding = 0) uniform sampler2D samp; +layout(location = 0) out vec4 FragColor; +layout(location = 1) in vec3 vNormal; +layout(location = 2) in vec2 vUV; + +void main() +{ + FragColor = vec4(texture(samp, vUV).xyz, 1.0); + FragColor = vec4(texture(samp, vUV).xz, 1.0, 4.0); + FragColor = vec4(texture(samp, vUV).xx, texture(samp, vUV + vec2(0.1)).yy); + FragColor = vec4(vNormal, 1.0); + FragColor = vec4(vNormal + 1.8, 1.0); + FragColor = vec4(vUV, vUV + 1.8); +} diff --git a/3rdparty/spirv-cross/shaders/frag/texel-fetch-offset.frag b/3rdparty/spirv-cross/shaders/frag/texel-fetch-offset.frag new file mode 100644 index 000000000..e98748b8b --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/texel-fetch-offset.frag @@ -0,0 +1,10 @@ +#version 310 es +precision mediump float; +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uTexture; + +void main() +{ + FragColor = texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(1, 1)); + FragColor += texelFetchOffset(uTexture, ivec2(gl_FragCoord.xy), 0, ivec2(-1, 1)); +} diff --git a/3rdparty/spirv-cross/shaders/frag/ubo_layout.frag b/3rdparty/spirv-cross/shaders/frag/ubo_layout.frag new file mode 100644 index 000000000..80f9f16d3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/ubo_layout.frag @@ -0,0 +1,24 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; + +struct Str +{ + mat4 foo; +}; + +layout(binding = 0, std140) uniform UBO1 +{ + layout(row_major) Str foo; +} ubo1; + +layout(binding = 1, std140) uniform UBO2 +{ + layout(column_major) Str foo; +} ubo0; + +void main() +{ + FragColor = ubo1.foo.foo[0] + ubo0.foo.foo[0]; +} diff --git a/3rdparty/spirv-cross/shaders/frag/unary-enclose.frag b/3rdparty/spirv-cross/shaders/frag/unary-enclose.frag new file mode 100644 index 000000000..ea502e1de --- /dev/null +++ b/3rdparty/spirv-cross/shaders/frag/unary-enclose.frag @@ -0,0 +1,15 @@ +#version 310 es +precision mediump float; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec4 vIn; +layout(location = 1) flat in ivec4 vIn1; + +void main() +{ + FragColor = +(-(-vIn)); + ivec4 a = ~(~vIn1); + + bool b = false; + b = !!b; +} diff --git a/3rdparty/spirv-cross/shaders/geom/basic.geom b/3rdparty/spirv-cross/shaders/geom/basic.geom new file mode 100644 index 000000000..80b977d11 --- 
/dev/null +++ b/3rdparty/spirv-cross/shaders/geom/basic.geom @@ -0,0 +1,28 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(triangles, invocations = 4) in; +layout(triangle_strip, max_vertices = 3) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal + float(gl_InvocationID); + EmitVertex(); + + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal + 4.0 * float(gl_InvocationID); + EmitVertex(); + + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal + 2.0 * float(gl_InvocationID); + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/geom/lines-adjacency.geom b/3rdparty/spirv-cross/shaders/geom/lines-adjacency.geom new file mode 100644 index 000000000..4c3444073 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/geom/lines-adjacency.geom @@ -0,0 +1,28 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(lines_adjacency) in; +layout(line_strip, max_vertices = 3) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/geom/lines.geom b/3rdparty/spirv-cross/shaders/geom/lines.geom new file mode 100644 index 000000000..c751d5cda --- /dev/null +++ b/3rdparty/spirv-cross/shaders/geom/lines.geom @@ -0,0 +1,24 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(lines) in; +layout(line_strip, max_vertices = 2) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/geom/points.geom b/3rdparty/spirv-cross/shaders/geom/points.geom new file mode 100644 index 000000000..f7dce10d7 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/geom/points.geom @@ -0,0 +1,28 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(points) in; +layout(points, max_vertices = 3) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/geom/single-invocation.geom b/3rdparty/spirv-cross/shaders/geom/single-invocation.geom new file mode 100644 index 000000000..c3c8d1526 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/geom/single-invocation.geom @@ -0,0 +1,28 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(triangles) in; +layout(triangle_strip, max_vertices = 3) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + 
gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/geom/triangles-adjacency.geom b/3rdparty/spirv-cross/shaders/geom/triangles-adjacency.geom new file mode 100644 index 000000000..017cef7b5 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/geom/triangles-adjacency.geom @@ -0,0 +1,28 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(triangles_adjacency) in; +layout(triangle_strip, max_vertices = 3) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/geom/triangles.geom b/3rdparty/spirv-cross/shaders/geom/triangles.geom new file mode 100644 index 000000000..c3c8d1526 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/geom/triangles.geom @@ -0,0 +1,28 @@ +#version 310 es +#extension GL_EXT_geometry_shader : require + +layout(triangles) in; +layout(triangle_strip, max_vertices = 3) out; + +layout(location = 0) in VertexData { + vec3 normal; +} vin[]; + +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = gl_in[0].gl_Position; + vNormal = vin[0].normal; + EmitVertex(); + + gl_Position = gl_in[1].gl_Position; + vNormal = vin[1].normal; + EmitVertex(); + + gl_Position = gl_in[2].gl_Position; + vNormal = vin[2].normal; + EmitVertex(); + + EndPrimitive(); +} diff --git a/3rdparty/spirv-cross/shaders/legacy/fragment/explicit-lod.legacy.frag b/3rdparty/spirv-cross/shaders/legacy/fragment/explicit-lod.legacy.frag new file mode 100644 index 000000000..abe1ef2c3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/fragment/explicit-lod.legacy.frag @@ -0,0 +1,12 @@ +#version 310 es + +precision mediump float; + +layout(binding = 0) uniform sampler2D tex; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = textureLod(tex, vec2(0.4, 0.6), 0.0); +} diff --git a/3rdparty/spirv-cross/shaders/legacy/fragment/io-blocks.legacy.frag b/3rdparty/spirv-cross/shaders/legacy/fragment/io-blocks.legacy.frag new file mode 100644 index 000000000..0a151dc2d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/fragment/io-blocks.legacy.frag @@ -0,0 +1,16 @@ +#version 310 es +#extension GL_EXT_shader_io_blocks : require +precision mediump float; + +layout(location = 1) in VertexOut +{ + vec4 color; + highp vec3 normal; +} vin; + +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vin.color + vin.normal.xyzz; +} diff --git a/3rdparty/spirv-cross/shaders/legacy/fragment/struct-varying.legacy.frag b/3rdparty/spirv-cross/shaders/legacy/fragment/struct-varying.legacy.frag new file mode 100644 index 000000000..5df5c8770 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/fragment/struct-varying.legacy.frag @@ -0,0 +1,25 @@ +#version 310 es +precision highp float; + +struct Inputs +{ + vec4 a; + vec2 b; +}; + +layout(location = 0) in Inputs vin; +layout(location = 0) out vec4 FragColor; + +void main() +{ + // Read struct once. + Inputs v0 = vin; + // Read struct again. + Inputs v1 = vin; + + // Read members individually. 
+ vec4 a = vin.a; + vec4 b = vin.b.xxyy; + + FragColor = v0.a + v0.b.xxyy + v1.a + v1.b.yyxx + a + b; +} diff --git a/3rdparty/spirv-cross/shaders/legacy/vert/implicit-lod.legacy.vert b/3rdparty/spirv-cross/shaders/legacy/vert/implicit-lod.legacy.vert new file mode 100644 index 000000000..606569427 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/vert/implicit-lod.legacy.vert @@ -0,0 +1,8 @@ +#version 310 es + +layout(binding = 0) uniform sampler2D tex; + +void main() +{ + gl_Position = texture(tex, vec2(0.4, 0.6)); +} diff --git a/3rdparty/spirv-cross/shaders/legacy/vert/io-block.legacy.vert b/3rdparty/spirv-cross/shaders/legacy/vert/io-block.legacy.vert new file mode 100644 index 000000000..4fbc9347c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/vert/io-block.legacy.vert @@ -0,0 +1,17 @@ +#version 310 es +#extension GL_EXT_shader_io_blocks : require + +layout(location = 0) out VertexOut +{ + vec4 color; + vec3 normal; +} vout; + +layout(location = 0) in vec4 Position; + +void main() +{ + gl_Position = Position; + vout.color = vec4(1.0); + vout.normal = vec3(0.5); +} diff --git a/3rdparty/spirv-cross/shaders/legacy/vert/struct-varying.legacy.vert b/3rdparty/spirv-cross/shaders/legacy/vert/struct-varying.legacy.vert new file mode 100644 index 000000000..3f491be83 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/vert/struct-varying.legacy.vert @@ -0,0 +1,33 @@ +#version 310 es + +struct Output +{ + vec4 a; + vec2 b; +}; + +layout(location = 0) out Output vout; + +void main() +{ + Output s = Output(vec4(0.5), vec2(0.25)); + + // Write whole struct. + vout = s; + // Write whole struct again, checks for scoping. + vout = s; + + // Read it back. + Output tmp = vout; + + // Write elements individually. + vout.a = tmp.a; + vout.b = tmp.b; + + // Write individual elements. + vout.a.x = 1.0; + vout.b.y = 1.0; + + // Read individual elements. 
+ float c = vout.a.x; +} diff --git a/3rdparty/spirv-cross/shaders/legacy/vert/transpose.legacy.vert b/3rdparty/spirv-cross/shaders/legacy/vert/transpose.legacy.vert new file mode 100644 index 000000000..84f618262 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/legacy/vert/transpose.legacy.vert @@ -0,0 +1,20 @@ +#version 310 es + +uniform Buffer +{ + layout(row_major) mat4 MVPRowMajor; + layout(column_major) mat4 MVPColMajor; + mat4 M; +}; + +layout(location = 0) in vec4 Position; + +void main() +{ + vec4 c0 = M * (MVPRowMajor * Position); + vec4 c1 = M * (MVPColMajor * Position); + vec4 c2 = M * (Position * MVPRowMajor); + vec4 c3 = M * (Position * MVPColMajor); + gl_Position = c0 + c1 + c2 + c3; +} + diff --git a/3rdparty/spirv-cross/shaders/tesc/basic.tesc b/3rdparty/spirv-cross/shaders/tesc/basic.tesc new file mode 100644 index 000000000..0a41f98c8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tesc/basic.tesc @@ -0,0 +1,17 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(location = 0) patch out vec3 vFoo; + +layout(vertices = 1) out; + +void main() +{ + gl_TessLevelInner[0] = 8.9; + gl_TessLevelInner[1] = 6.9; + gl_TessLevelOuter[0] = 8.9; + gl_TessLevelOuter[1] = 6.9; + gl_TessLevelOuter[2] = 3.9; + gl_TessLevelOuter[3] = 4.9; + vFoo = vec3(1.0); +} diff --git a/3rdparty/spirv-cross/shaders/tesc/water_tess.tesc b/3rdparty/spirv-cross/shaders/tesc/water_tess.tesc new file mode 100644 index 000000000..3ecdc3d1a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tesc/water_tess.tesc @@ -0,0 +1,115 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(vertices = 1) out; +layout(location = 0) in vec2 vPatchPosBase[]; + +layout(std140) uniform UBO +{ + vec4 uScale; + highp vec3 uCamPos; + vec2 uPatchSize; + vec2 uMaxTessLevel; + float uDistanceMod; + vec4 uFrustum[6]; +}; + +layout(location = 1) patch out vec2 vOutPatchPosBase; +layout(location = 2) patch out vec4 vPatchLods; + +float lod_factor(vec2 pos_) +{ + vec2 pos = pos_ * uScale.xy; + vec3 dist_to_cam = uCamPos - vec3(pos.x, 0.0, pos.y); + float level = log2((length(dist_to_cam) + 0.0001) * uDistanceMod); + return clamp(level, 0.0, uMaxTessLevel.x); +} + +float tess_level(float lod) +{ + return uMaxTessLevel.y * exp2(-lod); +} + +vec4 tess_level(vec4 lod) +{ + return uMaxTessLevel.y * exp2(-lod); +} + +// Guard band for vertex displacement. 
+#define GUARD_BAND 10.0 +bool frustum_cull(vec2 p0) +{ + vec2 min_xz = (p0 - GUARD_BAND) * uScale.xy; + vec2 max_xz = (p0 + uPatchSize + GUARD_BAND) * uScale.xy; + + vec3 bb_min = vec3(min_xz.x, -GUARD_BAND, min_xz.y); + vec3 bb_max = vec3(max_xz.x, +GUARD_BAND, max_xz.y); + vec3 center = 0.5 * (bb_min + bb_max); + float radius = 0.5 * length(bb_max - bb_min); + + vec3 f0 = vec3( + dot(uFrustum[0], vec4(center, 1.0)), + dot(uFrustum[1], vec4(center, 1.0)), + dot(uFrustum[2], vec4(center, 1.0))); + + vec3 f1 = vec3( + dot(uFrustum[3], vec4(center, 1.0)), + dot(uFrustum[4], vec4(center, 1.0)), + dot(uFrustum[5], vec4(center, 1.0))); + + return !(any(lessThanEqual(f0, vec3(-radius))) || any(lessThanEqual(f1, vec3(-radius)))); +} + +void compute_tess_levels(vec2 p0) +{ + vOutPatchPosBase = p0; + + float l00 = lod_factor(p0 + vec2(-0.5, -0.5) * uPatchSize); + float l10 = lod_factor(p0 + vec2(+0.5, -0.5) * uPatchSize); + float l20 = lod_factor(p0 + vec2(+1.5, -0.5) * uPatchSize); + float l01 = lod_factor(p0 + vec2(-0.5, +0.5) * uPatchSize); + float l11 = lod_factor(p0 + vec2(+0.5, +0.5) * uPatchSize); + float l21 = lod_factor(p0 + vec2(+1.5, +0.5) * uPatchSize); + float l02 = lod_factor(p0 + vec2(-0.5, +1.5) * uPatchSize); + float l12 = lod_factor(p0 + vec2(+0.5, +1.5) * uPatchSize); + float l22 = lod_factor(p0 + vec2(+1.5, +1.5) * uPatchSize); + + vec4 lods = vec4( + dot(vec4(l01, l11, l02, l12), vec4(0.25)), + dot(vec4(l00, l10, l01, l11), vec4(0.25)), + dot(vec4(l10, l20, l11, l21), vec4(0.25)), + dot(vec4(l11, l21, l12, l22), vec4(0.25))); + + vPatchLods = lods; + + vec4 outer_lods = min(lods.xyzw, lods.yzwx); + vec4 levels = tess_level(outer_lods); + gl_TessLevelOuter[0] = levels.x; + gl_TessLevelOuter[1] = levels.y; + gl_TessLevelOuter[2] = levels.z; + gl_TessLevelOuter[3] = levels.w; + + float min_lod = min(min(lods.x, lods.y), min(lods.z, lods.w)); + float inner = tess_level(min(min_lod, l11)); + gl_TessLevelInner[0] = inner; + gl_TessLevelInner[1] = inner; +} + +void main() +{ + vec2 p0 = vPatchPosBase[0]; + if (!frustum_cull(p0)) + { + gl_TessLevelOuter[0] = -1.0; + gl_TessLevelOuter[1] = -1.0; + gl_TessLevelOuter[2] = -1.0; + gl_TessLevelOuter[3] = -1.0; + gl_TessLevelInner[0] = -1.0; + gl_TessLevelInner[1] = -1.0; + } + else + { + compute_tess_levels(p0); + } +} + diff --git a/3rdparty/spirv-cross/shaders/tese/ccw.tese b/3rdparty/spirv-cross/shaders/tese/ccw.tese new file mode 100644 index 000000000..26e9cc698 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/ccw.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(ccw, triangles, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/tese/cw.tese b/3rdparty/spirv-cross/shaders/tese/cw.tese new file mode 100644 index 000000000..6ce7c2d6d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/cw.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(cw, triangles, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/tese/equal.tese b/3rdparty/spirv-cross/shaders/tese/equal.tese new file mode 100644 index 000000000..08ab36ec2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/equal.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(cw, triangles, equal_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git 
a/3rdparty/spirv-cross/shaders/tese/fractional_even.tese b/3rdparty/spirv-cross/shaders/tese/fractional_even.tese new file mode 100644 index 000000000..6ce7c2d6d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/fractional_even.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(cw, triangles, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/tese/fractional_odd.tese b/3rdparty/spirv-cross/shaders/tese/fractional_odd.tese new file mode 100644 index 000000000..a15a32926 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/fractional_odd.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(cw, triangles, fractional_odd_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/tese/input-array.tese b/3rdparty/spirv-cross/shaders/tese/input-array.tese new file mode 100644 index 000000000..f1014ca5c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/input-array.tese @@ -0,0 +1,10 @@ +#version 450 + +layout(ccw, quads, fractional_odd_spacing) in; +layout(location = 0) in vec4 Floats[]; +layout(location = 2) in vec4 Floats2[gl_MaxPatchVertices]; + +void main() +{ + gl_Position = Floats[0] * gl_TessCoord.x + Floats2[1] * gl_TessCoord.y; +} diff --git a/3rdparty/spirv-cross/shaders/tese/line.tese b/3rdparty/spirv-cross/shaders/tese/line.tese new file mode 100644 index 000000000..b4237ef55 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/line.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(isolines, point_mode, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/tese/triangle.tese b/3rdparty/spirv-cross/shaders/tese/triangle.tese new file mode 100644 index 000000000..6ce7c2d6d --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/triangle.tese @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require + +layout(cw, triangles, fractional_even_spacing) in; + +void main() +{ + gl_Position = vec4(1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/tese/water_tess.tese b/3rdparty/spirv-cross/shaders/tese/water_tess.tese new file mode 100644 index 000000000..32d6bc939 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/tese/water_tess.tese @@ -0,0 +1,65 @@ +#version 310 es +#extension GL_EXT_tessellation_shader : require +precision highp int; + +layout(cw, quads, fractional_even_spacing) in; + +layout(location = 0) patch in vec2 vOutPatchPosBase; +layout(location = 1) patch in vec4 vPatchLods; + +layout(binding = 1, std140) uniform UBO +{ + mat4 uMVP; + vec4 uScale; + vec2 uInvScale; + vec3 uCamPos; + vec2 uPatchSize; + vec2 uInvHeightmapSize; +}; +layout(binding = 0) uniform mediump sampler2D uHeightmapDisplacement; + +layout(location = 0) highp out vec3 vWorld; +layout(location = 1) highp out vec4 vGradNormalTex; + +vec2 lerp_vertex(vec2 tess_coord) +{ + return vOutPatchPosBase + tess_coord * uPatchSize; +} + +mediump vec2 lod_factor(vec2 tess_coord) +{ + mediump vec2 x = mix(vPatchLods.yx, vPatchLods.zw, tess_coord.x); + mediump float level = mix(x.x, x.y, tess_coord.y); + mediump float floor_level = floor(level); + mediump float fract_level = level - floor_level; + return vec2(floor_level, fract_level); +} + +mediump vec3 sample_height_displacement(vec2 uv, vec2 off, mediump vec2 lod) +{ + return mix( + textureLod(uHeightmapDisplacement, uv + 
0.5 * off, lod.x).xyz, + textureLod(uHeightmapDisplacement, uv + 1.0 * off, lod.x + 1.0).xyz, + lod.y); +} + +void main() +{ + vec2 tess_coord = gl_TessCoord.xy; + vec2 pos = lerp_vertex(tess_coord); + mediump vec2 lod = lod_factor(tess_coord); + + vec2 tex = pos * uInvHeightmapSize.xy; + pos *= uScale.xy; + + mediump float delta_mod = exp2(lod.x); + vec2 off = uInvHeightmapSize.xy * delta_mod; + + vGradNormalTex = vec4(tex + 0.5 * uInvHeightmapSize.xy, tex * uScale.zw); + vec3 height_displacement = sample_height_displacement(tex, off, lod); + + pos += height_displacement.yz; + vWorld = vec3(pos.x, height_displacement.x, pos.y); + gl_Position = uMVP * vec4(vWorld, 1.0); +} + diff --git a/3rdparty/spirv-cross/shaders/vert/basic.vert b/3rdparty/spirv-cross/shaders/vert/basic.vert new file mode 100644 index 000000000..2c75d44a4 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/basic.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(std140) uniform UBO +{ + uniform mat4 uMVP; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = uMVP * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders/vert/ground.vert b/3rdparty/spirv-cross/shaders/vert/ground.vert new file mode 100755 index 000000000..2deeb5a94 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/ground.vert @@ -0,0 +1,202 @@ +#version 310 es + +#define YFLIP 0 +#define SPECULAR 0 +#define GLOSSMAP 0 + +#define DEBUG_NONE 0 +#define DEBUG_DIFFUSE 1 +#define DEBUG_SPECULAR 2 +#define DEBUG_LIGHTING 3 +#define DEBUG_FOG 4 +#define DEBUG DEBUG_NONE + +#define FORWARD 0 +#define DEFERRED 1 +#define DEFERRED_VTEX 2 + +float saturate(float x) { return clamp(x, 0.0, 1.0); } + +layout(std140, binding = 0) uniform GlobalVSData +{ + vec4 g_ViewProj_Row0; + vec4 g_ViewProj_Row1; + vec4 g_ViewProj_Row2; + vec4 g_ViewProj_Row3; + vec4 g_CamPos; + vec4 g_CamRight; + vec4 g_CamUp; + vec4 g_CamFront; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_TimeParams; + vec4 g_ResolutionParams; + vec4 g_CamAxisRight; + vec4 g_FogColor_Distance; + vec4 g_ShadowVP_Row0; + vec4 g_ShadowVP_Row1; + vec4 g_ShadowVP_Row2; + vec4 g_ShadowVP_Row3; +}; + +vec4 ComputeFogFactor(vec3 WorldPos) +{ + vec4 FogData; + vec3 vEye = WorldPos - g_CamPos.xyz; + vec3 nEye = normalize(vEye); + FogData.w = exp(-dot(vEye, vEye) * g_FogColor_Distance.w * 0.75); + + float fog_sun_factor = pow(saturate(dot(nEye, g_SunDir.xyz)), 8.0); + FogData.xyz = mix(vec3(1.0, 1.0, 1.0), vec3(0.6, 0.6, 0.9), nEye.y * 0.5 + 0.5); + FogData.xyz = mix(FogData.xyz, vec3(0.95, 0.87, 0.78), fog_sun_factor); + return FogData; +} + +void ApplyFog(inout vec3 Color, vec4 FogData) +{ + Color = mix(FogData.xyz, Color, FogData.w); +} + +void ApplyLighting(inout mediump vec3 Color, mediump float DiffuseFactor) +{ + mediump vec3 DiffuseLight = g_SunColor.xyz * DiffuseFactor; + mediump vec3 AmbientLight = vec3(0.2, 0.35, 0.55) * 0.5; + mediump vec3 Lighting = DiffuseLight + AmbientLight; +#if DEBUG == DEBUG_LIGHTING + Color = Lighting; +#else + Color *= Lighting; +#endif +} + +#pragma VARIANT SPECULAR +#pragma VARIANT GLOSSMAP + +void ApplySpecular(inout mediump vec3 Color, mediump vec3 EyeVec, mediump vec3 Normal, mediump vec3 SpecularColor, mediump float Shininess, mediump float FresnelAmount) +{ + mediump vec3 HalfAngle = normalize(-EyeVec + g_SunDir.xyz); + + mediump float v_dot_h = saturate(dot(HalfAngle, -EyeVec)); + mediump float n_dot_l = saturate(dot(Normal, g_SunDir.xyz)); + mediump 
float n_dot_h = saturate(dot(Normal, HalfAngle)); + mediump float n_dot_v = saturate(dot(-EyeVec, Normal)); + mediump float h_dot_l = saturate(dot(g_SunDir.xyz, HalfAngle)); + + const mediump float roughness_value = 0.25; + + mediump float r_sq = roughness_value * roughness_value; + mediump float n_dot_h_sq = n_dot_h * n_dot_h; + mediump float roughness_a = 1.0 / (4.0 * r_sq * n_dot_h_sq * n_dot_h_sq); + mediump float roughness_b = n_dot_h_sq - 1.0; + mediump float roughness_c = r_sq * n_dot_h_sq; + mediump float roughness = saturate(roughness_a * exp(roughness_b / roughness_c)); + + FresnelAmount = 0.5; + mediump float fresnel_term = pow(1.0 - n_dot_v, 5.0) * (1.0 - FresnelAmount) + FresnelAmount; + + mediump float geo_numerator = 2.0 * n_dot_h; + mediump float geo_denominator = 1.0 / v_dot_h; + mediump float geo_term = min(1.0, min(n_dot_v, n_dot_l) * geo_numerator * geo_denominator); + +#if SPECULAR || GLOSSMAP + Color += SpecularColor * g_SunColor.xyz * fresnel_term * roughness * n_dot_l * geo_term / (n_dot_v * n_dot_l + 0.0001); +#endif + + //Color = vec3(0.025 * 1.0 / (n_dot_v * n_dot_l)); +} + +layout(location = 0) in vec2 Position; +layout(location = 1) in vec4 LODWeights; + +layout(location = 0) out vec2 TexCoord; +layout(location = 1) out vec3 EyeVec; + +layout(std140, binding = 2) uniform GlobalGround +{ + vec4 GroundScale; + vec4 GroundPosition; + vec4 InvGroundSize_PatchScale; +}; + +struct PatchData +{ + vec4 Position; + vec4 LODs; +}; + +layout(std140, binding = 0) uniform PerPatch +{ + PatchData Patches[256]; +}; + +layout(binding = 0) uniform sampler2D TexHeightmap; +layout(binding = 1) uniform sampler2D TexLOD; + +vec2 lod_factor(vec2 uv) +{ + float level = textureLod(TexLOD, uv, 0.0).x * (255.0 / 32.0); + float floor_level = floor(level); + float fract_level = level - floor_level; + return vec2(floor_level, fract_level); +} + +#ifdef VULKAN +#define INSTANCE_ID gl_InstanceIndex +#else +#define INSTANCE_ID gl_InstanceID +#endif + +vec2 warp_position() +{ + float vlod = dot(LODWeights, Patches[INSTANCE_ID].LODs); + vlod = mix(vlod, Patches[INSTANCE_ID].Position.w, all(equal(LODWeights, vec4(0.0)))); + +#ifdef DEBUG_LOD_HEIGHT + LODFactor = vec4(vlod); +#endif + + float floor_lod = floor(vlod); + float fract_lod = vlod - floor_lod; + uint ufloor_lod = uint(floor_lod); + +#ifdef DEBUG_LOD_HEIGHT + LODFactor = vec4(fract_lod); +#endif + + uvec2 uPosition = uvec2(Position); + uvec2 mask = (uvec2(1u) << uvec2(ufloor_lod, ufloor_lod + 1u)) - 1u; + //uvec2 rounding = mix(uvec2(0u), mask, lessThan(uPosition, uvec2(32u))); + + uvec2 rounding = uvec2( + uPosition.x < 32u ? mask.x : 0u, + uPosition.y < 32u ? 
mask.y : 0u); + + vec4 lower_upper_snapped = vec4((uPosition + rounding).xyxy & (~mask).xxyy); + return mix(lower_upper_snapped.xy, lower_upper_snapped.zw, fract_lod); +} + +void main() +{ + vec2 PatchPos = Patches[INSTANCE_ID].Position.xz * InvGroundSize_PatchScale.zw; + vec2 WarpedPos = warp_position(); + vec2 VertexPos = PatchPos + WarpedPos; + vec2 NormalizedPos = VertexPos * InvGroundSize_PatchScale.xy; + vec2 lod = lod_factor(NormalizedPos); + + vec2 Offset = exp2(lod.x) * InvGroundSize_PatchScale.xy; + + float Elevation = + mix(textureLod(TexHeightmap, NormalizedPos + 0.5 * Offset, lod.x).x, + textureLod(TexHeightmap, NormalizedPos + 1.0 * Offset, lod.x + 1.0).x, + lod.y); + + vec3 WorldPos = vec3(NormalizedPos.x, Elevation, NormalizedPos.y); + WorldPos *= GroundScale.xyz; + WorldPos += GroundPosition.xyz; + + EyeVec = WorldPos - g_CamPos.xyz; + TexCoord = NormalizedPos + 0.5 * InvGroundSize_PatchScale.xy; + + gl_Position = WorldPos.x * g_ViewProj_Row0 + WorldPos.y * g_ViewProj_Row1 + WorldPos.z * g_ViewProj_Row2 + g_ViewProj_Row3; +} + diff --git a/3rdparty/spirv-cross/shaders/vert/invariant.vert b/3rdparty/spirv-cross/shaders/vert/invariant.vert new file mode 100644 index 000000000..239b985da --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/invariant.vert @@ -0,0 +1,13 @@ +#version 310 es + +invariant gl_Position; +layout(location = 0) invariant out vec4 vColor; +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; +layout(location = 2) in vec4 vInput2; + +void main() +{ + gl_Position = vInput0 + vInput1 * vInput2; + vColor = (vInput0 - vInput1) * vInput2; +} diff --git a/3rdparty/spirv-cross/shaders/vert/ocean.vert b/3rdparty/spirv-cross/shaders/vert/ocean.vert new file mode 100644 index 000000000..8a5677fa1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/ocean.vert @@ -0,0 +1,200 @@ +#version 310 es + +#define YFLIP 0 +#define SPECULAR 0 +#define GLOSSMAP 0 + +#define DEBUG_NONE 0 +#define DEBUG_DIFFUSE 1 +#define DEBUG_SPECULAR 2 +#define DEBUG_LIGHTING 3 +#define DEBUG_FOG 4 +#define DEBUG DEBUG_NONE + +#define FORWARD 0 +#define DEFERRED 1 +#define DEFERRED_VTEX 2 + +float saturate(float x) { return clamp(x, 0.0, 1.0); } + +layout(std140, binding = 0) uniform GlobalVSData +{ + vec4 g_ViewProj_Row0; + vec4 g_ViewProj_Row1; + vec4 g_ViewProj_Row2; + vec4 g_ViewProj_Row3; + vec4 g_CamPos; + vec4 g_CamRight; + vec4 g_CamUp; + vec4 g_CamFront; + vec4 g_SunDir; + vec4 g_SunColor; + vec4 g_TimeParams; + vec4 g_ResolutionParams; + vec4 g_CamAxisRight; + vec4 g_FogColor_Distance; + vec4 g_ShadowVP_Row0; + vec4 g_ShadowVP_Row1; + vec4 g_ShadowVP_Row2; + vec4 g_ShadowVP_Row3; +}; + +vec4 ComputeFogFactor(vec3 WorldPos) +{ + vec4 FogData; + vec3 vEye = WorldPos - g_CamPos.xyz; + vec3 nEye = normalize(vEye); + FogData.w = exp(-dot(vEye, vEye) * g_FogColor_Distance.w * 0.75); + + float fog_sun_factor = pow(saturate(dot(nEye, g_SunDir.xyz)), 8.0); + FogData.xyz = mix(vec3(1.0, 1.0, 1.0), vec3(0.6, 0.6, 0.9), nEye.y * 0.5 + 0.5); + FogData.xyz = mix(FogData.xyz, vec3(0.95, 0.87, 0.78), fog_sun_factor); + return FogData; +} + +void ApplyFog(inout vec3 Color, vec4 FogData) +{ + Color = mix(FogData.xyz, Color, FogData.w); +} + +void ApplyLighting(inout mediump vec3 Color, mediump float DiffuseFactor) +{ + mediump vec3 DiffuseLight = g_SunColor.xyz * DiffuseFactor; + mediump vec3 AmbientLight = vec3(0.2, 0.35, 0.55) * 0.5; + mediump vec3 Lighting = DiffuseLight + AmbientLight; +#if DEBUG == DEBUG_LIGHTING + Color = Lighting; +#else + Color *= Lighting; +#endif 
+} + +void ApplySpecular(inout mediump vec3 Color, mediump vec3 EyeVec, mediump vec3 Normal, mediump vec3 SpecularColor, mediump float Shininess, mediump float FresnelAmount) +{ + mediump vec3 HalfAngle = normalize(-EyeVec + g_SunDir.xyz); + + mediump float v_dot_h = saturate(dot(HalfAngle, -EyeVec)); + mediump float n_dot_l = saturate(dot(Normal, g_SunDir.xyz)); + mediump float n_dot_h = saturate(dot(Normal, HalfAngle)); + mediump float n_dot_v = saturate(dot(-EyeVec, Normal)); + mediump float h_dot_l = saturate(dot(g_SunDir.xyz, HalfAngle)); + + const mediump float roughness_value = 0.25; + + mediump float r_sq = roughness_value * roughness_value; + mediump float n_dot_h_sq = n_dot_h * n_dot_h; + mediump float roughness_a = 1.0 / (4.0 * r_sq * n_dot_h_sq * n_dot_h_sq); + mediump float roughness_b = n_dot_h_sq - 1.0; + mediump float roughness_c = r_sq * n_dot_h_sq; + mediump float roughness = saturate(roughness_a * exp(roughness_b / roughness_c)); + + FresnelAmount = 0.5; + mediump float fresnel_term = pow(1.0 - n_dot_v, 5.0) * (1.0 - FresnelAmount) + FresnelAmount; + + mediump float geo_numerator = 2.0 * n_dot_h; + mediump float geo_denominator = 1.0 / v_dot_h; + mediump float geo_term = min(1.0, min(n_dot_v, n_dot_l) * geo_numerator * geo_denominator); + +#if SPECULAR || GLOSSMAP + Color += SpecularColor * g_SunColor.xyz * fresnel_term * roughness * n_dot_l * geo_term / (n_dot_v * n_dot_l + 0.0001); +#endif + + //Color = vec3(0.025 * 1.0 / (n_dot_v * n_dot_l)); +} + + +precision highp int; + +layout(binding = 0) uniform mediump sampler2D TexDisplacement; +layout(binding = 1) uniform mediump sampler2D TexLOD; + +layout(location = 0) in vec4 Position; +layout(location = 1) in vec4 LODWeights; + +layout(location = 0) out highp vec3 EyeVec; +layout(location = 1) out highp vec4 TexCoord; + +layout(std140, binding = 4) uniform GlobalOcean +{ + vec4 OceanScale; + vec4 OceanPosition; + vec4 InvOceanSize_PatchScale; + vec4 NormalTexCoordScale; +}; + +struct PatchData +{ + vec4 Position; + vec4 LODs; +}; + +layout(std140, binding = 0) uniform Offsets +{ + PatchData Patches[256]; +}; + +vec2 lod_factor(vec2 uv) +{ + float level = textureLod(TexLOD, uv, 0.0).x * (255.0 / 32.0); + float floor_level = floor(level); + float fract_level = level - floor_level; + return vec2(floor_level, fract_level); +} + +#ifdef VULKAN +#define INSTANCE_ID gl_InstanceIndex +#else +#define INSTANCE_ID gl_InstanceID +#endif + +vec2 warp_position() +{ + float vlod = dot(LODWeights, Patches[INSTANCE_ID].LODs); + vlod = mix(vlod, Patches[INSTANCE_ID].Position.w, all(equal(LODWeights, vec4(0.0)))); + + float floor_lod = floor(vlod); + float fract_lod = vlod - floor_lod; + uint ufloor_lod = uint(floor_lod); + + uvec4 uPosition = uvec4(Position); + uvec2 mask = (uvec2(1u) << uvec2(ufloor_lod, ufloor_lod + 1u)) - 1u; + + uvec4 rounding; + rounding.x = uPosition.x < 32u ? mask.x : 0u; + rounding.y = uPosition.y < 32u ? mask.x : 0u; + rounding.z = uPosition.x < 32u ? mask.y : 0u; + rounding.w = uPosition.y < 32u ? 
mask.y : 0u; + + //rounding = uPosition.xyxy * mask.xxyy; + vec4 lower_upper_snapped = vec4((uPosition.xyxy + rounding) & (~mask).xxyy); + return mix(lower_upper_snapped.xy, lower_upper_snapped.zw, fract_lod); +} + +void main() +{ + vec2 PatchPos = Patches[INSTANCE_ID].Position.xz * InvOceanSize_PatchScale.zw; + vec2 WarpedPos = warp_position(); + vec2 VertexPos = PatchPos + WarpedPos; + vec2 NormalizedPos = VertexPos * InvOceanSize_PatchScale.xy; + vec2 NormalizedTex = NormalizedPos * NormalTexCoordScale.zw; + vec2 lod = lod_factor(NormalizedPos); + vec2 Offset = exp2(lod.x) * InvOceanSize_PatchScale.xy * NormalTexCoordScale.zw; + + vec3 Displacement = + mix(textureLod(TexDisplacement, NormalizedTex + 0.5 * Offset, lod.x).yxz, + textureLod(TexDisplacement, NormalizedTex + 1.0 * Offset, lod.x + 1.0).yxz, + lod.y); + + vec3 WorldPos = vec3(NormalizedPos.x, 0.0, NormalizedPos.y) + Displacement; + WorldPos *= OceanScale.xyz; + WorldPos += OceanPosition.xyz; + + EyeVec = WorldPos - g_CamPos.xyz; + TexCoord = vec4(NormalizedTex, NormalizedTex * NormalTexCoordScale.xy) + 0.5 * InvOceanSize_PatchScale.xyxy * NormalTexCoordScale.zwzw; + + gl_Position = WorldPos.x * g_ViewProj_Row0 + WorldPos.y * g_ViewProj_Row1 + WorldPos.z * g_ViewProj_Row2 + g_ViewProj_Row3; +#if YFLIP + gl_Position *= vec4(1.0, -1.0, 1.0, 1.0); +#endif +} + diff --git a/3rdparty/spirv-cross/shaders/vert/read-from-row-major-array.vert b/3rdparty/spirv-cross/shaders/vert/read-from-row-major-array.vert new file mode 100644 index 000000000..792fb8e36 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/read-from-row-major-array.vert @@ -0,0 +1,20 @@ +#version 310 es +layout(location = 0) in highp vec4 a_position; +layout(location = 0) out mediump float v_vtxResult; + +layout(set = 0, binding = 0, std140, row_major) uniform Block +{ + highp mat2x3 var[3][4]; +}; + +mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 
1.0 : 0.0; } +mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); } +mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); } + +void main (void) +{ + gl_Position = a_position; + mediump float result = 1.0; + result *= compare_mat2x3(var[0][0], mat2x3(2.0, 6.0, -6.0, 0.0, 5.0, 5.0)); + v_vtxResult = result; +} diff --git a/3rdparty/spirv-cross/shaders/vert/return-array.vert b/3rdparty/spirv-cross/shaders/vert/return-array.vert new file mode 100644 index 000000000..708460114 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/return-array.vert @@ -0,0 +1,22 @@ +#version 310 es + +layout(location = 0) in vec4 vInput0; +layout(location = 1) in vec4 vInput1; + +vec4[2] test() +{ + return vec4[](vec4(10.0), vec4(20.0)); +} + +vec4[2] test2() +{ + vec4 foobar[2]; + foobar[0] = vInput0; + foobar[1] = vInput1; + return foobar; +} + +void main() +{ + gl_Position = test()[0] + test2()[1]; +} diff --git a/3rdparty/spirv-cross/shaders/vert/texture_buffer.vert b/3rdparty/spirv-cross/shaders/vert/texture_buffer.vert new file mode 100644 index 000000000..6bc7ddfae --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/texture_buffer.vert @@ -0,0 +1,10 @@ +#version 310 es +#extension GL_OES_texture_buffer : require + +layout(binding = 4) uniform highp samplerBuffer uSamp; +layout(rgba32f, binding = 5) uniform readonly highp imageBuffer uSampo; + +void main() +{ + gl_Position = texelFetch(uSamp, 10) + imageLoad(uSampo, 100); +} diff --git a/3rdparty/spirv-cross/shaders/vert/ubo.vert b/3rdparty/spirv-cross/shaders/vert/ubo.vert new file mode 100644 index 000000000..82e4626e1 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vert/ubo.vert @@ -0,0 +1,16 @@ +#version 310 es + +layout(binding = 0, std140) uniform UBO +{ + mat4 mvp; +}; + +layout(location = 0) in vec4 aVertex; +layout(location = 1) in vec3 aNormal; +layout(location = 0) out vec3 vNormal; + +void main() +{ + gl_Position = mvp * aVertex; + vNormal = aNormal; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp b/3rdparty/spirv-cross/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp new file mode 100644 index 000000000..0b428eb0c --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/comp/spec-constant-op-member-array.vk.comp @@ -0,0 +1,33 @@ +#version 450 +layout(local_size_x = 1) in; + +layout(constant_id = 0) const int a = 100; +layout(constant_id = 1) const int b = 200; +layout(constant_id = 2) const int c = 300; +const int d = c + 50; +layout(constant_id = 3) const int e = 400; + +struct A +{ + int member0[a]; + int member1[b]; +}; + +struct B +{ + int member0[b]; + int member1[a]; +}; + +layout(set = 1, binding = 0) buffer SSBO +{ + A member_a; + B member_b; + int v[a]; + int w[d]; +}; + +void main() +{ + w[gl_GlobalInvocationID.x] += v[gl_GlobalInvocationID.x] + e; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp b/3rdparty/spirv-cross/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp new file mode 100644 index 000000000..09b65dc99 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/comp/spec-constant-work-group-size.vk.comp @@ -0,0 +1,17 @@ +#version 450 +layout(local_size_x_id = 10, local_size_y = 20) in; + +layout(constant_id = 0) const int a = 1; +layout(constant_id = 1) const int b = 2; + +layout(set = 1, binding = 0) writeonly buffer SSBO +{ + int v[]; +}; + +void main() +{ + int 
spec_const_array_size[b]; + spec_const_array_size[a] = a; + v[a + gl_WorkGroupSize.x + gl_WorkGroupSize.y] = b + spec_const_array_size[1 - a]; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp b/3rdparty/spirv-cross/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp new file mode 100644 index 000000000..68fc74f91 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/comp/subgroups.nocompat.invalid.vk.comp @@ -0,0 +1,125 @@ +#version 450 +#extension GL_KHR_shader_subgroup_basic : require +#extension GL_KHR_shader_subgroup_ballot : require +#extension GL_KHR_shader_subgroup_vote : require +#extension GL_KHR_shader_subgroup_shuffle : require +#extension GL_KHR_shader_subgroup_shuffle_relative : require +#extension GL_KHR_shader_subgroup_arithmetic : require +#extension GL_KHR_shader_subgroup_clustered : require +#extension GL_KHR_shader_subgroup_quad : require +layout(local_size_x = 1) in; + +layout(std430, binding = 0) buffer SSBO +{ + float FragColor; +}; + +void main() +{ + // basic + FragColor = float(gl_NumSubgroups); + FragColor = float(gl_SubgroupID); + FragColor = float(gl_SubgroupSize); + FragColor = float(gl_SubgroupInvocationID); + subgroupBarrier(); + subgroupMemoryBarrier(); + subgroupMemoryBarrierBuffer(); + subgroupMemoryBarrierShared(); + subgroupMemoryBarrierImage(); + bool elected = subgroupElect(); + + // ballot + FragColor = float(gl_SubgroupEqMask); + FragColor = float(gl_SubgroupGeMask); + FragColor = float(gl_SubgroupGtMask); + FragColor = float(gl_SubgroupLeMask); + FragColor = float(gl_SubgroupLtMask); + vec4 broadcasted = subgroupBroadcast(vec4(10.0), 8u); + vec3 first = subgroupBroadcastFirst(vec3(20.0)); + uvec4 ballot_value = subgroupBallot(true); + bool inverse_ballot_value = subgroupInverseBallot(ballot_value); + bool bit_extracted = subgroupBallotBitExtract(uvec4(10u), 8u); + uint bit_count = subgroupBallotBitCount(ballot_value); + uint inclusive_bit_count = subgroupBallotInclusiveBitCount(ballot_value); + uint exclusive_bit_count = subgroupBallotExclusiveBitCount(ballot_value); + uint lsb = subgroupBallotFindLSB(ballot_value); + uint msb = subgroupBallotFindMSB(ballot_value); + + // shuffle + uint shuffled = subgroupShuffle(10u, 8u); + uint shuffled_xor = subgroupShuffleXor(30u, 8u); + + // shuffle relative + uint shuffled_up = subgroupShuffleUp(20u, 4u); + uint shuffled_down = subgroupShuffleDown(20u, 4u); + + // vote + bool has_all = subgroupAll(true); + bool has_any = subgroupAny(true); + bool has_equal = subgroupAllEqual(true); + + // arithmetic + vec4 added = subgroupAdd(vec4(20.0)); + ivec4 iadded = subgroupAdd(ivec4(20)); + vec4 multiplied = subgroupMul(vec4(20.0)); + ivec4 imultiplied = subgroupMul(ivec4(20)); + vec4 lo = subgroupMin(vec4(20.0)); + vec4 hi = subgroupMax(vec4(20.0)); + ivec4 slo = subgroupMin(ivec4(20)); + ivec4 shi = subgroupMax(ivec4(20)); + uvec4 ulo = subgroupMin(uvec4(20)); + uvec4 uhi = subgroupMax(uvec4(20)); + uvec4 anded = subgroupAnd(ballot_value); + uvec4 ored = subgroupOr(ballot_value); + uvec4 xored = subgroupXor(ballot_value); + + added = subgroupInclusiveAdd(added); + iadded = subgroupInclusiveAdd(iadded); + multiplied = subgroupInclusiveMul(multiplied); + imultiplied = subgroupInclusiveMul(imultiplied); + lo = subgroupInclusiveMin(lo); + hi = subgroupInclusiveMax(hi); + slo = subgroupInclusiveMin(slo); + shi = subgroupInclusiveMax(shi); + ulo = subgroupInclusiveMin(ulo); + uhi = subgroupInclusiveMax(uhi); + anded = subgroupInclusiveAnd(anded); + ored = 
subgroupInclusiveOr(ored); + xored = subgroupInclusiveXor(ored); + added = subgroupExclusiveAdd(lo); + + added = subgroupExclusiveAdd(multiplied); + multiplied = subgroupExclusiveMul(multiplied); + iadded = subgroupExclusiveAdd(imultiplied); + imultiplied = subgroupExclusiveMul(imultiplied); + lo = subgroupExclusiveMin(lo); + hi = subgroupExclusiveMax(hi); + ulo = subgroupExclusiveMin(ulo); + uhi = subgroupExclusiveMax(uhi); + slo = subgroupExclusiveMin(slo); + shi = subgroupExclusiveMax(shi); + anded = subgroupExclusiveAnd(anded); + ored = subgroupExclusiveOr(ored); + xored = subgroupExclusiveXor(ored); + + // clustered + added = subgroupClusteredAdd(added, 4u); + multiplied = subgroupClusteredMul(multiplied, 4u); + iadded = subgroupClusteredAdd(iadded, 4u); + imultiplied = subgroupClusteredMul(imultiplied, 4u); + lo = subgroupClusteredMin(lo, 4u); + hi = subgroupClusteredMax(hi, 4u); + ulo = subgroupClusteredMin(ulo, 4u); + uhi = subgroupClusteredMax(uhi, 4u); + slo = subgroupClusteredMin(slo, 4u); + shi = subgroupClusteredMax(shi, 4u); + anded = subgroupClusteredAnd(anded, 4u); + ored = subgroupClusteredOr(ored, 4u); + xored = subgroupClusteredXor(xored, 4u); + + // quad + vec4 swap_horiz = subgroupQuadSwapHorizontal(vec4(20.0)); + vec4 swap_vertical = subgroupQuadSwapVertical(vec4(20.0)); + vec4 swap_diagonal = subgroupQuadSwapDiagonal(vec4(20.0)); + vec4 quad_broadcast = subgroupQuadBroadcast(vec4(20.0), 3u); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag new file mode 100644 index 000000000..2fabb5ea8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/combined-texture-sampler-shadow.vk.frag @@ -0,0 +1,29 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump samplerShadow uSampler; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; +layout(set = 0, binding = 2) uniform texture2D uDepth; +layout(location = 0) out float FragColor; + +float samp2(texture2D t, mediump samplerShadow s) +{ + return texture(sampler2DShadow(t, s), vec3(1.0)); +} + +float samp3(texture2D t, mediump sampler s) +{ + return texture(sampler2D(t, s), vec2(1.0)).x; +} + +float samp(texture2D t, mediump samplerShadow s, mediump sampler s1) +{ + float r0 = samp2(t, s); + float r1 = samp3(t, s1); + return r0 + r1; +} + +void main() +{ + FragColor = samp(uDepth, uSampler, uSampler1); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/combined-texture-sampler.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/combined-texture-sampler.vk.frag new file mode 100644 index 000000000..b7de8d47e --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/combined-texture-sampler.vk.frag @@ -0,0 +1,47 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler uSampler0; +layout(set = 0, binding = 1) uniform mediump sampler uSampler1; +layout(set = 0, binding = 2) uniform mediump texture2D uTexture0; +layout(set = 0, binding = 3) uniform mediump texture2D uTexture1; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTex; + +vec4 sample_dual(mediump sampler samp, mediump texture2D tex) +{ + return texture(sampler2D(tex, samp), vTex); +} + +vec4 sample_global_tex(mediump sampler samp) +{ + vec4 a = texture(sampler2D(uTexture0, samp), vTex); + vec4 b = sample_dual(samp, uTexture1); + return a + b; +} + +vec4 sample_global_sampler(mediump texture2D tex) +{ + vec4 a = 
texture(sampler2D(tex, uSampler0), vTex); + vec4 b = sample_dual(uSampler1, tex); + return a + b; +} + +vec4 sample_duals() +{ + vec4 a = sample_dual(uSampler0, uTexture0); + vec4 b = sample_dual(uSampler1, uTexture1); + return a + b; +} + +void main() +{ + vec4 c0 = sample_duals(); + vec4 c1 = sample_global_tex(uSampler0); + vec4 c2 = sample_global_tex(uSampler1); + vec4 c3 = sample_global_sampler(uTexture0); + vec4 c4 = sample_global_sampler(uTexture1); + + FragColor = c0 + c1 + c2 + c3 + c4; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/desktop-mediump.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/desktop-mediump.vk.frag new file mode 100644 index 000000000..23fe3d3da --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/desktop-mediump.vk.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) in mediump vec4 F; +layout(location = 1) flat in mediump ivec4 I; +layout(location = 2) flat in mediump uvec4 U; +layout(location = 0) out mediump vec4 FragColor; + +void main() +{ + FragColor = F + vec4(I) + vec4(U); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/input-attachment-ms.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/input-attachment-ms.vk.frag new file mode 100644 index 000000000..e06073884 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/input-attachment-ms.vk.frag @@ -0,0 +1,10 @@ +#version 450 + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform subpassInputMS uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform subpassInputMS uSubpass1; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = subpassLoad(uSubpass0, 1) + subpassLoad(uSubpass1, 2) + subpassLoad(uSubpass0, gl_SampleID); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/input-attachment.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/input-attachment.vk.frag new file mode 100644 index 000000000..f082d15b2 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/input-attachment.vk.frag @@ -0,0 +1,11 @@ +#version 310 es +precision mediump float; + +layout(input_attachment_index = 0, set = 0, binding = 0) uniform mediump subpassInput uSubpass0; +layout(input_attachment_index = 1, set = 0, binding = 1) uniform mediump subpassInput uSubpass1; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = subpassLoad(uSubpass0) + subpassLoad(uSubpass1); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/push-constant.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/push-constant.vk.frag new file mode 100644 index 000000000..6180faba3 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/push-constant.vk.frag @@ -0,0 +1,16 @@ +#version 310 es +precision mediump float; + +layout(push_constant, std430) uniform PushConstants +{ + vec4 value0; + vec4 value1; +} push; + +layout(location = 0) in vec4 vColor; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = vColor + push.value0 + push.value1; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag new file mode 100644 index 000000000..22d18a26a --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/separate-combined-fake-overload.vk.frag @@ -0,0 +1,21 @@ +#version 450 + +layout(location = 0) out vec4 FragColor; +layout(binding = 0) uniform sampler2D uSamp; +layout(binding = 1) uniform texture2D uT; +layout(binding = 2) uniform sampler uS; + +vec4 samp(sampler2D uSamp) +{ + return 
texture(uSamp, vec2(0.5)); +} + +vec4 samp(texture2D T, sampler S) +{ + return texture(sampler2D(T, S), vec2(0.5)); +} + +void main() +{ + FragColor = samp(uSamp) + samp(uT, uS); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag new file mode 100644 index 000000000..b3501c1d8 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/separate-sampler-texture-array.vk.frag @@ -0,0 +1,42 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 1) uniform mediump texture2D uTexture[4]; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D[4]; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube[4]; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray[4]; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; + +vec4 sample_func(mediump sampler samp, vec2 uv) +{ + return texture(sampler2D(uTexture[2], samp), uv); +} + +vec4 sample_func_dual(mediump sampler samp, mediump texture2D tex, vec2 uv) +{ + return texture(sampler2D(tex, samp), uv); +} + +vec4 sample_func_dual_array(mediump sampler samp, mediump texture2D tex[4], vec2 uv) +{ + return texture(sampler2D(tex[1], samp), uv); +} + +void main() +{ + vec2 off = 1.0 / vec2(textureSize(sampler2D(uTexture[1], uSampler), 0)); + vec2 off2 = 1.0 / vec2(textureSize(sampler2D(uTexture[2], uSampler), 1)); + + vec4 c0 = sample_func(uSampler, vTex + off + off2); + vec4 c1 = sample_func_dual(uSampler, uTexture[1], vTex + off + off2); + vec4 c2 = sample_func_dual_array(uSampler, uTexture, vTex + off + off2); + vec4 c3 = texture(sampler2DArray(uTextureArray[3], uSampler), vTex3); + vec4 c4 = texture(samplerCube(uTextureCube[1], uSampler), vTex3); + vec4 c5 = texture(sampler3D(uTexture3D[2], uSampler), vTex3); + + FragColor = c0 + c1 + c2 + c3 + c4 + c5; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/separate-sampler-texture.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/separate-sampler-texture.vk.frag new file mode 100644 index 000000000..cedf114ef --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/separate-sampler-texture.vk.frag @@ -0,0 +1,36 @@ +#version 310 es +precision mediump float; + +layout(set = 0, binding = 0) uniform mediump sampler uSampler; +layout(set = 0, binding = 1) uniform mediump texture2D uTexture; +layout(set = 0, binding = 2) uniform mediump texture3D uTexture3D; +layout(set = 0, binding = 3) uniform mediump textureCube uTextureCube; +layout(set = 0, binding = 4) uniform mediump texture2DArray uTextureArray; + +layout(location = 0) out vec4 FragColor; +layout(location = 0) in vec2 vTex; +layout(location = 1) in vec3 vTex3; + +vec4 sample_func(mediump sampler samp, vec2 uv) +{ + return texture(sampler2D(uTexture, samp), uv); +} + +vec4 sample_func_dual(mediump sampler samp, mediump texture2D tex, vec2 uv) +{ + return texture(sampler2D(tex, samp), uv); +} + +void main() +{ + vec2 off = 1.0 / vec2(textureSize(sampler2D(uTexture, uSampler), 0)); + vec2 off2 = 1.0 / vec2(textureSize(sampler2D(uTexture, uSampler), 1)); + + vec4 c0 = sample_func(uSampler, vTex + off + off2); + vec4 c1 = sample_func_dual(uSampler, uTexture, vTex + off + off2); + vec4 c2 = texture(sampler2DArray(uTextureArray, uSampler), vTex3); + vec4 c3 = texture(samplerCube(uTextureCube, uSampler), vTex3); + vec4 c4 = 
texture(sampler3D(uTexture3D, uSampler), vTex3); + + FragColor = c0 + c1 + c2 + c3 + c4; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/spec-constant-block-size.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/spec-constant-block-size.vk.frag new file mode 100644 index 000000000..8d2b1f326 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/spec-constant-block-size.vk.frag @@ -0,0 +1,17 @@ +#version 310 es +precision mediump float; + +layout(constant_id = 10) const int Value = 2; +layout(binding = 0) uniform SpecConstArray +{ + vec4 samples[Value]; +}; + +layout(location = 0) flat in int Index; +layout(location = 0) out vec4 FragColor; + +void main() +{ + FragColor = samples[Index]; +} + diff --git a/3rdparty/spirv-cross/shaders/vulkan/frag/spec-constant-ternary.vk.frag b/3rdparty/spirv-cross/shaders/vulkan/frag/spec-constant-ternary.vk.frag new file mode 100644 index 000000000..78dccbf04 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/frag/spec-constant-ternary.vk.frag @@ -0,0 +1,9 @@ +#version 450 +layout(location = 0) out float FragColor; +layout(constant_id = 0) const uint s = 10u; +const uint f = s > 20u ? 30u : 50u; + +void main() +{ + FragColor = float(f); +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/vert/multiview.nocompat.vk.vert b/3rdparty/spirv-cross/shaders/vulkan/vert/multiview.nocompat.vk.vert new file mode 100644 index 000000000..eb1bc766f --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/vert/multiview.nocompat.vk.vert @@ -0,0 +1,14 @@ +#version 310 es +#extension GL_EXT_multiview : require + +layout(std140, binding = 0) uniform MVPs +{ + mat4 MVP[2]; +}; + +layout(location = 0) in vec4 Position; + +void main() +{ + gl_Position = MVP[gl_ViewIndex] * Position; +} diff --git a/3rdparty/spirv-cross/shaders/vulkan/vert/small-storage.vk.vert b/3rdparty/spirv-cross/shaders/vulkan/vert/small-storage.vk.vert new file mode 100644 index 000000000..195f3d556 --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/vert/small-storage.vk.vert @@ -0,0 +1,46 @@ +#version 450 core + +// GL_EXT_shader_16bit_storage doesn't support input/output. 
+#extension GL_EXT_shader_8bit_storage : require +#extension GL_AMD_gpu_shader_int16 : require +#extension GL_AMD_gpu_shader_half_float : require + +layout(location = 0, component = 0) in int16_t foo; +layout(location = 0, component = 1) in uint16_t bar; +layout(location = 1) in float16_t baz; + +layout(binding = 0) uniform block { + i16vec2 a; + u16vec2 b; + i8vec2 c; + u8vec2 d; + f16vec2 e; +}; + +layout(binding = 1) readonly buffer storage { + i16vec3 f; + u16vec3 g; + i8vec3 h; + u8vec3 i; + f16vec3 j; +}; + +layout(push_constant) uniform pushconst { + i16vec4 k; + u16vec4 l; + i8vec4 m; + u8vec4 n; + f16vec4 o; +}; + +layout(location = 0) out i16vec4 p; +layout(location = 1) out u16vec4 q; +layout(location = 2) out f16vec4 r; + +void main() { + p = i16vec4(int(foo) + ivec4(ivec2(a), ivec2(c)) - ivec4(ivec3(f) / ivec3(h), 1) + ivec4(k) + ivec4(m)); + q = u16vec4(uint(bar) + uvec4(uvec2(b), uvec2(d)) - uvec4(uvec3(g) / uvec3(i), 1) + uvec4(l) + uvec4(n)); + r = f16vec4(float(baz) + vec4(vec2(e), 0, 1) - vec4(vec3(j), 1) + vec4(o)); + gl_Position = vec4(0, 0, 0, 1); +} + diff --git a/3rdparty/spirv-cross/shaders/vulkan/vert/vulkan-vertex.vk.vert b/3rdparty/spirv-cross/shaders/vulkan/vert/vulkan-vertex.vk.vert new file mode 100644 index 000000000..4d0438ace --- /dev/null +++ b/3rdparty/spirv-cross/shaders/vulkan/vert/vulkan-vertex.vk.vert @@ -0,0 +1,6 @@ +#version 310 es + +void main() +{ + gl_Position = float(gl_VertexIndex + gl_InstanceIndex) * vec4(1.0, 2.0, 3.0, 4.0); +} diff --git a/3rdparty/spirv-cross/spirv.hpp b/3rdparty/spirv-cross/spirv.hpp new file mode 100644 index 000000000..1fc24fb28 --- /dev/null +++ b/3rdparty/spirv-cross/spirv.hpp @@ -0,0 +1,1081 @@ +// Copyright (c) 2014-2018 The Khronos Group Inc. +// +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and/or associated documentation files (the "Materials"), +// to deal in the Materials without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Materials, and to permit persons to whom the +// Materials are furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Materials. +// +// MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +// STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +// HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +// +// THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +// IN THE MATERIALS. + +// This header is automatically generated by the same tool that creates +// the Binary Section of the SPIR-V specification. 
+ +// Enumeration tokens for SPIR-V, in various styles: +// C, C++, C++11, JSON, Lua, Python +// +// - C will have tokens with a "Spv" prefix, e.g.: SpvSourceLanguageGLSL +// - C++ will have tokens in the "spv" name space, e.g.: spv::SourceLanguageGLSL +// - C++11 will use enum classes in the spv namespace, e.g.: spv::SourceLanguage::GLSL +// - Lua will use tables, e.g.: spv.SourceLanguage.GLSL +// - Python will use dictionaries, e.g.: spv['SourceLanguage']['GLSL'] +// +// Some tokens act like mask values, which can be OR'd together, +// while others are mutually exclusive. The mask-like ones have +// "Mask" in their name, and a parallel enum that has the shift +// amount (1 << x) for each corresponding enumerant. + +#ifndef spirv_HPP +#define spirv_HPP + +namespace spv { + +typedef unsigned int Id; + +#define SPV_VERSION 0x10300 +#define SPV_REVISION 1 + +static const unsigned int MagicNumber = 0x07230203; +static const unsigned int Version = 0x00010300; +static const unsigned int Revision = 1; +static const unsigned int OpCodeMask = 0xffff; +static const unsigned int WordCountShift = 16; + +enum SourceLanguage { + SourceLanguageUnknown = 0, + SourceLanguageESSL = 1, + SourceLanguageGLSL = 2, + SourceLanguageOpenCL_C = 3, + SourceLanguageOpenCL_CPP = 4, + SourceLanguageHLSL = 5, + SourceLanguageMax = 0x7fffffff, +}; + +enum ExecutionModel { + ExecutionModelVertex = 0, + ExecutionModelTessellationControl = 1, + ExecutionModelTessellationEvaluation = 2, + ExecutionModelGeometry = 3, + ExecutionModelFragment = 4, + ExecutionModelGLCompute = 5, + ExecutionModelKernel = 6, + ExecutionModelMax = 0x7fffffff, +}; + +enum AddressingModel { + AddressingModelLogical = 0, + AddressingModelPhysical32 = 1, + AddressingModelPhysical64 = 2, + AddressingModelMax = 0x7fffffff, +}; + +enum MemoryModel { + MemoryModelSimple = 0, + MemoryModelGLSL450 = 1, + MemoryModelOpenCL = 2, + MemoryModelMax = 0x7fffffff, +}; + +enum ExecutionMode { + ExecutionModeInvocations = 0, + ExecutionModeSpacingEqual = 1, + ExecutionModeSpacingFractionalEven = 2, + ExecutionModeSpacingFractionalOdd = 3, + ExecutionModeVertexOrderCw = 4, + ExecutionModeVertexOrderCcw = 5, + ExecutionModePixelCenterInteger = 6, + ExecutionModeOriginUpperLeft = 7, + ExecutionModeOriginLowerLeft = 8, + ExecutionModeEarlyFragmentTests = 9, + ExecutionModePointMode = 10, + ExecutionModeXfb = 11, + ExecutionModeDepthReplacing = 12, + ExecutionModeDepthGreater = 14, + ExecutionModeDepthLess = 15, + ExecutionModeDepthUnchanged = 16, + ExecutionModeLocalSize = 17, + ExecutionModeLocalSizeHint = 18, + ExecutionModeInputPoints = 19, + ExecutionModeInputLines = 20, + ExecutionModeInputLinesAdjacency = 21, + ExecutionModeTriangles = 22, + ExecutionModeInputTrianglesAdjacency = 23, + ExecutionModeQuads = 24, + ExecutionModeIsolines = 25, + ExecutionModeOutputVertices = 26, + ExecutionModeOutputPoints = 27, + ExecutionModeOutputLineStrip = 28, + ExecutionModeOutputTriangleStrip = 29, + ExecutionModeVecTypeHint = 30, + ExecutionModeContractionOff = 31, + ExecutionModeInitializer = 33, + ExecutionModeFinalizer = 34, + ExecutionModeSubgroupSize = 35, + ExecutionModeSubgroupsPerWorkgroup = 36, + ExecutionModeSubgroupsPerWorkgroupId = 37, + ExecutionModeLocalSizeId = 38, + ExecutionModeLocalSizeHintId = 39, + ExecutionModePostDepthCoverage = 4446, + ExecutionModeStencilRefReplacingEXT = 5027, + ExecutionModeMax = 0x7fffffff, +}; + +enum StorageClass { + StorageClassUniformConstant = 0, + StorageClassInput = 1, + StorageClassUniform = 2, + StorageClassOutput = 3, + 
StorageClassWorkgroup = 4, + StorageClassCrossWorkgroup = 5, + StorageClassPrivate = 6, + StorageClassFunction = 7, + StorageClassGeneric = 8, + StorageClassPushConstant = 9, + StorageClassAtomicCounter = 10, + StorageClassImage = 11, + StorageClassStorageBuffer = 12, + StorageClassMax = 0x7fffffff, +}; + +enum Dim { + Dim1D = 0, + Dim2D = 1, + Dim3D = 2, + DimCube = 3, + DimRect = 4, + DimBuffer = 5, + DimSubpassData = 6, + DimMax = 0x7fffffff, +}; + +enum SamplerAddressingMode { + SamplerAddressingModeNone = 0, + SamplerAddressingModeClampToEdge = 1, + SamplerAddressingModeClamp = 2, + SamplerAddressingModeRepeat = 3, + SamplerAddressingModeRepeatMirrored = 4, + SamplerAddressingModeMax = 0x7fffffff, +}; + +enum SamplerFilterMode { + SamplerFilterModeNearest = 0, + SamplerFilterModeLinear = 1, + SamplerFilterModeMax = 0x7fffffff, +}; + +enum ImageFormat { + ImageFormatUnknown = 0, + ImageFormatRgba32f = 1, + ImageFormatRgba16f = 2, + ImageFormatR32f = 3, + ImageFormatRgba8 = 4, + ImageFormatRgba8Snorm = 5, + ImageFormatRg32f = 6, + ImageFormatRg16f = 7, + ImageFormatR11fG11fB10f = 8, + ImageFormatR16f = 9, + ImageFormatRgba16 = 10, + ImageFormatRgb10A2 = 11, + ImageFormatRg16 = 12, + ImageFormatRg8 = 13, + ImageFormatR16 = 14, + ImageFormatR8 = 15, + ImageFormatRgba16Snorm = 16, + ImageFormatRg16Snorm = 17, + ImageFormatRg8Snorm = 18, + ImageFormatR16Snorm = 19, + ImageFormatR8Snorm = 20, + ImageFormatRgba32i = 21, + ImageFormatRgba16i = 22, + ImageFormatRgba8i = 23, + ImageFormatR32i = 24, + ImageFormatRg32i = 25, + ImageFormatRg16i = 26, + ImageFormatRg8i = 27, + ImageFormatR16i = 28, + ImageFormatR8i = 29, + ImageFormatRgba32ui = 30, + ImageFormatRgba16ui = 31, + ImageFormatRgba8ui = 32, + ImageFormatR32ui = 33, + ImageFormatRgb10a2ui = 34, + ImageFormatRg32ui = 35, + ImageFormatRg16ui = 36, + ImageFormatRg8ui = 37, + ImageFormatR16ui = 38, + ImageFormatR8ui = 39, + ImageFormatMax = 0x7fffffff, +}; + +enum ImageChannelOrder { + ImageChannelOrderR = 0, + ImageChannelOrderA = 1, + ImageChannelOrderRG = 2, + ImageChannelOrderRA = 3, + ImageChannelOrderRGB = 4, + ImageChannelOrderRGBA = 5, + ImageChannelOrderBGRA = 6, + ImageChannelOrderARGB = 7, + ImageChannelOrderIntensity = 8, + ImageChannelOrderLuminance = 9, + ImageChannelOrderRx = 10, + ImageChannelOrderRGx = 11, + ImageChannelOrderRGBx = 12, + ImageChannelOrderDepth = 13, + ImageChannelOrderDepthStencil = 14, + ImageChannelOrdersRGB = 15, + ImageChannelOrdersRGBx = 16, + ImageChannelOrdersRGBA = 17, + ImageChannelOrdersBGRA = 18, + ImageChannelOrderABGR = 19, + ImageChannelOrderMax = 0x7fffffff, +}; + +enum ImageChannelDataType { + ImageChannelDataTypeSnormInt8 = 0, + ImageChannelDataTypeSnormInt16 = 1, + ImageChannelDataTypeUnormInt8 = 2, + ImageChannelDataTypeUnormInt16 = 3, + ImageChannelDataTypeUnormShort565 = 4, + ImageChannelDataTypeUnormShort555 = 5, + ImageChannelDataTypeUnormInt101010 = 6, + ImageChannelDataTypeSignedInt8 = 7, + ImageChannelDataTypeSignedInt16 = 8, + ImageChannelDataTypeSignedInt32 = 9, + ImageChannelDataTypeUnsignedInt8 = 10, + ImageChannelDataTypeUnsignedInt16 = 11, + ImageChannelDataTypeUnsignedInt32 = 12, + ImageChannelDataTypeHalfFloat = 13, + ImageChannelDataTypeFloat = 14, + ImageChannelDataTypeUnormInt24 = 15, + ImageChannelDataTypeUnormInt101010_2 = 16, + ImageChannelDataTypeMax = 0x7fffffff, +}; + +enum ImageOperandsShift { + ImageOperandsBiasShift = 0, + ImageOperandsLodShift = 1, + ImageOperandsGradShift = 2, + ImageOperandsConstOffsetShift = 3, + ImageOperandsOffsetShift = 4, + 
ImageOperandsConstOffsetsShift = 5, + ImageOperandsSampleShift = 6, + ImageOperandsMinLodShift = 7, + ImageOperandsMax = 0x7fffffff, +}; + +enum ImageOperandsMask { + ImageOperandsMaskNone = 0, + ImageOperandsBiasMask = 0x00000001, + ImageOperandsLodMask = 0x00000002, + ImageOperandsGradMask = 0x00000004, + ImageOperandsConstOffsetMask = 0x00000008, + ImageOperandsOffsetMask = 0x00000010, + ImageOperandsConstOffsetsMask = 0x00000020, + ImageOperandsSampleMask = 0x00000040, + ImageOperandsMinLodMask = 0x00000080, +}; + +enum FPFastMathModeShift { + FPFastMathModeNotNaNShift = 0, + FPFastMathModeNotInfShift = 1, + FPFastMathModeNSZShift = 2, + FPFastMathModeAllowRecipShift = 3, + FPFastMathModeFastShift = 4, + FPFastMathModeMax = 0x7fffffff, +}; + +enum FPFastMathModeMask { + FPFastMathModeMaskNone = 0, + FPFastMathModeNotNaNMask = 0x00000001, + FPFastMathModeNotInfMask = 0x00000002, + FPFastMathModeNSZMask = 0x00000004, + FPFastMathModeAllowRecipMask = 0x00000008, + FPFastMathModeFastMask = 0x00000010, +}; + +enum FPRoundingMode { + FPRoundingModeRTE = 0, + FPRoundingModeRTZ = 1, + FPRoundingModeRTP = 2, + FPRoundingModeRTN = 3, + FPRoundingModeMax = 0x7fffffff, +}; + +enum LinkageType { + LinkageTypeExport = 0, + LinkageTypeImport = 1, + LinkageTypeMax = 0x7fffffff, +}; + +enum AccessQualifier { + AccessQualifierReadOnly = 0, + AccessQualifierWriteOnly = 1, + AccessQualifierReadWrite = 2, + AccessQualifierMax = 0x7fffffff, +}; + +enum FunctionParameterAttribute { + FunctionParameterAttributeZext = 0, + FunctionParameterAttributeSext = 1, + FunctionParameterAttributeByVal = 2, + FunctionParameterAttributeSret = 3, + FunctionParameterAttributeNoAlias = 4, + FunctionParameterAttributeNoCapture = 5, + FunctionParameterAttributeNoWrite = 6, + FunctionParameterAttributeNoReadWrite = 7, + FunctionParameterAttributeMax = 0x7fffffff, +}; + +enum Decoration { + DecorationRelaxedPrecision = 0, + DecorationSpecId = 1, + DecorationBlock = 2, + DecorationBufferBlock = 3, + DecorationRowMajor = 4, + DecorationColMajor = 5, + DecorationArrayStride = 6, + DecorationMatrixStride = 7, + DecorationGLSLShared = 8, + DecorationGLSLPacked = 9, + DecorationCPacked = 10, + DecorationBuiltIn = 11, + DecorationNoPerspective = 13, + DecorationFlat = 14, + DecorationPatch = 15, + DecorationCentroid = 16, + DecorationSample = 17, + DecorationInvariant = 18, + DecorationRestrict = 19, + DecorationAliased = 20, + DecorationVolatile = 21, + DecorationConstant = 22, + DecorationCoherent = 23, + DecorationNonWritable = 24, + DecorationNonReadable = 25, + DecorationUniform = 26, + DecorationSaturatedConversion = 28, + DecorationStream = 29, + DecorationLocation = 30, + DecorationComponent = 31, + DecorationIndex = 32, + DecorationBinding = 33, + DecorationDescriptorSet = 34, + DecorationOffset = 35, + DecorationXfbBuffer = 36, + DecorationXfbStride = 37, + DecorationFuncParamAttr = 38, + DecorationFPRoundingMode = 39, + DecorationFPFastMathMode = 40, + DecorationLinkageAttributes = 41, + DecorationNoContraction = 42, + DecorationInputAttachmentIndex = 43, + DecorationAlignment = 44, + DecorationMaxByteOffset = 45, + DecorationAlignmentId = 46, + DecorationMaxByteOffsetId = 47, + DecorationExplicitInterpAMD = 4999, + DecorationOverrideCoverageNV = 5248, + DecorationPassthroughNV = 5250, + DecorationViewportRelativeNV = 5252, + DecorationSecondaryViewportRelativeNV = 5256, + DecorationHlslCounterBufferGOOGLE = 5634, + DecorationHlslSemanticGOOGLE = 5635, + DecorationMax = 0x7fffffff, +}; + +enum BuiltIn { + BuiltInPosition = 
0, + BuiltInPointSize = 1, + BuiltInClipDistance = 3, + BuiltInCullDistance = 4, + BuiltInVertexId = 5, + BuiltInInstanceId = 6, + BuiltInPrimitiveId = 7, + BuiltInInvocationId = 8, + BuiltInLayer = 9, + BuiltInViewportIndex = 10, + BuiltInTessLevelOuter = 11, + BuiltInTessLevelInner = 12, + BuiltInTessCoord = 13, + BuiltInPatchVertices = 14, + BuiltInFragCoord = 15, + BuiltInPointCoord = 16, + BuiltInFrontFacing = 17, + BuiltInSampleId = 18, + BuiltInSamplePosition = 19, + BuiltInSampleMask = 20, + BuiltInFragDepth = 22, + BuiltInHelperInvocation = 23, + BuiltInNumWorkgroups = 24, + BuiltInWorkgroupSize = 25, + BuiltInWorkgroupId = 26, + BuiltInLocalInvocationId = 27, + BuiltInGlobalInvocationId = 28, + BuiltInLocalInvocationIndex = 29, + BuiltInWorkDim = 30, + BuiltInGlobalSize = 31, + BuiltInEnqueuedWorkgroupSize = 32, + BuiltInGlobalOffset = 33, + BuiltInGlobalLinearId = 34, + BuiltInSubgroupSize = 36, + BuiltInSubgroupMaxSize = 37, + BuiltInNumSubgroups = 38, + BuiltInNumEnqueuedSubgroups = 39, + BuiltInSubgroupId = 40, + BuiltInSubgroupLocalInvocationId = 41, + BuiltInVertexIndex = 42, + BuiltInInstanceIndex = 43, + BuiltInSubgroupEqMask = 4416, + BuiltInSubgroupEqMaskKHR = 4416, + BuiltInSubgroupGeMask = 4417, + BuiltInSubgroupGeMaskKHR = 4417, + BuiltInSubgroupGtMask = 4418, + BuiltInSubgroupGtMaskKHR = 4418, + BuiltInSubgroupLeMask = 4419, + BuiltInSubgroupLeMaskKHR = 4419, + BuiltInSubgroupLtMask = 4420, + BuiltInSubgroupLtMaskKHR = 4420, + BuiltInBaseVertex = 4424, + BuiltInBaseInstance = 4425, + BuiltInDrawIndex = 4426, + BuiltInDeviceIndex = 4438, + BuiltInViewIndex = 4440, + BuiltInBaryCoordNoPerspAMD = 4992, + BuiltInBaryCoordNoPerspCentroidAMD = 4993, + BuiltInBaryCoordNoPerspSampleAMD = 4994, + BuiltInBaryCoordSmoothAMD = 4995, + BuiltInBaryCoordSmoothCentroidAMD = 4996, + BuiltInBaryCoordSmoothSampleAMD = 4997, + BuiltInBaryCoordPullModelAMD = 4998, + BuiltInFragStencilRefEXT = 5014, + BuiltInViewportMaskNV = 5253, + BuiltInSecondaryPositionNV = 5257, + BuiltInSecondaryViewportMaskNV = 5258, + BuiltInPositionPerViewNV = 5261, + BuiltInViewportMaskPerViewNV = 5262, + BuiltInFullyCoveredEXT = 5264, + BuiltInMax = 0x7fffffff, +}; + +enum SelectionControlShift { + SelectionControlFlattenShift = 0, + SelectionControlDontFlattenShift = 1, + SelectionControlMax = 0x7fffffff, +}; + +enum SelectionControlMask { + SelectionControlMaskNone = 0, + SelectionControlFlattenMask = 0x00000001, + SelectionControlDontFlattenMask = 0x00000002, +}; + +enum LoopControlShift { + LoopControlUnrollShift = 0, + LoopControlDontUnrollShift = 1, + LoopControlDependencyInfiniteShift = 2, + LoopControlDependencyLengthShift = 3, + LoopControlMax = 0x7fffffff, +}; + +enum LoopControlMask { + LoopControlMaskNone = 0, + LoopControlUnrollMask = 0x00000001, + LoopControlDontUnrollMask = 0x00000002, + LoopControlDependencyInfiniteMask = 0x00000004, + LoopControlDependencyLengthMask = 0x00000008, +}; + +enum FunctionControlShift { + FunctionControlInlineShift = 0, + FunctionControlDontInlineShift = 1, + FunctionControlPureShift = 2, + FunctionControlConstShift = 3, + FunctionControlMax = 0x7fffffff, +}; + +enum FunctionControlMask { + FunctionControlMaskNone = 0, + FunctionControlInlineMask = 0x00000001, + FunctionControlDontInlineMask = 0x00000002, + FunctionControlPureMask = 0x00000004, + FunctionControlConstMask = 0x00000008, +}; + +enum MemorySemanticsShift { + MemorySemanticsAcquireShift = 1, + MemorySemanticsReleaseShift = 2, + MemorySemanticsAcquireReleaseShift = 3, + 
MemorySemanticsSequentiallyConsistentShift = 4, + MemorySemanticsUniformMemoryShift = 6, + MemorySemanticsSubgroupMemoryShift = 7, + MemorySemanticsWorkgroupMemoryShift = 8, + MemorySemanticsCrossWorkgroupMemoryShift = 9, + MemorySemanticsAtomicCounterMemoryShift = 10, + MemorySemanticsImageMemoryShift = 11, + MemorySemanticsMax = 0x7fffffff, +}; + +enum MemorySemanticsMask { + MemorySemanticsMaskNone = 0, + MemorySemanticsAcquireMask = 0x00000002, + MemorySemanticsReleaseMask = 0x00000004, + MemorySemanticsAcquireReleaseMask = 0x00000008, + MemorySemanticsSequentiallyConsistentMask = 0x00000010, + MemorySemanticsUniformMemoryMask = 0x00000040, + MemorySemanticsSubgroupMemoryMask = 0x00000080, + MemorySemanticsWorkgroupMemoryMask = 0x00000100, + MemorySemanticsCrossWorkgroupMemoryMask = 0x00000200, + MemorySemanticsAtomicCounterMemoryMask = 0x00000400, + MemorySemanticsImageMemoryMask = 0x00000800, +}; + +enum MemoryAccessShift { + MemoryAccessVolatileShift = 0, + MemoryAccessAlignedShift = 1, + MemoryAccessNontemporalShift = 2, + MemoryAccessMax = 0x7fffffff, +}; + +enum MemoryAccessMask { + MemoryAccessMaskNone = 0, + MemoryAccessVolatileMask = 0x00000001, + MemoryAccessAlignedMask = 0x00000002, + MemoryAccessNontemporalMask = 0x00000004, +}; + +enum Scope { + ScopeCrossDevice = 0, + ScopeDevice = 1, + ScopeWorkgroup = 2, + ScopeSubgroup = 3, + ScopeInvocation = 4, + ScopeMax = 0x7fffffff, +}; + +enum GroupOperation { + GroupOperationReduce = 0, + GroupOperationInclusiveScan = 1, + GroupOperationExclusiveScan = 2, + GroupOperationClusteredReduce = 3, + GroupOperationMax = 0x7fffffff, +}; + +enum KernelEnqueueFlags { + KernelEnqueueFlagsNoWait = 0, + KernelEnqueueFlagsWaitKernel = 1, + KernelEnqueueFlagsWaitWorkGroup = 2, + KernelEnqueueFlagsMax = 0x7fffffff, +}; + +enum KernelProfilingInfoShift { + KernelProfilingInfoCmdExecTimeShift = 0, + KernelProfilingInfoMax = 0x7fffffff, +}; + +enum KernelProfilingInfoMask { + KernelProfilingInfoMaskNone = 0, + KernelProfilingInfoCmdExecTimeMask = 0x00000001, +}; + +enum Capability { + CapabilityMatrix = 0, + CapabilityShader = 1, + CapabilityGeometry = 2, + CapabilityTessellation = 3, + CapabilityAddresses = 4, + CapabilityLinkage = 5, + CapabilityKernel = 6, + CapabilityVector16 = 7, + CapabilityFloat16Buffer = 8, + CapabilityFloat16 = 9, + CapabilityFloat64 = 10, + CapabilityInt64 = 11, + CapabilityInt64Atomics = 12, + CapabilityImageBasic = 13, + CapabilityImageReadWrite = 14, + CapabilityImageMipmap = 15, + CapabilityPipes = 17, + CapabilityGroups = 18, + CapabilityDeviceEnqueue = 19, + CapabilityLiteralSampler = 20, + CapabilityAtomicStorage = 21, + CapabilityInt16 = 22, + CapabilityTessellationPointSize = 23, + CapabilityGeometryPointSize = 24, + CapabilityImageGatherExtended = 25, + CapabilityStorageImageMultisample = 27, + CapabilityUniformBufferArrayDynamicIndexing = 28, + CapabilitySampledImageArrayDynamicIndexing = 29, + CapabilityStorageBufferArrayDynamicIndexing = 30, + CapabilityStorageImageArrayDynamicIndexing = 31, + CapabilityClipDistance = 32, + CapabilityCullDistance = 33, + CapabilityImageCubeArray = 34, + CapabilitySampleRateShading = 35, + CapabilityImageRect = 36, + CapabilitySampledRect = 37, + CapabilityGenericPointer = 38, + CapabilityInt8 = 39, + CapabilityInputAttachment = 40, + CapabilitySparseResidency = 41, + CapabilityMinLod = 42, + CapabilitySampled1D = 43, + CapabilityImage1D = 44, + CapabilitySampledCubeArray = 45, + CapabilitySampledBuffer = 46, + CapabilityImageBuffer = 47, + CapabilityImageMSArray = 48, + 
CapabilityStorageImageExtendedFormats = 49, + CapabilityImageQuery = 50, + CapabilityDerivativeControl = 51, + CapabilityInterpolationFunction = 52, + CapabilityTransformFeedback = 53, + CapabilityGeometryStreams = 54, + CapabilityStorageImageReadWithoutFormat = 55, + CapabilityStorageImageWriteWithoutFormat = 56, + CapabilityMultiViewport = 57, + CapabilitySubgroupDispatch = 58, + CapabilityNamedBarrier = 59, + CapabilityPipeStorage = 60, + CapabilityGroupNonUniform = 61, + CapabilityGroupNonUniformVote = 62, + CapabilityGroupNonUniformArithmetic = 63, + CapabilityGroupNonUniformBallot = 64, + CapabilityGroupNonUniformShuffle = 65, + CapabilityGroupNonUniformShuffleRelative = 66, + CapabilityGroupNonUniformClustered = 67, + CapabilityGroupNonUniformQuad = 68, + CapabilitySubgroupBallotKHR = 4423, + CapabilityDrawParameters = 4427, + CapabilitySubgroupVoteKHR = 4431, + CapabilityStorageBuffer16BitAccess = 4433, + CapabilityStorageUniformBufferBlock16 = 4433, + CapabilityStorageUniform16 = 4434, + CapabilityUniformAndStorageBuffer16BitAccess = 4434, + CapabilityStoragePushConstant16 = 4435, + CapabilityStorageInputOutput16 = 4436, + CapabilityDeviceGroup = 4437, + CapabilityMultiView = 4439, + CapabilityVariablePointersStorageBuffer = 4441, + CapabilityVariablePointers = 4442, + CapabilityAtomicStorageOps = 4445, + CapabilitySampleMaskPostDepthCoverage = 4447, + CapabilityFloat16ImageAMD = 5008, + CapabilityImageGatherBiasLodAMD = 5009, + CapabilityFragmentMaskAMD = 5010, + CapabilityStencilExportEXT = 5013, + CapabilityImageReadWriteLodAMD = 5015, + CapabilitySampleMaskOverrideCoverageNV = 5249, + CapabilityGeometryShaderPassthroughNV = 5251, + CapabilityShaderViewportIndexLayerEXT = 5254, + CapabilityShaderViewportIndexLayerNV = 5254, + CapabilityShaderViewportMaskNV = 5255, + CapabilityShaderStereoViewNV = 5259, + CapabilityPerViewAttributesNV = 5260, + CapabilityFragmentFullyCoveredEXT = 5265, + CapabilitySubgroupShuffleINTEL = 5568, + CapabilitySubgroupBufferBlockIOINTEL = 5569, + CapabilitySubgroupImageBlockIOINTEL = 5570, + CapabilityMax = 0x7fffffff, +}; + +enum Op { + OpNop = 0, + OpUndef = 1, + OpSourceContinued = 2, + OpSource = 3, + OpSourceExtension = 4, + OpName = 5, + OpMemberName = 6, + OpString = 7, + OpLine = 8, + OpExtension = 10, + OpExtInstImport = 11, + OpExtInst = 12, + OpMemoryModel = 14, + OpEntryPoint = 15, + OpExecutionMode = 16, + OpCapability = 17, + OpTypeVoid = 19, + OpTypeBool = 20, + OpTypeInt = 21, + OpTypeFloat = 22, + OpTypeVector = 23, + OpTypeMatrix = 24, + OpTypeImage = 25, + OpTypeSampler = 26, + OpTypeSampledImage = 27, + OpTypeArray = 28, + OpTypeRuntimeArray = 29, + OpTypeStruct = 30, + OpTypeOpaque = 31, + OpTypePointer = 32, + OpTypeFunction = 33, + OpTypeEvent = 34, + OpTypeDeviceEvent = 35, + OpTypeReserveId = 36, + OpTypeQueue = 37, + OpTypePipe = 38, + OpTypeForwardPointer = 39, + OpConstantTrue = 41, + OpConstantFalse = 42, + OpConstant = 43, + OpConstantComposite = 44, + OpConstantSampler = 45, + OpConstantNull = 46, + OpSpecConstantTrue = 48, + OpSpecConstantFalse = 49, + OpSpecConstant = 50, + OpSpecConstantComposite = 51, + OpSpecConstantOp = 52, + OpFunction = 54, + OpFunctionParameter = 55, + OpFunctionEnd = 56, + OpFunctionCall = 57, + OpVariable = 59, + OpImageTexelPointer = 60, + OpLoad = 61, + OpStore = 62, + OpCopyMemory = 63, + OpCopyMemorySized = 64, + OpAccessChain = 65, + OpInBoundsAccessChain = 66, + OpPtrAccessChain = 67, + OpArrayLength = 68, + OpGenericPtrMemSemantics = 69, + OpInBoundsPtrAccessChain = 70, + OpDecorate = 
71, + OpMemberDecorate = 72, + OpDecorationGroup = 73, + OpGroupDecorate = 74, + OpGroupMemberDecorate = 75, + OpVectorExtractDynamic = 77, + OpVectorInsertDynamic = 78, + OpVectorShuffle = 79, + OpCompositeConstruct = 80, + OpCompositeExtract = 81, + OpCompositeInsert = 82, + OpCopyObject = 83, + OpTranspose = 84, + OpSampledImage = 86, + OpImageSampleImplicitLod = 87, + OpImageSampleExplicitLod = 88, + OpImageSampleDrefImplicitLod = 89, + OpImageSampleDrefExplicitLod = 90, + OpImageSampleProjImplicitLod = 91, + OpImageSampleProjExplicitLod = 92, + OpImageSampleProjDrefImplicitLod = 93, + OpImageSampleProjDrefExplicitLod = 94, + OpImageFetch = 95, + OpImageGather = 96, + OpImageDrefGather = 97, + OpImageRead = 98, + OpImageWrite = 99, + OpImage = 100, + OpImageQueryFormat = 101, + OpImageQueryOrder = 102, + OpImageQuerySizeLod = 103, + OpImageQuerySize = 104, + OpImageQueryLod = 105, + OpImageQueryLevels = 106, + OpImageQuerySamples = 107, + OpConvertFToU = 109, + OpConvertFToS = 110, + OpConvertSToF = 111, + OpConvertUToF = 112, + OpUConvert = 113, + OpSConvert = 114, + OpFConvert = 115, + OpQuantizeToF16 = 116, + OpConvertPtrToU = 117, + OpSatConvertSToU = 118, + OpSatConvertUToS = 119, + OpConvertUToPtr = 120, + OpPtrCastToGeneric = 121, + OpGenericCastToPtr = 122, + OpGenericCastToPtrExplicit = 123, + OpBitcast = 124, + OpSNegate = 126, + OpFNegate = 127, + OpIAdd = 128, + OpFAdd = 129, + OpISub = 130, + OpFSub = 131, + OpIMul = 132, + OpFMul = 133, + OpUDiv = 134, + OpSDiv = 135, + OpFDiv = 136, + OpUMod = 137, + OpSRem = 138, + OpSMod = 139, + OpFRem = 140, + OpFMod = 141, + OpVectorTimesScalar = 142, + OpMatrixTimesScalar = 143, + OpVectorTimesMatrix = 144, + OpMatrixTimesVector = 145, + OpMatrixTimesMatrix = 146, + OpOuterProduct = 147, + OpDot = 148, + OpIAddCarry = 149, + OpISubBorrow = 150, + OpUMulExtended = 151, + OpSMulExtended = 152, + OpAny = 154, + OpAll = 155, + OpIsNan = 156, + OpIsInf = 157, + OpIsFinite = 158, + OpIsNormal = 159, + OpSignBitSet = 160, + OpLessOrGreater = 161, + OpOrdered = 162, + OpUnordered = 163, + OpLogicalEqual = 164, + OpLogicalNotEqual = 165, + OpLogicalOr = 166, + OpLogicalAnd = 167, + OpLogicalNot = 168, + OpSelect = 169, + OpIEqual = 170, + OpINotEqual = 171, + OpUGreaterThan = 172, + OpSGreaterThan = 173, + OpUGreaterThanEqual = 174, + OpSGreaterThanEqual = 175, + OpULessThan = 176, + OpSLessThan = 177, + OpULessThanEqual = 178, + OpSLessThanEqual = 179, + OpFOrdEqual = 180, + OpFUnordEqual = 181, + OpFOrdNotEqual = 182, + OpFUnordNotEqual = 183, + OpFOrdLessThan = 184, + OpFUnordLessThan = 185, + OpFOrdGreaterThan = 186, + OpFUnordGreaterThan = 187, + OpFOrdLessThanEqual = 188, + OpFUnordLessThanEqual = 189, + OpFOrdGreaterThanEqual = 190, + OpFUnordGreaterThanEqual = 191, + OpShiftRightLogical = 194, + OpShiftRightArithmetic = 195, + OpShiftLeftLogical = 196, + OpBitwiseOr = 197, + OpBitwiseXor = 198, + OpBitwiseAnd = 199, + OpNot = 200, + OpBitFieldInsert = 201, + OpBitFieldSExtract = 202, + OpBitFieldUExtract = 203, + OpBitReverse = 204, + OpBitCount = 205, + OpDPdx = 207, + OpDPdy = 208, + OpFwidth = 209, + OpDPdxFine = 210, + OpDPdyFine = 211, + OpFwidthFine = 212, + OpDPdxCoarse = 213, + OpDPdyCoarse = 214, + OpFwidthCoarse = 215, + OpEmitVertex = 218, + OpEndPrimitive = 219, + OpEmitStreamVertex = 220, + OpEndStreamPrimitive = 221, + OpControlBarrier = 224, + OpMemoryBarrier = 225, + OpAtomicLoad = 227, + OpAtomicStore = 228, + OpAtomicExchange = 229, + OpAtomicCompareExchange = 230, + OpAtomicCompareExchangeWeak = 231, + 
OpAtomicIIncrement = 232, + OpAtomicIDecrement = 233, + OpAtomicIAdd = 234, + OpAtomicISub = 235, + OpAtomicSMin = 236, + OpAtomicUMin = 237, + OpAtomicSMax = 238, + OpAtomicUMax = 239, + OpAtomicAnd = 240, + OpAtomicOr = 241, + OpAtomicXor = 242, + OpPhi = 245, + OpLoopMerge = 246, + OpSelectionMerge = 247, + OpLabel = 248, + OpBranch = 249, + OpBranchConditional = 250, + OpSwitch = 251, + OpKill = 252, + OpReturn = 253, + OpReturnValue = 254, + OpUnreachable = 255, + OpLifetimeStart = 256, + OpLifetimeStop = 257, + OpGroupAsyncCopy = 259, + OpGroupWaitEvents = 260, + OpGroupAll = 261, + OpGroupAny = 262, + OpGroupBroadcast = 263, + OpGroupIAdd = 264, + OpGroupFAdd = 265, + OpGroupFMin = 266, + OpGroupUMin = 267, + OpGroupSMin = 268, + OpGroupFMax = 269, + OpGroupUMax = 270, + OpGroupSMax = 271, + OpReadPipe = 274, + OpWritePipe = 275, + OpReservedReadPipe = 276, + OpReservedWritePipe = 277, + OpReserveReadPipePackets = 278, + OpReserveWritePipePackets = 279, + OpCommitReadPipe = 280, + OpCommitWritePipe = 281, + OpIsValidReserveId = 282, + OpGetNumPipePackets = 283, + OpGetMaxPipePackets = 284, + OpGroupReserveReadPipePackets = 285, + OpGroupReserveWritePipePackets = 286, + OpGroupCommitReadPipe = 287, + OpGroupCommitWritePipe = 288, + OpEnqueueMarker = 291, + OpEnqueueKernel = 292, + OpGetKernelNDrangeSubGroupCount = 293, + OpGetKernelNDrangeMaxSubGroupSize = 294, + OpGetKernelWorkGroupSize = 295, + OpGetKernelPreferredWorkGroupSizeMultiple = 296, + OpRetainEvent = 297, + OpReleaseEvent = 298, + OpCreateUserEvent = 299, + OpIsValidEvent = 300, + OpSetUserEventStatus = 301, + OpCaptureEventProfilingInfo = 302, + OpGetDefaultQueue = 303, + OpBuildNDRange = 304, + OpImageSparseSampleImplicitLod = 305, + OpImageSparseSampleExplicitLod = 306, + OpImageSparseSampleDrefImplicitLod = 307, + OpImageSparseSampleDrefExplicitLod = 308, + OpImageSparseSampleProjImplicitLod = 309, + OpImageSparseSampleProjExplicitLod = 310, + OpImageSparseSampleProjDrefImplicitLod = 311, + OpImageSparseSampleProjDrefExplicitLod = 312, + OpImageSparseFetch = 313, + OpImageSparseGather = 314, + OpImageSparseDrefGather = 315, + OpImageSparseTexelsResident = 316, + OpNoLine = 317, + OpAtomicFlagTestAndSet = 318, + OpAtomicFlagClear = 319, + OpImageSparseRead = 320, + OpSizeOf = 321, + OpTypePipeStorage = 322, + OpConstantPipeStorage = 323, + OpCreatePipeFromPipeStorage = 324, + OpGetKernelLocalSizeForSubgroupCount = 325, + OpGetKernelMaxNumSubgroups = 326, + OpTypeNamedBarrier = 327, + OpNamedBarrierInitialize = 328, + OpMemoryNamedBarrier = 329, + OpModuleProcessed = 330, + OpExecutionModeId = 331, + OpDecorateId = 332, + OpGroupNonUniformElect = 333, + OpGroupNonUniformAll = 334, + OpGroupNonUniformAny = 335, + OpGroupNonUniformAllEqual = 336, + OpGroupNonUniformBroadcast = 337, + OpGroupNonUniformBroadcastFirst = 338, + OpGroupNonUniformBallot = 339, + OpGroupNonUniformInverseBallot = 340, + OpGroupNonUniformBallotBitExtract = 341, + OpGroupNonUniformBallotBitCount = 342, + OpGroupNonUniformBallotFindLSB = 343, + OpGroupNonUniformBallotFindMSB = 344, + OpGroupNonUniformShuffle = 345, + OpGroupNonUniformShuffleXor = 346, + OpGroupNonUniformShuffleUp = 347, + OpGroupNonUniformShuffleDown = 348, + OpGroupNonUniformIAdd = 349, + OpGroupNonUniformFAdd = 350, + OpGroupNonUniformIMul = 351, + OpGroupNonUniformFMul = 352, + OpGroupNonUniformSMin = 353, + OpGroupNonUniformUMin = 354, + OpGroupNonUniformFMin = 355, + OpGroupNonUniformSMax = 356, + OpGroupNonUniformUMax = 357, + OpGroupNonUniformFMax = 358, + 
OpGroupNonUniformBitwiseAnd = 359, + OpGroupNonUniformBitwiseOr = 360, + OpGroupNonUniformBitwiseXor = 361, + OpGroupNonUniformLogicalAnd = 362, + OpGroupNonUniformLogicalOr = 363, + OpGroupNonUniformLogicalXor = 364, + OpGroupNonUniformQuadBroadcast = 365, + OpGroupNonUniformQuadSwap = 366, + OpSubgroupBallotKHR = 4421, + OpSubgroupFirstInvocationKHR = 4422, + OpSubgroupAllKHR = 4428, + OpSubgroupAnyKHR = 4429, + OpSubgroupAllEqualKHR = 4430, + OpSubgroupReadInvocationKHR = 4432, + OpGroupIAddNonUniformAMD = 5000, + OpGroupFAddNonUniformAMD = 5001, + OpGroupFMinNonUniformAMD = 5002, + OpGroupUMinNonUniformAMD = 5003, + OpGroupSMinNonUniformAMD = 5004, + OpGroupFMaxNonUniformAMD = 5005, + OpGroupUMaxNonUniformAMD = 5006, + OpGroupSMaxNonUniformAMD = 5007, + OpFragmentMaskFetchAMD = 5011, + OpFragmentFetchAMD = 5012, + OpSubgroupShuffleINTEL = 5571, + OpSubgroupShuffleDownINTEL = 5572, + OpSubgroupShuffleUpINTEL = 5573, + OpSubgroupShuffleXorINTEL = 5574, + OpSubgroupBlockReadINTEL = 5575, + OpSubgroupBlockWriteINTEL = 5576, + OpSubgroupImageBlockReadINTEL = 5577, + OpSubgroupImageBlockWriteINTEL = 5578, + OpDecorateStringGOOGLE = 5632, + OpMemberDecorateStringGOOGLE = 5633, + OpMax = 0x7fffffff, +}; + +// Overload operator| for mask bit combining + +inline ImageOperandsMask operator|(ImageOperandsMask a, ImageOperandsMask b) { return ImageOperandsMask(unsigned(a) | unsigned(b)); } +inline FPFastMathModeMask operator|(FPFastMathModeMask a, FPFastMathModeMask b) { return FPFastMathModeMask(unsigned(a) | unsigned(b)); } +inline SelectionControlMask operator|(SelectionControlMask a, SelectionControlMask b) { return SelectionControlMask(unsigned(a) | unsigned(b)); } +inline LoopControlMask operator|(LoopControlMask a, LoopControlMask b) { return LoopControlMask(unsigned(a) | unsigned(b)); } +inline FunctionControlMask operator|(FunctionControlMask a, FunctionControlMask b) { return FunctionControlMask(unsigned(a) | unsigned(b)); } +inline MemorySemanticsMask operator|(MemorySemanticsMask a, MemorySemanticsMask b) { return MemorySemanticsMask(unsigned(a) | unsigned(b)); } +inline MemoryAccessMask operator|(MemoryAccessMask a, MemoryAccessMask b) { return MemoryAccessMask(unsigned(a) | unsigned(b)); } +inline KernelProfilingInfoMask operator|(KernelProfilingInfoMask a, KernelProfilingInfoMask b) { return KernelProfilingInfoMask(unsigned(a) | unsigned(b)); } + +} // end namespace spv + +#endif // #ifndef spirv_HPP + diff --git a/3rdparty/spirv-cross/spirv_cfg.cpp b/3rdparty/spirv-cross/spirv_cfg.cpp new file mode 100644 index 000000000..27792e01b --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cfg.cpp @@ -0,0 +1,229 @@ +/* + * Copyright 2016-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
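
The enums in spirv.hpp above come in two flavours: mutually exclusive values, and Mask enums that are plain bitfields with a parallel Shift enum giving each bit position; OpCodeMask and WordCountShift describe how every instruction's first word packs its opcode and word count. A minimal sketch of how those pieces are normally used, not part of the vendored sources, assuming the spirv.hpp added by this diff is on the include path (the three-word OpTypeVoid instruction is just an illustrative value):

#include <cstdint>
#include <cstdio>
#include "spirv.hpp" // added by this diff; assumed to be on the include path

int main()
{
	// Combine mask bits with the operator| overloads defined at the end of spirv.hpp.
	spv::ImageOperandsMask operands = spv::ImageOperandsBiasMask | spv::ImageOperandsLodMask;
	bool has_lod = (unsigned(operands) & unsigned(spv::ImageOperandsLodMask)) != 0;

	// The first word of a SPIR-V instruction holds the word count in the high
	// 16 bits and the opcode in the low 16 bits.
	uint32_t first_word = (3u << spv::WordCountShift) | spv::OpTypeVoid;
	unsigned opcode = first_word & spv::OpCodeMask;
	unsigned word_count = first_word >> spv::WordCountShift;
	printf("opcode %u, words %u, has_lod %d\n", opcode, word_count, int(has_lod));
	return 0;
}
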
+ */ + +#include "spirv_cfg.hpp" +#include "spirv_cross.hpp" +#include +#include + +using namespace std; + +namespace spirv_cross +{ +CFG::CFG(Compiler &compiler_, const SPIRFunction &func_) + : compiler(compiler_) + , func(func_) +{ + preceding_edges.resize(compiler.get_current_id_bound()); + succeeding_edges.resize(compiler.get_current_id_bound()); + visit_order.resize(compiler.get_current_id_bound()); + immediate_dominators.resize(compiler.get_current_id_bound()); + + build_post_order_visit_order(); + build_immediate_dominators(); +} + +uint32_t CFG::find_common_dominator(uint32_t a, uint32_t b) const +{ + while (a != b) + { + if (visit_order[a] < visit_order[b]) + a = immediate_dominators[a]; + else + b = immediate_dominators[b]; + } + return a; +} + +void CFG::build_immediate_dominators() +{ + // Traverse the post-order in reverse and build up the immediate dominator tree. + fill(begin(immediate_dominators), end(immediate_dominators), 0); + immediate_dominators[func.entry_block] = func.entry_block; + + for (auto i = post_order.size(); i; i--) + { + uint32_t block = post_order[i - 1]; + auto &pred = preceding_edges[block]; + if (pred.empty()) // This is for the entry block, but we've already set up the dominators. + continue; + + for (auto &edge : pred) + { + if (immediate_dominators[block]) + { + assert(immediate_dominators[edge]); + immediate_dominators[block] = find_common_dominator(block, edge); + } + else + immediate_dominators[block] = edge; + } + } +} + +bool CFG::is_back_edge(uint32_t to) const +{ + // We have a back edge if the visit order is set with the temporary magic value 0. + // Crossing edges will have already been recorded with a visit order. + return visit_order[to] == 0; +} + +bool CFG::post_order_visit(uint32_t block_id) +{ + // If we have already branched to this block (back edge), stop recursion. + // If our branches are back-edges, we do not record them. + // We have to record crossing edges however. + if (visit_order[block_id] >= 0) + return !is_back_edge(block_id); + + // Block back-edges from recursively revisiting ourselves. + visit_order[block_id] = 0; + + // First visit our branch targets. + auto &block = compiler.get(block_id); + switch (block.terminator) + { + case SPIRBlock::Direct: + if (post_order_visit(block.next_block)) + add_branch(block_id, block.next_block); + break; + + case SPIRBlock::Select: + if (post_order_visit(block.true_block)) + add_branch(block_id, block.true_block); + if (post_order_visit(block.false_block)) + add_branch(block_id, block.false_block); + break; + + case SPIRBlock::MultiSelect: + for (auto &target : block.cases) + { + if (post_order_visit(target.block)) + add_branch(block_id, target.block); + } + if (block.default_block && post_order_visit(block.default_block)) + add_branch(block_id, block.default_block); + break; + + default: + break; + } + + // If this is a loop header, add an implied branch to the merge target. + // This is needed to avoid annoying cases with do { ... } while(false) loops often generated by inliners. + // To the CFG, this is linear control flow, but we risk picking the do/while scope as our dominating block. + // This makes sure that if we are accessing a variable outside the do/while, we choose the loop header as dominator. + if (block.merge == SPIRBlock::MergeLoop) + add_branch(block_id, block.merge_block); + + // Then visit ourselves. Start counting at one, to let 0 be a magic value for testing back vs. crossing edges. 
+ visit_order[block_id] = ++visit_count; + post_order.push_back(block_id); + return true; +} + +void CFG::build_post_order_visit_order() +{ + uint32_t block = func.entry_block; + visit_count = 0; + fill(begin(visit_order), end(visit_order), -1); + post_order.clear(); + post_order_visit(block); +} + +void CFG::add_branch(uint32_t from, uint32_t to) +{ + const auto add_unique = [](vector &l, uint32_t value) { + auto itr = find(begin(l), end(l), value); + if (itr == end(l)) + l.push_back(value); + }; + add_unique(preceding_edges[to], from); + add_unique(succeeding_edges[from], to); +} + +DominatorBuilder::DominatorBuilder(const CFG &cfg_) + : cfg(cfg_) +{ +} + +void DominatorBuilder::add_block(uint32_t block) +{ + if (!cfg.get_immediate_dominator(block)) + { + // Unreachable block via the CFG, we will never emit this code anyways. + return; + } + + if (!dominator) + { + dominator = block; + return; + } + + if (block != dominator) + dominator = cfg.find_common_dominator(block, dominator); +} + +void DominatorBuilder::lift_continue_block_dominator() +{ + // It is possible for a continue block to be the dominator of a variable is only accessed inside the while block of a do-while loop. + // We cannot safely declare variables inside a continue block, so move any variable declared + // in a continue block to the entry block to simplify. + // It makes very little sense for a continue block to ever be a dominator, so fall back to the simplest + // solution. + + if (!dominator) + return; + + auto &block = cfg.get_compiler().get(dominator); + auto post_order = cfg.get_visit_order(dominator); + + // If we are branching to a block with a higher post-order traversal index (continue blocks), we have a problem + // since we cannot create sensible GLSL code for this, fallback to entry block. + bool back_edge_dominator = false; + switch (block.terminator) + { + case SPIRBlock::Direct: + if (cfg.get_visit_order(block.next_block) > post_order) + back_edge_dominator = true; + break; + + case SPIRBlock::Select: + if (cfg.get_visit_order(block.true_block) > post_order) + back_edge_dominator = true; + if (cfg.get_visit_order(block.false_block) > post_order) + back_edge_dominator = true; + break; + + case SPIRBlock::MultiSelect: + for (auto &target : block.cases) + { + if (cfg.get_visit_order(target.block) > post_order) + back_edge_dominator = true; + } + if (block.default_block && cfg.get_visit_order(block.default_block) > post_order) + back_edge_dominator = true; + break; + + default: + break; + } + + if (back_edge_dominator) + dominator = cfg.get_function().entry_block; +} +} // namespace spirv_cross diff --git a/3rdparty/spirv-cross/spirv_cfg.hpp b/3rdparty/spirv-cross/spirv_cfg.hpp new file mode 100644 index 000000000..d14cb2e47 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cfg.hpp @@ -0,0 +1,119 @@ +/* + * Copyright 2016-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
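
The heart of spirv_cfg.cpp above is the pairing of a post-order numbering (visit_order, where the entry block ends up with the highest index) with an immediate-dominator array, so find_common_dominator simply climbs the dominator chain of whichever block has the smaller visit index until both sides meet. A standalone sketch of that walk on a hand-built diamond CFG; plain vectors stand in for the real Compiler/SPIRFunction plumbing and the numbers are invented for the example:

#include <cstdint>
#include <cstdio>
#include <vector>

static uint32_t common_dominator(const std::vector<uint32_t> &idom,
                                 const std::vector<int> &visit_order,
                                 uint32_t a, uint32_t b)
{
	// Same loop as CFG::find_common_dominator: walk up whichever side was
	// finished earlier in the post-order traversal.
	while (a != b)
	{
		if (visit_order[a] < visit_order[b])
			a = idom[a];
		else
			b = idom[b];
	}
	return a;
}

int main()
{
	// Diamond CFG: 0 -> {1, 2}, 1 -> 3, 2 -> 3. Block 0 is the entry and
	// immediately dominates everything else, including the join block 3.
	std::vector<uint32_t> idom = { 0, 0, 0, 0 };
	std::vector<int> visit_order = { 4, 2, 3, 1 }; // one possible numbering; the entry is highest
	printf("dom(1, 2) = %u\n", common_dominator(idom, visit_order, 1, 2)); // prints 0
	return 0;
}
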
+ */ + +#ifndef SPIRV_CROSS_CFG_HPP +#define SPIRV_CROSS_CFG_HPP + +#include "spirv_common.hpp" +#include + +namespace spirv_cross +{ +class Compiler; +class CFG +{ +public: + CFG(Compiler &compiler, const SPIRFunction &function); + + Compiler &get_compiler() + { + return compiler; + } + + const Compiler &get_compiler() const + { + return compiler; + } + + const SPIRFunction &get_function() const + { + return func; + } + + uint32_t get_immediate_dominator(uint32_t block) const + { + return immediate_dominators[block]; + } + + uint32_t get_visit_order(uint32_t block) const + { + int v = visit_order[block]; + assert(v > 0); + return uint32_t(v); + } + + uint32_t find_common_dominator(uint32_t a, uint32_t b) const; + + const std::vector &get_preceding_edges(uint32_t block) const + { + return preceding_edges[block]; + } + + const std::vector &get_succeeding_edges(uint32_t block) const + { + return succeeding_edges[block]; + } + + template + void walk_from(std::unordered_set &seen_blocks, uint32_t block, const Op &op) const + { + if (seen_blocks.count(block)) + return; + seen_blocks.insert(block); + + op(block); + for (auto b : succeeding_edges[block]) + walk_from(seen_blocks, b, op); + } + +private: + Compiler &compiler; + const SPIRFunction &func; + std::vector> preceding_edges; + std::vector> succeeding_edges; + std::vector immediate_dominators; + std::vector visit_order; + std::vector post_order; + + void add_branch(uint32_t from, uint32_t to); + void build_post_order_visit_order(); + void build_immediate_dominators(); + bool post_order_visit(uint32_t block); + uint32_t visit_count = 0; + + bool is_back_edge(uint32_t to) const; +}; + +class DominatorBuilder +{ +public: + DominatorBuilder(const CFG &cfg); + + void add_block(uint32_t block); + uint32_t get_dominator() const + { + return dominator; + } + + void lift_continue_block_dominator(); + +private: + const CFG &cfg; + uint32_t dominator = 0; +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_common.hpp b/3rdparty/spirv-cross/spirv_common.hpp new file mode 100644 index 000000000..a62a1f870 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_common.hpp @@ -0,0 +1,1428 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
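
CFG::walk_from in spirv_cfg.hpp above is a plain depth-first walk over the successor edge lists with a visited set, invoking the functor once per reachable block. The same pattern written against plain containers so it runs on its own; the block ids and toy edge list are invented for the example:

#include <cstdint>
#include <cstdio>
#include <unordered_set>
#include <vector>

template <typename Op>
static void walk_from(std::unordered_set<uint32_t> &seen,
                      const std::vector<std::vector<uint32_t>> &successors,
                      uint32_t block, const Op &op)
{
	// Visit each reachable block exactly once; recursive calls share the seen set.
	if (seen.count(block))
		return;
	seen.insert(block);
	op(block);
	for (uint32_t next : successors[block])
		walk_from(seen, successors, next, op);
}

int main()
{
	// 0 -> {1, 2}, 1 -> 3, 2 -> 3: block 3 is reported only once.
	std::vector<std::vector<uint32_t>> successors = { { 1, 2 }, { 3 }, { 3 }, {} };
	std::unordered_set<uint32_t> seen;
	walk_from(seen, successors, 0, [](uint32_t block) { printf("visit %u\n", block); });
	return 0;
}
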
+ */ + +#ifndef SPIRV_CROSS_COMMON_HPP +#define SPIRV_CROSS_COMMON_HPP + +#include "spirv.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace spirv_cross +{ + +#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS +#ifndef _MSC_VER +[[noreturn]] +#endif +inline void +report_and_abort(const std::string &msg) +{ +#ifdef NDEBUG + (void)msg; +#else + fprintf(stderr, "There was a compiler error: %s\n", msg.c_str()); +#endif + fflush(stderr); + abort(); +} + +#define SPIRV_CROSS_THROW(x) report_and_abort(x) +#else +class CompilerError : public std::runtime_error +{ +public: + CompilerError(const std::string &str) + : std::runtime_error(str) + { + } +}; + +#define SPIRV_CROSS_THROW(x) throw CompilerError(x) +#endif + +//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE + +// MSVC 2013 does not have noexcept. We need this for Variant to get move constructor to work correctly +// instead of copy constructor. +// MSVC 2013 ignores that move constructors cannot throw in std::vector, so just don't define it. +#if defined(_MSC_VER) && _MSC_VER < 1900 +#define SPIRV_CROSS_NOEXCEPT +#else +#define SPIRV_CROSS_NOEXCEPT noexcept +#endif + +#if __cplusplus >= 201402l +#define SPIRV_CROSS_DEPRECATED(reason) [[deprecated(reason)]] +#elif defined(__GNUC__) +#define SPIRV_CROSS_DEPRECATED(reason) __attribute__((deprecated)) +#elif defined(_MSC_VER) +#define SPIRV_CROSS_DEPRECATED(reason) __declspec(deprecated(reason)) +#else +#define SPIRV_CROSS_DEPRECATED(reason) +#endif + +namespace inner +{ +template +void join_helper(std::ostringstream &stream, T &&t) +{ + stream << std::forward(t); +} + +template +void join_helper(std::ostringstream &stream, T &&t, Ts &&... ts) +{ + stream << std::forward(t); + join_helper(stream, std::forward(ts)...); +} +} // namespace inner + +class Bitset +{ +public: + Bitset() = default; + explicit inline Bitset(uint64_t lower_) + : lower(lower_) + { + } + + inline bool get(uint32_t bit) const + { + if (bit < 64) + return (lower & (1ull << bit)) != 0; + else + return higher.count(bit) != 0; + } + + inline void set(uint32_t bit) + { + if (bit < 64) + lower |= 1ull << bit; + else + higher.insert(bit); + } + + inline void clear(uint32_t bit) + { + if (bit < 64) + lower &= ~(1ull << bit); + else + higher.erase(bit); + } + + inline uint64_t get_lower() const + { + return lower; + } + + inline void reset() + { + lower = 0; + higher.clear(); + } + + inline void merge_and(const Bitset &other) + { + lower &= other.lower; + std::unordered_set tmp_set; + for (auto &v : higher) + if (other.higher.count(v) != 0) + tmp_set.insert(v); + higher = std::move(tmp_set); + } + + inline void merge_or(const Bitset &other) + { + lower |= other.lower; + for (auto &v : other.higher) + higher.insert(v); + } + + inline bool operator==(const Bitset &other) const + { + if (lower != other.lower) + return false; + + if (higher.size() != other.higher.size()) + return false; + + for (auto &v : higher) + if (other.higher.count(v) == 0) + return false; + + return true; + } + + inline bool operator!=(const Bitset &other) const + { + return !(*this == other); + } + + template + void for_each_bit(const Op &op) const + { + // TODO: Add ctz-based iteration. + for (uint32_t i = 0; i < 64; i++) + { + if (lower & (1ull << i)) + op(i); + } + + if (higher.empty()) + return; + + // Need to enforce an order here for reproducible results, + // but hitting this path should happen extremely rarely, so having this slow path is fine. 
+ std::vector bits; + bits.reserve(higher.size()); + for (auto &v : higher) + bits.push_back(v); + std::sort(std::begin(bits), std::end(bits)); + + for (auto &v : bits) + op(v); + } + + inline bool empty() const + { + return lower == 0 && higher.empty(); + } + +private: + // The most common bits to set are all lower than 64, + // so optimize for this case. Bits spilling outside 64 go into a slower data structure. + // In almost all cases, higher data structure will not be used. + uint64_t lower = 0; + std::unordered_set higher; +}; + +// Helper template to avoid lots of nasty string temporary munging. +template +std::string join(Ts &&... ts) +{ + std::ostringstream stream; + inner::join_helper(stream, std::forward(ts)...); + return stream.str(); +} + +inline std::string merge(const std::vector &list) +{ + std::string s; + for (auto &elem : list) + { + s += elem; + if (&elem != &list.back()) + s += ", "; + } + return s; +} + +template +inline std::string convert_to_string(T &&t) +{ + return std::to_string(std::forward(t)); +} + +// Allow implementations to set a convenient standard precision +#ifndef SPIRV_CROSS_FLT_FMT +#define SPIRV_CROSS_FLT_FMT "%.32g" +#endif + +#ifdef _MSC_VER +// sprintf warning. +// We cannot rely on snprintf existing because, ..., MSVC. +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +inline std::string convert_to_string(float t) +{ + // std::to_string for floating point values is broken. + // Fallback to something more sane. + char buf[64]; + sprintf(buf, SPIRV_CROSS_FLT_FMT, t); + // Ensure that the literal is float. + if (!strchr(buf, '.') && !strchr(buf, 'e')) + strcat(buf, ".0"); + return buf; +} + +inline std::string convert_to_string(double t) +{ + // std::to_string for floating point values is broken. + // Fallback to something more sane. + char buf[64]; + sprintf(buf, SPIRV_CROSS_FLT_FMT, t); + // Ensure that the literal is float. + if (!strchr(buf, '.') && !strchr(buf, 'e')) + strcat(buf, ".0"); + return buf; +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +struct Instruction +{ + uint16_t op = 0; + uint16_t count = 0; + uint32_t offset = 0; + uint32_t length = 0; +}; + +// Helper for Variant interface. +struct IVariant +{ + virtual ~IVariant() = default; + virtual std::unique_ptr clone() = 0; + + uint32_t self = 0; +}; + +#define SPIRV_CROSS_DECLARE_CLONE(T) \ + std::unique_ptr clone() override \ + { \ + return std::unique_ptr(new T(*this)); \ + } + +enum Types +{ + TypeNone, + TypeType, + TypeVariable, + TypeConstant, + TypeFunction, + TypeFunctionPrototype, + TypePointer, + TypeBlock, + TypeExtension, + TypeExpression, + TypeConstantOp, + TypeCombinedImageSampler, + TypeAccessChain, + TypeUndef +}; + +struct SPIRUndef : IVariant +{ + enum + { + type = TypeUndef + }; + SPIRUndef(uint32_t basetype_) + : basetype(basetype_) + { + } + uint32_t basetype; + + SPIRV_CROSS_DECLARE_CLONE(SPIRUndef) +}; + +// This type is only used by backends which need to access the combined image and sampler IDs separately after +// the OpSampledImage opcode. 
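
The Bitset class above keeps bits 0..63 in a single uint64_t and spills anything larger into an unordered_set, which matters because decoration values from vendor extensions sit far above 64; for_each_bit reports the low bits in ascending order and the spilled bits sorted afterwards, so iteration stays deterministic. A small usage sketch, not part of the vendored sources, assuming the spirv_common.hpp added by this diff is on the include path:

#include "spirv_common.hpp" // added by this diff; assumed to be on the include path
#include <cstdint>
#include <cstdio>

int main()
{
	spirv_cross::Bitset flags;
	flags.set(spv::DecorationBlock);              // bit 2, kept in the 64-bit mask
	flags.set(spv::DecorationHlslSemanticGOOGLE); // 5635, kept in the spill set
	flags.for_each_bit([](uint32_t bit) { printf("decoration bit %u\n", bit); });
	printf("has Block: %d\n", int(flags.get(spv::DecorationBlock)));
	return 0;
}
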
+struct SPIRCombinedImageSampler : IVariant +{ + enum + { + type = TypeCombinedImageSampler + }; + SPIRCombinedImageSampler(uint32_t type_, uint32_t image_, uint32_t sampler_) + : combined_type(type_) + , image(image_) + , sampler(sampler_) + { + } + uint32_t combined_type; + uint32_t image; + uint32_t sampler; + + SPIRV_CROSS_DECLARE_CLONE(SPIRCombinedImageSampler) +}; + +struct SPIRConstantOp : IVariant +{ + enum + { + type = TypeConstantOp + }; + + SPIRConstantOp(uint32_t result_type, spv::Op op, const uint32_t *args, uint32_t length) + : opcode(op) + , arguments(args, args + length) + , basetype(result_type) + { + } + + spv::Op opcode; + std::vector arguments; + uint32_t basetype; + + SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp) +}; + +struct SPIRType : IVariant +{ + enum + { + type = TypeType + }; + + enum BaseType + { + Unknown, + Void, + Boolean, + Char, + SByte, + UByte, + Short, + UShort, + Int, + UInt, + Int64, + UInt64, + AtomicCounter, + Half, + Float, + Double, + Struct, + Image, + SampledImage, + Sampler + }; + + // Scalar/vector/matrix support. + BaseType basetype = Unknown; + uint32_t width = 0; + uint32_t vecsize = 1; + uint32_t columns = 1; + + // Arrays, support array of arrays by having a vector of array sizes. + std::vector array; + + // Array elements can be either specialization constants or specialization ops. + // This array determines how to interpret the array size. + // If an element is true, the element is a literal, + // otherwise, it's an expression, which must be resolved on demand. + // The actual size is not really known until runtime. + std::vector array_size_literal; + + // Pointers + // Keep track of how many pointer layers we have. + uint32_t pointer_depth = 0; + bool pointer = false; + + spv::StorageClass storage = spv::StorageClassGeneric; + + std::vector member_types; + + struct ImageType + { + uint32_t type; + spv::Dim dim; + bool depth; + bool arrayed; + bool ms; + uint32_t sampled; + spv::ImageFormat format; + spv::AccessQualifier access; + } image; + + // Structs can be declared multiple times if they are used as part of interface blocks. + // We want to detect this so that we only emit the struct definition once. + // Since we cannot rely on OpName to be equal, we need to figure out aliases. + uint32_t type_alias = 0; + + // Denotes the type which this type is based on. + // Allows the backend to traverse how a complex type is built up during access chains. + uint32_t parent_type = 0; + + // Used in backends to avoid emitting members with conflicting names. + std::unordered_set member_name_cache; + + SPIRV_CROSS_DECLARE_CLONE(SPIRType) +}; + +struct SPIRExtension : IVariant +{ + enum + { + type = TypeExtension + }; + + enum Extension + { + Unsupported, + GLSL, + SPV_AMD_shader_ballot, + SPV_AMD_shader_explicit_vertex_parameter, + SPV_AMD_shader_trinary_minmax, + SPV_AMD_gcn_shader + }; + + SPIRExtension(Extension ext_) + : ext(ext_) + { + } + + Extension ext; + SPIRV_CROSS_DECLARE_CLONE(SPIRExtension) +}; + +// SPIREntryPoint is not a variant since its IDs are used to decorate OpFunction, +// so in order to avoid conflicts, we can't stick them in the ids array. 
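
SPIRType above models every SPIR-V type with the same few fields: basetype and width for the scalar, vecsize for components per column, columns for matrices, plus the array and pointer metadata. A small sketch of what those fields hold for a 32-bit vec4, filled in by hand rather than by the parser and again assuming spirv_common.hpp is on the include path:

#include "spirv_common.hpp" // added by this diff; assumed to be on the include path
#include <cstdio>

int main()
{
	spirv_cross::SPIRType type;
	type.basetype = spirv_cross::SPIRType::Float; // scalar kind
	type.width = 32;                              // bits per scalar component
	type.vecsize = 4;                             // components per column -> a vec4
	type.columns = 1;                             // more than 1 would make it a matrix
	printf("width %u, vecsize %u, columns %u\n", type.width, type.vecsize, type.columns);
	return 0;
}
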
+struct SPIREntryPoint +{ + SPIREntryPoint(uint32_t self_, spv::ExecutionModel execution_model, const std::string &entry_name) + : self(self_) + , name(entry_name) + , orig_name(entry_name) + , model(execution_model) + { + } + SPIREntryPoint() = default; + + uint32_t self = 0; + std::string name; + std::string orig_name; + std::vector interface_variables; + + Bitset flags; + struct + { + uint32_t x = 0, y = 0, z = 0; + uint32_t constant = 0; // Workgroup size can be expressed as a constant/spec-constant instead. + } workgroup_size; + uint32_t invocations = 0; + uint32_t output_vertices = 0; + spv::ExecutionModel model; +}; + +struct SPIRExpression : IVariant +{ + enum + { + type = TypeExpression + }; + + // Only created by the backend target to avoid creating tons of temporaries. + SPIRExpression(std::string expr, uint32_t expression_type_, bool immutable_) + : expression(move(expr)) + , expression_type(expression_type_) + , immutable(immutable_) + { + } + + // If non-zero, prepend expression with to_expression(base_expression). + // Used in amortizing multiple calls to to_expression() + // where in certain cases that would quickly force a temporary when not needed. + uint32_t base_expression = 0; + + std::string expression; + uint32_t expression_type = 0; + + // If this expression is a forwarded load, + // allow us to reference the original variable. + uint32_t loaded_from = 0; + + // If this expression will never change, we can avoid lots of temporaries + // in high level source. + // An expression being immutable can be speculative, + // it is assumed that this is true almost always. + bool immutable = false; + + // Before use, this expression must be transposed. + // This is needed for targets which don't support row_major layouts. + bool need_transpose = false; + + // Whether or not this is an access chain expression. + bool access_chain = false; + + // A list of expressions which this expression depends on. + std::vector expression_dependencies; + + // By reading this expression, we implicitly read these expressions as well. + // Used by access chain Store and Load since we read multiple expressions in this case. + std::vector implied_read_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRExpression) +}; + +struct SPIRFunctionPrototype : IVariant +{ + enum + { + type = TypeFunctionPrototype + }; + + SPIRFunctionPrototype(uint32_t return_type_) + : return_type(return_type_) + { + } + + uint32_t return_type; + std::vector parameter_types; + + SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype) +}; + +struct SPIRBlock : IVariant +{ + enum + { + type = TypeBlock + }; + + enum Terminator + { + Unknown, + Direct, // Emit next block directly without a particular condition. + + Select, // Block ends with an if/else block. + MultiSelect, // Block ends with switch statement. + + Return, // Block ends with return. + Unreachable, // Noop + Kill // Discard + }; + + enum Merge + { + MergeNone, + MergeLoop, + MergeSelection + }; + + enum Hints + { + HintNone, + HintUnroll, + HintDontUnroll, + HintFlatten, + HintDontFlatten + }; + + enum Method + { + MergeToSelectForLoop, + MergeToDirectForLoop, + MergeToSelectContinueForLoop + }; + + enum ContinueBlockType + { + ContinueNone, + + // Continue block is branchless and has at least one instruction. + ForLoop, + + // Noop continue block. + WhileLoop, + + // Continue block is conditional. + DoWhileLoop, + + // Highly unlikely that anything will use this, + // since it is really awkward/impossible to express in GLSL. 
+ ComplexLoop + }; + + enum + { + NoDominator = 0xffffffffu + }; + + Terminator terminator = Unknown; + Merge merge = MergeNone; + Hints hint = HintNone; + uint32_t next_block = 0; + uint32_t merge_block = 0; + uint32_t continue_block = 0; + + uint32_t return_value = 0; // If 0, return nothing (void). + uint32_t condition = 0; + uint32_t true_block = 0; + uint32_t false_block = 0; + uint32_t default_block = 0; + + std::vector ops; + + struct Phi + { + uint32_t local_variable; // flush local variable ... + uint32_t parent; // If we're in from_block and want to branch into this block ... + uint32_t function_variable; // to this function-global "phi" variable first. + }; + + // Before entering this block flush out local variables to magical "phi" variables. + std::vector phi_variables; + + // Declare these temporaries before beginning the block. + // Used for handling complex continue blocks which have side effects. + std::vector> declare_temporary; + + // Declare these temporaries, but only conditionally if this block turns out to be + // a complex loop header. + std::vector> potential_declare_temporary; + + struct Case + { + uint32_t value; + uint32_t block; + }; + std::vector cases; + + // If we have tried to optimize code for this block but failed, + // keep track of this. + bool disable_block_optimization = false; + + // If the continue block is complex, fallback to "dumb" for loops. + bool complex_continue = false; + + // Do we need a ladder variable to defer breaking out of a loop construct after a switch block? + bool need_ladder_break = false; + + // The dominating block which this block might be within. + // Used in continue; blocks to determine if we really need to write continue. + uint32_t loop_dominator = 0; + + // All access to these variables are dominated by this block, + // so before branching anywhere we need to make sure that we declare these variables. + std::vector dominated_variables; + + // These are variables which should be declared in a for loop header, if we + // fail to use a classic for-loop, + // we remove these variables, and fall back to regular variables outside the loop. + std::vector loop_variables; + + // Some expressions are control-flow dependent, i.e. any instruction which relies on derivatives or + // sub-group-like operations. + // Make sure that we only use these expressions in the original block. + std::vector invalidate_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRBlock) +}; + +struct SPIRFunction : IVariant +{ + enum + { + type = TypeFunction + }; + + SPIRFunction(uint32_t return_type_, uint32_t function_type_) + : return_type(return_type_) + , function_type(function_type_) + { + } + + struct Parameter + { + uint32_t type; + uint32_t id; + uint32_t read_count; + uint32_t write_count; + + // Set to true if this parameter aliases a global variable, + // used mostly in Metal where global variables + // have to be passed down to functions as regular arguments. + // However, for this kind of variable, we should not care about + // read and write counts as access to the function arguments + // is not local to the function in question. + bool alias_global_variable; + }; + + // When calling a function, and we're remapping separate image samplers, + // resolve these arguments into combined image samplers and pass them + // as additional arguments in this order. 
+ // It gets more complicated as functions can pull in their own globals + // and combine them with parameters, + // so we need to distinguish if something is local parameter index + // or a global ID. + struct CombinedImageSamplerParameter + { + uint32_t id; + uint32_t image_id; + uint32_t sampler_id; + bool global_image; + bool global_sampler; + bool depth; + }; + + uint32_t return_type; + uint32_t function_type; + std::vector arguments; + + // Can be used by backends to add magic arguments. + // Currently used by combined image/sampler implementation. + + std::vector shadow_arguments; + std::vector local_variables; + uint32_t entry_block = 0; + std::vector blocks; + std::vector combined_parameters; + + void add_local_variable(uint32_t id) + { + local_variables.push_back(id); + } + + void add_parameter(uint32_t parameter_type, uint32_t id, bool alias_global_variable = false) + { + // Arguments are read-only until proven otherwise. + arguments.push_back({ parameter_type, id, 0u, 0u, alias_global_variable }); + } + + // Hooks to be run when the function returns. + // Mostly used for lowering internal data structures onto flattened structures. + // Need to defer this, because they might rely on things which change during compilation. + std::vector> fixup_hooks_out; + + // Hooks to be run when the function begins. + // Mostly used for populating internal data structures from flattened structures. + // Need to defer this, because they might rely on things which change during compilation. + std::vector> fixup_hooks_in; + + bool active = false; + bool flush_undeclared = true; + bool do_combined_parameters = true; + + SPIRV_CROSS_DECLARE_CLONE(SPIRFunction) +}; + +struct SPIRAccessChain : IVariant +{ + enum + { + type = TypeAccessChain + }; + + SPIRAccessChain(uint32_t basetype_, spv::StorageClass storage_, std::string base_, std::string dynamic_index_, + int32_t static_index_) + : basetype(basetype_) + , storage(storage_) + , base(base_) + , dynamic_index(std::move(dynamic_index_)) + , static_index(static_index_) + { + } + + // The access chain represents an offset into a buffer. + // Some backends need more complicated handling of access chains to be able to use buffers, like HLSL + // which has no usable buffer type ala GLSL SSBOs. + // StructuredBuffer is too limited, so our only option is to deal with ByteAddressBuffer which works with raw addresses. + + uint32_t basetype; + spv::StorageClass storage; + std::string base; + std::string dynamic_index; + int32_t static_index; + + uint32_t loaded_from = 0; + uint32_t matrix_stride = 0; + bool row_major_matrix = false; + bool immutable = false; + + // By reading this expression, we implicitly read these expressions as well. + // Used by access chain Store and Load since we read multiple expressions in this case. 
+ std::vector implied_read_expressions; + + SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain) +}; + +struct SPIRVariable : IVariant +{ + enum + { + type = TypeVariable + }; + + SPIRVariable() = default; + SPIRVariable(uint32_t basetype_, spv::StorageClass storage_, uint32_t initializer_ = 0, uint32_t basevariable_ = 0) + : basetype(basetype_) + , storage(storage_) + , initializer(initializer_) + , basevariable(basevariable_) + { + } + + uint32_t basetype = 0; + spv::StorageClass storage = spv::StorageClassGeneric; + uint32_t decoration = 0; + uint32_t initializer = 0; + uint32_t basevariable = 0; + + std::vector dereference_chain; + bool compat_builtin = false; + + // If a variable is shadowed, we only statically assign to it + // and never actually emit a statement for it. + // When we read the variable as an expression, just forward + // shadowed_id as the expression. + bool statically_assigned = false; + uint32_t static_expression = 0; + + // Temporaries which can remain forwarded as long as this variable is not modified. + std::vector dependees; + bool forwardable = true; + + bool deferred_declaration = false; + bool phi_variable = false; + + // Used to deal with Phi variable flushes. See flush_phi(). + bool allocate_temporary_copy = false; + + bool remapped_variable = false; + uint32_t remapped_components = 0; + + // The block which dominates all access to this variable. + uint32_t dominator = 0; + // If true, this variable is a loop variable, when accessing the variable + // outside a loop, + // we should statically forward it. + bool loop_variable = false; + // Set to true while we're inside the for loop. + bool loop_variable_enable = false; + + SPIRFunction::Parameter *parameter = nullptr; + + SPIRV_CROSS_DECLARE_CLONE(SPIRVariable) +}; + +struct SPIRConstant : IVariant +{ + enum + { + type = TypeConstant + }; + + union Constant { + uint32_t u32; + int32_t i32; + float f32; + + uint64_t u64; + int64_t i64; + double f64; + }; + + struct ConstantVector + { + Constant r[4]; + // If != 0, this element is a specialization constant, and we should keep track of it as such. + uint32_t id[4]; + uint32_t vecsize = 1; + + // Workaround for MSVC 2013, initializing an array breaks. + ConstantVector() + { + memset(r, 0, sizeof(r)); + for (unsigned i = 0; i < 4; i++) + id[i] = 0; + } + }; + + struct ConstantMatrix + { + ConstantVector c[4]; + // If != 0, this column is a specialization constant, and we should keep track of it as such. + uint32_t id[4]; + uint32_t columns = 1; + + // Workaround for MSVC 2013, initializing an array breaks. + ConstantMatrix() + { + for (unsigned i = 0; i < 4; i++) + id[i] = 0; + } + }; + + static inline float f16_to_f32(uint16_t u16_value) + { + // Based on the GLM implementation. 
+ int s = (u16_value >> 15) & 0x1; + int e = (u16_value >> 10) & 0x1f; + int m = (u16_value >> 0) & 0x3ff; + + union { + float f32; + uint32_t u32; + } u; + + if (e == 0) + { + if (m == 0) + { + u.u32 = uint32_t(s) << 31; + return u.f32; + } + else + { + while ((m & 0x400) == 0) + { + m <<= 1; + e--; + } + + e++; + m &= ~0x400; + } + } + else if (e == 31) + { + if (m == 0) + { + u.u32 = (uint32_t(s) << 31) | 0x7f800000u; + return u.f32; + } + else + { + u.u32 = (uint32_t(s) << 31) | 0x7f800000u | (m << 13); + return u.f32; + } + } + + e += 127 - 15; + m <<= 13; + u.u32 = (uint32_t(s) << 31) | (e << 23) | m; + return u.f32; + } + + inline uint32_t specialization_constant_id(uint32_t col, uint32_t row) const + { + return m.c[col].id[row]; + } + + inline uint32_t specialization_constant_id(uint32_t col) const + { + return m.id[col]; + } + + inline uint32_t scalar(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].u32; + } + + inline int16_t scalar_i16(uint32_t col = 0, uint32_t row = 0) const + { + return int16_t(m.c[col].r[row].u32 & 0xffffu); + } + + inline uint16_t scalar_u16(uint32_t col = 0, uint32_t row = 0) const + { + return uint16_t(m.c[col].r[row].u32 & 0xffffu); + } + + inline float scalar_f16(uint32_t col = 0, uint32_t row = 0) const + { + return f16_to_f32(scalar_u16(col, row)); + } + + inline float scalar_f32(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].f32; + } + + inline int32_t scalar_i32(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].i32; + } + + inline double scalar_f64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].f64; + } + + inline int64_t scalar_i64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].i64; + } + + inline uint64_t scalar_u64(uint32_t col = 0, uint32_t row = 0) const + { + return m.c[col].r[row].u64; + } + + inline const ConstantVector &vector() const + { + return m.c[0]; + } + + inline uint32_t vector_size() const + { + return m.c[0].vecsize; + } + + inline uint32_t columns() const + { + return m.columns; + } + + inline void make_null(const SPIRType &constant_type_) + { + m = {}; + m.columns = constant_type_.columns; + for (auto &c : m.c) + c.vecsize = constant_type_.vecsize; + } + + inline bool constant_is_null() const + { + if (specialization) + return false; + if (!subconstants.empty()) + return false; + + for (uint32_t col = 0; col < columns(); col++) + for (uint32_t row = 0; row < vector_size(); row++) + if (scalar_u64(col, row) != 0) + return false; + + return true; + } + + explicit SPIRConstant(uint32_t constant_type_) + : constant_type(constant_type_) + { + } + + SPIRConstant() = default; + + SPIRConstant(uint32_t constant_type_, const uint32_t *elements, uint32_t num_elements, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + subconstants.insert(end(subconstants), elements, elements + num_elements); + specialization = specialized; + } + + // Construct scalar (32-bit). + SPIRConstant(uint32_t constant_type_, uint32_t v0, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + m.c[0].r[0].u32 = v0; + m.c[0].vecsize = 1; + m.columns = 1; + } + + // Construct scalar (64-bit). + SPIRConstant(uint32_t constant_type_, uint64_t v0, bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + m.c[0].r[0].u64 = v0; + m.c[0].vecsize = 1; + m.columns = 1; + } + + // Construct vectors and matrices. 
+ SPIRConstant(uint32_t constant_type_, const SPIRConstant *const *vector_elements, uint32_t num_elements, + bool specialized) + : constant_type(constant_type_) + , specialization(specialized) + { + bool matrix = vector_elements[0]->m.c[0].vecsize > 1; + + if (matrix) + { + m.columns = num_elements; + + for (uint32_t i = 0; i < num_elements; i++) + { + m.c[i] = vector_elements[i]->m.c[0]; + if (vector_elements[i]->specialization) + m.id[i] = vector_elements[i]->self; + } + } + else + { + m.c[0].vecsize = num_elements; + m.columns = 1; + + for (uint32_t i = 0; i < num_elements; i++) + { + m.c[0].r[i] = vector_elements[i]->m.c[0].r[0]; + if (vector_elements[i]->specialization) + m.c[0].id[i] = vector_elements[i]->self; + } + } + } + + uint32_t constant_type; + ConstantMatrix m; + + // If this constant is a specialization constant (i.e. created with OpSpecConstant*). + bool specialization = false; + // If this constant is used as an array length which creates specialization restrictions on some backends. + bool is_used_as_array_length = false; + + // If true, this is a LUT, and should always be declared in the outer scope. + bool is_used_as_lut = false; + + // For composites which are constant arrays, etc. + std::vector subconstants; + + // Non-Vulkan GLSL, HLSL and sometimes MSL emits defines for each specialization constant, + // and uses them to initialize the constant. This allows the user + // to still be able to specialize the value by supplying corresponding + // preprocessor directives before compiling the shader. + std::string specialization_constant_macro_name; + + SPIRV_CROSS_DECLARE_CLONE(SPIRConstant) +}; + +class Variant +{ +public: + // MSVC 2013 workaround, we shouldn't need these constructors. + Variant() = default; + + // Marking custom move constructor as noexcept is important. + Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT + { + *this = std::move(other); + } + + Variant(const Variant &variant) + { + *this = variant; + } + + // Marking custom move constructor as noexcept is important. + Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT + { + if (this != &other) + { + holder = std::move(other.holder); + type = other.type; + allow_type_rewrite = other.allow_type_rewrite; + other.type = TypeNone; + } + return *this; + } + + // This copy/clone should only be called in the Compiler constructor. + // If this is called inside ::compile(), we invalidate any references we took higher in the stack. + // This should never happen. + Variant &operator=(const Variant &other) + { +#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE + abort(); +#endif + if (this != &other) + { + holder.reset(); + if (other.holder) + holder = other.holder->clone(); + type = other.type; + allow_type_rewrite = other.allow_type_rewrite; + } + return *this; + } + + void set(std::unique_ptr val, uint32_t new_type) + { + holder = std::move(val); + if (!allow_type_rewrite && type != TypeNone && type != new_type) + SPIRV_CROSS_THROW("Overwriting a variant with new type."); + type = new_type; + allow_type_rewrite = false; + } + + template + T &get() + { + if (!holder) + SPIRV_CROSS_THROW("nullptr"); + if (T::type != type) + SPIRV_CROSS_THROW("Bad cast"); + return *static_cast(holder.get()); + } + + template + const T &get() const + { + if (!holder) + SPIRV_CROSS_THROW("nullptr"); + if (T::type != type) + SPIRV_CROSS_THROW("Bad cast"); + return *static_cast(holder.get()); + } + + uint32_t get_type() const + { + return type; + } + + uint32_t get_id() const + { + return holder ? 
holder->self : 0;
+	}
+
+	bool empty() const
+	{
+		return !holder;
+	}
+
+	void reset()
+	{
+		holder.reset();
+		type = TypeNone;
+	}
+
+	void set_allow_type_rewrite()
+	{
+		allow_type_rewrite = true;
+	}
+
+private:
+	std::unique_ptr<IVariant> holder;
+	uint32_t type = TypeNone;
+	bool allow_type_rewrite = false;
+};
+
+template <typename T>
+T &variant_get(Variant &var)
+{
+	return var.get<T>();
+}
+
+template <typename T>
+const T &variant_get(const Variant &var)
+{
+	return var.get<T>();
+}
+
+template <typename T, typename... P>
+T &variant_set(Variant &var, P &&... args)
+{
+	auto uptr = std::unique_ptr<T>(new T(std::forward<P>
(args)...)); + auto ptr = uptr.get(); + var.set(std::move(uptr), T::type); + return *ptr; +} + +struct AccessChainMeta +{ + bool need_transpose = false; + bool storage_is_packed = false; + bool storage_is_invariant = false; +}; + +struct Meta +{ + struct Decoration + { + std::string alias; + std::string qualified_alias; + std::string hlsl_semantic; + Bitset decoration_flags; + spv::BuiltIn builtin_type = spv::BuiltInMax; + uint32_t location = 0; + uint32_t component = 0; + uint32_t set = 0; + uint32_t binding = 0; + uint32_t offset = 0; + uint32_t array_stride = 0; + uint32_t matrix_stride = 0; + uint32_t input_attachment = 0; + uint32_t spec_id = 0; + uint32_t index = 0; + spv::FPRoundingMode fp_rounding_mode = spv::FPRoundingModeMax; + bool builtin = false; + }; + + Decoration decoration; + std::vector members; + + std::unordered_map decoration_word_offset; + + // For SPV_GOOGLE_hlsl_functionality1. + bool hlsl_is_magic_counter_buffer = false; + // ID for the sibling counter buffer. + uint32_t hlsl_magic_counter_buffer = 0; +}; + +// A user callback that remaps the type of any variable. +// var_name is the declared name of the variable. +// name_of_type is the textual name of the type which will be used in the code unless written to by the callback. +using VariableTypeRemapCallback = + std::function; + +class ClassicLocale +{ +public: + ClassicLocale() + { + old = std::locale::global(std::locale::classic()); + } + ~ClassicLocale() + { + std::locale::global(old); + } + +private: + std::locale old; +}; + +class Hasher +{ +public: + inline void u32(uint32_t value) + { + h = (h * 0x100000001b3ull) ^ value; + } + + inline uint64_t get() const + { + return h; + } + +private: + uint64_t h = 0xcbf29ce484222325ull; +}; + +static inline bool type_is_floating_point(const SPIRType &type) +{ + return type.basetype == SPIRType::Half || type.basetype == SPIRType::Float || type.basetype == SPIRType::Double; +} + +static inline bool type_is_integral(const SPIRType &type) +{ + return type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte || type.basetype == SPIRType::Short || + type.basetype == SPIRType::UShort || type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt || + type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64; +} +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_cpp.cpp b/3rdparty/spirv-cross/spirv_cpp.cpp new file mode 100644 index 000000000..094275fa8 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cpp.cpp @@ -0,0 +1,550 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "spirv_cpp.hpp" + +using namespace spv; +using namespace spirv_cross; +using namespace std; + +void CompilerCPP::emit_buffer_block(const SPIRVariable &var) +{ + add_resource_name(var.self); + + auto &type = get(var.basetype); + auto instance_name = to_name(var.self); + + uint32_t descriptor_set = ir.meta[var.self].decoration.set; + uint32_t binding = ir.meta[var.self].decoration.binding; + + emit_block_struct(type); + auto buffer_name = to_name(type.self); + + statement("internal::Resource<", buffer_name, type_to_array_glsl(type), "> ", instance_name, "__;"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()"); + resource_registrations.push_back( + join("s.register_resource(", instance_name, "__", ", ", descriptor_set, ", ", binding, ");")); + statement(""); +} + +void CompilerCPP::emit_interface_block(const SPIRVariable &var) +{ + add_resource_name(var.self); + + auto &type = get(var.basetype); + + const char *qual = var.storage == StorageClassInput ? "StageInput" : "StageOutput"; + const char *lowerqual = var.storage == StorageClassInput ? "stage_input" : "stage_output"; + auto instance_name = to_name(var.self); + uint32_t location = ir.meta[var.self].decoration.location; + + string buffer_name; + auto flags = ir.meta[type.self].decoration.decoration_flags; + if (flags.get(DecorationBlock)) + { + emit_block_struct(type); + buffer_name = to_name(type.self); + } + else + buffer_name = type_to_glsl(type); + + statement("internal::", qual, "<", buffer_name, type_to_array_glsl(type), "> ", instance_name, "__;"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()"); + resource_registrations.push_back(join("s.register_", lowerqual, "(", instance_name, "__", ", ", location, ");")); + statement(""); +} + +void CompilerCPP::emit_shared(const SPIRVariable &var) +{ + add_resource_name(var.self); + + auto instance_name = to_name(var.self); + statement(CompilerGLSL::variable_decl(var), ";"); + statement_no_indent("#define ", instance_name, " __res->", instance_name); +} + +void CompilerCPP::emit_uniform(const SPIRVariable &var) +{ + add_resource_name(var.self); + + auto &type = get(var.basetype); + auto instance_name = to_name(var.self); + + uint32_t descriptor_set = ir.meta[var.self].decoration.set; + uint32_t binding = ir.meta[var.self].decoration.binding; + uint32_t location = ir.meta[var.self].decoration.location; + + string type_name = type_to_glsl(type); + remap_variable_type_name(type, instance_name, type_name); + + if (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage || + type.basetype == SPIRType::AtomicCounter) + { + statement("internal::Resource<", type_name, type_to_array_glsl(type), "> ", instance_name, "__;"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()"); + resource_registrations.push_back( + join("s.register_resource(", instance_name, "__", ", ", descriptor_set, ", ", binding, ");")); + } + else + { + statement("internal::UniformConstant<", type_name, type_to_array_glsl(type), "> ", instance_name, "__;"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()"); + resource_registrations.push_back( + join("s.register_uniform_constant(", instance_name, "__", ", ", location, ");")); + } + + statement(""); +} + +void CompilerCPP::emit_push_constant_block(const SPIRVariable &var) +{ + add_resource_name(var.self); + + auto &type = get(var.basetype); + auto &flags = 
ir.meta[var.self].decoration.decoration_flags; + if (flags.get(DecorationBinding) || flags.get(DecorationDescriptorSet)) + SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. " + "Remap to location with reflection API first or disable these decorations."); + + emit_block_struct(type); + auto buffer_name = to_name(type.self); + auto instance_name = to_name(var.self); + + statement("internal::PushConstant<", buffer_name, type_to_array_glsl(type), "> ", instance_name, ";"); + statement_no_indent("#define ", instance_name, " __res->", instance_name, ".get()"); + resource_registrations.push_back(join("s.register_push_constant(", instance_name, "__", ");")); + statement(""); +} + +void CompilerCPP::emit_block_struct(SPIRType &type) +{ + // C++ can't do interface blocks, so we fake it by emitting a separate struct. + // However, these structs are not allowed to alias anything, so remove it before + // emitting the struct. + // + // The type we have here needs to be resolved to the non-pointer type so we can remove aliases. + auto &self = get(type.self); + self.type_alias = 0; + emit_struct(self); +} + +void CompilerCPP::emit_resources() +{ + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + bool needs_declaration = c.specialization || c.is_used_as_lut; + + if (needs_declaration) + { + if (!options.vulkan_semantics && c.specialization) + { + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + } + emit_constant(c); + } + } + else if (id.get_type() == TypeConstantOp) + { + emit_specialization_constant_op(id.get()); + } + } + + // Output all basic struct types which are not Block or BufferBlock as these are declared inplace + // when such variables are instantiated. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer && + (!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) && + !ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + emit_struct(type); + } + } + } + + statement("struct Resources : ", resource_type); + begin_scope(); + + // Output UBOs and SSBOs + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassUniform && + !is_hidden_variable(var) && + (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + emit_buffer_block(var); + } + } + } + + // Output push constant blocks + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + if (!is_hidden_variable(var) && var.storage != StorageClassFunction && type.pointer && + type.storage == StorageClassPushConstant) + { + emit_push_constant_block(var); + } + } + } + + // Output in/out interfaces. 
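+ // For a hypothetical vec4 stage input named a_position at location 0,
+ // emit_interface_block() above emits roughly:
+ //   internal::StageInput<vec4> a_position__;
+ //   #define a_position __res->a_position__.get()
+ // and queues "s.register_stage_input(a_position__, 0);" for the generated init().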
+ for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && !is_hidden_variable(var) && type.pointer && + (var.storage == StorageClassInput || var.storage == StorageClassOutput) && + interface_variable_exists_in_entry_point(var.self)) + { + emit_interface_block(var); + } + } + } + + // Output Uniform Constants (values, samplers, images, etc). + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && !is_hidden_variable(var) && type.pointer && + (type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter)) + { + emit_uniform(var); + } + } + } + + // Global variables. + bool emitted = false; + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage == StorageClassWorkgroup) + { + emit_shared(var); + emitted = true; + } + } + + if (emitted) + statement(""); + + declare_undefined_values(); + + statement("inline void init(spirv_cross_shader& s)"); + begin_scope(); + statement(resource_type, "::init(s);"); + for (auto ® : resource_registrations) + statement(reg); + end_scope(); + resource_registrations.clear(); + + end_scope_decl(); + + statement(""); + statement("Resources* __res;"); + if (get_entry_point().model == ExecutionModelGLCompute) + statement("ComputePrivateResources __priv_res;"); + statement(""); + + // Emit regular globals which are allocated per invocation. + emitted = false; + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage == StorageClassPrivate) + { + if (var.storage == StorageClassWorkgroup) + emit_shared(var); + else + statement(CompilerGLSL::variable_decl(var), ";"); + emitted = true; + } + } + + if (emitted) + statement(""); +} + +string CompilerCPP::compile() +{ + // Force a classic "C" locale, reverts when function returns + ClassicLocale classic_locale; + + // Do not deal with ES-isms like precision, older extensions and such. + options.es = false; + options.version = 450; + backend.float_literal_suffix = true; + backend.double_literal_suffix = false; + backend.long_long_literal_suffix = true; + backend.uint32_t_literal_suffix = true; + backend.basic_int_type = "int32_t"; + backend.basic_uint_type = "uint32_t"; + backend.swizzle_is_function = true; + backend.shared_is_implied = true; + backend.flexible_member_array_supported = false; + backend.explicit_struct_type = true; + backend.use_initializer_list = true; + + build_function_control_flow_graphs_and_analyze(); + update_active_builtins(); + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!"); + + resource_registrations.clear(); + reset(); + + // Move constructor for this type is broken on GCC 4.9 ... + buffer = unique_ptr(new ostringstream()); + + emit_header(); + emit_resources(); + + emit_function(get(ir.default_entry_point), Bitset()); + + pass_count++; + } while (force_recompile); + + // Match opening scope of emit_header(). + end_scope_decl(); + // namespace + end_scope(); + + // Emit C entry points + emit_c_linkage(); + + // Entry point in CPP is always main() for the time being. 
+ get_entry_point().name = "main"; + + return buffer->str(); +} + +void CompilerCPP::emit_c_linkage() +{ + statement(""); + + statement("spirv_cross_shader_t *spirv_cross_construct(void)"); + begin_scope(); + statement("return new ", impl_type, "();"); + end_scope(); + + statement(""); + statement("void spirv_cross_destruct(spirv_cross_shader_t *shader)"); + begin_scope(); + statement("delete static_cast<", impl_type, "*>(shader);"); + end_scope(); + + statement(""); + statement("void spirv_cross_invoke(spirv_cross_shader_t *shader)"); + begin_scope(); + statement("static_cast<", impl_type, "*>(shader)->invoke();"); + end_scope(); + + statement(""); + statement("static const struct spirv_cross_interface vtable ="); + begin_scope(); + statement("spirv_cross_construct,"); + statement("spirv_cross_destruct,"); + statement("spirv_cross_invoke,"); + end_scope_decl(); + + statement(""); + statement("const struct spirv_cross_interface *", + interface_name.empty() ? string("spirv_cross_get_interface") : interface_name, "(void)"); + begin_scope(); + statement("return &vtable;"); + end_scope(); +} + +void CompilerCPP::emit_function_prototype(SPIRFunction &func, const Bitset &) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + local_variable_names = resource_names; + string decl; + + auto &type = get(func.return_type); + decl += "inline "; + decl += type_to_glsl(type); + decl += " "; + + if (func.self == ir.default_entry_point) + { + decl += "main"; + processing_entry_point = true; + } + else + decl += to_name(func.self); + + decl += "("; + for (auto &arg : func.arguments) + { + add_local_variable_name(arg.id); + + decl += argument_decl(arg); + if (&arg != &func.arguments.back()) + decl += ", "; + + // Hold a pointer to the parameter so we can invalidate the readonly field if needed. + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + decl += ")"; + statement(decl); +} + +string CompilerCPP::argument_decl(const SPIRFunction::Parameter &arg) +{ + auto &type = expression_type(arg.id); + bool constref = !type.pointer || arg.write_count == 0; + + auto &var = get(arg.id); + + string base = type_to_glsl(type); + string variable_name = to_name(var.self); + remap_variable_type_name(type, variable_name, base); + + for (uint32_t i = 0; i < type.array.size(); i++) + base = join("std::array<", base, ", ", to_array_size(type, i), ">"); + + return join(constref ? "const " : "", base, " &", variable_name); +} + +string CompilerCPP::variable_decl(const SPIRType &type, const string &name, uint32_t /* id */) +{ + string base = type_to_glsl(type); + remap_variable_type_name(type, name, base); + bool runtime = false; + + for (uint32_t i = 0; i < type.array.size(); i++) + { + auto &array = type.array[i]; + if (!array && type.array_size_literal[i]) + { + // Avoid using runtime arrays with std::array since this is undefined. + // Runtime arrays cannot be passed around as values, so this is fine. + runtime = true; + } + else + base = join("std::array<", base, ", ", to_array_size(type, i), ">"); + } + base += ' '; + return base + name + (runtime ? "[1]" : ""); +} + +void CompilerCPP::emit_header() +{ + auto &execution = get_entry_point(); + + statement("// This C++ shader is autogenerated by spirv-cross."); + statement("#include \"spirv_cross/internal_interface.hpp\""); + statement("#include \"spirv_cross/external_interface.h\""); + // Needed to properly implement GLSL-style arrays. 
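+ // (Sized GLSL arrays are passed around as std::array in the generated code; see
+ // argument_decl() and variable_decl() in this file, hence the <array> include.)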
+ statement("#include "); + statement("#include "); + statement(""); + statement("using namespace spirv_cross;"); + statement("using namespace glm;"); + statement(""); + + statement("namespace Impl"); + begin_scope(); + + switch (execution.model) + { + case ExecutionModelGeometry: + case ExecutionModelTessellationControl: + case ExecutionModelTessellationEvaluation: + case ExecutionModelGLCompute: + case ExecutionModelFragment: + case ExecutionModelVertex: + statement("struct Shader"); + begin_scope(); + break; + + default: + SPIRV_CROSS_THROW("Unsupported execution model."); + } + + switch (execution.model) + { + case ExecutionModelGeometry: + impl_type = "GeometryShader"; + resource_type = "GeometryResources"; + break; + + case ExecutionModelVertex: + impl_type = "VertexShader"; + resource_type = "VertexResources"; + break; + + case ExecutionModelFragment: + impl_type = "FragmentShader"; + resource_type = "FragmentResources"; + break; + + case ExecutionModelGLCompute: + impl_type = join("ComputeShader"); + resource_type = "ComputeResources"; + break; + + case ExecutionModelTessellationControl: + impl_type = "TessControlShader"; + resource_type = "TessControlResources"; + break; + + case ExecutionModelTessellationEvaluation: + impl_type = "TessEvaluationShader"; + resource_type = "TessEvaluationResources"; + break; + + default: + SPIRV_CROSS_THROW("Unsupported execution model."); + } +} diff --git a/3rdparty/spirv-cross/spirv_cpp.hpp b/3rdparty/spirv-cross/spirv_cpp.hpp new file mode 100644 index 000000000..bcdb669b3 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cpp.hpp @@ -0,0 +1,87 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_CPP_HPP +#define SPIRV_CROSS_CPP_HPP + +#include "spirv_glsl.hpp" +#include +#include + +namespace spirv_cross +{ +class CompilerCPP : public CompilerGLSL +{ +public: + explicit CompilerCPP(std::vector spirv_) + : CompilerGLSL(move(spirv_)) + { + } + + CompilerCPP(const uint32_t *ir_, size_t word_count) + : CompilerGLSL(ir_, word_count) + { + } + + explicit CompilerCPP(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + } + + explicit CompilerCPP(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + } + + std::string compile() override; + + // Sets a custom symbol name that can override + // spirv_cross_get_interface. + // + // Useful when several shader interfaces are linked + // statically into the same binary. 
+ void set_interface_name(std::string name) + { + interface_name = std::move(name); + } + +private: + void emit_header() override; + void emit_c_linkage(); + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + + void emit_resources(); + void emit_buffer_block(const SPIRVariable &type) override; + void emit_push_constant_block(const SPIRVariable &var) override; + void emit_interface_block(const SPIRVariable &type); + void emit_block_chain(SPIRBlock &block); + void emit_uniform(const SPIRVariable &var) override; + void emit_shared(const SPIRVariable &var); + void emit_block_struct(SPIRType &type); + std::string variable_decl(const SPIRType &type, const std::string &name, uint32_t id) override; + + std::string argument_decl(const SPIRFunction::Parameter &arg); + + std::vector resource_registrations; + std::string impl_type; + std::string resource_type; + uint32_t shared_counter = 0; + + std::string interface_name; +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_cross.cpp b/3rdparty/spirv-cross/spirv_cross.cpp new file mode 100644 index 000000000..fc54f09db --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cross.cpp @@ -0,0 +1,3791 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "spirv_cross.hpp" +#include "GLSL.std.450.h" +#include "spirv_cfg.hpp" +#include "spirv_parser.hpp" +#include +#include +#include + +using namespace std; +using namespace spv; +using namespace spirv_cross; + +Compiler::Compiler(vector ir_) +{ + Parser parser(move(ir_)); + parser.parse(); + set_ir(move(parser.get_parsed_ir())); +} + +Compiler::Compiler(const uint32_t *ir_, size_t word_count) +{ + Parser parser(ir_, word_count); + parser.parse(); + set_ir(move(parser.get_parsed_ir())); +} + +Compiler::Compiler(const ParsedIR &ir_) +{ + set_ir(ir_); +} + +Compiler::Compiler(ParsedIR &&ir_) +{ + set_ir(move(ir_)); +} + +void Compiler::set_ir(ParsedIR &&ir_) +{ + ir = move(ir_); + parse_fixup(); +} + +void Compiler::set_ir(const ParsedIR &ir_) +{ + ir = ir_; + parse_fixup(); +} + +string Compiler::compile() +{ + // Force a classic "C" locale, reverts when function returns + ClassicLocale classic_locale; + return ""; +} + +bool Compiler::variable_storage_is_aliased(const SPIRVariable &v) +{ + auto &type = get(v.basetype); + bool ssbo = v.storage == StorageClassStorageBuffer || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + bool image = type.basetype == SPIRType::Image; + bool counter = type.basetype == SPIRType::AtomicCounter; + + bool is_restrict; + if (ssbo) + is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict); + else + is_restrict = has_decoration(v.self, DecorationRestrict); + + return !is_restrict && (ssbo || image || counter); +} + +bool Compiler::block_is_pure(const SPIRBlock &block) +{ + for (auto &i : block.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + switch (op) + { + case OpFunctionCall: + { + uint32_t func = ops[2]; + if (!function_is_pure(get(func))) + return false; + break; + } + + case OpCopyMemory: + case OpStore: + { + auto &type = expression_type(ops[0]); + if (type.storage != StorageClassFunction) + return false; + break; + } + + case OpImageWrite: + return false; + + // Atomics are impure. + case OpAtomicLoad: + case OpAtomicStore: + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + return false; + + // Geometry shader builtins modify global state. + case OpEndPrimitive: + case OpEmitStreamVertex: + case OpEndStreamPrimitive: + case OpEmitVertex: + return false; + + // Barriers disallow any reordering, so we should treat blocks with barrier as writing. + case OpControlBarrier: + case OpMemoryBarrier: + return false; + + // OpExtInst is potentially impure depending on extension, but GLSL builtins are at least pure. + + default: + break; + } + } + + return true; +} + +string Compiler::to_name(uint32_t id, bool allow_alias) const +{ + if (allow_alias && ir.ids.at(id).get_type() == TypeType) + { + // If this type is a simple alias, emit the + // name of the original type instead. + // We don't want to override the meta alias + // as that can be overridden by the reflection APIs after parse. + auto &type = get(id); + if (type.type_alias) + { + // If the alias master has been specially packed, we will have emitted a clean variant as well, + // so skip the name aliasing here. 
+ if (!has_decoration(type.type_alias, DecorationCPacked)) + return to_name(type.type_alias); + } + } + + if (ir.meta[id].decoration.alias.empty()) + return join("_", id); + else + return ir.meta[id].decoration.alias; +} + +bool Compiler::function_is_pure(const SPIRFunction &func) +{ + for (auto block : func.blocks) + { + if (!block_is_pure(get(block))) + { + //fprintf(stderr, "Function %s is impure!\n", to_name(func.self).c_str()); + return false; + } + } + + //fprintf(stderr, "Function %s is pure!\n", to_name(func.self).c_str()); + return true; +} + +void Compiler::register_global_read_dependencies(const SPIRBlock &block, uint32_t id) +{ + for (auto &i : block.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + switch (op) + { + case OpFunctionCall: + { + uint32_t func = ops[2]; + register_global_read_dependencies(get(func), id); + break; + } + + case OpLoad: + case OpImageRead: + { + // If we're in a storage class which does not get invalidated, adding dependencies here is no big deal. + auto *var = maybe_get_backing_variable(ops[2]); + if (var && var->storage != StorageClassFunction) + { + auto &type = get(var->basetype); + + // InputTargets are immutable. + if (type.basetype != SPIRType::Image && type.image.dim != DimSubpassData) + var->dependees.push_back(id); + } + break; + } + + default: + break; + } + } +} + +void Compiler::register_global_read_dependencies(const SPIRFunction &func, uint32_t id) +{ + for (auto block : func.blocks) + register_global_read_dependencies(get(block), id); +} + +SPIRVariable *Compiler::maybe_get_backing_variable(uint32_t chain) +{ + auto *var = maybe_get(chain); + if (!var) + { + auto *cexpr = maybe_get(chain); + if (cexpr) + var = maybe_get(cexpr->loaded_from); + + auto *access_chain = maybe_get(chain); + if (access_chain) + var = maybe_get(access_chain->loaded_from); + } + + return var; +} + +void Compiler::register_read(uint32_t expr, uint32_t chain, bool forwarded) +{ + auto &e = get(expr); + auto *var = maybe_get_backing_variable(chain); + + if (var) + { + e.loaded_from = var->self; + + // If the backing variable is immutable, we do not need to depend on the variable. + if (forwarded && !is_immutable(var->self)) + var->dependees.push_back(e.self); + + // If we load from a parameter, make sure we create "inout" if we also write to the parameter. + // The default is "in" however, so we never invalidate our compilation by reading. + if (var && var->parameter) + var->parameter->read_count++; + } +} + +void Compiler::register_write(uint32_t chain) +{ + auto *var = maybe_get(chain); + if (!var) + { + // If we're storing through an access chain, invalidate the backing variable instead. + auto *expr = maybe_get(chain); + if (expr && expr->loaded_from) + var = maybe_get(expr->loaded_from); + + auto *access_chain = maybe_get(chain); + if (access_chain && access_chain->loaded_from) + var = maybe_get(access_chain->loaded_from); + } + + if (var) + { + // If our variable is in a storage class which can alias with other buffers, + // invalidate all variables which depend on aliased variables. And if this is a + // variable pointer, then invalidate all variables regardless. + if (get_variable_data_type(*var).pointer) + flush_all_active_variables(); + if (variable_storage_is_aliased(*var)) + flush_all_aliased_variables(); + else if (var) + flush_dependees(*var); + + // We tried to write to a parameter which is not marked with out qualifier, force a recompile. 
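+ // (The prototype was already emitted assuming a read-only parameter because
+ // write_count was 0, so bump the count and force another pass so the parameter
+ // can be redeclared as writable.)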
+ if (var->parameter && var->parameter->write_count == 0) + { + var->parameter->write_count++; + force_recompile = true; + } + } + else + { + // If we stored through a variable pointer, then we don't know which + // variable we stored to. So *all* expressions after this point need to + // be invalidated. + // FIXME: If we can prove that the variable pointer will point to + // only certain variables, we can invalidate only those. + flush_all_active_variables(); + } +} + +void Compiler::flush_dependees(SPIRVariable &var) +{ + for (auto expr : var.dependees) + invalid_expressions.insert(expr); + var.dependees.clear(); +} + +void Compiler::flush_all_aliased_variables() +{ + for (auto aliased : aliased_variables) + flush_dependees(get(aliased)); +} + +void Compiler::flush_all_atomic_capable_variables() +{ + for (auto global : global_variables) + flush_dependees(get(global)); + flush_all_aliased_variables(); +} + +void Compiler::flush_control_dependent_expressions(uint32_t block_id) +{ + auto &block = get(block_id); + for (auto &expr : block.invalidate_expressions) + invalid_expressions.insert(expr); + block.invalidate_expressions.clear(); +} + +void Compiler::flush_all_active_variables() +{ + // Invalidate all temporaries we read from variables in this block since they were forwarded. + // Invalidate all temporaries we read from globals. + for (auto &v : current_function->local_variables) + flush_dependees(get(v)); + for (auto &arg : current_function->arguments) + flush_dependees(get(arg.id)); + for (auto global : global_variables) + flush_dependees(get(global)); + + flush_all_aliased_variables(); +} + +uint32_t Compiler::expression_type_id(uint32_t id) const +{ + switch (ir.ids[id].get_type()) + { + case TypeVariable: + return get(id).basetype; + + case TypeExpression: + return get(id).expression_type; + + case TypeConstant: + return get(id).constant_type; + + case TypeConstantOp: + return get(id).basetype; + + case TypeUndef: + return get(id).basetype; + + case TypeCombinedImageSampler: + return get(id).combined_type; + + case TypeAccessChain: + return get(id).basetype; + + default: + SPIRV_CROSS_THROW("Cannot resolve expression type."); + } +} + +const SPIRType &Compiler::expression_type(uint32_t id) const +{ + return get(expression_type_id(id)); +} + +bool Compiler::expression_is_lvalue(uint32_t id) const +{ + auto &type = expression_type(id); + switch (type.basetype) + { + case SPIRType::SampledImage: + case SPIRType::Image: + case SPIRType::Sampler: + return false; + + default: + return true; + } +} + +bool Compiler::is_immutable(uint32_t id) const +{ + if (ir.ids[id].get_type() == TypeVariable) + { + auto &var = get(id); + + // Anything we load from the UniformConstant address space is guaranteed to be immutable. 
+ bool pointer_to_const = var.storage == StorageClassUniformConstant; + return pointer_to_const || var.phi_variable || !expression_is_lvalue(id); + } + else if (ir.ids[id].get_type() == TypeAccessChain) + return get(id).immutable; + else if (ir.ids[id].get_type() == TypeExpression) + return get(id).immutable; + else if (ir.ids[id].get_type() == TypeConstant || ir.ids[id].get_type() == TypeConstantOp || + ir.ids[id].get_type() == TypeUndef) + return true; + else + return false; +} + +static inline bool storage_class_is_interface(spv::StorageClass storage) +{ + switch (storage) + { + case StorageClassInput: + case StorageClassOutput: + case StorageClassUniform: + case StorageClassUniformConstant: + case StorageClassAtomicCounter: + case StorageClassPushConstant: + case StorageClassStorageBuffer: + return true; + + default: + return false; + } +} + +bool Compiler::is_hidden_variable(const SPIRVariable &var, bool include_builtins) const +{ + if ((is_builtin_variable(var) && !include_builtins) || var.remapped_variable) + return true; + + // Combined image samplers are always considered active as they are "magic" variables. + if (find_if(begin(combined_image_samplers), end(combined_image_samplers), [&var](const CombinedImageSampler &samp) { + return samp.combined_id == var.self; + }) != end(combined_image_samplers)) + { + return false; + } + + bool hidden = false; + if (check_active_interface_variables && storage_class_is_interface(var.storage)) + hidden = active_interface_variables.find(var.self) == end(active_interface_variables); + return hidden; +} + +bool Compiler::is_builtin_type(const SPIRType &type) const +{ + // We can have builtin structs as well. If one member of a struct is builtin, the struct must also be builtin. + for (auto &m : ir.meta[type.self].members) + if (m.builtin) + return true; + + return false; +} + +bool Compiler::is_builtin_variable(const SPIRVariable &var) const +{ + if (var.compat_builtin || ir.meta[var.self].decoration.builtin) + return true; + else + return is_builtin_type(get(var.basetype)); +} + +bool Compiler::is_member_builtin(const SPIRType &type, uint32_t index, BuiltIn *builtin) const +{ + auto &memb = ir.meta[type.self].members; + if (index < memb.size() && memb[index].builtin) + { + if (builtin) + *builtin = memb[index].builtin_type; + return true; + } + + return false; +} + +bool Compiler::is_scalar(const SPIRType &type) const +{ + return type.vecsize == 1 && type.columns == 1; +} + +bool Compiler::is_vector(const SPIRType &type) const +{ + return type.vecsize > 1 && type.columns == 1; +} + +bool Compiler::is_matrix(const SPIRType &type) const +{ + return type.vecsize > 1 && type.columns > 1; +} + +bool Compiler::is_array(const SPIRType &type) const +{ + return !type.array.empty(); +} + +ShaderResources Compiler::get_shader_resources() const +{ + return get_shader_resources(nullptr); +} + +ShaderResources Compiler::get_shader_resources(const unordered_set &active_variables) const +{ + return get_shader_resources(&active_variables); +} + +bool Compiler::InterfaceVariableAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + uint32_t variable = 0; + switch (opcode) + { + // Need this first, otherwise, GCC complains about unhandled switch statements. + default: + break; + + case OpFunctionCall: + { + // Invalid SPIR-V. 
+ if (length < 3) + return false; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + { + auto *var = compiler.maybe_get(args[i]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[i]); + } + break; + } + + case OpSelect: + { + // Invalid SPIR-V. + if (length < 5) + return false; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + { + auto *var = compiler.maybe_get(args[i]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[i]); + } + break; + } + + case OpPhi: + { + // Invalid SPIR-V. + if (length < 2) + return false; + + uint32_t count = length - 2; + args += 2; + for (uint32_t i = 0; i < count; i += 2) + { + auto *var = compiler.maybe_get(args[i]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[i]); + } + break; + } + + case OpAtomicStore: + case OpStore: + // Invalid SPIR-V. + if (length < 1) + return false; + variable = args[0]; + break; + + case OpCopyMemory: + { + if (length < 2) + return false; + + auto *var = compiler.maybe_get(args[0]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(variable); + + var = compiler.maybe_get(args[1]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(variable); + break; + } + + case OpExtInst: + { + if (length < 5) + return false; + uint32_t extension_set = args[2]; + if (compiler.get(extension_set).ext == SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter) + { + enum AMDShaderExplicitVertexParameter + { + InterpolateAtVertexAMD = 1 + }; + + auto op = static_cast(args[3]); + + switch (op) + { + case InterpolateAtVertexAMD: + { + auto *var = compiler.maybe_get(args[4]); + if (var && storage_class_is_interface(var->storage)) + variables.insert(args[4]); + break; + } + + default: + break; + } + } + break; + } + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + case OpLoad: + case OpCopyObject: + case OpImageTexelPointer: + case OpAtomicLoad: + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + // Invalid SPIR-V. + if (length < 3) + return false; + variable = args[2]; + break; + } + + if (variable) + { + auto *var = compiler.maybe_get(variable); + if (var && storage_class_is_interface(var->storage)) + variables.insert(variable); + } + return true; +} + +unordered_set Compiler::get_active_interface_variables() const +{ + // Traverse the call graph and find all interface variables which are in use. + unordered_set variables; + InterfaceVariableAccessHandler handler(*this, variables); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + + // If we needed to create one, we'll need it. 
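+ // ("One" refers to the dummy sampler the compiler may have created to combine
+ // separate images and samplers; if it exists, it must stay in the active set.)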
+ if (dummy_sampler_id) + variables.insert(dummy_sampler_id); + + return variables; +} + +void Compiler::set_enabled_interface_variables(std::unordered_set active_variables) +{ + active_interface_variables = move(active_variables); + check_active_interface_variables = true; +} + +ShaderResources Compiler::get_shader_resources(const unordered_set *active_variables) const +{ + ShaderResources res; + + for (auto &id : ir.ids) + { + if (id.get_type() != TypeVariable) + continue; + + auto &var = id.get(); + auto &type = get(var.basetype); + + // It is possible for uniform storage classes to be passed as function parameters, so detect + // that. To detect function parameters, check of StorageClass of variable is function scope. + if (var.storage == StorageClassFunction || !type.pointer || is_builtin_variable(var)) + continue; + + if (active_variables && active_variables->find(var.self) == end(*active_variables)) + continue; + + // Input + if (var.storage == StorageClassInput && interface_variable_exists_in_entry_point(var.self)) + { + if (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock)) + res.stage_inputs.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) }); + else + res.stage_inputs.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Subpass inputs + else if (var.storage == StorageClassUniformConstant && type.image.dim == DimSubpassData) + { + res.subpass_inputs.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Outputs + else if (var.storage == StorageClassOutput && interface_variable_exists_in_entry_point(var.self)) + { + if (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock)) + res.stage_outputs.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) }); + else + res.stage_outputs.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // UBOs + else if (type.storage == StorageClassUniform && + (ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock))) + { + res.uniform_buffers.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) }); + } + // Old way to declare SSBOs. + else if (type.storage == StorageClassUniform && + (ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + res.storage_buffers.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) }); + } + // Modern way to declare SSBOs. + else if (type.storage == StorageClassStorageBuffer) + { + res.storage_buffers.push_back( + { var.self, var.basetype, type.self, get_remapped_declared_block_name(var.self) }); + } + // Push constant blocks + else if (type.storage == StorageClassPushConstant) + { + // There can only be one push constant block, but keep the vector in case this restriction is lifted + // in the future. 
+ res.push_constant_buffers.push_back( + { var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Images + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image && + type.image.sampled == 2) + { + res.storage_images.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Separate images + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Image && + type.image.sampled == 1) + { + res.separate_images.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Separate samplers + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::Sampler) + { + res.separate_samplers.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Textures + else if (type.storage == StorageClassUniformConstant && type.basetype == SPIRType::SampledImage) + { + res.sampled_images.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + // Atomic counters + else if (type.storage == StorageClassAtomicCounter) + { + res.atomic_counters.push_back({ var.self, var.basetype, type.self, ir.meta[var.self].decoration.alias }); + } + } + + return res; +} + +bool Compiler::type_is_block_like(const SPIRType &type) const +{ + if (type.basetype != SPIRType::Struct) + return false; + + if (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock)) + { + return true; + } + + // Block-like types may have Offset decorations. + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + if (has_member_decoration(type.self, i, DecorationOffset)) + return true; + + return false; +} + +void Compiler::fixup_type_alias() +{ + // Due to how some backends work, the "master" type of type_alias must be a block-like type if it exists. + // FIXME: Multiple alias types which are both block-like will be awkward, for now, it's best to just drop the type + // alias if the slave type is a block type. + for (auto &id : ir.ids) + { + if (id.get_type() != TypeType) + continue; + + auto &type = id.get(); + + if (type.type_alias && type_is_block_like(type)) + { + // Become the master. + for (auto &other_id : ir.ids) + { + if (other_id.get_type() != TypeType) + continue; + if (other_id.get_id() == type.self) + continue; + + auto &other_type = other_id.get(); + if (other_type.type_alias == type.type_alias) + other_type.type_alias = type.self; + } + + get(type.type_alias).type_alias = id.get_id(); + type.type_alias = 0; + } + } + + for (auto &id : ir.ids) + { + if (id.get_type() != TypeType) + continue; + + auto &type = id.get(); + if (type.type_alias && type_is_block_like(type)) + { + // This is not allowed, drop the type_alias. + type.type_alias = 0; + } + } +} + +void Compiler::parse_fixup() +{ + // Figure out specialization constants for work group sizes. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + if (ir.meta[c.self].decoration.builtin && ir.meta[c.self].decoration.builtin_type == BuiltInWorkgroupSize) + { + // In current SPIR-V, there can be just one constant like this. + // All entry points will receive the constant value. 
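+ // (The WorkgroupSize constant is a 3-component vector; its x/y/z scalars and the
+ // constant's own ID are copied into every entry point's workgroup_size below.)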
+ for (auto &entry : ir.entry_points) + { + entry.second.workgroup_size.constant = c.self; + entry.second.workgroup_size.x = c.scalar(0, 0); + entry.second.workgroup_size.y = c.scalar(0, 1); + entry.second.workgroup_size.z = c.scalar(0, 2); + } + } + } + else if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + if (var.storage == StorageClassPrivate || var.storage == StorageClassWorkgroup || + var.storage == StorageClassOutput) + global_variables.push_back(var.self); + if (variable_storage_is_aliased(var)) + aliased_variables.push_back(var.self); + } + } + + fixup_type_alias(); +} + +void Compiler::flatten_interface_block(uint32_t id) +{ + auto &var = get(id); + auto &type = get(var.basetype); + auto &flags = ir.meta.at(type.self).decoration.decoration_flags; + + if (!type.array.empty()) + SPIRV_CROSS_THROW("Type is array of UBOs."); + if (type.basetype != SPIRType::Struct) + SPIRV_CROSS_THROW("Type is not a struct."); + if (!flags.get(DecorationBlock)) + SPIRV_CROSS_THROW("Type is not a block."); + if (type.member_types.empty()) + SPIRV_CROSS_THROW("Member list of struct is empty."); + + uint32_t t = type.member_types[0]; + for (auto &m : type.member_types) + if (t != m) + SPIRV_CROSS_THROW("Types in block differ."); + + auto &mtype = get(t); + if (!mtype.array.empty()) + SPIRV_CROSS_THROW("Member type cannot be arrays."); + if (mtype.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Member type cannot be struct."); + + // Inherit variable name from interface block name. + ir.meta.at(var.self).decoration.alias = ir.meta.at(type.self).decoration.alias; + + auto storage = var.storage; + if (storage == StorageClassUniform) + storage = StorageClassUniformConstant; + + // Change type definition in-place into an array instead. + // Access chains will still work as-is. + uint32_t array_size = uint32_t(type.member_types.size()); + type = mtype; + type.array.push_back(array_size); + type.pointer = true; + type.storage = storage; + type.parent_type = t; + var.storage = storage; +} + +void Compiler::update_name_cache(unordered_set &cache_primary, const unordered_set &cache_secondary, + string &name) +{ + if (name.empty()) + return; + + const auto find_name = [&](const string &n) -> bool { + if (cache_primary.find(n) != end(cache_primary)) + return true; + + if (&cache_primary != &cache_secondary) + if (cache_secondary.find(n) != end(cache_secondary)) + return true; + + return false; + }; + + const auto insert_name = [&](const string &n) { cache_primary.insert(n); }; + + if (!find_name(name)) + { + insert_name(name); + return; + } + + uint32_t counter = 0; + auto tmpname = name; + + bool use_linked_underscore = true; + + if (tmpname == "_") + { + // We cannot just append numbers, as we will end up creating internally reserved names. + // Make it like _0_ instead. + tmpname += "0"; + } + else if (tmpname.back() == '_') + { + // The last_character is an underscore, so we don't need to link in underscore. + // This would violate double underscore rules. + use_linked_underscore = false; + } + + // If there is a collision (very rare), + // keep tacking on extra identifier until it's unique. + do + { + counter++; + name = tmpname + (use_linked_underscore ? 
"_" : "") + convert_to_string(counter); + } while (find_name(name)); + insert_name(name); +} + +void Compiler::update_name_cache(unordered_set &cache, string &name) +{ + update_name_cache(cache, cache, name); +} + +void Compiler::set_name(uint32_t id, const std::string &name) +{ + ir.set_name(id, name); +} + +const SPIRType &Compiler::get_type(uint32_t id) const +{ + return get(id); +} + +const SPIRType &Compiler::get_type_from_variable(uint32_t id) const +{ + return get(get(id).basetype); +} + +uint32_t Compiler::get_pointee_type_id(uint32_t type_id) const +{ + auto *p_type = &get(type_id); + if (p_type->pointer) + { + assert(p_type->parent_type); + type_id = p_type->parent_type; + } + return type_id; +} + +const SPIRType &Compiler::get_pointee_type(const SPIRType &type) const +{ + auto *p_type = &type; + if (p_type->pointer) + { + assert(p_type->parent_type); + p_type = &get(p_type->parent_type); + } + return *p_type; +} + +const SPIRType &Compiler::get_pointee_type(uint32_t type_id) const +{ + return get_pointee_type(get(type_id)); +} + +uint32_t Compiler::get_variable_data_type_id(const SPIRVariable &var) const +{ + if (var.phi_variable) + return var.basetype; + return get_pointee_type_id(var.basetype); +} + +SPIRType &Compiler::get_variable_data_type(const SPIRVariable &var) +{ + return get(get_variable_data_type_id(var)); +} + +const SPIRType &Compiler::get_variable_data_type(const SPIRVariable &var) const +{ + return get(get_variable_data_type_id(var)); +} + +bool Compiler::is_sampled_image_type(const SPIRType &type) +{ + return (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage) && type.image.sampled == 1 && + type.image.dim != DimBuffer; +} + +void Compiler::set_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration, + const std::string &argument) +{ + ir.set_member_decoration_string(id, index, decoration, argument); +} + +void Compiler::set_member_decoration(uint32_t id, uint32_t index, Decoration decoration, uint32_t argument) +{ + ir.set_member_decoration(id, index, decoration, argument); +} + +void Compiler::set_member_name(uint32_t id, uint32_t index, const std::string &name) +{ + ir.set_member_name(id, index, name); +} + +const std::string &Compiler::get_member_name(uint32_t id, uint32_t index) const +{ + return ir.get_member_name(id, index); +} + +void Compiler::set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name) +{ + ir.meta.at(type_id).members.resize(max(ir.meta[type_id].members.size(), size_t(index) + 1)); + ir.meta.at(type_id).members[index].qualified_alias = name; +} + +const std::string &Compiler::get_member_qualified_name(uint32_t type_id, uint32_t index) const +{ + const static string empty; + + auto &m = ir.meta.at(type_id); + if (index < m.members.size()) + return m.members[index].qualified_alias; + else + return empty; +} + +uint32_t Compiler::get_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const +{ + return ir.get_member_decoration(id, index, decoration); +} + +uint64_t Compiler::get_member_decoration_mask(uint32_t id, uint32_t index) const +{ + return get_member_decoration_bitset(id, index).get_lower(); +} + +const Bitset &Compiler::get_member_decoration_bitset(uint32_t id, uint32_t index) const +{ + return ir.get_member_decoration_bitset(id, index); +} + +bool Compiler::has_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const +{ + return ir.has_member_decoration(id, index, decoration); +} + +void 
Compiler::unset_member_decoration(uint32_t id, uint32_t index, Decoration decoration) +{ + ir.unset_member_decoration(id, index, decoration); +} + +void Compiler::set_decoration_string(uint32_t id, spv::Decoration decoration, const std::string &argument) +{ + ir.set_decoration_string(id, decoration, argument); +} + +void Compiler::set_decoration(uint32_t id, Decoration decoration, uint32_t argument) +{ + ir.set_decoration(id, decoration, argument); +} + +StorageClass Compiler::get_storage_class(uint32_t id) const +{ + return get(id).storage; +} + +const std::string &Compiler::get_name(uint32_t id) const +{ + return ir.meta.at(id).decoration.alias; +} + +const std::string Compiler::get_fallback_name(uint32_t id) const +{ + return join("_", id); +} + +const std::string Compiler::get_block_fallback_name(uint32_t id) const +{ + auto &var = get(id); + if (get_name(id).empty()) + return join("_", get(var.basetype).self, "_", id); + else + return get_name(id); +} + +uint64_t Compiler::get_decoration_mask(uint32_t id) const +{ + return get_decoration_bitset(id).get_lower(); +} + +const Bitset &Compiler::get_decoration_bitset(uint32_t id) const +{ + return ir.get_decoration_bitset(id); +} + +bool Compiler::has_decoration(uint32_t id, Decoration decoration) const +{ + return ir.has_decoration(id, decoration); +} + +const string &Compiler::get_decoration_string(uint32_t id, Decoration decoration) const +{ + return ir.get_decoration_string(id, decoration); +} + +const string &Compiler::get_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration) const +{ + return ir.get_member_decoration_string(id, index, decoration); +} + +uint32_t Compiler::get_decoration(uint32_t id, Decoration decoration) const +{ + return ir.get_decoration(id, decoration); +} + +void Compiler::unset_decoration(uint32_t id, Decoration decoration) +{ + ir.unset_decoration(id, decoration); +} + +bool Compiler::get_binary_offset_for_decoration(uint32_t id, spv::Decoration decoration, uint32_t &word_offset) const +{ + auto &word_offsets = ir.meta.at(id).decoration_word_offset; + auto itr = word_offsets.find(decoration); + if (itr == end(word_offsets)) + return false; + + word_offset = itr->second; + return true; +} + +bool Compiler::block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const +{ + // Tried and failed. + if (block.disable_block_optimization || block.complex_continue) + return false; + + if (method == SPIRBlock::MergeToSelectForLoop || method == SPIRBlock::MergeToSelectContinueForLoop) + { + // Try to detect common for loop pattern + // which the code backend can use to create cleaner code. + // for(;;) { if (cond) { some_body; } else { break; } } + // is the pattern we're looking for. + bool ret = block.terminator == SPIRBlock::Select && block.merge == SPIRBlock::MergeLoop && + block.true_block != block.merge_block && block.true_block != block.self && + block.false_block == block.merge_block; + + if (ret && method == SPIRBlock::MergeToSelectContinueForLoop) + ret = block.true_block == block.continue_block; + + // If we have OpPhi which depends on branches which came from our own block, + // we need to flush phi variables in else block instead of a trivial break, + // so we cannot assume this is a for loop candidate. 
+		if (ret)
+		{
+			for (auto &phi : block.phi_variables)
+				if (phi.parent == block.self)
+					return false;
+
+			auto *merge = maybe_get<SPIRBlock>(block.merge_block);
+			if (merge)
+				for (auto &phi : merge->phi_variables)
+					if (phi.parent == block.self)
+						return false;
+		}
+		return ret;
+	}
+	else if (method == SPIRBlock::MergeToDirectForLoop)
+	{
+		// Empty loop header that just sets up merge target
+		// and branches to loop body.
+		bool ret = block.terminator == SPIRBlock::Direct && block.merge == SPIRBlock::MergeLoop && block.ops.empty();
+
+		if (!ret)
+			return false;
+
+		auto &child = get<SPIRBlock>(block.next_block);
+		ret = child.terminator == SPIRBlock::Select && child.merge == SPIRBlock::MergeNone &&
+		      child.false_block == block.merge_block && child.true_block != block.merge_block &&
+		      child.true_block != block.self;
+
+		// If we have OpPhi which depends on branches which came from our own block,
+		// we need to flush phi variables in else block instead of a trivial break,
+		// so we cannot assume this is a for loop candidate.
+		if (ret)
+		{
+			for (auto &phi : block.phi_variables)
+				if (phi.parent == block.self || phi.parent == child.self)
+					return false;
+
+			for (auto &phi : child.phi_variables)
+				if (phi.parent == block.self)
+					return false;
+
+			auto *merge = maybe_get<SPIRBlock>(block.merge_block);
+			if (merge)
+				for (auto &phi : merge->phi_variables)
+					if (phi.parent == block.self || phi.parent == child.false_block)
+						return false;
+		}
+
+		return ret;
+	}
+	else
+		return false;
+}
+
+bool Compiler::block_is_outside_flow_control_from_block(const SPIRBlock &from, const SPIRBlock &to)
+{
+	auto *start = &from;
+
+	if (start->self == to.self)
+		return true;
+
+	// Break cycles.
+	if (is_continue(start->self))
+		return false;
+
+	// If our select block doesn't merge, we must break or continue in these blocks,
+	// so if continues occur branchless within these blocks, consider them branchless as well.
+	// This is typically used for loop control.
+	if (start->terminator == SPIRBlock::Select && start->merge == SPIRBlock::MergeNone &&
+	    (block_is_outside_flow_control_from_block(get<SPIRBlock>(start->true_block), to) ||
+	     block_is_outside_flow_control_from_block(get<SPIRBlock>(start->false_block), to)))
+	{
+		return true;
+	}
+	else if (start->merge_block && block_is_outside_flow_control_from_block(get<SPIRBlock>(start->merge_block), to))
+	{
+		return true;
+	}
+	else if (start->next_block && block_is_outside_flow_control_from_block(get<SPIRBlock>(start->next_block), to))
+	{
+		return true;
+	}
+	else
+		return false;
+}
+
+bool Compiler::execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) const
+{
+	if (!execution_is_branchless(from, to))
+		return false;
+
+	auto *start = &from;
+	for (;;)
+	{
+		if (start->self == to.self)
+			return true;
+
+		if (!start->ops.empty())
+			return false;
+
+		auto &next = get<SPIRBlock>(start->next_block);
+		// Flushing phi variables does not count as noop.
+		for (auto &phi : next.phi_variables)
+			if (phi.parent == start->self)
+				return false;
+
+		start = &next;
+	}
+}
+
+bool Compiler::execution_is_branchless(const SPIRBlock &from, const SPIRBlock &to) const
+{
+	auto *start = &from;
+	for (;;)
+	{
+		if (start->self == to.self)
+			return true;
+
+		if (start->terminator == SPIRBlock::Direct && start->merge == SPIRBlock::MergeNone)
+			start = &get<SPIRBlock>(start->next_block);
+		else
+			return false;
+	}
+}
+
+SPIRBlock::ContinueBlockType Compiler::continue_block_type(const SPIRBlock &block) const
+{
+	// The block was deemed too complex during code emit, pick conservative fallback paths.
+	if (block.complex_continue)
+		return SPIRBlock::ComplexLoop;
+
+	// In older glslang output continue block can be equal to the loop header.
+	// In this case, execution is clearly branchless, so just assume a while loop header here.
+	if (block.merge == SPIRBlock::MergeLoop)
+		return SPIRBlock::WhileLoop;
+
+	auto &dominator = get<SPIRBlock>(block.loop_dominator);
+
+	if (execution_is_noop(block, dominator))
+		return SPIRBlock::WhileLoop;
+	else if (execution_is_branchless(block, dominator))
+		return SPIRBlock::ForLoop;
+	else
+	{
+		if (block.merge == SPIRBlock::MergeNone && block.terminator == SPIRBlock::Select &&
+		    block.true_block == dominator.self && block.false_block == dominator.merge_block)
+		{
+			return SPIRBlock::DoWhileLoop;
+		}
+		else
+			return SPIRBlock::ComplexLoop;
+	}
+}
+
+bool Compiler::traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const
+{
+	handler.set_current_block(block);
+
+	// Ideally, perhaps traverse the CFG instead of all blocks in order to eliminate dead blocks,
+	// but this shouldn't be a problem in practice unless the SPIR-V is doing insane things like recursing
+	// inside dead blocks ...
+	for (auto &i : block.ops)
+	{
+		auto ops = stream(i);
+		auto op = static_cast<Op>(i.op);
+
+		if (!handler.handle(op, ops, i.length))
+			return false;
+
+		if (op == OpFunctionCall)
+		{
+			auto &func = get<SPIRFunction>(ops[2]);
+			if (handler.follow_function_call(func))
+			{
+				if (!handler.begin_function_scope(ops, i.length))
+					return false;
+				if (!traverse_all_reachable_opcodes(get<SPIRFunction>(ops[2]), handler))
+					return false;
+				if (!handler.end_function_scope(ops, i.length))
+					return false;
+			}
+		}
+	}
+
+	return true;
+}
+
+bool Compiler::traverse_all_reachable_opcodes(const SPIRFunction &func, OpcodeHandler &handler) const
+{
+	for (auto block : func.blocks)
+		if (!traverse_all_reachable_opcodes(get<SPIRBlock>(block), handler))
+			return false;
+
+	return true;
+}
+
+uint32_t Compiler::type_struct_member_offset(const SPIRType &type, uint32_t index) const
+{
+	// Decoration must be set in valid SPIR-V, otherwise throw.
+	auto &dec = ir.meta[type.self].members.at(index);
+	if (dec.decoration_flags.get(DecorationOffset))
+		return dec.offset;
+	else
+		SPIRV_CROSS_THROW("Struct member does not have Offset set.");
+}
+
+uint32_t Compiler::type_struct_member_array_stride(const SPIRType &type, uint32_t index) const
+{
+	// Decoration must be set in valid SPIR-V, otherwise throw.
+	// ArrayStride is part of the array type not OpMemberDecorate.
+	auto &dec = ir.meta[type.member_types[index]].decoration;
+	if (dec.decoration_flags.get(DecorationArrayStride))
+		return dec.array_stride;
+	else
+		SPIRV_CROSS_THROW("Struct member does not have ArrayStride set.");
+}
+
+uint32_t Compiler::type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const
+{
+	// Decoration must be set in valid SPIR-V, otherwise throw.
+	// MatrixStride is part of OpMemberDecorate.
+ auto &dec = ir.meta[type.self].members[index]; + if (dec.decoration_flags.get(DecorationMatrixStride)) + return dec.matrix_stride; + else + SPIRV_CROSS_THROW("Struct member does not have MatrixStride set."); +} + +size_t Compiler::get_declared_struct_size(const SPIRType &type) const +{ + if (type.member_types.empty()) + SPIRV_CROSS_THROW("Declared struct in block cannot be empty."); + + uint32_t last = uint32_t(type.member_types.size() - 1); + size_t offset = type_struct_member_offset(type, last); + size_t size = get_declared_struct_member_size(type, last); + return offset + size; +} + +size_t Compiler::get_declared_struct_size_runtime_array(const SPIRType &type, size_t array_size) const +{ + if (type.member_types.empty()) + SPIRV_CROSS_THROW("Declared struct in block cannot be empty."); + + size_t size = get_declared_struct_size(type); + auto &last_type = get(type.member_types.back()); + if (!last_type.array.empty() && last_type.array_size_literal[0] && last_type.array[0] == 0) // Runtime array + size += array_size * type_struct_member_array_stride(type, uint32_t(type.member_types.size() - 1)); + + return size; +} + +size_t Compiler::get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const +{ + if (struct_type.member_types.empty()) + SPIRV_CROSS_THROW("Declared struct in block cannot be empty."); + + auto &flags = get_member_decoration_bitset(struct_type.self, index); + auto &type = get(struct_type.member_types[index]); + + switch (type.basetype) + { + case SPIRType::Unknown: + case SPIRType::Void: + case SPIRType::Boolean: // Bools are purely logical, and cannot be used for externally visible types. + case SPIRType::AtomicCounter: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + SPIRV_CROSS_THROW("Querying size for object with opaque size."); + + default: + break; + } + + if (!type.array.empty()) + { + // For arrays, we can use ArrayStride to get an easy check. + bool array_size_literal = type.array_size_literal.back(); + uint32_t array_size = array_size_literal ? type.array.back() : get(type.array.back()).scalar(); + return type_struct_member_array_stride(struct_type, index) * array_size; + } + else if (type.basetype == SPIRType::Struct) + { + return get_declared_struct_size(type); + } + else + { + unsigned vecsize = type.vecsize; + unsigned columns = type.columns; + + // Vectors. + if (columns == 1) + { + size_t component_size = type.width / 8; + return vecsize * component_size; + } + else + { + uint32_t matrix_stride = type_struct_member_matrix_stride(struct_type, index); + + // Per SPIR-V spec, matrices must be tightly packed and aligned up for vec3 accesses. + if (flags.get(DecorationRowMajor)) + return matrix_stride * vecsize; + else if (flags.get(DecorationColMajor)) + return matrix_stride * columns; + else + SPIRV_CROSS_THROW("Either row-major or column-major must be declared for matrices."); + } + } +} + +bool Compiler::BufferAccessHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + if (opcode != OpAccessChain && opcode != OpInBoundsAccessChain && opcode != OpPtrAccessChain) + return true; + + bool ptr_chain = (opcode == OpPtrAccessChain); + + // Invalid SPIR-V. + if (length < (ptr_chain ? 5 : 4)) + return false; + + if (args[2] != id) + return true; + + // Don't bother traversing the entire access chain tree yet. + // If we access a struct member, assume we access the entire member. + uint32_t index = compiler.get(args[ptr_chain ? 4 : 3]).scalar(); + + // Seen this index already. 
+ if (seen.find(index) != end(seen)) + return true; + seen.insert(index); + + auto &type = compiler.expression_type(id); + uint32_t offset = compiler.type_struct_member_offset(type, index); + + size_t range; + // If we have another member in the struct, deduce the range by looking at the next member. + // This is okay since structs in SPIR-V can have padding, but Offset decoration must be + // monotonically increasing. + // Of course, this doesn't take into account if the SPIR-V for some reason decided to add + // very large amounts of padding, but that's not really a big deal. + if (index + 1 < type.member_types.size()) + { + range = compiler.type_struct_member_offset(type, index + 1) - offset; + } + else + { + // No padding, so just deduce it from the size of the member directly. + range = compiler.get_declared_struct_member_size(type, index); + } + + ranges.push_back({ index, offset, range }); + return true; +} + +std::vector Compiler::get_active_buffer_ranges(uint32_t id) const +{ + std::vector ranges; + BufferAccessHandler handler(*this, ranges, id); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + return ranges; +} + +bool Compiler::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const +{ + if (a.basetype != b.basetype) + return false; + if (a.width != b.width) + return false; + if (a.vecsize != b.vecsize) + return false; + if (a.columns != b.columns) + return false; + if (a.array.size() != b.array.size()) + return false; + + size_t array_count = a.array.size(); + if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0) + return false; + + if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage) + { + if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0) + return false; + } + + if (a.member_types.size() != b.member_types.size()) + return false; + + size_t member_types = a.member_types.size(); + for (size_t i = 0; i < member_types; i++) + { + if (!types_are_logically_equivalent(get(a.member_types[i]), get(b.member_types[i]))) + return false; + } + + return true; +} + +uint64_t Compiler::get_execution_mode_mask() const +{ + return get_entry_point().flags.get_lower(); +} + +const Bitset &Compiler::get_execution_mode_bitset() const +{ + return get_entry_point().flags; +} + +void Compiler::set_execution_mode(ExecutionMode mode, uint32_t arg0, uint32_t arg1, uint32_t arg2) +{ + auto &execution = get_entry_point(); + + execution.flags.set(mode); + switch (mode) + { + case ExecutionModeLocalSize: + execution.workgroup_size.x = arg0; + execution.workgroup_size.y = arg1; + execution.workgroup_size.z = arg2; + break; + + case ExecutionModeInvocations: + execution.invocations = arg0; + break; + + case ExecutionModeOutputVertices: + execution.output_vertices = arg0; + break; + + default: + break; + } +} + +void Compiler::unset_execution_mode(ExecutionMode mode) +{ + auto &execution = get_entry_point(); + execution.flags.clear(mode); +} + +uint32_t Compiler::get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y, + SpecializationConstant &z) const +{ + auto &execution = get_entry_point(); + x = { 0, 0 }; + y = { 0, 0 }; + z = { 0, 0 }; + + if (execution.workgroup_size.constant != 0) + { + auto &c = get(execution.workgroup_size.constant); + + if (c.m.c[0].id[0] != 0) + { + x.id = c.m.c[0].id[0]; + x.constant_id = get_decoration(c.m.c[0].id[0], DecorationSpecId); + } + + if (c.m.c[0].id[1] != 0) + { + y.id = c.m.c[0].id[1]; + y.constant_id = 
get_decoration(c.m.c[0].id[1], DecorationSpecId); + } + + if (c.m.c[0].id[2] != 0) + { + z.id = c.m.c[0].id[2]; + z.constant_id = get_decoration(c.m.c[0].id[2], DecorationSpecId); + } + } + + return execution.workgroup_size.constant; +} + +uint32_t Compiler::get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index) const +{ + auto &execution = get_entry_point(); + switch (mode) + { + case ExecutionModeLocalSize: + switch (index) + { + case 0: + return execution.workgroup_size.x; + case 1: + return execution.workgroup_size.y; + case 2: + return execution.workgroup_size.z; + default: + return 0; + } + + case ExecutionModeInvocations: + return execution.invocations; + + case ExecutionModeOutputVertices: + return execution.output_vertices; + + default: + return 0; + } +} + +ExecutionModel Compiler::get_execution_model() const +{ + auto &execution = get_entry_point(); + return execution.model; +} + +void Compiler::set_remapped_variable_state(uint32_t id, bool remap_enable) +{ + get(id).remapped_variable = remap_enable; +} + +bool Compiler::get_remapped_variable_state(uint32_t id) const +{ + return get(id).remapped_variable; +} + +void Compiler::set_subpass_input_remapped_components(uint32_t id, uint32_t components) +{ + get(id).remapped_components = components; +} + +uint32_t Compiler::get_subpass_input_remapped_components(uint32_t id) const +{ + return get(id).remapped_components; +} + +void Compiler::add_implied_read_expression(SPIRExpression &e, uint32_t source) +{ + auto itr = find(begin(e.implied_read_expressions), end(e.implied_read_expressions), source); + if (itr == end(e.implied_read_expressions)) + e.implied_read_expressions.push_back(source); +} + +void Compiler::add_implied_read_expression(SPIRAccessChain &e, uint32_t source) +{ + auto itr = find(begin(e.implied_read_expressions), end(e.implied_read_expressions), source); + if (itr == end(e.implied_read_expressions)) + e.implied_read_expressions.push_back(source); +} + +void Compiler::inherit_expression_dependencies(uint32_t dst, uint32_t source_expression) +{ + // Don't inherit any expression dependencies if the expression in dst + // is not a forwarded temporary. + if (forwarded_temporaries.find(dst) == end(forwarded_temporaries) || + forced_temporaries.find(dst) != end(forced_temporaries)) + { + return; + } + + auto &e = get(dst); + auto *phi = maybe_get(source_expression); + if (phi && phi->phi_variable) + { + // We have used a phi variable, which can change at the end of the block, + // so make sure we take a dependency on this phi variable. + phi->dependees.push_back(dst); + } + + auto *s = maybe_get(source_expression); + if (!s) + return; + + auto &e_deps = e.expression_dependencies; + auto &s_deps = s->expression_dependencies; + + // If we depend on a expression, we also depend on all sub-dependencies from source. + e_deps.push_back(source_expression); + e_deps.insert(end(e_deps), begin(s_deps), end(s_deps)); + + // Eliminate duplicated dependencies. 
+ sort(begin(e_deps), end(e_deps)); + e_deps.erase(unique(begin(e_deps), end(e_deps)), end(e_deps)); +} + +vector Compiler::get_entry_points() const +{ + vector entries; + for (auto &entry : ir.entry_points) + entries.push_back(entry.second.orig_name); + return entries; +} + +vector Compiler::get_entry_points_and_stages() const +{ + vector entries; + for (auto &entry : ir.entry_points) + entries.push_back({ entry.second.orig_name, entry.second.model }); + return entries; +} + +void Compiler::rename_entry_point(const std::string &old_name, const std::string &new_name) +{ + auto &entry = get_first_entry_point(old_name); + entry.orig_name = new_name; + entry.name = new_name; +} + +void Compiler::rename_entry_point(const std::string &old_name, const std::string &new_name, spv::ExecutionModel model) +{ + auto &entry = get_entry_point(old_name, model); + entry.orig_name = new_name; + entry.name = new_name; +} + +void Compiler::set_entry_point(const std::string &name) +{ + auto &entry = get_first_entry_point(name); + ir.default_entry_point = entry.self; +} + +void Compiler::set_entry_point(const std::string &name, spv::ExecutionModel model) +{ + auto &entry = get_entry_point(name, model); + ir.default_entry_point = entry.self; +} + +SPIREntryPoint &Compiler::get_entry_point(const std::string &name) +{ + return get_first_entry_point(name); +} + +const SPIREntryPoint &Compiler::get_entry_point(const std::string &name) const +{ + return get_first_entry_point(name); +} + +SPIREntryPoint &Compiler::get_first_entry_point(const std::string &name) +{ + auto itr = find_if( + begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { return entry.second.orig_name == name; }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +const SPIREntryPoint &Compiler::get_first_entry_point(const std::string &name) const +{ + auto itr = find_if( + begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { return entry.second.orig_name == name; }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +SPIREntryPoint &Compiler::get_entry_point(const std::string &name, ExecutionModel model) +{ + auto itr = find_if(begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { + return entry.second.orig_name == name && entry.second.model == model; + }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +const SPIREntryPoint &Compiler::get_entry_point(const std::string &name, ExecutionModel model) const +{ + auto itr = find_if(begin(ir.entry_points), end(ir.entry_points), + [&](const std::pair &entry) -> bool { + return entry.second.orig_name == name && entry.second.model == model; + }); + + if (itr == end(ir.entry_points)) + SPIRV_CROSS_THROW("Entry point does not exist."); + + return itr->second; +} + +const string &Compiler::get_cleansed_entry_point_name(const std::string &name) const +{ + return get_first_entry_point(name).name; +} + +const string &Compiler::get_cleansed_entry_point_name(const std::string &name, ExecutionModel model) const +{ + return get_entry_point(name, model).name; +} + +const SPIREntryPoint &Compiler::get_entry_point() const +{ + return ir.entry_points.find(ir.default_entry_point)->second; +} + +SPIREntryPoint &Compiler::get_entry_point() +{ + return ir.entry_points.find(ir.default_entry_point)->second; +} + +bool 
Compiler::interface_variable_exists_in_entry_point(uint32_t id) const +{ + auto &var = get(id); + if (var.storage != StorageClassInput && var.storage != StorageClassOutput && + var.storage != StorageClassUniformConstant) + SPIRV_CROSS_THROW("Only Input, Output variables and Uniform constants are part of a shader linking interface."); + + // This is to avoid potential problems with very old glslang versions which did + // not emit input/output interfaces properly. + // We can assume they only had a single entry point, and single entry point + // shaders could easily be assumed to use every interface variable anyways. + if (ir.entry_points.size() <= 1) + return true; + + auto &execution = get_entry_point(); + return find(begin(execution.interface_variables), end(execution.interface_variables), id) != + end(execution.interface_variables); +} + +void Compiler::CombinedImageSamplerHandler::push_remap_parameters(const SPIRFunction &func, const uint32_t *args, + uint32_t length) +{ + // If possible, pipe through a remapping table so that parameters know + // which variables they actually bind to in this scope. + unordered_map remapping; + for (uint32_t i = 0; i < length; i++) + remapping[func.arguments[i].id] = remap_parameter(args[i]); + parameter_remapping.push(move(remapping)); +} + +void Compiler::CombinedImageSamplerHandler::pop_remap_parameters() +{ + parameter_remapping.pop(); +} + +uint32_t Compiler::CombinedImageSamplerHandler::remap_parameter(uint32_t id) +{ + auto *var = compiler.maybe_get_backing_variable(id); + if (var) + id = var->self; + + if (parameter_remapping.empty()) + return id; + + auto &remapping = parameter_remapping.top(); + auto itr = remapping.find(id); + if (itr != end(remapping)) + return itr->second; + else + return id; +} + +bool Compiler::CombinedImageSamplerHandler::begin_function_scope(const uint32_t *args, uint32_t length) +{ + if (length < 3) + return false; + + auto &callee = compiler.get(args[2]); + args += 3; + length -= 3; + push_remap_parameters(callee, args, length); + functions.push(&callee); + return true; +} + +bool Compiler::CombinedImageSamplerHandler::end_function_scope(const uint32_t *args, uint32_t length) +{ + if (length < 3) + return false; + + auto &callee = compiler.get(args[2]); + args += 3; + + // There are two types of cases we have to handle, + // a callee might call sampler2D(texture2D, sampler) directly where + // one or more parameters originate from parameters. + // Alternatively, we need to provide combined image samplers to our callees, + // and in this case we need to add those as well. + + pop_remap_parameters(); + + // Our callee has now been processed at least once. + // No point in doing it again. + callee.do_combined_parameters = false; + + auto ¶ms = functions.top()->combined_parameters; + functions.pop(); + if (functions.empty()) + return true; + + auto &caller = *functions.top(); + if (caller.do_combined_parameters) + { + for (auto ¶m : params) + { + uint32_t image_id = param.global_image ? param.image_id : args[param.image_id]; + uint32_t sampler_id = param.global_sampler ? 
param.sampler_id : args[param.sampler_id]; + + auto *i = compiler.maybe_get_backing_variable(image_id); + auto *s = compiler.maybe_get_backing_variable(sampler_id); + if (i) + image_id = i->self; + if (s) + sampler_id = s->self; + + register_combined_image_sampler(caller, image_id, sampler_id, param.depth); + } + } + + return true; +} + +void Compiler::CombinedImageSamplerHandler::register_combined_image_sampler(SPIRFunction &caller, uint32_t image_id, + uint32_t sampler_id, bool depth) +{ + // We now have a texture ID and a sampler ID which will either be found as a global + // or a parameter in our own function. If both are global, they will not need a parameter, + // otherwise, add it to our list. + SPIRFunction::CombinedImageSamplerParameter param = { + 0u, image_id, sampler_id, true, true, depth, + }; + + auto texture_itr = find_if(begin(caller.arguments), end(caller.arguments), + [image_id](const SPIRFunction::Parameter &p) { return p.id == image_id; }); + auto sampler_itr = find_if(begin(caller.arguments), end(caller.arguments), + [sampler_id](const SPIRFunction::Parameter &p) { return p.id == sampler_id; }); + + if (texture_itr != end(caller.arguments)) + { + param.global_image = false; + param.image_id = uint32_t(texture_itr - begin(caller.arguments)); + } + + if (sampler_itr != end(caller.arguments)) + { + param.global_sampler = false; + param.sampler_id = uint32_t(sampler_itr - begin(caller.arguments)); + } + + if (param.global_image && param.global_sampler) + return; + + auto itr = find_if(begin(caller.combined_parameters), end(caller.combined_parameters), + [¶m](const SPIRFunction::CombinedImageSamplerParameter &p) { + return param.image_id == p.image_id && param.sampler_id == p.sampler_id && + param.global_image == p.global_image && param.global_sampler == p.global_sampler; + }); + + if (itr == end(caller.combined_parameters)) + { + uint32_t id = compiler.ir.increase_bound_by(3); + auto type_id = id + 0; + auto ptr_type_id = id + 1; + auto combined_id = id + 2; + auto &base = compiler.expression_type(image_id); + auto &type = compiler.set(type_id); + auto &ptr_type = compiler.set(ptr_type_id); + + type = base; + type.self = type_id; + type.basetype = SPIRType::SampledImage; + type.pointer = false; + type.storage = StorageClassGeneric; + type.image.depth = depth; + + ptr_type = type; + ptr_type.pointer = true; + ptr_type.storage = StorageClassUniformConstant; + ptr_type.parent_type = type_id; + + // Build new variable. + compiler.set(combined_id, ptr_type_id, StorageClassFunction, 0); + + // Inherit RelaxedPrecision (and potentially other useful flags if deemed relevant). + auto &new_flags = compiler.ir.meta[combined_id].decoration.decoration_flags; + auto &old_flags = compiler.ir.meta[sampler_id].decoration.decoration_flags; + new_flags.reset(); + if (old_flags.get(DecorationRelaxedPrecision)) + new_flags.set(DecorationRelaxedPrecision); + + param.id = combined_id; + + compiler.set_name(combined_id, + join("SPIRV_Cross_Combined", compiler.to_name(image_id), compiler.to_name(sampler_id))); + + caller.combined_parameters.push_back(param); + caller.shadow_arguments.push_back({ ptr_type_id, combined_id, 0u, 0u, true }); + } +} + +bool Compiler::DummySamplerForCombinedImageHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + if (need_dummy_sampler) + { + // No need to traverse further, we know the result. 
+ return false; + } + + switch (opcode) + { + case OpLoad: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + bool separate_image = + type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer; + + // If not separate image, don't bother. + if (!separate_image) + return true; + + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + break; + } + + case OpImageFetch: + case OpImageQuerySizeLod: + case OpImageQuerySize: + case OpImageQueryLevels: + case OpImageQuerySamples: + { + // If we are fetching or querying LOD from a plain OpTypeImage, we must pre-combine with our dummy sampler. + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (var) + { + auto &type = compiler.get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer) + need_dummy_sampler = true; + } + + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + auto &type = compiler.get(result_type); + bool separate_image = + type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer; + if (!separate_image) + return true; + + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + + // Other backends might use SPIRAccessChain for this later. + compiler.ir.ids[id].set_allow_type_rewrite(); + break; + } + + default: + break; + } + + return true; +} + +bool Compiler::CombinedImageSamplerHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + // We need to figure out where samplers and images are loaded from, so do only the bare bones compilation we need. + bool is_fetch = false; + + switch (opcode) + { + case OpLoad: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + + // If not separate image or sampler, don't bother. + if (!separate_image && !separate_sampler) + return true; + + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + return true; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + // Technically, it is possible to have arrays of textures and arrays of samplers and combine them, but this becomes essentially + // impossible to implement, since we don't know which concrete sampler we are accessing. + // One potential way is to create a combinatorial explosion where N textures and M samplers are combined into N * M sampler2Ds, + // but this seems ridiculously complicated for a problem which is easy to work around. + // Checking access chains like this assumes we don't have samplers or textures inside uniform structs, but this makes no sense. + + uint32_t result_type = args[0]; + + auto &type = compiler.get(result_type); + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + if (separate_sampler) + SPIRV_CROSS_THROW( + "Attempting to use arrays or structs of separate samplers. 
This is not possible to statically " + "remap to plain GLSL."); + + if (separate_image) + { + uint32_t id = args[1]; + uint32_t ptr = args[2]; + compiler.set(id, "", result_type, true); + compiler.register_read(id, ptr, true); + } + return true; + } + + case OpImageFetch: + case OpImageQuerySizeLod: + case OpImageQuerySize: + case OpImageQueryLevels: + case OpImageQuerySamples: + { + // If we are fetching from a plain OpTypeImage or querying LOD, we must pre-combine with our dummy sampler. + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (!var) + return true; + + auto &type = compiler.get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer) + { + if (compiler.dummy_sampler_id == 0) + SPIRV_CROSS_THROW("texelFetch without sampler was found, but no dummy sampler has been created with " + "build_dummy_sampler_for_combined_images()."); + + // Do it outside. + is_fetch = true; + break; + } + + return true; + } + + case OpSampledImage: + // Do it outside. + break; + + default: + return true; + } + + // Registers sampler2D calls used in case they are parameters so + // that their callees know which combined image samplers to propagate down the call stack. + if (!functions.empty()) + { + auto &callee = *functions.top(); + if (callee.do_combined_parameters) + { + uint32_t image_id = args[2]; + + auto *image = compiler.maybe_get_backing_variable(image_id); + if (image) + image_id = image->self; + + uint32_t sampler_id = is_fetch ? compiler.dummy_sampler_id : args[3]; + auto *sampler = compiler.maybe_get_backing_variable(sampler_id); + if (sampler) + sampler_id = sampler->self; + + auto &combined_type = compiler.get(args[0]); + register_combined_image_sampler(callee, image_id, sampler_id, combined_type.image.depth); + } + } + + // For function calls, we need to remap IDs which are function parameters into global variables. + // This information is statically known from the current place in the call stack. + // Function parameters are not necessarily pointers, so if we don't have a backing variable, remapping will know + // which backing variable the image/sample came from. + uint32_t image_id = remap_parameter(args[2]); + uint32_t sampler_id = is_fetch ? compiler.dummy_sampler_id : remap_parameter(args[3]); + + auto itr = find_if(begin(compiler.combined_image_samplers), end(compiler.combined_image_samplers), + [image_id, sampler_id](const CombinedImageSampler &combined) { + return combined.image_id == image_id && combined.sampler_id == sampler_id; + }); + + if (itr == end(compiler.combined_image_samplers)) + { + uint32_t sampled_type; + if (is_fetch) + { + // Have to invent the sampled image type. + sampled_type = compiler.ir.increase_bound_by(1); + auto &type = compiler.set(sampled_type); + type = compiler.expression_type(args[2]); + type.self = sampled_type; + type.basetype = SPIRType::SampledImage; + type.image.depth = false; + } + else + { + sampled_type = args[0]; + } + + auto id = compiler.ir.increase_bound_by(2); + auto type_id = id + 0; + auto combined_id = id + 1; + + // Make a new type, pointer to OpTypeSampledImage, so we can make a variable of this type. + // We will probably have this type lying around, but it doesn't hurt to make duplicates for internal purposes. + auto &type = compiler.set(type_id); + auto &base = compiler.get(sampled_type); + type = base; + type.pointer = true; + type.storage = StorageClassUniformConstant; + type.parent_type = type_id; + + // Build new variable. 
+ compiler.set(combined_id, type_id, StorageClassUniformConstant, 0); + + // Inherit RelaxedPrecision (and potentially other useful flags if deemed relevant). + auto &new_flags = compiler.ir.meta[combined_id].decoration.decoration_flags; + // Fetch inherits precision from the image, not sampler (there is no sampler). + auto &old_flags = compiler.ir.meta[is_fetch ? image_id : sampler_id].decoration.decoration_flags; + new_flags.reset(); + if (old_flags.get(DecorationRelaxedPrecision)) + new_flags.set(DecorationRelaxedPrecision); + + // Propagate the array type for the original image as well. + auto *var = compiler.maybe_get_backing_variable(image_id); + if (var) + { + auto &parent_type = compiler.get(var->basetype); + type.array = parent_type.array; + type.array_size_literal = parent_type.array_size_literal; + } + + compiler.combined_image_samplers.push_back({ combined_id, image_id, sampler_id }); + } + + return true; +} + +uint32_t Compiler::build_dummy_sampler_for_combined_images() +{ + DummySamplerForCombinedImageHandler handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + if (handler.need_dummy_sampler) + { + uint32_t offset = ir.increase_bound_by(3); + auto type_id = offset + 0; + auto ptr_type_id = offset + 1; + auto var_id = offset + 2; + + SPIRType sampler_type; + auto &sampler = set(type_id); + sampler.basetype = SPIRType::Sampler; + + auto &ptr_sampler = set(ptr_type_id); + ptr_sampler = sampler; + ptr_sampler.self = type_id; + ptr_sampler.storage = StorageClassUniformConstant; + ptr_sampler.pointer = true; + ptr_sampler.parent_type = type_id; + + set(var_id, ptr_type_id, StorageClassUniformConstant, 0); + set_name(var_id, "SPIRV_Cross_DummySampler"); + dummy_sampler_id = var_id; + return var_id; + } + else + return 0; +} + +void Compiler::build_combined_image_samplers() +{ + for (auto &id : ir.ids) + { + if (id.get_type() == TypeFunction) + { + auto &func = id.get(); + func.combined_parameters.clear(); + func.shadow_arguments.clear(); + func.do_combined_parameters = true; + } + } + + combined_image_samplers.clear(); + CombinedImageSamplerHandler handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); +} + +vector Compiler::get_specialization_constants() const +{ + vector spec_consts; + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + if (c.specialization && has_decoration(c.self, DecorationSpecId)) + spec_consts.push_back({ c.self, get_decoration(c.self, DecorationSpecId) }); + } + } + return spec_consts; +} + +SPIRConstant &Compiler::get_constant(uint32_t id) +{ + return get(id); +} + +const SPIRConstant &Compiler::get_constant(uint32_t id) const +{ + return get(id); +} + +static bool exists_unaccessed_path_to_return(const CFG &cfg, uint32_t block, const unordered_set &blocks) +{ + // This block accesses the variable. + if (blocks.find(block) != end(blocks)) + return false; + + // We are at the end of the CFG. + if (cfg.get_succeeding_edges(block).empty()) + return true; + + // If any of our successors have a path to the end, there exists a path from block. + for (auto &succ : cfg.get_succeeding_edges(block)) + if (exists_unaccessed_path_to_return(cfg, succ, blocks)) + return true; + + return false; +} + +void Compiler::analyze_parameter_preservation( + SPIRFunction &entry, const CFG &cfg, const unordered_map> &variable_to_blocks, + const unordered_map> &complete_write_blocks) +{ + for (auto &arg : entry.arguments) + { + // Non-pointers are always inputs. 
+ auto &type = get(arg.type); + if (!type.pointer) + continue; + + // Opaque argument types are always in + bool potential_preserve; + switch (type.basetype) + { + case SPIRType::Sampler: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::AtomicCounter: + potential_preserve = false; + break; + + default: + potential_preserve = true; + break; + } + + if (!potential_preserve) + continue; + + auto itr = variable_to_blocks.find(arg.id); + if (itr == end(variable_to_blocks)) + { + // Variable is never accessed. + continue; + } + + // We have accessed a variable, but there was no complete writes to that variable. + // We deduce that we must preserve the argument. + itr = complete_write_blocks.find(arg.id); + if (itr == end(complete_write_blocks)) + { + arg.read_count++; + continue; + } + + // If there is a path through the CFG where no block completely writes to the variable, the variable will be in an undefined state + // when the function returns. We therefore need to implicitly preserve the variable in case there are writers in the function. + // Major case here is if a function is + // void foo(int &var) { if (cond) var = 10; } + // Using read/write counts, we will think it's just an out variable, but it really needs to be inout, + // because if we don't write anything whatever we put into the function must return back to the caller. + if (exists_unaccessed_path_to_return(cfg, entry.entry_block, itr->second)) + arg.read_count++; + } +} + +Compiler::AnalyzeVariableScopeAccessHandler::AnalyzeVariableScopeAccessHandler(Compiler &compiler_, + SPIRFunction &entry_) + : compiler(compiler_) + , entry(entry_) +{ +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::follow_function_call(const SPIRFunction &) +{ + // Only analyze within this function. + return false; +} + +void Compiler::AnalyzeVariableScopeAccessHandler::set_current_block(const SPIRBlock &block) +{ + current_block = █ + + // If we're branching to a block which uses OpPhi, in GLSL + // this will be a variable write when we branch, + // so we need to track access to these variables as well to + // have a complete picture. + const auto test_phi = [this, &block](uint32_t to) { + auto &next = compiler.get(to); + for (auto &phi : next.phi_variables) + { + if (phi.parent == block.self) + { + accessed_variables_to_block[phi.function_variable].insert(block.self); + // Phi variables are also accessed in our target branch block. 
+ accessed_variables_to_block[phi.function_variable].insert(next.self); + + notify_variable_access(phi.local_variable, block.self); + } + } + }; + + switch (block.terminator) + { + case SPIRBlock::Direct: + notify_variable_access(block.condition, block.self); + test_phi(block.next_block); + break; + + case SPIRBlock::Select: + notify_variable_access(block.condition, block.self); + test_phi(block.true_block); + test_phi(block.false_block); + break; + + case SPIRBlock::MultiSelect: + notify_variable_access(block.condition, block.self); + for (auto &target : block.cases) + test_phi(target.block); + if (block.default_block) + test_phi(block.default_block); + break; + + default: + break; + } +} + +void Compiler::AnalyzeVariableScopeAccessHandler::notify_variable_access(uint32_t id, uint32_t block) +{ + if (id_is_phi_variable(id)) + accessed_variables_to_block[id].insert(block); + else if (id_is_potential_temporary(id)) + accessed_temporaries_to_block[id].insert(block); +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::id_is_phi_variable(uint32_t id) const +{ + if (id >= compiler.get_current_id_bound()) + return false; + auto *var = compiler.maybe_get(id); + return var && var->phi_variable; +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::id_is_potential_temporary(uint32_t id) const +{ + if (id >= compiler.get_current_id_bound()) + return false; + + // Temporaries are not created before we start emitting code. + return compiler.ir.ids[id].empty() || (compiler.ir.ids[id].get_type() == TypeExpression); +} + +bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint32_t *args, uint32_t length) +{ + // Keep track of the types of temporaries, so we can hoist them out as necessary. + uint32_t result_type, result_id; + if (compiler.instruction_to_result_type(result_type, result_id, op, args, length)) + result_id_to_type[result_id] = result_type; + + switch (op) + { + case OpStore: + { + if (length < 2) + return false; + + uint32_t ptr = args[0]; + auto *var = compiler.maybe_get_backing_variable(ptr); + + // If we store through an access chain, we have a partial write. + if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + if (var->self == ptr) + complete_write_variables_to_block[var->self].insert(current_block->self); + else + partial_write_variables_to_block[var->self].insert(current_block->self); + } + + // Might try to store a Phi variable here. + notify_variable_access(args[1], current_block->self); + break; + } + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + { + if (length < 3) + return false; + + uint32_t ptr = args[2]; + auto *var = compiler.maybe_get(ptr); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + + for (uint32_t i = 3; i < length; i++) + notify_variable_access(args[i], current_block->self); + + // The result of an access chain is a fixed expression and is not really considered a temporary. + auto &e = compiler.set(args[1], "", args[0], true); + auto *backing_variable = compiler.maybe_get_backing_variable(ptr); + e.loaded_from = backing_variable ? backing_variable->self : 0; + + // Other backends might use SPIRAccessChain for this later. + compiler.ir.ids[args[1]].set_allow_type_rewrite(); + break; + } + + case OpCopyMemory: + { + if (length < 2) + return false; + + uint32_t lhs = args[0]; + uint32_t rhs = args[1]; + auto *var = compiler.maybe_get_backing_variable(lhs); + + // If we store through an access chain, we have a partial write. 
+ if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + if (var->self == lhs) + complete_write_variables_to_block[var->self].insert(current_block->self); + else + partial_write_variables_to_block[var->self].insert(current_block->self); + } + + var = compiler.maybe_get_backing_variable(rhs); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + break; + } + + case OpCopyObject: + { + if (length < 3) + return false; + + auto *var = compiler.maybe_get_backing_variable(args[2]); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + + // Might try to copy a Phi variable here. + notify_variable_access(args[2], current_block->self); + break; + } + + case OpLoad: + { + if (length < 3) + return false; + uint32_t ptr = args[2]; + auto *var = compiler.maybe_get_backing_variable(ptr); + if (var) + accessed_variables_to_block[var->self].insert(current_block->self); + + // Loaded value is a temporary. + notify_variable_access(args[1], current_block->self); + break; + } + + case OpFunctionCall: + { + if (length < 3) + return false; + + length -= 3; + args += 3; + + for (uint32_t i = 0; i < length; i++) + { + auto *var = compiler.maybe_get_backing_variable(args[i]); + if (var) + { + accessed_variables_to_block[var->self].insert(current_block->self); + // Assume we can get partial writes to this variable. + partial_write_variables_to_block[var->self].insert(current_block->self); + } + + // Cannot easily prove if argument we pass to a function is completely written. + // Usually, functions write to a dummy variable, + // which is then copied to in full to the real argument. + + // Might try to copy a Phi variable here. + notify_variable_access(args[i], current_block->self); + } + + // Return value may be a temporary. + notify_variable_access(args[1], current_block->self); + break; + } + + case OpExtInst: + { + for (uint32_t i = 4; i < length; i++) + notify_variable_access(args[i], current_block->self); + notify_variable_access(args[1], current_block->self); + break; + } + + case OpArrayLength: + // Uses literals, but cannot be a phi variable, so ignore. + break; + + // Atomics shouldn't be able to access function-local variables. + // Some GLSL builtins access a pointer. + + case OpCompositeInsert: + case OpVectorShuffle: + // Specialize for opcode which contains literals. + for (uint32_t i = 1; i < 4; i++) + notify_variable_access(args[i], current_block->self); + break; + + case OpCompositeExtract: + // Specialize for opcode which contains literals. + for (uint32_t i = 1; i < 3; i++) + notify_variable_access(args[i], current_block->self); + break; + + default: + { + // Rather dirty way of figuring out where Phi variables are used. + // As long as only IDs are used, we can scan through instructions and try to find any evidence that + // the ID of a variable has been used. + // There are potential false positives here where a literal is used in-place of an ID, + // but worst case, it does not affect the correctness of the compile. + // Exhaustive analysis would be better here, but it's not worth it for now. 
+ for (uint32_t i = 0; i < length; i++) + notify_variable_access(args[i], current_block->self); + break; + } + } + return true; +} + +Compiler::StaticExpressionAccessHandler::StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_) + : compiler(compiler_) + , variable_id(variable_id_) +{ +} + +bool Compiler::StaticExpressionAccessHandler::follow_function_call(const SPIRFunction &) +{ + return false; +} + +bool Compiler::StaticExpressionAccessHandler::handle(spv::Op op, const uint32_t *args, uint32_t length) +{ + switch (op) + { + case OpStore: + if (length < 2) + return false; + if (args[0] == variable_id) + { + static_expression = args[1]; + write_count++; + } + break; + + case OpLoad: + if (length < 3) + return false; + if (args[2] == variable_id && static_expression == 0) // Tried to read from variable before it was initialized. + return false; + break; + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + if (length < 3) + return false; + if (args[2] == variable_id) // If we try to access chain our candidate variable before we store to it, bail. + return false; + break; + + default: + break; + } + + return true; +} + +void Compiler::find_function_local_luts(SPIRFunction &entry, const AnalyzeVariableScopeAccessHandler &handler) +{ + auto &cfg = *function_cfgs.find(entry.self)->second; + + // For each variable which is statically accessed. + for (auto &accessed_var : handler.accessed_variables_to_block) + { + auto &blocks = accessed_var.second; + auto &var = get(accessed_var.first); + auto &type = expression_type(accessed_var.first); + + // Only consider function local variables here. + if (var.storage != StorageClassFunction) + continue; + + // We cannot be a phi variable. + if (var.phi_variable) + continue; + + // Only consider arrays here. + if (type.array.empty()) + continue; + + // HACK: Do not consider structs. This is a quirk with how types are currently being emitted. + // Structs are emitted after specialization constants and composite constants. + // FIXME: Fix declaration order so declared constants can have struct types. + if (type.basetype == SPIRType::Struct) + continue; + + // If the variable has an initializer, make sure it is a constant expression. + uint32_t static_constant_expression = 0; + if (var.initializer) + { + if (ir.ids[var.initializer].get_type() != TypeConstant) + continue; + static_constant_expression = var.initializer; + + // There can be no stores to this variable, we have now proved we have a LUT. + if (handler.complete_write_variables_to_block.count(var.self) != 0 || + handler.partial_write_variables_to_block.count(var.self) != 0) + continue; + } + else + { + // We can have one, and only one write to the variable, and that write needs to be a constant. + + // No partial writes allowed. + if (handler.partial_write_variables_to_block.count(var.self) != 0) + continue; + + auto itr = handler.complete_write_variables_to_block.find(var.self); + + // No writes? + if (itr == end(handler.complete_write_variables_to_block)) + continue; + + // We write to the variable in more than one block. + auto &write_blocks = itr->second; + if (write_blocks.size() != 1) + continue; + + // The write needs to happen in the dominating block. + DominatorBuilder builder(cfg); + for (auto &block : blocks) + builder.add_block(block); + uint32_t dominator = builder.get_dominator(); + + // The complete write happened in a branch or similar, cannot deduce static expression. 
+ if (write_blocks.count(dominator) == 0) + continue; + + // Find the static expression for this variable. + StaticExpressionAccessHandler static_expression_handler(*this, var.self); + traverse_all_reachable_opcodes(get(dominator), static_expression_handler); + + // We want one, and exactly one write + if (static_expression_handler.write_count != 1 || static_expression_handler.static_expression == 0) + continue; + + // Is it a constant expression? + if (ir.ids[static_expression_handler.static_expression].get_type() != TypeConstant) + continue; + + // We found a LUT! + static_constant_expression = static_expression_handler.static_expression; + } + + get(static_constant_expression).is_used_as_lut = true; + var.static_expression = static_constant_expression; + var.statically_assigned = true; + var.remapped_variable = true; + } +} + +void Compiler::analyze_variable_scope(SPIRFunction &entry, AnalyzeVariableScopeAccessHandler &handler) +{ + // First, we map out all variable access within a function. + // Essentially a map of block -> { variables accessed in the basic block } + traverse_all_reachable_opcodes(entry, handler); + + auto &cfg = *function_cfgs.find(entry.self)->second; + + // Analyze if there are parameters which need to be implicitly preserved with an "in" qualifier. + analyze_parameter_preservation(entry, cfg, handler.accessed_variables_to_block, + handler.complete_write_variables_to_block); + + unordered_map potential_loop_variables; + + // For each variable which is statically accessed. + for (auto &var : handler.accessed_variables_to_block) + { + // Only deal with variables which are considered local variables in this function. + if (find(begin(entry.local_variables), end(entry.local_variables), var.first) == end(entry.local_variables)) + continue; + + DominatorBuilder builder(cfg); + auto &blocks = var.second; + auto &type = expression_type(var.first); + + // Figure out which block is dominating all accesses of those variables. + for (auto &block : blocks) + { + // If we're accessing a variable inside a continue block, this variable might be a loop variable. + // We can only use loop variables with scalars, as we cannot track static expressions for vectors. + if (is_continue(block)) + { + // Potentially awkward case to check for. + // We might have a variable inside a loop, which is touched by the continue block, + // but is not actually a loop variable. + // The continue block is dominated by the inner part of the loop, which does not make sense in high-level + // language output because it will be declared before the body, + // so we will have to lift the dominator up to the relevant loop header instead. + builder.add_block(ir.continue_block_to_loop_header[block]); + + // Arrays or structs cannot be loop variables. + if (type.vecsize == 1 && type.columns == 1 && type.basetype != SPIRType::Struct && type.array.empty()) + { + // The variable is used in multiple continue blocks, this is not a loop + // candidate, signal that by setting block to -1u. + auto &potential = potential_loop_variables[var.first]; + + if (potential == 0) + potential = block; + else + potential = ~(0u); + } + } + builder.add_block(block); + } + + builder.lift_continue_block_dominator(); + + // Add it to a per-block list of variables. + uint32_t dominating_block = builder.get_dominator(); + + // If all blocks here are dead code, this will be 0, so the variable in question + // will be completely eliminated. 
+ if (dominating_block) + { + auto &block = get(dominating_block); + block.dominated_variables.push_back(var.first); + get(var.first).dominator = dominating_block; + } + } + + for (auto &var : handler.accessed_temporaries_to_block) + { + auto itr = handler.result_id_to_type.find(var.first); + + if (itr == end(handler.result_id_to_type)) + { + // We found a false positive ID being used, ignore. + // This should probably be an assert. + continue; + } + + DominatorBuilder builder(cfg); + bool force_temporary = false; + + // Figure out which block is dominating all accesses of those temporaries. + auto &blocks = var.second; + for (auto &block : blocks) + { + builder.add_block(block); + + // If a temporary is used in more than one block, we might have to lift continue block + // access up to loop header like we did for variables. + if (blocks.size() != 1 && is_continue(block)) + builder.add_block(ir.continue_block_to_loop_header[block]); + else if (blocks.size() != 1 && is_single_block_loop(block)) + { + // Awkward case, because the loop header is also the continue block. + force_temporary = true; + } + } + + uint32_t dominating_block = builder.get_dominator(); + if (dominating_block) + { + // If we touch a variable in the dominating block, this is the expected setup. + // SPIR-V normally mandates this, but we have extra cases for temporary use inside loops. + bool first_use_is_dominator = blocks.count(dominating_block) != 0; + + if (!first_use_is_dominator || force_temporary) + { + // This should be very rare, but if we try to declare a temporary inside a loop, + // and that temporary is used outside the loop as well (spirv-opt inliner likes this) + // we should actually emit the temporary outside the loop. + hoisted_temporaries.insert(var.first); + forced_temporaries.insert(var.first); + + auto &block_temporaries = get(dominating_block).declare_temporary; + block_temporaries.emplace_back(handler.result_id_to_type[var.first], var.first); + } + else if (blocks.size() > 1) + { + // Keep track of the temporary as we might have to declare this temporary. + // This can happen if the loop header dominates a temporary, but we have a complex fallback loop. + // In this case, the header is actually inside the for (;;) {} block, and we have problems. + // What we need to do is hoist the temporaries outside the for (;;) {} block in case the header block + // declares the temporary. + auto &block_temporaries = get(dominating_block).potential_declare_temporary; + block_temporaries.emplace_back(handler.result_id_to_type[var.first], var.first); + } + } + } + + unordered_set seen_blocks; + + // Now, try to analyze whether or not these variables are actually loop variables. + for (auto &loop_variable : potential_loop_variables) + { + auto &var = get(loop_variable.first); + auto dominator = var.dominator; + auto block = loop_variable.second; + + // The variable was accessed in multiple continue blocks, ignore. + if (block == ~(0u) || block == 0) + continue; + + // Dead code. + if (dominator == 0) + continue; + + uint32_t header = 0; + + // Find the loop header for this block if we are a continue block. + { + auto itr = ir.continue_block_to_loop_header.find(block); + if (itr != end(ir.continue_block_to_loop_header)) + { + header = itr->second; + } + else if (get(block).continue_block == block) + { + // Also check for self-referential continue block. 
+ header = block; + } + } + + assert(header); + auto &header_block = get(header); + auto &blocks = handler.accessed_variables_to_block[loop_variable.first]; + + // If a loop variable is not used before the loop, it's probably not a loop variable. + bool has_accessed_variable = blocks.count(header) != 0; + + // Now, there are two conditions we need to meet for the variable to be a loop variable. + // 1. The dominating block must have a branch-free path to the loop header, + // this way we statically know which expression should be part of the loop variable initializer. + + // Walk from the dominator, if there is one straight edge connecting + // dominator and loop header, we statically know the loop initializer. + bool static_loop_init = true; + while (dominator != header) + { + if (blocks.count(dominator) != 0) + has_accessed_variable = true; + + auto &succ = cfg.get_succeeding_edges(dominator); + if (succ.size() != 1) + { + static_loop_init = false; + break; + } + + auto &pred = cfg.get_preceding_edges(succ.front()); + if (pred.size() != 1 || pred.front() != dominator) + { + static_loop_init = false; + break; + } + + dominator = succ.front(); + } + + if (!static_loop_init || !has_accessed_variable) + continue; + + // The second condition we need to meet is that no access after the loop + // merge can occur. Walk the CFG to see if we find anything. + + seen_blocks.clear(); + cfg.walk_from(seen_blocks, header_block.merge_block, [&](uint32_t walk_block) { + // We found a block which accesses the variable outside the loop. + if (blocks.find(walk_block) != end(blocks)) + static_loop_init = false; + }); + + if (!static_loop_init) + continue; + + // We have a loop variable. + header_block.loop_variables.push_back(loop_variable.first); + // Need to sort here as variables come from an unordered container, and pushing stuff in wrong order + // will break reproducability in regression runs. + sort(begin(header_block.loop_variables), end(header_block.loop_variables)); + get(loop_variable.first).loop_variable = true; + } +} + +Bitset Compiler::get_buffer_block_flags(uint32_t id) const +{ + return ir.get_buffer_block_flags(get(id)); +} + +bool Compiler::get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type) +{ + if (type.basetype == SPIRType::Struct) + { + base_type = SPIRType::Unknown; + for (auto &member_type : type.member_types) + { + SPIRType::BaseType member_base; + if (!get_common_basic_type(get(member_type), member_base)) + return false; + + if (base_type == SPIRType::Unknown) + base_type = member_base; + else if (base_type != member_base) + return false; + } + return true; + } + else + { + base_type = type.basetype; + return true; + } +} + +void Compiler::ActiveBuiltinHandler::handle_builtin(const SPIRType &type, BuiltIn builtin, + const Bitset &decoration_flags) +{ + // If used, we will need to explicitly declare a new array size for these builtins. 
+ + if (builtin == BuiltInClipDistance) + { + if (!type.array_size_literal[0]) + SPIRV_CROSS_THROW("Array size for ClipDistance must be a literal."); + uint32_t array_size = type.array[0]; + if (array_size == 0) + SPIRV_CROSS_THROW("Array size for ClipDistance must not be unsized."); + compiler.clip_distance_count = array_size; + } + else if (builtin == BuiltInCullDistance) + { + if (!type.array_size_literal[0]) + SPIRV_CROSS_THROW("Array size for CullDistance must be a literal."); + uint32_t array_size = type.array[0]; + if (array_size == 0) + SPIRV_CROSS_THROW("Array size for CullDistance must not be unsized."); + compiler.cull_distance_count = array_size; + } + else if (builtin == BuiltInPosition) + { + if (decoration_flags.get(DecorationInvariant)) + compiler.position_invariant = true; + } +} + +bool Compiler::ActiveBuiltinHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t length) +{ + const auto add_if_builtin = [&](uint32_t id) { + // Only handles variables here. + // Builtins which are part of a block are handled in AccessChain. + auto *var = compiler.maybe_get(id); + auto &decorations = compiler.ir.meta[id].decoration; + if (var && decorations.builtin) + { + auto &type = compiler.get(var->basetype); + auto &flags = + type.storage == StorageClassInput ? compiler.active_input_builtins : compiler.active_output_builtins; + flags.set(decorations.builtin_type); + handle_builtin(type, decorations.builtin_type, decorations.decoration_flags); + } + }; + + switch (opcode) + { + case OpStore: + if (length < 1) + return false; + + add_if_builtin(args[0]); + break; + + case OpCopyMemory: + if (length < 2) + return false; + + add_if_builtin(args[0]); + add_if_builtin(args[1]); + break; + + case OpCopyObject: + case OpLoad: + if (length < 3) + return false; + + add_if_builtin(args[2]); + break; + + case OpSelect: + if (length < 5) + return false; + + add_if_builtin(args[3]); + add_if_builtin(args[4]); + break; + + case OpPhi: + { + if (length < 2) + return false; + + uint32_t count = length - 2; + args += 2; + for (uint32_t i = 0; i < count; i += 2) + add_if_builtin(args[i]); + break; + } + + case OpFunctionCall: + { + if (length < 3) + return false; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + add_if_builtin(args[i]); + break; + } + + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + { + if (length < 4) + return false; + + // Only consider global variables, cannot consider variables in functions yet, or other + // access chains as they have not been created yet. + auto *var = compiler.maybe_get(args[2]); + if (!var) + break; + + // Required if we access chain into builtins like gl_GlobalInvocationID. + add_if_builtin(args[2]); + + // Start traversing type hierarchy at the proper non-pointer types. + auto *type = &compiler.get_variable_data_type(*var); + + auto &flags = + type->storage == StorageClassInput ? 
compiler.active_input_builtins : compiler.active_output_builtins; + + uint32_t count = length - 3; + args += 3; + for (uint32_t i = 0; i < count; i++) + { + // Pointers + if (opcode == OpPtrAccessChain && i == 0) + { + type = &compiler.get(type->parent_type); + continue; + } + + // Arrays + if (!type->array.empty()) + { + type = &compiler.get(type->parent_type); + } + // Structs + else if (type->basetype == SPIRType::Struct) + { + uint32_t index = compiler.get(args[i]).scalar(); + + if (index < uint32_t(compiler.ir.meta[type->self].members.size())) + { + auto &decorations = compiler.ir.meta[type->self].members[index]; + if (decorations.builtin) + { + flags.set(decorations.builtin_type); + handle_builtin(compiler.get(type->member_types[index]), decorations.builtin_type, + decorations.decoration_flags); + } + } + + type = &compiler.get(type->member_types[index]); + } + else + { + // No point in traversing further. We won't find any extra builtins. + break; + } + } + break; + } + + default: + break; + } + + return true; +} + +void Compiler::update_active_builtins() +{ + active_input_builtins.reset(); + active_output_builtins.reset(); + cull_distance_count = 0; + clip_distance_count = 0; + ActiveBuiltinHandler handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); +} + +// Returns whether this shader uses a builtin of the storage class +bool Compiler::has_active_builtin(BuiltIn builtin, StorageClass storage) +{ + const Bitset *flags; + switch (storage) + { + case StorageClassInput: + flags = &active_input_builtins; + break; + case StorageClassOutput: + flags = &active_output_builtins; + break; + + default: + return false; + } + return flags->get(builtin); +} + +void Compiler::analyze_image_and_sampler_usage() +{ + CombinedImageSamplerDrefHandler dref_handler(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), dref_handler); + + CombinedImageSamplerUsageHandler handler(*this, dref_handler.dref_combined_samplers); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + comparison_ids = move(handler.comparison_ids); + need_subpass_input = handler.need_subpass_input; + + // Forward information from separate images and samplers into combined image samplers. + for (auto &combined : combined_image_samplers) + if (comparison_ids.count(combined.sampler_id)) + comparison_ids.insert(combined.combined_id); +} + +bool Compiler::CombinedImageSamplerDrefHandler::handle(spv::Op opcode, const uint32_t *args, uint32_t) +{ + // Mark all sampled images which are used with Dref. 
+ switch (opcode) + { + case OpImageSampleDrefExplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageSparseSampleProjDrefImplicitLod: + case OpImageSparseSampleDrefImplicitLod: + case OpImageSparseSampleProjDrefExplicitLod: + case OpImageSparseSampleDrefExplicitLod: + case OpImageDrefGather: + case OpImageSparseDrefGather: + dref_combined_samplers.insert(args[2]); + return true; + + default: + break; + } + + return true; +} + +void Compiler::build_function_control_flow_graphs_and_analyze() +{ + CFGBuilder handler(*this); + handler.function_cfgs[ir.default_entry_point].reset(new CFG(*this, get(ir.default_entry_point))); + traverse_all_reachable_opcodes(get(ir.default_entry_point), handler); + function_cfgs = move(handler.function_cfgs); + + for (auto &f : function_cfgs) + { + auto &func = get(f.first); + AnalyzeVariableScopeAccessHandler scope_handler(*this, func); + analyze_variable_scope(func, scope_handler); + find_function_local_luts(func, scope_handler); + + // Check if we can actually use the loop variables we found in analyze_variable_scope. + // To use multiple initializers, we need the same type and qualifiers. + for (auto block : func.blocks) + { + auto &b = get(block); + if (b.loop_variables.size() < 2) + continue; + + auto &flags = get_decoration_bitset(b.loop_variables.front()); + uint32_t type = get(b.loop_variables.front()).basetype; + bool invalid_initializers = false; + for (auto loop_variable : b.loop_variables) + { + if (flags != get_decoration_bitset(loop_variable) || + type != get(b.loop_variables.front()).basetype) + { + invalid_initializers = true; + break; + } + } + + if (invalid_initializers) + { + for (auto loop_variable : b.loop_variables) + get(loop_variable).loop_variable = false; + b.loop_variables.clear(); + } + } + } +} + +Compiler::CFGBuilder::CFGBuilder(spirv_cross::Compiler &compiler_) + : compiler(compiler_) +{ +} + +bool Compiler::CFGBuilder::handle(spv::Op, const uint32_t *, uint32_t) +{ + return true; +} + +bool Compiler::CFGBuilder::follow_function_call(const SPIRFunction &func) +{ + if (function_cfgs.find(func.self) == end(function_cfgs)) + { + function_cfgs[func.self].reset(new CFG(compiler, func)); + return true; + } + else + return false; +} + +bool Compiler::CombinedImageSamplerUsageHandler::begin_function_scope(const uint32_t *args, uint32_t length) +{ + if (length < 3) + return false; + + auto &func = compiler.get(args[2]); + const auto *arg = &args[3]; + length -= 3; + + for (uint32_t i = 0; i < length; i++) + { + auto &argument = func.arguments[i]; + dependency_hierarchy[argument.id].insert(arg[i]); + } + + return true; +} + +void Compiler::CombinedImageSamplerUsageHandler::add_hierarchy_to_comparison_ids(uint32_t id) +{ + // Traverse the variable dependency hierarchy and tag everything in its path with comparison ids. + comparison_ids.insert(id); + for (auto &dep_id : dependency_hierarchy[id]) + add_hierarchy_to_comparison_ids(dep_id); +} + +bool Compiler::CombinedImageSamplerUsageHandler::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + switch (opcode) + { + case OpAccessChain: + case OpInBoundsAccessChain: + case OpPtrAccessChain: + case OpLoad: + { + if (length < 3) + return false; + dependency_hierarchy[args[1]].insert(args[2]); + + // Ideally defer this to OpImageRead, but then we'd need to track loaded IDs. + // If we load an image, we're going to use it and there is little harm in declaring an unused gl_FragCoord. 
+ auto &type = compiler.get(args[0]); + if (type.image.dim == DimSubpassData) + need_subpass_input = true; + + // If we load a SampledImage and it will be used with Dref, propagate the state up. + if (dref_combined_samplers.count(args[1]) != 0) + add_hierarchy_to_comparison_ids(args[1]); + break; + } + + case OpSampledImage: + { + if (length < 4) + return false; + + uint32_t result_type = args[0]; + uint32_t result_id = args[1]; + auto &type = compiler.get(result_type); + if (type.image.depth || dref_combined_samplers.count(result_id) != 0) + { + // This image must be a depth image. + uint32_t image = args[2]; + add_hierarchy_to_comparison_ids(image); + + // This sampler must be a SamplerComparisonState, and not a regular SamplerState. + uint32_t sampler = args[3]; + add_hierarchy_to_comparison_ids(sampler); + + // Mark the OpSampledImage itself as being comparison state. + comparison_ids.insert(result_id); + } + return true; + } + + default: + break; + } + + return true; +} + +bool Compiler::buffer_is_hlsl_counter_buffer(uint32_t id) const +{ + return ir.meta.at(id).hlsl_is_magic_counter_buffer; +} + +bool Compiler::buffer_get_hlsl_counter_buffer(uint32_t id, uint32_t &counter_id) const +{ + // First, check for the proper decoration. + if (ir.meta[id].hlsl_magic_counter_buffer != 0) + { + counter_id = ir.meta[id].hlsl_magic_counter_buffer; + return true; + } + else + return false; +} + +void Compiler::make_constant_null(uint32_t id, uint32_t type) +{ + auto &constant_type = get(type); + + if (constant_type.pointer) + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } + else if (!constant_type.array.empty()) + { + assert(constant_type.parent_type); + uint32_t parent_id = ir.increase_bound_by(1); + make_constant_null(parent_id, constant_type.parent_type); + + if (!constant_type.array_size_literal.back()) + SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal."); + + vector elements(constant_type.array.back()); + for (uint32_t i = 0; i < constant_type.array.back(); i++) + elements[i] = parent_id; + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else if (!constant_type.member_types.empty()) + { + uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size())); + vector elements(constant_type.member_types.size()); + for (uint32_t i = 0; i < constant_type.member_types.size(); i++) + { + make_constant_null(member_ids + i, constant_type.member_types[i]); + elements[i] = member_ids + i; + } + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } +} + +const std::vector &Compiler::get_declared_capabilities() const +{ + return ir.declared_capabilities; +} + +const std::vector &Compiler::get_declared_extensions() const +{ + return ir.declared_extensions; +} + +std::string Compiler::get_remapped_declared_block_name(uint32_t id) const +{ + auto itr = declared_block_names.find(id); + if (itr != end(declared_block_names)) + return itr->second; + else + { + auto &var = get(id); + auto &type = get(var.basetype); + auto &block_name = ir.meta[type.self].decoration.alias; + return block_name.empty() ? get_block_fallback_name(id) : block_name; + } +} + +bool Compiler::instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args, + uint32_t length) +{ + // Most instructions follow the pattern of . + // There are some exceptions. 
+ switch (op) + { + case OpStore: + case OpCopyMemory: + case OpCopyMemorySized: + case OpImageWrite: + case OpAtomicStore: + case OpAtomicFlagClear: + case OpEmitStreamVertex: + case OpEndStreamPrimitive: + case OpControlBarrier: + case OpMemoryBarrier: + case OpGroupWaitEvents: + case OpRetainEvent: + case OpReleaseEvent: + case OpSetUserEventStatus: + case OpCaptureEventProfilingInfo: + case OpCommitReadPipe: + case OpCommitWritePipe: + case OpGroupCommitReadPipe: + case OpGroupCommitWritePipe: + return false; + + default: + if (length > 1) + { + result_type = args[0]; + result_id = args[1]; + return true; + } + else + return false; + } +} + +Bitset Compiler::combined_decoration_for_member(const SPIRType &type, uint32_t index) const +{ + Bitset flags; + auto &memb = ir.meta[type.self].members; + if (index >= memb.size()) + return flags; + auto &dec = memb[index]; + + // If our type is a struct, traverse all the members as well recursively. + flags.merge_or(dec.decoration_flags); + for (uint32_t i = 0; i < type.member_types.size(); i++) + flags.merge_or(combined_decoration_for_member(get(type.member_types[i]), i)); + + return flags; +} + +bool Compiler::is_desktop_only_format(spv::ImageFormat format) +{ + switch (format) + { + // Desktop-only formats + case ImageFormatR11fG11fB10f: + case ImageFormatR16f: + case ImageFormatRgb10A2: + case ImageFormatR8: + case ImageFormatRg8: + case ImageFormatR16: + case ImageFormatRg16: + case ImageFormatRgba16: + case ImageFormatR16Snorm: + case ImageFormatRg16Snorm: + case ImageFormatRgba16Snorm: + case ImageFormatR8Snorm: + case ImageFormatRg8Snorm: + case ImageFormatR8ui: + case ImageFormatRg8ui: + case ImageFormatR16ui: + case ImageFormatRgb10a2ui: + case ImageFormatR8i: + case ImageFormatRg8i: + case ImageFormatR16i: + return true; + default: + break; + } + + return false; +} + +bool Compiler::image_is_comparison(const spirv_cross::SPIRType &type, uint32_t id) const +{ + return type.image.depth || (comparison_ids.count(id) != 0); +} diff --git a/3rdparty/spirv-cross/spirv_cross.hpp b/3rdparty/spirv-cross/spirv_cross.hpp new file mode 100644 index 000000000..8441b82b9 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cross.hpp @@ -0,0 +1,958 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_HPP +#define SPIRV_CROSS_HPP + +#include "spirv.hpp" +#include "spirv_cfg.hpp" +#include "spirv_cross_parsed_ir.hpp" + +namespace spirv_cross +{ +struct Resource +{ + // Resources are identified with their SPIR-V ID. + // This is the ID of the OpVariable. + uint32_t id; + + // The type ID of the variable which includes arrays and all type modifications. + // This type ID is not suitable for parsing OpMemberDecoration of a struct and other decorations in general + // since these modifications typically happen on the base_type_id. + uint32_t type_id; + + // The base type of the declared resource. + // This type is the base type which ignores pointers and arrays of the type_id. 
+ // This is mostly useful to parse decorations of the underlying type. + // base_type_id can also be obtained with get_type(get_type(type_id).self). + uint32_t base_type_id; + + // The declared name (OpName) of the resource. + // For Buffer blocks, the name actually reflects the externally + // visible Block name. + // + // This name can be retrieved again by using either + // get_name(id) or get_name(base_type_id) depending if it's a buffer block or not. + // + // This name can be an empty string in which case get_fallback_name(id) can be + // used which obtains a suitable fallback identifier for an ID. + std::string name; +}; + +struct ShaderResources +{ + std::vector uniform_buffers; + std::vector storage_buffers; + std::vector stage_inputs; + std::vector stage_outputs; + std::vector subpass_inputs; + std::vector storage_images; + std::vector sampled_images; + std::vector atomic_counters; + + // There can only be one push constant block, + // but keep the vector in case this restriction is lifted in the future. + std::vector push_constant_buffers; + + // For Vulkan GLSL and HLSL source, + // these correspond to separate texture2D and samplers respectively. + std::vector separate_images; + std::vector separate_samplers; +}; + +struct CombinedImageSampler +{ + // The ID of the sampler2D variable. + uint32_t combined_id; + // The ID of the texture2D variable. + uint32_t image_id; + // The ID of the sampler variable. + uint32_t sampler_id; +}; + +struct SpecializationConstant +{ + // The ID of the specialization constant. + uint32_t id; + // The constant ID of the constant, used in Vulkan during pipeline creation. + uint32_t constant_id; +}; + +struct BufferRange +{ + unsigned index; + size_t offset; + size_t range; +}; + +enum BufferPackingStandard +{ + BufferPackingStd140, + BufferPackingStd430, + BufferPackingStd140EnhancedLayout, + BufferPackingStd430EnhancedLayout, + BufferPackingHLSLCbuffer, + BufferPackingHLSLCbufferPackOffset +}; + +struct EntryPoint +{ + std::string name; + spv::ExecutionModel execution_model; +}; + +class Compiler +{ +public: + friend class CFG; + friend class DominatorBuilder; + + // The constructor takes a buffer of SPIR-V words and parses it. + // It will create its own parser, parse the SPIR-V and move the parsed IR + // as if you had called the constructors taking ParsedIR directly. + explicit Compiler(std::vector ir); + Compiler(const uint32_t *ir, size_t word_count); + + // This is more modular. We can also consume a ParsedIR structure directly, either as a move, or copy. + // With copy, we can reuse the same parsed IR for multiple Compiler instances. + explicit Compiler(const ParsedIR &ir); + explicit Compiler(ParsedIR &&ir); + + virtual ~Compiler() = default; + + // After parsing, API users can modify the SPIR-V via reflection and call this + // to disassemble the SPIR-V into the desired langauage. + // Sub-classes actually implement this. + virtual std::string compile(); + + // Gets the identifier (OpName) of an ID. If not defined, an empty string will be returned. + const std::string &get_name(uint32_t id) const; + + // Applies a decoration to an ID. Effectively injects OpDecorate. + void set_decoration(uint32_t id, spv::Decoration decoration, uint32_t argument = 0); + void set_decoration_string(uint32_t id, spv::Decoration decoration, const std::string &argument); + + // Overrides the identifier OpName of an ID. + // Identifiers beginning with underscores or identifiers which contain double underscores + // are reserved by the implementation. 
+ void set_name(uint32_t id, const std::string &name); + + // Gets a bitmask for the decorations which are applied to ID. + // I.e. (1ull << spv::DecorationFoo) | (1ull << spv::DecorationBar) + SPIRV_CROSS_DEPRECATED("Please use get_decoration_bitset instead.") + uint64_t get_decoration_mask(uint32_t id) const; + const Bitset &get_decoration_bitset(uint32_t id) const; + + // Returns whether the decoration has been applied to the ID. + bool has_decoration(uint32_t id, spv::Decoration decoration) const; + + // Gets the value for decorations which take arguments. + // If the decoration is a boolean (i.e. spv::DecorationNonWritable), + // 1 will be returned. + // If decoration doesn't exist or decoration is not recognized, + // 0 will be returned. + uint32_t get_decoration(uint32_t id, spv::Decoration decoration) const; + const std::string &get_decoration_string(uint32_t id, spv::Decoration decoration) const; + + // Removes the decoration for an ID. + void unset_decoration(uint32_t id, spv::Decoration decoration); + + // Gets the SPIR-V type associated with ID. + // Mostly used with Resource::type_id and Resource::base_type_id to parse the underlying type of a resource. + const SPIRType &get_type(uint32_t id) const; + + // Gets the SPIR-V type of a variable. + const SPIRType &get_type_from_variable(uint32_t id) const; + + // Gets the id of SPIR-V type underlying the given type_id, which might be a pointer. + uint32_t get_pointee_type_id(uint32_t type_id) const; + + // Gets the SPIR-V type underlying the given type, which might be a pointer. + const SPIRType &get_pointee_type(const SPIRType &type) const; + + // Gets the SPIR-V type underlying the given type_id, which might be a pointer. + const SPIRType &get_pointee_type(uint32_t type_id) const; + + // Gets the ID of the SPIR-V type underlying a variable. + uint32_t get_variable_data_type_id(const SPIRVariable &var) const; + + // Gets the SPIR-V type underlying a variable. + SPIRType &get_variable_data_type(const SPIRVariable &var); + + // Gets the SPIR-V type underlying a variable. + const SPIRType &get_variable_data_type(const SPIRVariable &var) const; + + // Returns if the given type refers to a sampled image. + bool is_sampled_image_type(const SPIRType &type); + + // Gets the underlying storage class for an OpVariable. + spv::StorageClass get_storage_class(uint32_t id) const; + + // If get_name() is an empty string, get the fallback name which will be used + // instead in the disassembled source. + virtual const std::string get_fallback_name(uint32_t id) const; + + // If get_name() of a Block struct is an empty string, get the fallback name. + // This needs to be per-variable as multiple variables can use the same block type. + virtual const std::string get_block_fallback_name(uint32_t id) const; + + // Given an OpTypeStruct in ID, obtain the identifier for member number "index". + // This may be an empty string. + const std::string &get_member_name(uint32_t id, uint32_t index) const; + + // Given an OpTypeStruct in ID, obtain the OpMemberDecoration for member number "index". + uint32_t get_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const; + const std::string &get_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration) const; + + // Sets the member identifier for OpTypeStruct ID, member number "index". 
+ void set_member_name(uint32_t id, uint32_t index, const std::string &name); + + // Returns the qualified member identifier for OpTypeStruct ID, member number "index", + // or an empty string if no qualified alias exists + const std::string &get_member_qualified_name(uint32_t type_id, uint32_t index) const; + + // Sets the qualified member identifier for OpTypeStruct ID, member number "index". + void set_member_qualified_name(uint32_t type_id, uint32_t index, const std::string &name); + + // Gets the decoration mask for a member of a struct, similar to get_decoration_mask. + SPIRV_CROSS_DEPRECATED("Please use get_member_decoration_bitset instead.") + uint64_t get_member_decoration_mask(uint32_t id, uint32_t index) const; + const Bitset &get_member_decoration_bitset(uint32_t id, uint32_t index) const; + + // Returns whether the decoration has been applied to a member of a struct. + bool has_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const; + + // Similar to set_decoration, but for struct members. + void set_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0); + void set_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration, + const std::string &argument); + + // Unsets a member decoration, similar to unset_decoration. + void unset_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration); + + // Gets the fallback name for a member, similar to get_fallback_name. + virtual const std::string get_fallback_member_name(uint32_t index) const + { + return join("_", index); + } + + // Returns a vector of which members of a struct are potentially in use by a + // SPIR-V shader. The granularity of this analysis is per-member of a struct. + // This can be used for Buffer (UBO), BufferBlock/StorageBuffer (SSBO) and PushConstant blocks. + // ID is the Resource::id obtained from get_shader_resources(). + std::vector get_active_buffer_ranges(uint32_t id) const; + + // Returns the effective size of a buffer block. + size_t get_declared_struct_size(const SPIRType &struct_type) const; + + // Returns the effective size of a buffer block, with a given array size + // for a runtime array. + // SSBOs are typically declared as runtime arrays. get_declared_struct_size() will return 0 for the size. + // This is not very helpful for applications which might need to know the array stride of its last member. + // This can be done through the API, but it is not very intuitive how to accomplish this, so here we provide a helper function + // to query the size of the buffer, assuming that the last member has a certain size. + // If the buffer does not contain a runtime array, array_size is ignored, and the function will behave as + // get_declared_struct_size(). + // To get the array stride of the last member, something like: + // get_declared_struct_size_runtime_array(type, 1) - get_declared_struct_size_runtime_array(type, 0) will work. + size_t get_declared_struct_size_runtime_array(const SPIRType &struct_type, size_t array_size) const; + + // Returns the effective size of a buffer block struct member. + virtual size_t get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const; + + // Legacy GLSL compatibility method. 
Deprecated in favor of CompilerGLSL::flatten_buffer_block + SPIRV_CROSS_DEPRECATED("Please use flatten_buffer_block instead.") void flatten_interface_block(uint32_t id); + + // Returns a set of all global variables which are statically accessed + // by the control flow graph from the current entry point. + // Only variables which change the interface for a shader are returned, that is, + // variables with storage class of Input, Output, Uniform, UniformConstant, PushConstant and AtomicCounter + // storage classes are returned. + // + // To use the returned set as the filter for which variables are used during compilation, + // this set can be moved to set_enabled_interface_variables(). + std::unordered_set get_active_interface_variables() const; + + // Sets the interface variables which are used during compilation. + // By default, all variables are used. + // Once set, compile() will only consider the set in active_variables. + void set_enabled_interface_variables(std::unordered_set active_variables); + + // Query shader resources, use ids with reflection interface to modify or query binding points, etc. + ShaderResources get_shader_resources() const; + + // Query shader resources, but only return the variables which are part of active_variables. + // E.g.: get_shader_resources(get_active_variables()) to only return the variables which are statically + // accessed. + ShaderResources get_shader_resources(const std::unordered_set &active_variables) const; + + // Remapped variables are considered built-in variables and a backend will + // not emit a declaration for this variable. + // This is mostly useful for making use of builtins which are dependent on extensions. + void set_remapped_variable_state(uint32_t id, bool remap_enable); + bool get_remapped_variable_state(uint32_t id) const; + + // For subpassInput variables which are remapped to plain variables, + // the number of components in the remapped + // variable must be specified as the backing type of subpass inputs are opaque. + void set_subpass_input_remapped_components(uint32_t id, uint32_t components); + uint32_t get_subpass_input_remapped_components(uint32_t id) const; + + // All operations work on the current entry point. + // Entry points can be swapped out with set_entry_point(). + // Entry points should be set right after the constructor completes as some reflection functions traverse the graph from the entry point. + // Resource reflection also depends on the entry point. + // By default, the current entry point is set to the first OpEntryPoint which appears in the SPIR-V module. + SPIRV_CROSS_DEPRECATED("Please use get_entry_points_and_stages instead.") + std::vector get_entry_points() const; + SPIRV_CROSS_DEPRECATED("Please use set_entry_point(const std::string &, spv::ExecutionModel) instead.") + void set_entry_point(const std::string &name); + + // Renames an entry point from old_name to new_name. + // If old_name is currently selected as the current entry point, it will continue to be the current entry point, + // albeit with a new name. + // get_entry_points() is essentially invalidated at this point. + SPIRV_CROSS_DEPRECATED( + "Please use rename_entry_point(const std::string&, const std::string&, spv::ExecutionModel) instead.") + void rename_entry_point(const std::string &old_name, const std::string &new_name); + + // Returns the internal data structure for entry points to allow poking around. 
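The interface-variable and resource queries declared above are normally used together: gather the statically accessed variables, hand that set back to the compiler, then reflect bindings from the returned ShaderResources. Below is a minimal usage sketch, not part of the upstream sources; it assumes the CompilerGLSL backend added elsewhere in this patch and a hypothetical load_spirv_words() helper that supplies the SPIR-V binary.

#include "spirv_glsl.hpp"

#include <cstdint>
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Hypothetical helper standing in for however the application loads its
// SPIR-V binary; it is not part of SPIRV-Cross.
std::vector<uint32_t> load_spirv_words();

void reflect_and_compile()
{
    // Any backend can be used for reflection; the GLSL backend is used here.
    spirv_cross::CompilerGLSL compiler(load_spirv_words());

    // Only consider variables statically accessed from the entry point,
    // and restrict compilation to that set.
    auto active = compiler.get_active_interface_variables();
    spirv_cross::ShaderResources resources = compiler.get_shader_resources(active);
    compiler.set_enabled_interface_variables(std::move(active));

    for (const spirv_cross::Resource &res : resources.uniform_buffers)
    {
        uint32_t set = compiler.get_decoration(res.id, spv::DecorationDescriptorSet);
        uint32_t binding = compiler.get_decoration(res.id, spv::DecorationBinding);
        printf("UBO %s: set = %u, binding = %u\n", res.name.c_str(), set, binding);
    }

    // Emit GLSL for the (possibly modified) module.
    std::string glsl = compiler.compile();
    printf("%s\n", glsl.c_str());
}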
+ SPIRV_CROSS_DEPRECATED("Please use get_entry_point(const std::string &, spv::ExecutionModel instead.") + const SPIREntryPoint &get_entry_point(const std::string &name) const; + SPIRV_CROSS_DEPRECATED("Please use get_entry_point(const std::string &, spv::ExecutionModel instead.") + SPIREntryPoint &get_entry_point(const std::string &name); + + // Some shader languages restrict the names that can be given to entry points, and the + // corresponding backend will automatically rename an entry point name, during the call + // to compile() if it is illegal. For example, the common entry point name main() is + // illegal in MSL, and is renamed to an alternate name by the MSL backend. + // Given the original entry point name contained in the SPIR-V, this function returns + // the name, as updated by the backend during the call to compile(). If the name is not + // illegal, and has not been renamed, or if this function is called before compile(), + // this function will simply return the same name. + SPIRV_CROSS_DEPRECATED( + "Please use get_cleansed_entry_point_name(const std::string &, spv::ExecutionModel) instead.") + const std::string &get_cleansed_entry_point_name(const std::string &name) const; + + // New variants of entry point query and reflection. + // Names for entry points in the SPIR-V module may alias if they belong to different execution models. + // To disambiguate, we must pass along with the entry point names the execution model. + std::vector get_entry_points_and_stages() const; + void set_entry_point(const std::string &entry, spv::ExecutionModel execution_model); + void rename_entry_point(const std::string &old_name, const std::string &new_name, + spv::ExecutionModel execution_model); + const SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model) const; + SPIREntryPoint &get_entry_point(const std::string &name, spv::ExecutionModel execution_model); + const std::string &get_cleansed_entry_point_name(const std::string &name, + spv::ExecutionModel execution_model) const; + + // Query and modify OpExecutionMode. + SPIRV_CROSS_DEPRECATED("Please use get_execution_mode_bitset instead.") + uint64_t get_execution_mode_mask() const; + const Bitset &get_execution_mode_bitset() const; + + void unset_execution_mode(spv::ExecutionMode mode); + void set_execution_mode(spv::ExecutionMode mode, uint32_t arg0 = 0, uint32_t arg1 = 0, uint32_t arg2 = 0); + + // Gets argument for an execution mode (LocalSize, Invocations, OutputVertices). + // For LocalSize, the index argument is used to select the dimension (X = 0, Y = 1, Z = 2). + // For execution modes which do not have arguments, 0 is returned. + uint32_t get_execution_mode_argument(spv::ExecutionMode mode, uint32_t index = 0) const; + spv::ExecutionModel get_execution_model() const; + + // In SPIR-V, the compute work group size can be represented by a constant vector, in which case + // the LocalSize execution mode is ignored. + // + // This constant vector can be a constant vector, specialization constant vector, or partly specialized constant vector. + // To modify and query work group dimensions which are specialization constants, SPIRConstant values must be modified + // directly via get_constant() rather than using LocalSize directly. This function will return which constants should be modified. + // + // To modify dimensions which are *not* specialization constants, set_execution_mode should be used directly. 
+ // Arguments to set_execution_mode which are specialization constants are effectively ignored during compilation. + // NOTE: This is somewhat different from how SPIR-V works. In SPIR-V, the constant vector will completely replace LocalSize, + // while in this interface, LocalSize is only ignored for specialization constants. + // + // The specialization constant will be written to x, y and z arguments. + // If the component is not a specialization constant, a zeroed out struct will be written. + // The return value is the constant ID of the builtin WorkGroupSize, but this is not expected to be useful + // for most use cases. + uint32_t get_work_group_size_specialization_constants(SpecializationConstant &x, SpecializationConstant &y, + SpecializationConstant &z) const; + + // Analyzes all OpImageFetch (texelFetch) opcodes and checks if there are instances where + // said instruction is used without a combined image sampler. + // GLSL targets do not support the use of texelFetch without a sampler. + // To workaround this, we must inject a dummy sampler which can be used to form a sampler2D at the call-site of + // texelFetch as necessary. + // + // This must be called before build_combined_image_samplers(). + // build_combined_image_samplers() may refer to the ID returned by this method if the returned ID is non-zero. + // The return value will be the ID of a sampler object if a dummy sampler is necessary, or 0 if no sampler object + // is required. + // + // If the returned ID is non-zero, it can be decorated with set/bindings as desired before calling compile(). + // Calling this function also invalidates get_active_interface_variables(), so this should be called + // before that function. + uint32_t build_dummy_sampler_for_combined_images(); + + // Analyzes all separate image and samplers used from the currently selected entry point, + // and re-routes them all to a combined image sampler instead. + // This is required to "support" separate image samplers in targets which do not natively support + // this feature, like GLSL/ESSL. + // + // This must be called before compile() if such remapping is desired. + // This call will add new sampled images to the SPIR-V, + // so it will appear in reflection if get_shader_resources() is called after build_combined_image_samplers. + // + // If any image/sampler remapping was found, no separate image/samplers will appear in the decompiled output, + // but will still appear in reflection. + // + // The resulting samplers will be void of any decorations like name, descriptor sets and binding points, + // so this can be added before compile() if desired. + // + // Combined image samplers originating from this set are always considered active variables. + // Arrays of separate samplers are not supported, but arrays of separate images are supported. + // Array of images + sampler -> Array of combined image samplers. + void build_combined_image_samplers(); + + // Gets a remapping for the combined image samplers. + const std::vector &get_combined_image_samplers() const + { + return combined_image_samplers; + } + + // Set a new variable type remap callback. + // The type remapping is designed to allow global interface variable to assume more special types. + // A typical example here is to remap sampler2D into samplerExternalOES, which currently isn't supported + // directly by SPIR-V. 
+ // + // In compile() while emitting code, + // for every variable that is declared, including function parameters, the callback will be called + // and the API user has a chance to change the textual representation of the type used to declare the variable. + // The API user can detect special patterns in names to guide the remapping. + void set_variable_type_remap_callback(VariableTypeRemapCallback cb) + { + variable_remap_callback = std::move(cb); + } + + // API for querying which specialization constants exist. + // To modify a specialization constant before compile(), use get_constant(constant.id), + // then update constants directly in the SPIRConstant data structure. + // For composite types, the subconstants can be iterated over and modified. + // constant_type is the SPIRType for the specialization constant, + // which can be queried to determine which fields in the unions should be poked at. + std::vector get_specialization_constants() const; + SPIRConstant &get_constant(uint32_t id); + const SPIRConstant &get_constant(uint32_t id) const; + + uint32_t get_current_id_bound() const + { + return uint32_t(ir.ids.size()); + } + + // API for querying buffer objects. + // The type passed in here should be the base type of a resource, i.e. + // get_type(resource.base_type_id) + // as decorations are set in the basic Block type. + // The type passed in here must have these decorations set, or an exception is raised. + // Only UBOs and SSBOs or sub-structs which are part of these buffer types will have these decorations set. + uint32_t type_struct_member_offset(const SPIRType &type, uint32_t index) const; + uint32_t type_struct_member_array_stride(const SPIRType &type, uint32_t index) const; + uint32_t type_struct_member_matrix_stride(const SPIRType &type, uint32_t index) const; + + // Gets the offset in SPIR-V words (uint32_t) for a decoration which was originally declared in the SPIR-V binary. + // The offset will point to one or more uint32_t literals which can be modified in-place before using the SPIR-V binary. + // Note that adding or removing decorations using the reflection API will not change the behavior of this function. + // If the decoration was declared, sets the word_offset to an offset into the provided SPIR-V binary buffer and returns true, + // otherwise, returns false. + // If the decoration does not have any value attached to it (e.g. DecorationRelaxedPrecision), this function will also return false. + bool get_binary_offset_for_decoration(uint32_t id, spv::Decoration decoration, uint32_t &word_offset) const; + + // HLSL counter buffer reflection interface. + // Append/Consume/Increment/Decrement in HLSL is implemented as two "neighbor" buffer objects where + // one buffer implements the storage, and a single buffer containing just a lone "int" implements the counter. + // To SPIR-V these will be exposed as two separate buffers, but glslang HLSL frontend emits a special indentifier + // which lets us link the two buffers together. + + // Queries if a variable ID is a counter buffer which "belongs" to a regular buffer object. + + // If SPV_GOOGLE_hlsl_functionality1 is used, this can be used even with a stripped SPIR-V module. + // Otherwise, this query is purely based on OpName identifiers as found in the SPIR-V module, and will + // only return true if OpSource was reported HLSL. + // To rely on this functionality, ensure that the SPIR-V module is not stripped. 
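The buffer-layout helpers (type_struct_member_offset and friends) and the specialization-constant queries declared just above are easiest to see in combination. The following is an illustrative sketch only, not part of the upstream sources; the function name dump_layout is invented, and the Compiler instance is assumed to have been constructed elsewhere.

#include "spirv_cross.hpp"

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Walk the layout of every uniform buffer and list the module's
// specialization constants.
void dump_layout(const spirv_cross::Compiler &compiler)
{
    spirv_cross::ShaderResources resources = compiler.get_shader_resources();

    for (const spirv_cross::Resource &res : resources.uniform_buffers)
    {
        // Offsets and strides are decorations on the base struct type,
        // not on the (possibly pointer/array) type_id.
        const spirv_cross::SPIRType &type = compiler.get_type(res.base_type_id);
        printf("%s: %zu bytes\n", res.name.c_str(), compiler.get_declared_struct_size(type));

        for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
        {
            printf("  member %u: offset %u, size %zu\n", i,
                   compiler.type_struct_member_offset(type, i),
                   compiler.get_declared_struct_member_size(type, i));
        }
    }

    // constant_id is the ID used at Vulkan pipeline creation time; the value
    // itself can be modified through get_constant(c.id) before compile().
    for (auto &c : compiler.get_specialization_constants())
        printf("spec constant %u maps to constant_id %u\n", c.id, c.constant_id);
}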
+ + bool buffer_is_hlsl_counter_buffer(uint32_t id) const; + + // Queries if a buffer object has a neighbor "counter" buffer. + // If so, the ID of that counter buffer will be returned in counter_id. + // If SPV_GOOGLE_hlsl_functionality1 is used, this can be used even with a stripped SPIR-V module. + // Otherwise, this query is purely based on OpName identifiers as found in the SPIR-V module, and will + // only return true if OpSource was reported HLSL. + // To rely on this functionality, ensure that the SPIR-V module is not stripped. + bool buffer_get_hlsl_counter_buffer(uint32_t id, uint32_t &counter_id) const; + + // Gets the list of all SPIR-V Capabilities which were declared in the SPIR-V module. + const std::vector &get_declared_capabilities() const; + + // Gets the list of all SPIR-V extensions which were declared in the SPIR-V module. + const std::vector &get_declared_extensions() const; + + // When declaring buffer blocks in GLSL, the name declared in the GLSL source + // might not be the same as the name declared in the SPIR-V module due to naming conflicts. + // In this case, SPIRV-Cross needs to find a fallback-name, and it might only + // be possible to know this name after compiling to GLSL. + // This is particularly important for HLSL input and UAVs which tends to reuse the same block type + // for multiple distinct blocks. For these cases it is not possible to modify the name of the type itself + // because it might be unique. Instead, you can use this interface to check after compilation which + // name was actually used if your input SPIR-V tends to have this problem. + // For other names like remapped names for variables, etc, it's generally enough to query the name of the variables + // after compiling, block names are an exception to this rule. + // ID is the name of a variable as returned by Resource::id, and must be a variable with a Block-like type. + // + // This also applies to HLSL cbuffers. + std::string get_remapped_declared_block_name(uint32_t id) const; + + // For buffer block variables, get the decorations for that variable. + // Sometimes, decorations for buffer blocks are found in member decorations instead + // of direct decorations on the variable itself. + // The most common use here is to check if a buffer is readonly or writeonly. + Bitset get_buffer_block_flags(uint32_t id) const; + +protected: + const uint32_t *stream(const Instruction &instr) const + { + // If we're not going to use any arguments, just return nullptr. + // We want to avoid case where we return an out of range pointer + // that trips debug assertions on some platforms. + if (!instr.length) + return nullptr; + + if (instr.offset + instr.length > ir.spirv.size()) + SPIRV_CROSS_THROW("Compiler::stream() out of range."); + return &ir.spirv[instr.offset]; + } + + ParsedIR ir; + // Marks variables which have global scope and variables which can alias with other variables + // (SSBO, image load store, etc) + std::vector global_variables; + std::vector aliased_variables; + + SPIRFunction *current_function = nullptr; + SPIRBlock *current_block = nullptr; + std::unordered_set active_interface_variables; + bool check_active_interface_variables = false; + + // If our IDs are out of range here as part of opcodes, throw instead of + // undefined behavior. + template + T &set(uint32_t id, P &&... args) + { + auto &var = variant_set(ir.ids.at(id), std::forward

(args)...); + var.self = id; + return var; + } + + template + T &get(uint32_t id) + { + return variant_get(ir.ids.at(id)); + } + + template + T *maybe_get(uint32_t id) + { + if (ir.ids.at(id).get_type() == T::type) + return &get(id); + else + return nullptr; + } + + template + const T &get(uint32_t id) const + { + return variant_get(ir.ids.at(id)); + } + + template + const T *maybe_get(uint32_t id) const + { + if (ir.ids.at(id).get_type() == T::type) + return &get(id); + else + return nullptr; + } + + const SPIREntryPoint &get_entry_point() const; + SPIREntryPoint &get_entry_point(); + + virtual std::string to_name(uint32_t id, bool allow_alias = true) const; + bool is_builtin_variable(const SPIRVariable &var) const; + bool is_builtin_type(const SPIRType &type) const; + bool is_hidden_variable(const SPIRVariable &var, bool include_builtins = false) const; + bool is_immutable(uint32_t id) const; + bool is_member_builtin(const SPIRType &type, uint32_t index, spv::BuiltIn *builtin) const; + bool is_scalar(const SPIRType &type) const; + bool is_vector(const SPIRType &type) const; + bool is_matrix(const SPIRType &type) const; + bool is_array(const SPIRType &type) const; + uint32_t expression_type_id(uint32_t id) const; + const SPIRType &expression_type(uint32_t id) const; + bool expression_is_lvalue(uint32_t id) const; + bool variable_storage_is_aliased(const SPIRVariable &var); + SPIRVariable *maybe_get_backing_variable(uint32_t chain); + + void register_read(uint32_t expr, uint32_t chain, bool forwarded); + void register_write(uint32_t chain); + + inline bool is_continue(uint32_t next) const + { + return (ir.block_meta[next] & ParsedIR::BLOCK_META_CONTINUE_BIT) != 0; + } + + inline bool is_single_block_loop(uint32_t next) const + { + auto &block = get(next); + return block.merge == SPIRBlock::MergeLoop && block.continue_block == next; + } + + inline bool is_break(uint32_t next) const + { + return (ir.block_meta[next] & + (ParsedIR::BLOCK_META_LOOP_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0; + } + + inline bool is_loop_break(uint32_t next) const + { + return (ir.block_meta[next] & ParsedIR::BLOCK_META_LOOP_MERGE_BIT) != 0; + } + + inline bool is_conditional(uint32_t next) const + { + return (ir.block_meta[next] & + (ParsedIR::BLOCK_META_SELECTION_MERGE_BIT | ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT)) != 0; + } + + // Dependency tracking for temporaries read from variables. + void flush_dependees(SPIRVariable &var); + void flush_all_active_variables(); + void flush_control_dependent_expressions(uint32_t block); + void flush_all_atomic_capable_variables(); + void flush_all_aliased_variables(); + void register_global_read_dependencies(const SPIRBlock &func, uint32_t id); + void register_global_read_dependencies(const SPIRFunction &func, uint32_t id); + std::unordered_set invalid_expressions; + + void update_name_cache(std::unordered_set &cache, std::string &name); + + // A variant which takes two sets of names. The secondary is only used to verify there are no collisions, + // but the set is not updated when we have found a new name. + // Used primarily when adding block interface names. 
+ void update_name_cache(std::unordered_set &cache_primary, + const std::unordered_set &cache_secondary, std::string &name); + + bool function_is_pure(const SPIRFunction &func); + bool block_is_pure(const SPIRBlock &block); + bool block_is_outside_flow_control_from_block(const SPIRBlock &from, const SPIRBlock &to); + + bool execution_is_branchless(const SPIRBlock &from, const SPIRBlock &to) const; + bool execution_is_noop(const SPIRBlock &from, const SPIRBlock &to) const; + SPIRBlock::ContinueBlockType continue_block_type(const SPIRBlock &continue_block) const; + + bool force_recompile = false; + + bool block_is_loop_candidate(const SPIRBlock &block, SPIRBlock::Method method) const; + + bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const; + void inherit_expression_dependencies(uint32_t dst, uint32_t source); + void add_implied_read_expression(SPIRExpression &e, uint32_t source); + void add_implied_read_expression(SPIRAccessChain &e, uint32_t source); + + // For proper multiple entry point support, allow querying if an Input or Output + // variable is part of that entry points interface. + bool interface_variable_exists_in_entry_point(uint32_t id) const; + + std::vector combined_image_samplers; + + void remap_variable_type_name(const SPIRType &type, const std::string &var_name, std::string &type_name) const + { + if (variable_remap_callback) + variable_remap_callback(type, var_name, type_name); + } + + void set_ir(const ParsedIR &parsed); + void set_ir(ParsedIR &&parsed); + void parse_fixup(); + + // Used internally to implement various traversals for queries. + struct OpcodeHandler + { + virtual ~OpcodeHandler() = default; + + // Return true if traversal should continue. + // If false, traversal will end immediately. + virtual bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) = 0; + + virtual bool follow_function_call(const SPIRFunction &) + { + return true; + } + + virtual void set_current_block(const SPIRBlock &) + { + } + + virtual bool begin_function_scope(const uint32_t *, uint32_t) + { + return true; + } + + virtual bool end_function_scope(const uint32_t *, uint32_t) + { + return true; + } + }; + + struct BufferAccessHandler : OpcodeHandler + { + BufferAccessHandler(const Compiler &compiler_, std::vector &ranges_, uint32_t id_) + : compiler(compiler_) + , ranges(ranges_) + , id(id_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + const Compiler &compiler; + std::vector &ranges; + uint32_t id; + + std::unordered_set seen; + }; + + struct InterfaceVariableAccessHandler : OpcodeHandler + { + InterfaceVariableAccessHandler(const Compiler &compiler_, std::unordered_set &variables_) + : compiler(compiler_) + , variables(variables_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + const Compiler &compiler; + std::unordered_set &variables; + }; + + struct CombinedImageSamplerHandler : OpcodeHandler + { + CombinedImageSamplerHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool end_function_scope(const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + + // Each function in the call stack needs its own remapping for parameters so we can deduce which global variable each texture/sampler the parameter is statically bound to. 
+ std::stack> parameter_remapping; + std::stack functions; + + uint32_t remap_parameter(uint32_t id); + void push_remap_parameters(const SPIRFunction &func, const uint32_t *args, uint32_t length); + void pop_remap_parameters(); + void register_combined_image_sampler(SPIRFunction &caller, uint32_t texture_id, uint32_t sampler_id, + bool depth); + }; + + struct DummySamplerForCombinedImageHandler : OpcodeHandler + { + DummySamplerForCombinedImageHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + bool need_dummy_sampler = false; + }; + + struct ActiveBuiltinHandler : OpcodeHandler + { + ActiveBuiltinHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + + void handle_builtin(const SPIRType &type, spv::BuiltIn builtin, const Bitset &decoration_flags); + }; + + bool traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const; + bool traverse_all_reachable_opcodes(const SPIRFunction &block, OpcodeHandler &handler) const; + // This must be an ordered data structure so we always pick the same type aliases. + std::vector global_struct_cache; + + ShaderResources get_shader_resources(const std::unordered_set *active_variables) const; + + VariableTypeRemapCallback variable_remap_callback; + + bool get_common_basic_type(const SPIRType &type, SPIRType::BaseType &base_type); + + std::unordered_set forced_temporaries; + std::unordered_set forwarded_temporaries; + std::unordered_set hoisted_temporaries; + + Bitset active_input_builtins; + Bitset active_output_builtins; + uint32_t clip_distance_count = 0; + uint32_t cull_distance_count = 0; + bool position_invariant = false; + + // Traverses all reachable opcodes and sets active_builtins to a bitmask of all builtin variables which are accessed in the shader. + void update_active_builtins(); + bool has_active_builtin(spv::BuiltIn builtin, spv::StorageClass storage); + + void analyze_parameter_preservation( + SPIRFunction &entry, const CFG &cfg, + const std::unordered_map> &variable_to_blocks, + const std::unordered_map> &complete_write_blocks); + + // If a variable ID or parameter ID is found in this set, a sampler is actually a shadow/comparison sampler. + // SPIR-V does not support this distinction, so we must keep track of this information outside the type system. + // There might be unrelated IDs found in this set which do not correspond to actual variables. + // This set should only be queried for the existence of samplers which are already known to be variables or parameter IDs. + // Similar is implemented for images, as well as if subpass inputs are needed. + std::unordered_set comparison_ids; + bool need_subpass_input = false; + + // In certain backends, we will need to use a dummy sampler to be able to emit code. + // GLSL does not support texelFetch on texture2D objects, but SPIR-V does, + // so we need to workaround by having the application inject a dummy sampler. 
+ uint32_t dummy_sampler_id = 0; + + void analyze_image_and_sampler_usage(); + + struct CombinedImageSamplerDrefHandler : OpcodeHandler + { + CombinedImageSamplerDrefHandler(Compiler &compiler_) + : compiler(compiler_) + { + } + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + std::unordered_set dref_combined_samplers; + }; + + struct CombinedImageSamplerUsageHandler : OpcodeHandler + { + CombinedImageSamplerUsageHandler(Compiler &compiler_, + const std::unordered_set &dref_combined_samplers_) + : compiler(compiler_) + , dref_combined_samplers(dref_combined_samplers_) + { + } + + bool begin_function_scope(const uint32_t *args, uint32_t length) override; + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + const std::unordered_set &dref_combined_samplers; + + std::unordered_map> dependency_hierarchy; + std::unordered_set comparison_ids; + + void add_hierarchy_to_comparison_ids(uint32_t ids); + bool need_subpass_input = false; + }; + + void build_function_control_flow_graphs_and_analyze(); + std::unordered_map> function_cfgs; + struct CFGBuilder : OpcodeHandler + { + CFGBuilder(Compiler &compiler_); + + bool follow_function_call(const SPIRFunction &func) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + Compiler &compiler; + std::unordered_map> function_cfgs; + }; + + struct AnalyzeVariableScopeAccessHandler : OpcodeHandler + { + AnalyzeVariableScopeAccessHandler(Compiler &compiler_, SPIRFunction &entry_); + + bool follow_function_call(const SPIRFunction &) override; + void set_current_block(const SPIRBlock &block) override; + + void notify_variable_access(uint32_t id, uint32_t block); + bool id_is_phi_variable(uint32_t id) const; + bool id_is_potential_temporary(uint32_t id) const; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + SPIRFunction &entry; + std::unordered_map> accessed_variables_to_block; + std::unordered_map> accessed_temporaries_to_block; + std::unordered_map result_id_to_type; + std::unordered_map> complete_write_variables_to_block; + std::unordered_map> partial_write_variables_to_block; + const SPIRBlock *current_block = nullptr; + }; + + struct StaticExpressionAccessHandler : OpcodeHandler + { + StaticExpressionAccessHandler(Compiler &compiler_, uint32_t variable_id_); + bool follow_function_call(const SPIRFunction &) override; + bool handle(spv::Op op, const uint32_t *args, uint32_t length) override; + + Compiler &compiler; + uint32_t variable_id; + uint32_t static_expression = 0; + uint32_t write_count = 0; + }; + + void analyze_variable_scope(SPIRFunction &function, AnalyzeVariableScopeAccessHandler &handler); + void find_function_local_luts(SPIRFunction &function, const AnalyzeVariableScopeAccessHandler &handler); + + void make_constant_null(uint32_t id, uint32_t type); + + std::unordered_map declared_block_names; + + bool instruction_to_result_type(uint32_t &result_type, uint32_t &result_id, spv::Op op, const uint32_t *args, + uint32_t length); + + Bitset combined_decoration_for_member(const SPIRType &type, uint32_t index) const; + static bool is_desktop_only_format(spv::ImageFormat format); + + bool image_is_comparison(const SPIRType &type, uint32_t id) const; + +private: + // Used only to implement the old deprecated get_entry_point() interface. 
+ const SPIREntryPoint &get_first_entry_point(const std::string &name) const; + SPIREntryPoint &get_first_entry_point(const std::string &name); + + void fixup_type_alias(); + bool type_is_block_like(const SPIRType &type) const; +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_cross_parsed_ir.cpp b/3rdparty/spirv-cross/spirv_cross_parsed_ir.cpp new file mode 100644 index 000000000..a18042333 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cross_parsed_ir.cpp @@ -0,0 +1,552 @@ +/* + * Copyright 2018-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "spirv_cross_parsed_ir.hpp" +#include + +using namespace std; +using namespace spv; + +namespace spirv_cross +{ +void ParsedIR::set_id_bounds(uint32_t bounds) +{ + ids.resize(bounds); + meta.resize(bounds); + block_meta.resize(bounds); +} + +static string ensure_valid_identifier(const string &name, bool member) +{ + // Functions in glslangValidator are mangled with name( stuff. + // Normally, we would never see '(' in any legal identifiers, so just strip them out. + auto str = name.substr(0, name.find('(')); + + for (uint32_t i = 0; i < str.size(); i++) + { + auto &c = str[i]; + + if (member) + { + // _m variables are reserved by the internal implementation, + // otherwise, make sure the name is a valid identifier. + if (i == 0) + c = isalpha(c) ? c : '_'; + else if (i == 2 && str[0] == '_' && str[1] == 'm') + c = isalpha(c) ? c : '_'; + else + c = isalnum(c) ? c : '_'; + } + else + { + // _ variables are reserved by the internal implementation, + // otherwise, make sure the name is a valid identifier. + if (i == 0 || (str[0] == '_' && i == 1)) + c = isalpha(c) ? c : '_'; + else + c = isalnum(c) ? c : '_'; + } + } + return str; +} + +const string &ParsedIR::get_name(uint32_t id) const +{ + return meta[id].decoration.alias; +} + +const string &ParsedIR::get_member_name(uint32_t id, uint32_t index) const +{ + auto &m = meta[id]; + if (index >= m.members.size()) + { + static string empty; + return empty; + } + + return m.members[index].alias; +} + +void ParsedIR::set_name(uint32_t id, const string &name) +{ + auto &str = meta[id].decoration.alias; + str.clear(); + + if (name.empty()) + return; + + // Reserved for temporaries. + if (name[0] == '_' && name.size() >= 2 && isdigit(name[1])) + return; + + str = ensure_valid_identifier(name, false); +} + +void ParsedIR::set_member_name(uint32_t id, uint32_t index, const string &name) +{ + meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1)); + + auto &str = meta[id].members[index].alias; + str.clear(); + if (name.empty()) + return; + + // Reserved for unnamed members. 
+ if (name[0] == '_' && name.size() >= 3 && name[1] == 'm' && isdigit(name[2])) + return; + + str = ensure_valid_identifier(name, true); +} + +void ParsedIR::set_decoration_string(uint32_t id, Decoration decoration, const string &argument) +{ + auto &dec = meta[id].decoration; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic = argument; + break; + + default: + break; + } +} + +void ParsedIR::set_decoration(uint32_t id, Decoration decoration, uint32_t argument) +{ + auto &dec = meta[id].decoration; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = true; + dec.builtin_type = static_cast(argument); + break; + + case DecorationLocation: + dec.location = argument; + break; + + case DecorationComponent: + dec.component = argument; + break; + + case DecorationOffset: + dec.offset = argument; + break; + + case DecorationArrayStride: + dec.array_stride = argument; + break; + + case DecorationMatrixStride: + dec.matrix_stride = argument; + break; + + case DecorationBinding: + dec.binding = argument; + break; + + case DecorationDescriptorSet: + dec.set = argument; + break; + + case DecorationInputAttachmentIndex: + dec.input_attachment = argument; + break; + + case DecorationSpecId: + dec.spec_id = argument; + break; + + case DecorationIndex: + dec.index = argument; + break; + + case DecorationHlslCounterBufferGOOGLE: + meta[id].hlsl_magic_counter_buffer = argument; + meta[argument].hlsl_is_magic_counter_buffer = true; + break; + + case DecorationFPRoundingMode: + dec.fp_rounding_mode = static_cast(argument); + break; + + default: + break; + } +} + +void ParsedIR::set_member_decoration(uint32_t id, uint32_t index, Decoration decoration, uint32_t argument) +{ + meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1)); + auto &dec = meta[id].members[index]; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = true; + dec.builtin_type = static_cast(argument); + break; + + case DecorationLocation: + dec.location = argument; + break; + + case DecorationComponent: + dec.component = argument; + break; + + case DecorationBinding: + dec.binding = argument; + break; + + case DecorationOffset: + dec.offset = argument; + break; + + case DecorationSpecId: + dec.spec_id = argument; + break; + + case DecorationMatrixStride: + dec.matrix_stride = argument; + break; + + case DecorationIndex: + dec.index = argument; + break; + + default: + break; + } +} + +// Recursively marks any constants referenced by the specified constant instruction as being used +// as an array length. The id must be a constant instruction (SPIRConstant or SPIRConstantOp). +void ParsedIR::mark_used_as_array_length(uint32_t id) +{ + switch (ids[id].get_type()) + { + case TypeConstant: + get(id).is_used_as_array_length = true; + break; + + case TypeConstantOp: + { + auto &cop = get(id); + for (uint32_t arg_id : cop.arguments) + mark_used_as_array_length(arg_id); + break; + } + + case TypeUndef: + break; + + default: + assert(0); + } +} + +Bitset ParsedIR::get_buffer_block_flags(const SPIRVariable &var) const +{ + auto &type = get(var.basetype); + assert(type.basetype == SPIRType::Struct); + + // Some flags like non-writable, non-readable are actually found + // as member decorations. If all members have a decoration set, propagate + // the decoration up as a regular variable decoration. 
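+ // For example, if every member of an SSBO struct is decorated NonWritable, the merge below
+ // allows the whole block to be emitted as "readonly".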
+ Bitset base_flags = meta[var.self].decoration.decoration_flags; + + if (type.member_types.empty()) + return base_flags; + + Bitset all_members_flags = get_member_decoration_bitset(type.self, 0); + for (uint32_t i = 1; i < uint32_t(type.member_types.size()); i++) + all_members_flags.merge_and(get_member_decoration_bitset(type.self, i)); + + base_flags.merge_or(all_members_flags); + return base_flags; +} + +const Bitset &ParsedIR::get_member_decoration_bitset(uint32_t id, uint32_t index) const +{ + auto &m = meta[id]; + if (index >= m.members.size()) + { + static const Bitset cleared = {}; + return cleared; + } + + return m.members[index].decoration_flags; +} + +bool ParsedIR::has_decoration(uint32_t id, Decoration decoration) const +{ + return get_decoration_bitset(id).get(decoration); +} + +uint32_t ParsedIR::get_decoration(uint32_t id, Decoration decoration) const +{ + auto &dec = meta[id].decoration; + if (!dec.decoration_flags.get(decoration)) + return 0; + + switch (decoration) + { + case DecorationBuiltIn: + return dec.builtin_type; + case DecorationLocation: + return dec.location; + case DecorationComponent: + return dec.component; + case DecorationOffset: + return dec.offset; + case DecorationBinding: + return dec.binding; + case DecorationDescriptorSet: + return dec.set; + case DecorationInputAttachmentIndex: + return dec.input_attachment; + case DecorationSpecId: + return dec.spec_id; + case DecorationArrayStride: + return dec.array_stride; + case DecorationMatrixStride: + return dec.matrix_stride; + case DecorationIndex: + return dec.index; + case DecorationFPRoundingMode: + return dec.fp_rounding_mode; + default: + return 1; + } +} + +const string &ParsedIR::get_decoration_string(uint32_t id, Decoration decoration) const +{ + auto &dec = meta[id].decoration; + static const string empty; + + if (!dec.decoration_flags.get(decoration)) + return empty; + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + return dec.hlsl_semantic; + + default: + return empty; + } +} + +void ParsedIR::unset_decoration(uint32_t id, Decoration decoration) +{ + auto &dec = meta[id].decoration; + dec.decoration_flags.clear(decoration); + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = false; + break; + + case DecorationLocation: + dec.location = 0; + break; + + case DecorationComponent: + dec.component = 0; + break; + + case DecorationOffset: + dec.offset = 0; + break; + + case DecorationBinding: + dec.binding = 0; + break; + + case DecorationDescriptorSet: + dec.set = 0; + break; + + case DecorationInputAttachmentIndex: + dec.input_attachment = 0; + break; + + case DecorationSpecId: + dec.spec_id = 0; + break; + + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic.clear(); + break; + + case DecorationFPRoundingMode: + dec.fp_rounding_mode = FPRoundingModeMax; + break; + + case DecorationHlslCounterBufferGOOGLE: + { + auto &counter = meta[id].hlsl_magic_counter_buffer; + if (counter) + { + meta[counter].hlsl_is_magic_counter_buffer = false; + counter = 0; + } + break; + } + + default: + break; + } +} + +bool ParsedIR::has_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const +{ + return get_member_decoration_bitset(id, index).get(decoration); +} + +uint32_t ParsedIR::get_member_decoration(uint32_t id, uint32_t index, Decoration decoration) const +{ + auto &m = meta[id]; + if (index >= m.members.size()) + return 0; + + auto &dec = m.members[index]; + if (!dec.decoration_flags.get(decoration)) + return 0; + + switch (decoration) + { + case 
DecorationBuiltIn: + return dec.builtin_type; + case DecorationLocation: + return dec.location; + case DecorationComponent: + return dec.component; + case DecorationBinding: + return dec.binding; + case DecorationOffset: + return dec.offset; + case DecorationSpecId: + return dec.spec_id; + case DecorationIndex: + return dec.index; + default: + return 1; + } +} + +const Bitset &ParsedIR::get_decoration_bitset(uint32_t id) const +{ + auto &dec = meta[id].decoration; + return dec.decoration_flags; +} + +void ParsedIR::set_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration, const string &argument) +{ + meta[id].members.resize(max(meta[id].members.size(), size_t(index) + 1)); + auto &dec = meta[id].members[index]; + dec.decoration_flags.set(decoration); + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic = argument; + break; + + default: + break; + } +} + +const string &ParsedIR::get_member_decoration_string(uint32_t id, uint32_t index, Decoration decoration) const +{ + static const string empty; + auto &m = meta[id]; + + if (!has_member_decoration(id, index, decoration)) + return empty; + + auto &dec = m.members[index]; + + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + return dec.hlsl_semantic; + + default: + return empty; + } +} + +void ParsedIR::unset_member_decoration(uint32_t id, uint32_t index, Decoration decoration) +{ + auto &m = meta[id]; + if (index >= m.members.size()) + return; + + auto &dec = m.members[index]; + + dec.decoration_flags.clear(decoration); + switch (decoration) + { + case DecorationBuiltIn: + dec.builtin = false; + break; + + case DecorationLocation: + dec.location = 0; + break; + + case DecorationComponent: + dec.component = 0; + break; + + case DecorationOffset: + dec.offset = 0; + break; + + case DecorationSpecId: + dec.spec_id = 0; + break; + + case DecorationHlslSemanticGOOGLE: + dec.hlsl_semantic.clear(); + break; + + default: + break; + } +} + +uint32_t ParsedIR::increase_bound_by(uint32_t incr_amount) +{ + auto curr_bound = ids.size(); + auto new_bound = curr_bound + incr_amount; + ids.resize(new_bound); + meta.resize(new_bound); + block_meta.resize(new_bound); + return uint32_t(curr_bound); +} + +} // namespace spirv_cross diff --git a/3rdparty/spirv-cross/spirv_cross_parsed_ir.hpp b/3rdparty/spirv-cross/spirv_cross_parsed_ir.hpp new file mode 100644 index 000000000..f363b0075 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cross_parsed_ir.hpp @@ -0,0 +1,129 @@ +/* + * Copyright 2018-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_PARSED_IR_HPP +#define SPIRV_CROSS_PARSED_IR_HPP + +#include "spirv_common.hpp" +#include +#include +#include + +namespace spirv_cross +{ + +// This data structure holds all information needed to perform cross-compilation and reflection. +// It is the output of the Parser, but any implementation could create this structure. 
+// It is intentionally very "open" and struct-like with some helper functions to deal with decorations.
+// Parser is the reference implementation of how this data structure should be filled in.
+
+class ParsedIR
+{
+public:
+ // Resizes ids, meta and block_meta.
+ void set_id_bounds(uint32_t bounds);
+
+ // The raw SPIR-V, instructions and opcodes refer to this by offset + count.
+ std::vector<uint32_t> spirv;
+
+ // Holds various data structures which inherit from IVariant.
+ std::vector<Variant> ids;
+
+ // Various meta data for IDs, decorations, names, etc.
+ std::vector<Meta> meta;
+
+ // Declared capabilities and extensions in the SPIR-V module.
+ // Not really used except for reflection at the moment.
+ std::vector<spv::Capability> declared_capabilities;
+ std::vector<std::string> declared_extensions;
+
+ // Meta data about blocks. The cross-compiler needs to query if a block is either of these types.
+ // It is a bitset as there can be more than one tag per block.
+ enum BlockMetaFlagBits
+ {
+ BLOCK_META_LOOP_HEADER_BIT = 1 << 0,
+ BLOCK_META_CONTINUE_BIT = 1 << 1,
+ BLOCK_META_LOOP_MERGE_BIT = 1 << 2,
+ BLOCK_META_SELECTION_MERGE_BIT = 1 << 3,
+ BLOCK_META_MULTISELECT_MERGE_BIT = 1 << 4
+ };
+ using BlockMetaFlags = uint8_t;
+ std::vector<BlockMetaFlags> block_meta;
+ std::unordered_map<uint32_t, uint32_t> continue_block_to_loop_header;
+
+ // Normally, we'd stick SPIREntryPoint in ids array, but it conflicts with SPIRFunction.
+ // Entry points can therefore be seen as some sort of meta structure.
+ std::unordered_map<uint32_t, SPIREntryPoint> entry_points;
+ uint32_t default_entry_point = 0;
+
+ struct Source
+ {
+ uint32_t version = 0;
+ bool es = false;
+ bool known = false;
+ bool hlsl = false;
+
+ Source() = default;
+ };
+
+ Source source;
+
+ // Decoration handling methods.
+ // Can be useful for simple "raw" reflection.
+ // However, most members are here because the Parser needs most of these,
+ // and might as well just have the whole suite of decoration/name handling in one place.
+ void set_name(uint32_t id, const std::string &name);
+ const std::string &get_name(uint32_t id) const;
+ void set_decoration(uint32_t id, spv::Decoration decoration, uint32_t argument = 0);
+ void set_decoration_string(uint32_t id, spv::Decoration decoration, const std::string &argument);
+ bool has_decoration(uint32_t id, spv::Decoration decoration) const;
+ uint32_t get_decoration(uint32_t id, spv::Decoration decoration) const;
+ const std::string &get_decoration_string(uint32_t id, spv::Decoration decoration) const;
+ const Bitset &get_decoration_bitset(uint32_t id) const;
+ void unset_decoration(uint32_t id, spv::Decoration decoration);
+
+ // Decoration handling methods (for members of a struct).
+ void set_member_name(uint32_t id, uint32_t index, const std::string &name); + const std::string &get_member_name(uint32_t id, uint32_t index) const; + void set_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration, uint32_t argument = 0); + void set_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration, + const std::string &argument); + uint32_t get_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const; + const std::string &get_member_decoration_string(uint32_t id, uint32_t index, spv::Decoration decoration) const; + bool has_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration) const; + const Bitset &get_member_decoration_bitset(uint32_t id, uint32_t index) const; + void unset_member_decoration(uint32_t id, uint32_t index, spv::Decoration decoration); + + void mark_used_as_array_length(uint32_t id); + uint32_t increase_bound_by(uint32_t count); + Bitset get_buffer_block_flags(const SPIRVariable &var) const; + +private: + template + T &get(uint32_t id) + { + return variant_get(ids[id]); + } + + template + const T &get(uint32_t id) const + { + return variant_get(ids[id]); + } +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_cross_util.cpp b/3rdparty/spirv-cross/spirv_cross_util.cpp new file mode 100644 index 000000000..58c1ddc2d --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cross_util.cpp @@ -0,0 +1,70 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "spirv_cross_util.hpp" +#include "spirv_common.hpp" + +using namespace spv; +using namespace spirv_cross; + +namespace spirv_cross_util +{ +void rename_interface_variable(spirv_cross::Compiler &compiler, const std::vector &resources, + uint32_t location, const std::string &name) +{ + for (auto &v : resources) + { + if (!compiler.has_decoration(v.id, spv::DecorationLocation)) + continue; + + auto loc = compiler.get_decoration(v.id, spv::DecorationLocation); + if (loc != location) + continue; + + auto &type = compiler.get_type(v.base_type_id); + + // This is more of a friendly variant. If we need to rename interface variables, we might have to rename + // structs as well and make sure all the names match up. 
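+ // For example, an interface block at location 3 becomes SPIRV_Cross_Interface_Location3,
+ // so the struct name can be kept identical on both sides of the interface.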
+ if (type.basetype == SPIRType::Struct) + { + compiler.set_name(v.base_type_id, join("SPIRV_Cross_Interface_Location", location)); + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + compiler.set_member_name(v.base_type_id, i, join("InterfaceMember", i)); + } + + compiler.set_name(v.id, name); + } +} + +void inherit_combined_sampler_bindings(spirv_cross::Compiler &compiler) +{ + auto &samplers = compiler.get_combined_image_samplers(); + for (auto &s : samplers) + { + if (compiler.has_decoration(s.image_id, spv::DecorationDescriptorSet)) + { + uint32_t set = compiler.get_decoration(s.image_id, spv::DecorationDescriptorSet); + compiler.set_decoration(s.combined_id, spv::DecorationDescriptorSet, set); + } + + if (compiler.has_decoration(s.image_id, spv::DecorationBinding)) + { + uint32_t binding = compiler.get_decoration(s.image_id, spv::DecorationBinding); + compiler.set_decoration(s.combined_id, spv::DecorationBinding, binding); + } + } +} +} // namespace spirv_cross_util diff --git a/3rdparty/spirv-cross/spirv_cross_util.hpp b/3rdparty/spirv-cross/spirv_cross_util.hpp new file mode 100644 index 000000000..faf0f4876 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_cross_util.hpp @@ -0,0 +1,29 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_UTIL_HPP +#define SPIRV_CROSS_UTIL_HPP + +#include "spirv_cross.hpp" + +namespace spirv_cross_util +{ +void rename_interface_variable(spirv_cross::Compiler &compiler, const std::vector &resources, + uint32_t location, const std::string &name); +void inherit_combined_sampler_bindings(spirv_cross::Compiler &compiler); +} // namespace spirv_cross_util + +#endif diff --git a/3rdparty/spirv-cross/spirv_glsl.cpp b/3rdparty/spirv-cross/spirv_glsl.cpp new file mode 100644 index 000000000..e3b7f4080 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_glsl.cpp @@ -0,0 +1,10863 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "spirv_glsl.hpp" +#include "GLSL.std.450.h" +#include "spirv_common.hpp" +#include +#include +#include +#include +#include + +using namespace spv; +using namespace spirv_cross; +using namespace std; + +static bool is_unsigned_opcode(Op op) +{ + // Don't have to be exhaustive, only relevant for legacy target checking ... 
+ switch (op) + { + case OpShiftRightLogical: + case OpUGreaterThan: + case OpUGreaterThanEqual: + case OpULessThan: + case OpULessThanEqual: + case OpUConvert: + case OpUDiv: + case OpUMod: + case OpUMulExtended: + case OpConvertUToF: + case OpConvertFToU: + return true; + + default: + return false; + } +} + +static bool is_unsigned_glsl_opcode(GLSLstd450 op) +{ + // Don't have to be exhaustive, only relevant for legacy target checking ... + switch (op) + { + case GLSLstd450UClamp: + case GLSLstd450UMin: + case GLSLstd450UMax: + case GLSLstd450FindUMsb: + return true; + + default: + return false; + } +} + +static bool packing_is_vec4_padded(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingHLSLCbuffer: + case BufferPackingHLSLCbufferPackOffset: + case BufferPackingStd140: + case BufferPackingStd140EnhancedLayout: + return true; + + default: + return false; + } +} + +static bool packing_is_hlsl(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingHLSLCbuffer: + case BufferPackingHLSLCbufferPackOffset: + return true; + + default: + return false; + } +} + +static bool packing_has_flexible_offset(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingStd140: + case BufferPackingStd430: + case BufferPackingHLSLCbuffer: + return false; + + default: + return true; + } +} + +static BufferPackingStandard packing_to_substruct_packing(BufferPackingStandard packing) +{ + switch (packing) + { + case BufferPackingStd140EnhancedLayout: + return BufferPackingStd140; + case BufferPackingStd430EnhancedLayout: + return BufferPackingStd430; + case BufferPackingHLSLCbufferPackOffset: + return BufferPackingHLSLCbuffer; + default: + return packing; + } +} + +// Sanitizes underscores for GLSL where multiple underscores in a row are not allowed. +string CompilerGLSL::sanitize_underscores(const string &str) +{ + string res; + res.reserve(str.size()); + + bool last_underscore = false; + for (auto c : str) + { + if (c == '_') + { + if (last_underscore) + continue; + + res += c; + last_underscore = true; + } + else + { + res += c; + last_underscore = false; + } + } + return res; +} + +// Returns true if an arithmetic operation does not change behavior depending on signedness. 
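+// For example, OpIAdd and OpIMul yield the same bit pattern for int and uint operands
+// (two's complement), whereas OpSDiv/OpUDiv or OpSLessThan/OpULessThan do not.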
+static bool glsl_opcode_is_sign_invariant(Op opcode) +{ + switch (opcode) + { + case OpIEqual: + case OpINotEqual: + case OpISub: + case OpIAdd: + case OpIMul: + case OpShiftLeftLogical: + case OpBitwiseOr: + case OpBitwiseXor: + case OpBitwiseAnd: + return true; + + default: + return false; + } +} + +static const char *to_pls_layout(PlsFormat format) +{ + switch (format) + { + case PlsR11FG11FB10F: + return "layout(r11f_g11f_b10f) "; + case PlsR32F: + return "layout(r32f) "; + case PlsRG16F: + return "layout(rg16f) "; + case PlsRGB10A2: + return "layout(rgb10_a2) "; + case PlsRGBA8: + return "layout(rgba8) "; + case PlsRG16: + return "layout(rg16) "; + case PlsRGBA8I: + return "layout(rgba8i)"; + case PlsRG16I: + return "layout(rg16i) "; + case PlsRGB10A2UI: + return "layout(rgb10_a2ui) "; + case PlsRGBA8UI: + return "layout(rgba8ui) "; + case PlsRG16UI: + return "layout(rg16ui) "; + case PlsR32UI: + return "layout(r32ui) "; + default: + return ""; + } +} + +static SPIRType::BaseType pls_format_to_basetype(PlsFormat format) +{ + switch (format) + { + default: + case PlsR11FG11FB10F: + case PlsR32F: + case PlsRG16F: + case PlsRGB10A2: + case PlsRGBA8: + case PlsRG16: + return SPIRType::Float; + + case PlsRGBA8I: + case PlsRG16I: + return SPIRType::Int; + + case PlsRGB10A2UI: + case PlsRGBA8UI: + case PlsRG16UI: + case PlsR32UI: + return SPIRType::UInt; + } +} + +static uint32_t pls_format_to_components(PlsFormat format) +{ + switch (format) + { + default: + case PlsR32F: + case PlsR32UI: + return 1; + + case PlsRG16F: + case PlsRG16: + case PlsRG16UI: + case PlsRG16I: + return 2; + + case PlsR11FG11FB10F: + return 3; + + case PlsRGB10A2: + case PlsRGBA8: + case PlsRGBA8I: + case PlsRGB10A2UI: + case PlsRGBA8UI: + return 4; + } +} + +static const char *vector_swizzle(int vecsize, int index) +{ + static const char *swizzle[4][4] = { + { ".x", ".y", ".z", ".w" }, { ".xy", ".yz", ".zw" }, { ".xyz", ".yzw" }, { "" } + }; + + assert(vecsize >= 1 && vecsize <= 4); + assert(index >= 0 && index < 4); + assert(swizzle[vecsize - 1][index]); + + return swizzle[vecsize - 1][index]; +} + +void CompilerGLSL::reset() +{ + // We do some speculative optimizations which should pretty much always work out, + // but just in case the SPIR-V is rather weird, recompile until it's happy. + // This typically only means one extra pass. + force_recompile = false; + + // Clear invalid expression tracking. + invalid_expressions.clear(); + current_function = nullptr; + + // Clear temporary usage tracking. + expression_usage_counts.clear(); + forwarded_temporaries.clear(); + + resource_names.clear(); + block_input_names.clear(); + block_output_names.clear(); + block_ubo_names.clear(); + block_ssbo_names.clear(); + block_names.clear(); + function_overloads.clear(); + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + // Clear unflushed dependees. + id.get().dependees.clear(); + } + else if (id.get_type() == TypeExpression) + { + // And remove all expressions. + id.reset(); + } + else if (id.get_type() == TypeFunction) + { + // Reset active state for all functions. 
+ id.get().active = false; + id.get().flush_undeclared = true; + } + } + + statement_count = 0; + indent = 0; +} + +void CompilerGLSL::remap_pls_variables() +{ + for (auto &input : pls_inputs) + { + auto &var = get(input.id); + + bool input_is_target = false; + if (var.storage == StorageClassUniformConstant) + { + auto &type = get(var.basetype); + input_is_target = type.image.dim == DimSubpassData; + } + + if (var.storage != StorageClassInput && !input_is_target) + SPIRV_CROSS_THROW("Can only use in and target variables for PLS inputs."); + var.remapped_variable = true; + } + + for (auto &output : pls_outputs) + { + auto &var = get(output.id); + if (var.storage != StorageClassOutput) + SPIRV_CROSS_THROW("Can only use out variables for PLS outputs."); + var.remapped_variable = true; + } +} + +void CompilerGLSL::find_static_extensions() +{ + for (auto &id : ir.ids) + { + if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Double) + { + if (options.es) + SPIRV_CROSS_THROW("FP64 not supported in ES profile."); + if (!options.es && options.version < 400) + require_extension_internal("GL_ARB_gpu_shader_fp64"); + } + + if (type.basetype == SPIRType::Int64 || type.basetype == SPIRType::UInt64) + { + if (options.es) + SPIRV_CROSS_THROW("64-bit integers not supported in ES profile."); + if (!options.es) + require_extension_internal("GL_ARB_gpu_shader_int64"); + } + + if (type.basetype == SPIRType::Half) + require_extension_internal("GL_AMD_gpu_shader_half_float"); + + if (type.basetype == SPIRType::SByte || type.basetype == SPIRType::UByte) + require_extension_internal("GL_EXT_shader_8bit_storage"); + + if (type.basetype == SPIRType::Short || type.basetype == SPIRType::UShort) + require_extension_internal("GL_AMD_gpu_shader_int16"); + } + } + + auto &execution = get_entry_point(); + switch (execution.model) + { + case ExecutionModelGLCompute: + if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_compute_shader"); + if (options.es && options.version < 310) + SPIRV_CROSS_THROW("At least ESSL 3.10 required for compute shaders."); + break; + + case ExecutionModelGeometry: + if (options.es && options.version < 320) + require_extension_internal("GL_EXT_geometry_shader"); + if (!options.es && options.version < 150) + require_extension_internal("GL_ARB_geometry_shader4"); + + if (execution.flags.get(ExecutionModeInvocations) && execution.invocations != 1) + { + // Instanced GS is part of 400 core or this extension. 
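+ // For example, layout(invocations = 4) in; requires GL_ARB_gpu_shader5 when targeting GL < 4.00.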
+ if (!options.es && options.version < 400) + require_extension_internal("GL_ARB_gpu_shader5"); + } + break; + + case ExecutionModelTessellationEvaluation: + case ExecutionModelTessellationControl: + if (options.es && options.version < 320) + require_extension_internal("GL_EXT_tessellation_shader"); + if (!options.es && options.version < 400) + require_extension_internal("GL_ARB_tessellation_shader"); + break; + + default: + break; + } + + if (!pls_inputs.empty() || !pls_outputs.empty()) + require_extension_internal("GL_EXT_shader_pixel_local_storage"); + + if (options.separate_shader_objects && !options.es && options.version < 410) + require_extension_internal("GL_ARB_separate_shader_objects"); +} + +string CompilerGLSL::compile() +{ + // Force a classic "C" locale, reverts when function returns + ClassicLocale classic_locale; + + if (options.vulkan_semantics) + backend.allow_precision_qualifiers = true; + backend.force_gl_in_out_block = true; + backend.supports_extensions = true; + + // Scan the SPIR-V to find trivial uses of extensions. + build_function_control_flow_graphs_and_analyze(); + find_static_extensions(); + fixup_image_load_store_access(); + update_active_builtins(); + analyze_image_and_sampler_usage(); + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!"); + + reset(); + + // Move constructor for this type is broken on GCC 4.9 ... + buffer = unique_ptr(new ostringstream()); + + emit_header(); + emit_resources(); + + emit_function(get(ir.default_entry_point), Bitset()); + + pass_count++; + } while (force_recompile); + + // Entry point in GLSL is always main(). + get_entry_point().name = "main"; + + return buffer->str(); +} + +std::string CompilerGLSL::get_partial_source() +{ + return buffer ? buffer->str() : "No compiled source available yet."; +} + +void CompilerGLSL::build_workgroup_size(vector &arguments, const SpecializationConstant &wg_x, + const SpecializationConstant &wg_y, const SpecializationConstant &wg_z) +{ + auto &execution = get_entry_point(); + + if (wg_x.id) + { + if (options.vulkan_semantics) + arguments.push_back(join("local_size_x_id = ", wg_x.constant_id)); + else + arguments.push_back(join("local_size_x = ", get(wg_x.id).specialization_constant_macro_name)); + } + else + arguments.push_back(join("local_size_x = ", execution.workgroup_size.x)); + + if (wg_y.id) + { + if (options.vulkan_semantics) + arguments.push_back(join("local_size_y_id = ", wg_y.constant_id)); + else + arguments.push_back(join("local_size_y = ", get(wg_y.id).specialization_constant_macro_name)); + } + else + arguments.push_back(join("local_size_y = ", execution.workgroup_size.y)); + + if (wg_z.id) + { + if (options.vulkan_semantics) + arguments.push_back(join("local_size_z_id = ", wg_z.constant_id)); + else + arguments.push_back(join("local_size_z = ", get(wg_z.id).specialization_constant_macro_name)); + } + else + arguments.push_back(join("local_size_z = ", execution.workgroup_size.z)); +} + +void CompilerGLSL::emit_header() +{ + auto &execution = get_entry_point(); + statement("#version ", options.version, options.es && options.version > 100 ? " es" : ""); + + if (!options.es && options.version < 420) + { + // Needed for binding = # on UBOs, etc. 
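+ // For example, layout(binding = 1, std140) uniform UBO { ... }; is only accepted below GL 4.20
+ // when GL_ARB_shading_language_420pack is enabled.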
+ if (options.enable_420pack_extension) + { + statement("#ifdef GL_ARB_shading_language_420pack"); + statement("#extension GL_ARB_shading_language_420pack : require"); + statement("#endif"); + } + // Needed for: layout(early_fragment_tests) in; + if (execution.flags.get(ExecutionModeEarlyFragmentTests)) + require_extension_internal("GL_ARB_shader_image_load_store"); + } + + for (auto &ext : forced_extensions) + { + if (ext == "GL_AMD_gpu_shader_half_float") + { + // Special case, this extension has a potential fallback to another vendor extension in normal GLSL. + // GL_AMD_gpu_shader_half_float is a superset, so try that first. + statement("#if defined(GL_AMD_gpu_shader_half_float)"); + statement("#extension GL_AMD_gpu_shader_half_float : require"); + if (!options.vulkan_semantics) + { + statement("#elif defined(GL_NV_gpu_shader5)"); + statement("#extension GL_NV_gpu_shader5 : require"); + } + statement("#elif defined(GL_EXT_shader_16bit_storage)"); + statement("#extension GL_EXT_shader_16bit_storage : require"); + statement("#else"); + statement("#error No extension available for FP16."); + statement("#endif"); + } + else if (ext == "GL_AMD_gpu_shader_int16") + { + // GL_AMD_gpu_shader_int16 is a superset, so try that first. + statement("#if defined(GL_AMD_gpu_shader_int16)"); + statement("#extension GL_AMD_gpu_shader_int16 : require"); + statement("#elif defined(GL_EXT_shader_16bit_storage)"); + statement("#extension GL_EXT_shader_16bit_storage : require"); + statement("#else"); + statement("#error No extension available for Int16."); + statement("#endif"); + } + else + statement("#extension ", ext, " : require"); + } + + for (auto &header : header_lines) + statement(header); + + vector inputs; + vector outputs; + + switch (execution.model) + { + case ExecutionModelGeometry: + outputs.push_back(join("max_vertices = ", execution.output_vertices)); + if ((execution.flags.get(ExecutionModeInvocations)) && execution.invocations != 1) + inputs.push_back(join("invocations = ", execution.invocations)); + if (execution.flags.get(ExecutionModeInputPoints)) + inputs.push_back("points"); + if (execution.flags.get(ExecutionModeInputLines)) + inputs.push_back("lines"); + if (execution.flags.get(ExecutionModeInputLinesAdjacency)) + inputs.push_back("lines_adjacency"); + if (execution.flags.get(ExecutionModeTriangles)) + inputs.push_back("triangles"); + if (execution.flags.get(ExecutionModeInputTrianglesAdjacency)) + inputs.push_back("triangles_adjacency"); + if (execution.flags.get(ExecutionModeOutputTriangleStrip)) + outputs.push_back("triangle_strip"); + if (execution.flags.get(ExecutionModeOutputPoints)) + outputs.push_back("points"); + if (execution.flags.get(ExecutionModeOutputLineStrip)) + outputs.push_back("line_strip"); + break; + + case ExecutionModelTessellationControl: + if (execution.flags.get(ExecutionModeOutputVertices)) + outputs.push_back(join("vertices = ", execution.output_vertices)); + break; + + case ExecutionModelTessellationEvaluation: + if (execution.flags.get(ExecutionModeQuads)) + inputs.push_back("quads"); + if (execution.flags.get(ExecutionModeTriangles)) + inputs.push_back("triangles"); + if (execution.flags.get(ExecutionModeIsolines)) + inputs.push_back("isolines"); + if (execution.flags.get(ExecutionModePointMode)) + inputs.push_back("point_mode"); + + if (!execution.flags.get(ExecutionModeIsolines)) + { + if (execution.flags.get(ExecutionModeVertexOrderCw)) + inputs.push_back("cw"); + if (execution.flags.get(ExecutionModeVertexOrderCcw)) + inputs.push_back("ccw"); + } + 
+ if (execution.flags.get(ExecutionModeSpacingFractionalEven)) + inputs.push_back("fractional_even_spacing"); + if (execution.flags.get(ExecutionModeSpacingFractionalOdd)) + inputs.push_back("fractional_odd_spacing"); + if (execution.flags.get(ExecutionModeSpacingEqual)) + inputs.push_back("equal_spacing"); + break; + + case ExecutionModelGLCompute: + { + if (execution.workgroup_size.constant != 0) + { + SpecializationConstant wg_x, wg_y, wg_z; + get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + // If there are any spec constants on legacy GLSL, defer declaration, we need to set up macro + // declarations before we can emit the work group size. + if (options.vulkan_semantics || ((wg_x.id == 0) && (wg_y.id == 0) && (wg_z.id == 0))) + build_workgroup_size(inputs, wg_x, wg_y, wg_z); + } + else + { + inputs.push_back(join("local_size_x = ", execution.workgroup_size.x)); + inputs.push_back(join("local_size_y = ", execution.workgroup_size.y)); + inputs.push_back(join("local_size_z = ", execution.workgroup_size.z)); + } + break; + } + + case ExecutionModelFragment: + if (options.es) + { + switch (options.fragment.default_float_precision) + { + case Options::Lowp: + statement("precision lowp float;"); + break; + + case Options::Mediump: + statement("precision mediump float;"); + break; + + case Options::Highp: + statement("precision highp float;"); + break; + + default: + break; + } + + switch (options.fragment.default_int_precision) + { + case Options::Lowp: + statement("precision lowp int;"); + break; + + case Options::Mediump: + statement("precision mediump int;"); + break; + + case Options::Highp: + statement("precision highp int;"); + break; + + default: + break; + } + } + + if (execution.flags.get(ExecutionModeEarlyFragmentTests)) + inputs.push_back("early_fragment_tests"); + + if (!options.es && execution.flags.get(ExecutionModeDepthGreater)) + statement("layout(depth_greater) out float gl_FragDepth;"); + else if (!options.es && execution.flags.get(ExecutionModeDepthLess)) + statement("layout(depth_less) out float gl_FragDepth;"); + + break; + + default: + break; + } + + if (!inputs.empty()) + statement("layout(", merge(inputs), ") in;"); + if (!outputs.empty()) + statement("layout(", merge(outputs), ") out;"); + + statement(""); +} + +bool CompilerGLSL::type_is_empty(const SPIRType &type) +{ + return type.basetype == SPIRType::Struct && type.member_types.empty(); +} + +void CompilerGLSL::emit_struct(SPIRType &type) +{ + // Struct types can be stamped out multiple times + // with just different offsets, matrix layouts, etc ... + // Type-punning with these types is legal, which complicates things + // when we are storing struct and array types in an SSBO for example. + // If the type master is packed however, we can no longer assume that the struct declaration will be redundant. + if (type.type_alias != 0 && !has_decoration(type.type_alias, DecorationCPacked)) + return; + + add_resource_name(type.self); + auto name = type_to_glsl(type); + + statement(!backend.explicit_struct_type ? "struct " : "", name); + begin_scope(); + + type.member_name_cache.clear(); + + uint32_t i = 0; + bool emitted = false; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + emitted = true; + } + + // Don't declare empty structs in GLSL, this is not allowed. 
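+ // For example, "struct Foo { };" does not compile, so a dummy int member is emitted instead.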
+ if (type_is_empty(type) && !backend.supports_empty_struct) + { + statement("int empty_struct_member;"); + emitted = true; + } + + end_scope_decl(); + + if (emitted) + statement(""); +} + +string CompilerGLSL::to_interpolation_qualifiers(const Bitset &flags) +{ + string res; + //if (flags & (1ull << DecorationSmooth)) + // res += "smooth "; + if (flags.get(DecorationFlat)) + res += "flat "; + if (flags.get(DecorationNoPerspective)) + res += "noperspective "; + if (flags.get(DecorationCentroid)) + res += "centroid "; + if (flags.get(DecorationPatch)) + res += "patch "; + if (flags.get(DecorationSample)) + res += "sample "; + if (flags.get(DecorationInvariant)) + res += "invariant "; + if (flags.get(DecorationExplicitInterpAMD)) + res += "__explicitInterpAMD "; + + return res; +} + +string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index) +{ + if (is_legacy()) + return ""; + + bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + if (!is_block) + return ""; + + auto &memb = ir.meta[type.self].members; + if (index >= memb.size()) + return ""; + auto &dec = memb[index]; + + vector attr; + + // We can only apply layouts on members in block interfaces. + // This is a bit problematic because in SPIR-V decorations are applied on the struct types directly. + // This is not supported on GLSL, so we have to make the assumption that if a struct within our buffer block struct + // has a decoration, it was originally caused by a top-level layout() qualifier in GLSL. + // + // We would like to go from (SPIR-V style): + // + // struct Foo { layout(row_major) mat4 matrix; }; + // buffer UBO { Foo foo; }; + // + // to + // + // struct Foo { mat4 matrix; }; // GLSL doesn't support any layout shenanigans in raw struct declarations. + // buffer UBO { layout(row_major) Foo foo; }; // Apply the layout on top-level. + auto flags = combined_decoration_for_member(type, index); + + if (flags.get(DecorationRowMajor)) + attr.push_back("row_major"); + // We don't emit any global layouts, so column_major is default. + //if (flags & (1ull << DecorationColMajor)) + // attr.push_back("column_major"); + + if (dec.decoration_flags.get(DecorationLocation) && can_use_io_location(type.storage, true)) + attr.push_back(join("location = ", dec.location)); + + // Can only declare component if we can declare location. + if (dec.decoration_flags.get(DecorationComponent) && can_use_io_location(type.storage, true)) + { + if (!options.es) + { + if (options.version < 440 && options.version >= 140) + require_extension_internal("GL_ARB_enhanced_layouts"); + else if (options.version < 140) + SPIRV_CROSS_THROW("Component decoration is not supported in targets below GLSL 1.40."); + attr.push_back(join("component = ", dec.component)); + } + else + SPIRV_CROSS_THROW("Component decoration is not supported in ES targets."); + } + + // DecorationCPacked is set by layout_for_variable earlier to mark that we need to emit offset qualifiers. + // This is only done selectively in GLSL as needed. 
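+ // For example, a member can then be emitted as: layout(offset = 16) vec4 b;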
+ if (has_decoration(type.self, DecorationCPacked) && dec.decoration_flags.get(DecorationOffset)) + attr.push_back(join("offset = ", dec.offset)); + + if (attr.empty()) + return ""; + + string res = "layout("; + res += merge(attr); + res += ") "; + return res; +} + +const char *CompilerGLSL::format_to_glsl(spv::ImageFormat format) +{ + if (options.es && is_desktop_only_format(format)) + SPIRV_CROSS_THROW("Attempting to use image format not supported in ES profile."); + + switch (format) + { + case ImageFormatRgba32f: + return "rgba32f"; + case ImageFormatRgba16f: + return "rgba16f"; + case ImageFormatR32f: + return "r32f"; + case ImageFormatRgba8: + return "rgba8"; + case ImageFormatRgba8Snorm: + return "rgba8_snorm"; + case ImageFormatRg32f: + return "rg32f"; + case ImageFormatRg16f: + return "rg16f"; + case ImageFormatRgba32i: + return "rgba32i"; + case ImageFormatRgba16i: + return "rgba16i"; + case ImageFormatR32i: + return "r32i"; + case ImageFormatRgba8i: + return "rgba8i"; + case ImageFormatRg32i: + return "rg32i"; + case ImageFormatRg16i: + return "rg16i"; + case ImageFormatRgba32ui: + return "rgba32ui"; + case ImageFormatRgba16ui: + return "rgba16ui"; + case ImageFormatR32ui: + return "r32ui"; + case ImageFormatRgba8ui: + return "rgba8ui"; + case ImageFormatRg32ui: + return "rg32ui"; + case ImageFormatRg16ui: + return "rg16ui"; + case ImageFormatR11fG11fB10f: + return "r11f_g11f_b10f"; + case ImageFormatR16f: + return "r16f"; + case ImageFormatRgb10A2: + return "rgb10_a2"; + case ImageFormatR8: + return "r8"; + case ImageFormatRg8: + return "rg8"; + case ImageFormatR16: + return "r16"; + case ImageFormatRg16: + return "rg16"; + case ImageFormatRgba16: + return "rgba16"; + case ImageFormatR16Snorm: + return "r16_snorm"; + case ImageFormatRg16Snorm: + return "rg16_snorm"; + case ImageFormatRgba16Snorm: + return "rgba16_snorm"; + case ImageFormatR8Snorm: + return "r8_snorm"; + case ImageFormatRg8Snorm: + return "rg8_snorm"; + case ImageFormatR8ui: + return "r8ui"; + case ImageFormatRg8ui: + return "rg8ui"; + case ImageFormatR16ui: + return "r16ui"; + case ImageFormatRgb10a2ui: + return "rgb10_a2ui"; + case ImageFormatR8i: + return "r8i"; + case ImageFormatRg8i: + return "rg8i"; + case ImageFormatR16i: + return "r16i"; + default: + case ImageFormatUnknown: + return nullptr; + } +} + +uint32_t CompilerGLSL::type_to_packed_base_size(const SPIRType &type, BufferPackingStandard) +{ + switch (type.basetype) + { + case SPIRType::Double: + case SPIRType::Int64: + case SPIRType::UInt64: + return 8; + case SPIRType::Float: + case SPIRType::Int: + case SPIRType::UInt: + return 4; + case SPIRType::Half: + case SPIRType::Short: + case SPIRType::UShort: + return 2; + case SPIRType::SByte: + case SPIRType::UByte: + return 1; + + default: + SPIRV_CROSS_THROW("Unrecognized type in type_to_packed_base_size."); + } +} + +uint32_t CompilerGLSL::type_to_packed_alignment(const SPIRType &type, const Bitset &flags, + BufferPackingStandard packing) +{ + if (!type.array.empty()) + { + uint32_t minimum_alignment = 1; + if (packing_is_vec4_padded(packing)) + minimum_alignment = 16; + + auto *tmp = &get(type.parent_type); + while (!tmp->array.empty()) + tmp = &get(tmp->parent_type); + + // Get the alignment of the base type, then maybe round up. + return max(minimum_alignment, type_to_packed_alignment(*tmp, flags, packing)); + } + + if (type.basetype == SPIRType::Struct) + { + // Rule 9. Structs alignments are maximum alignment of its members. 
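+ // For example, struct { vec3 a; float b; } gets alignment 16 under std140, since vec3 aligns
+ // to 16 and std140 additionally rounds struct alignment up to a multiple of 16.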
+ uint32_t alignment = 0; + for (uint32_t i = 0; i < type.member_types.size(); i++) + { + auto member_flags = ir.meta[type.self].members.at(i).decoration_flags; + alignment = + max(alignment, type_to_packed_alignment(get(type.member_types[i]), member_flags, packing)); + } + + // In std140, struct alignment is rounded up to 16. + if (packing_is_vec4_padded(packing)) + alignment = max(alignment, 16u); + + return alignment; + } + else + { + const uint32_t base_alignment = type_to_packed_base_size(type, packing); + + // Vectors are *not* aligned in HLSL, but there's an extra rule where vectors cannot straddle + // a vec4, this is handled outside since that part knows our current offset. + if (type.columns == 1 && packing_is_hlsl(packing)) + return base_alignment; + + // From 7.6.2.2 in GL 4.5 core spec. + // Rule 1 + if (type.vecsize == 1 && type.columns == 1) + return base_alignment; + + // Rule 2 + if ((type.vecsize == 2 || type.vecsize == 4) && type.columns == 1) + return type.vecsize * base_alignment; + + // Rule 3 + if (type.vecsize == 3 && type.columns == 1) + return 4 * base_alignment; + + // Rule 4 implied. Alignment does not change in std430. + + // Rule 5. Column-major matrices are stored as arrays of + // vectors. + if (flags.get(DecorationColMajor) && type.columns > 1) + { + if (packing_is_vec4_padded(packing)) + return 4 * base_alignment; + else if (type.vecsize == 3) + return 4 * base_alignment; + else + return type.vecsize * base_alignment; + } + + // Rule 6 implied. + + // Rule 7. + if (flags.get(DecorationRowMajor) && type.vecsize > 1) + { + if (packing_is_vec4_padded(packing)) + return 4 * base_alignment; + else if (type.columns == 3) + return 4 * base_alignment; + else + return type.columns * base_alignment; + } + + // Rule 8 implied. + } + + SPIRV_CROSS_THROW("Did not find suitable rule for type. Bogus decorations?"); +} + +uint32_t CompilerGLSL::type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, + BufferPackingStandard packing) +{ + // Array stride is equal to aligned size of the underlying type. + uint32_t parent = type.parent_type; + assert(parent); + + auto &tmp = get(parent); + + uint32_t size = type_to_packed_size(tmp, flags, packing); + if (tmp.array.empty()) + { + uint32_t alignment = type_to_packed_alignment(type, flags, packing); + return (size + alignment - 1) & ~(alignment - 1); + } + else + { + // For multidimensional arrays, array stride always matches size of subtype. + // The alignment cannot change because multidimensional arrays are basically N * M array elements. + return size; + } +} + +uint32_t CompilerGLSL::type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing) +{ + if (!type.array.empty()) + { + return to_array_size_literal(type) * type_to_packed_array_stride(type, flags, packing); + } + + uint32_t size = 0; + + if (type.basetype == SPIRType::Struct) + { + uint32_t pad_alignment = 1; + + for (uint32_t i = 0; i < type.member_types.size(); i++) + { + auto member_flags = ir.meta[type.self].members.at(i).decoration_flags; + auto &member_type = get(type.member_types[i]); + + uint32_t packed_alignment = type_to_packed_alignment(member_type, member_flags, packing); + uint32_t alignment = max(packed_alignment, pad_alignment); + + // The next member following a struct member is aligned to the base alignment of the struct that came before. + // GL 4.5 spec, 7.6.2.2. 
+ if (member_type.basetype == SPIRType::Struct) + pad_alignment = packed_alignment; + else + pad_alignment = 1; + + size = (size + alignment - 1) & ~(alignment - 1); + size += type_to_packed_size(member_type, member_flags, packing); + } + } + else + { + const uint32_t base_alignment = type_to_packed_base_size(type, packing); + + if (type.columns == 1) + size = type.vecsize * base_alignment; + + if (flags.get(DecorationColMajor) && type.columns > 1) + { + if (packing_is_vec4_padded(packing)) + size = type.columns * 4 * base_alignment; + else if (type.vecsize == 3) + size = type.columns * 4 * base_alignment; + else + size = type.columns * type.vecsize * base_alignment; + } + + if (flags.get(DecorationRowMajor) && type.vecsize > 1) + { + if (packing_is_vec4_padded(packing)) + size = type.vecsize * 4 * base_alignment; + else if (type.columns == 3) + size = type.vecsize * 4 * base_alignment; + else + size = type.vecsize * type.columns * base_alignment; + } + } + + return size; +} + +bool CompilerGLSL::buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, + uint32_t start_offset, uint32_t end_offset) +{ + // This is very tricky and error prone, but try to be exhaustive and correct here. + // SPIR-V doesn't directly say if we're using std430 or std140. + // SPIR-V communicates this using Offset and ArrayStride decorations (which is what really matters), + // so we have to try to infer whether or not the original GLSL source was std140 or std430 based on this information. + // We do not have to consider shared or packed since these layouts are not allowed in Vulkan SPIR-V (they are useless anyways, and custom offsets would do the same thing). + // + // It is almost certain that we're using std430, but it gets tricky with arrays in particular. + // We will assume std430, but infer std140 if we can prove the struct is not compliant with std430. + // + // The only two differences between std140 and std430 are related to padding alignment/array stride + // in arrays and structs. In std140 they take minimum vec4 alignment. + // std430 only removes the vec4 requirement. + + uint32_t offset = 0; + uint32_t pad_alignment = 1; + + bool is_top_level_block = + has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock); + + for (uint32_t i = 0; i < type.member_types.size(); i++) + { + auto &memb_type = get(type.member_types[i]); + auto member_flags = ir.meta[type.self].members.at(i).decoration_flags; + + // Verify alignment rules. + uint32_t packed_alignment = type_to_packed_alignment(memb_type, member_flags, packing); + + // This is a rather dirty workaround to deal with some cases of OpSpecConstantOp used as array size, e.g: + // layout(constant_id = 0) const int s = 10; + // const int S = s + 5; // SpecConstantOp + // buffer Foo { int data[S]; }; // <-- Very hard for us to deduce a fixed value here, + // we would need full implementation of compile-time constant folding. :( + // If we are the last member of a struct, there might be cases where the actual size of that member is irrelevant + // for our analysis (e.g. unsized arrays). + // This lets us simply ignore that there are spec constant op sized arrays in our buffers. + // Querying size of this member will fail, so just don't call it unless we have to. + // + // This is likely "best effort" we can support without going into unacceptably complicated workarounds. 
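+ // For example, in buffer SSBO { uint count; uint data[]; }; the trailing runtime array has no
+ // fixed size, so its packed size is simply never queried.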
+ bool member_can_be_unsized = + is_top_level_block && size_t(i + 1) == type.member_types.size() && !memb_type.array.empty(); + + uint32_t packed_size = 0; + if (!member_can_be_unsized) + packed_size = type_to_packed_size(memb_type, member_flags, packing); + + // We only need to care about this if we have non-array types which can straddle the vec4 boundary. + if (packing_is_hlsl(packing)) + { + // If a member straddles across a vec4 boundary, alignment is actually vec4. + uint32_t begin_word = offset / 16; + uint32_t end_word = (offset + packed_size - 1) / 16; + if (begin_word != end_word) + packed_alignment = max(packed_alignment, 16u); + } + + uint32_t alignment = max(packed_alignment, pad_alignment); + offset = (offset + alignment - 1) & ~(alignment - 1); + + // Field is not in the specified range anymore and we can ignore any further fields. + if (offset >= end_offset) + break; + + // The next member following a struct member is aligned to the base alignment of the struct that came before. + // GL 4.5 spec, 7.6.2.2. + if (memb_type.basetype == SPIRType::Struct) + pad_alignment = packed_alignment; + else + pad_alignment = 1; + + // Only care about packing if we are in the given range + if (offset >= start_offset) + { + // We only care about offsets in std140, std430, etc ... + // For EnhancedLayout variants, we have the flexibility to choose our own offsets. + if (!packing_has_flexible_offset(packing)) + { + uint32_t actual_offset = type_struct_member_offset(type, i); + if (actual_offset != offset) // This cannot be the packing we're looking for. + return false; + } + + // Verify array stride rules. + if (!memb_type.array.empty() && type_to_packed_array_stride(memb_type, member_flags, packing) != + type_struct_member_array_stride(type, i)) + return false; + + // Verify that sub-structs also follow packing rules. + // We cannot use enhanced layouts on substructs, so they better be up to spec. + auto substruct_packing = packing_to_substruct_packing(packing); + + if (!memb_type.member_types.empty() && !buffer_is_packing_standard(memb_type, substruct_packing)) + return false; + } + + // Bump size. + offset += packed_size; + } + + return true; +} + +bool CompilerGLSL::can_use_io_location(StorageClass storage, bool block) +{ + // Location specifiers are must have in SPIR-V, but they aren't really supported in earlier versions of GLSL. + // Be very explicit here about how to solve the issue. + if ((get_execution_model() != ExecutionModelVertex && storage == StorageClassInput) || + (get_execution_model() != ExecutionModelFragment && storage == StorageClassOutput)) + { + uint32_t minimum_desktop_version = block ? 440 : 410; + // ARB_enhanced_layouts vs ARB_separate_shader_objects ... 
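+ // Roughly: location qualifiers on blocks need GL 4.40 (GL_ARB_enhanced_layouts), while plain
+ // variables only need GL 4.10 (GL_ARB_separate_shader_objects).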
+ + if (!options.es && options.version < minimum_desktop_version && !options.separate_shader_objects) + return false; + else if (options.es && options.version < 310) + return false; + } + + if ((get_execution_model() == ExecutionModelVertex && storage == StorageClassInput) || + (get_execution_model() == ExecutionModelFragment && storage == StorageClassOutput)) + { + if (options.es && options.version < 300) + return false; + else if (!options.es && options.version < 330) + return false; + } + + if (storage == StorageClassUniform || storage == StorageClassUniformConstant) + { + if (options.es && options.version < 310) + return false; + else if (!options.es && options.version < 430) + return false; + } + + return true; +} + +string CompilerGLSL::layout_for_variable(const SPIRVariable &var) +{ + // FIXME: Come up with a better solution for when to disable layouts. + // Having layouts depend on extensions as well as which types + // of layouts are used. For now, the simple solution is to just disable + // layouts for legacy versions. + if (is_legacy()) + return ""; + + vector attr; + + auto &dec = ir.meta[var.self].decoration; + auto &type = get(var.basetype); + auto &flags = dec.decoration_flags; + auto typeflags = ir.meta[type.self].decoration.decoration_flags; + + if (options.vulkan_semantics && var.storage == StorageClassPushConstant) + attr.push_back("push_constant"); + + if (flags.get(DecorationRowMajor)) + attr.push_back("row_major"); + if (flags.get(DecorationColMajor)) + attr.push_back("column_major"); + + if (options.vulkan_semantics) + { + if (flags.get(DecorationInputAttachmentIndex)) + attr.push_back(join("input_attachment_index = ", dec.input_attachment)); + } + + bool is_block = has_decoration(type.self, DecorationBlock); + if (flags.get(DecorationLocation) && can_use_io_location(var.storage, is_block)) + { + Bitset combined_decoration; + for (uint32_t i = 0; i < ir.meta[type.self].members.size(); i++) + combined_decoration.merge_or(combined_decoration_for_member(type, i)); + + // If our members have location decorations, we don't need to + // emit location decorations at the top as well (looks weird). + if (!combined_decoration.get(DecorationLocation)) + attr.push_back(join("location = ", dec.location)); + } + + // Can only declare Component if we can declare location. + if (flags.get(DecorationComponent) && can_use_io_location(var.storage, is_block)) + { + if (!options.es) + { + if (options.version < 440 && options.version >= 140) + require_extension_internal("GL_ARB_enhanced_layouts"); + else if (options.version < 140) + SPIRV_CROSS_THROW("Component decoration is not supported in targets below GLSL 1.40."); + attr.push_back(join("component = ", dec.component)); + } + else + SPIRV_CROSS_THROW("Component decoration is not supported in ES targets."); + } + + if (flags.get(DecorationIndex)) + attr.push_back(join("index = ", dec.index)); + + // Do not emit set = decoration in regular GLSL output, but + // we need to preserve it in Vulkan GLSL mode. + if (var.storage != StorageClassPushConstant) + { + if (flags.get(DecorationDescriptorSet) && options.vulkan_semantics) + attr.push_back(join("set = ", dec.set)); + } + + // GL 3.0/GLSL 1.30 is not considered legacy, but it doesn't have UBOs ... 
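+ // Uniform blocks only arrived in GLSL 1.40 (GL 3.1) and ESSL 3.00, hence the checks below.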
+ bool can_use_buffer_blocks = (options.es && options.version >= 300) || (!options.es && options.version >= 140); + + bool can_use_binding; + if (options.es) + can_use_binding = options.version >= 310; + else + can_use_binding = options.enable_420pack_extension || (options.version >= 420); + + // Make sure we don't emit binding layout for a classic uniform on GLSL 1.30. + if (!can_use_buffer_blocks && var.storage == StorageClassUniform) + can_use_binding = false; + + if (can_use_binding && flags.get(DecorationBinding)) + attr.push_back(join("binding = ", dec.binding)); + + if (flags.get(DecorationOffset)) + attr.push_back(join("offset = ", dec.offset)); + + bool push_constant_block = options.vulkan_semantics && var.storage == StorageClassPushConstant; + bool ssbo_block = var.storage == StorageClassStorageBuffer || + (var.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock)); + + // Instead of adding explicit offsets for every element here, just assume we're using std140 or std430. + // If SPIR-V does not comply with either layout, we cannot really work around it. + if (can_use_buffer_blocks && var.storage == StorageClassUniform && typeflags.get(DecorationBlock)) + { + if (buffer_is_packing_standard(type, BufferPackingStd140)) + attr.push_back("std140"); + else if (buffer_is_packing_standard(type, BufferPackingStd140EnhancedLayout)) + { + attr.push_back("std140"); + // Fallback time. We might be able to use the ARB_enhanced_layouts to deal with this difference, + // however, we can only use layout(offset) on the block itself, not any substructs, so the substructs better be the appropriate layout. + // Enhanced layouts seem to always work in Vulkan GLSL, so no need for extensions there. + if (options.es && !options.vulkan_semantics) + SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do " + "not support GL_ARB_enhanced_layouts."); + if (!options.es && !options.vulkan_semantics && options.version < 440) + require_extension_internal("GL_ARB_enhanced_layouts"); + + // This is a very last minute to check for this, but use this unused decoration to mark that we should emit + // explicit offsets for this block type. + // layout_for_variable() will be called before the actual buffer emit. + // The alternative is a full pass before codegen where we deduce this decoration, + // but then we are just doing the exact same work twice, and more complexity. + set_decoration(type.self, DecorationCPacked); + } + else + { + SPIRV_CROSS_THROW("Uniform buffer cannot be expressed as std140, even with enhanced layouts. You can try " + "flattening this block to " + "support a more flexible layout."); + } + } + else if (can_use_buffer_blocks && (push_constant_block || ssbo_block)) + { + if (buffer_is_packing_standard(type, BufferPackingStd430)) + attr.push_back("std430"); + else if (buffer_is_packing_standard(type, BufferPackingStd140)) + attr.push_back("std140"); + else if (buffer_is_packing_standard(type, BufferPackingStd140EnhancedLayout)) + { + attr.push_back("std140"); + + // Fallback time. We might be able to use the ARB_enhanced_layouts to deal with this difference, + // however, we can only use layout(offset) on the block itself, not any substructs, so the substructs better be the appropriate layout. + // Enhanced layouts seem to always work in Vulkan GLSL, so no need for extensions there. + if (options.es && !options.vulkan_semantics) + SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. 
ES-targets do " + "not support GL_ARB_enhanced_layouts."); + if (!options.es && !options.vulkan_semantics && options.version < 440) + require_extension_internal("GL_ARB_enhanced_layouts"); + + set_decoration(type.self, DecorationCPacked); + } + else if (buffer_is_packing_standard(type, BufferPackingStd430EnhancedLayout)) + { + attr.push_back("std430"); + if (options.es && !options.vulkan_semantics) + SPIRV_CROSS_THROW("Push constant block cannot be expressed as neither std430 nor std140. ES-targets do " + "not support GL_ARB_enhanced_layouts."); + if (!options.es && !options.vulkan_semantics && options.version < 440) + require_extension_internal("GL_ARB_enhanced_layouts"); + + set_decoration(type.self, DecorationCPacked); + } + else + { + SPIRV_CROSS_THROW("Buffer block cannot be expressed as neither std430 nor std140, even with enhanced " + "layouts. You can try flattening this block to support a more flexible layout."); + } + } + + // For images, the type itself adds a layout qualifer. + // Only emit the format for storage images. + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + { + const char *fmt = format_to_glsl(type.image.format); + if (fmt) + attr.push_back(fmt); + } + + if (attr.empty()) + return ""; + + string res = "layout("; + res += merge(attr); + res += ") "; + return res; +} + +void CompilerGLSL::emit_push_constant_block(const SPIRVariable &var) +{ + if (flattened_buffer_blocks.count(var.self)) + emit_buffer_block_flattened(var); + else if (options.vulkan_semantics) + emit_push_constant_block_vulkan(var); + else + emit_push_constant_block_glsl(var); +} + +void CompilerGLSL::emit_push_constant_block_vulkan(const SPIRVariable &var) +{ + emit_buffer_block(var); +} + +void CompilerGLSL::emit_push_constant_block_glsl(const SPIRVariable &var) +{ + // OpenGL has no concept of push constant blocks, implement it as a uniform struct. + auto &type = get(var.basetype); + + auto &flags = ir.meta[var.self].decoration.decoration_flags; + flags.clear(DecorationBinding); + flags.clear(DecorationDescriptorSet); + +#if 0 + if (flags & ((1ull << DecorationBinding) | (1ull << DecorationDescriptorSet))) + SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. " + "Remap to location with reflection API first or disable these decorations."); +#endif + + // We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily. + // Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed. 
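+ // The Block flag is restored right after emit_struct() below so reflection stays intact.
+ // The emitted GLSL then looks roughly like:
+ //   struct PushConstants { ... };
+ //   uniform PushConstants name;
+ // with the actual type and instance names taken from the usual name resolution.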
+ auto &block_flags = ir.meta[type.self].decoration.decoration_flags; + bool block_flag = block_flags.get(DecorationBlock); + block_flags.clear(DecorationBlock); + + emit_struct(type); + + if (block_flag) + block_flags.set(DecorationBlock); + + emit_uniform(var); + statement(""); +} + +void CompilerGLSL::emit_buffer_block(const SPIRVariable &var) +{ + if (flattened_buffer_blocks.count(var.self)) + emit_buffer_block_flattened(var); + else if (is_legacy() || (!options.es && options.version == 130)) + emit_buffer_block_legacy(var); + else + emit_buffer_block_native(var); +} + +void CompilerGLSL::emit_buffer_block_legacy(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + bool ssbo = var.storage == StorageClassStorageBuffer || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + if (ssbo) + SPIRV_CROSS_THROW("SSBOs not supported in legacy targets."); + + // We're emitting the push constant block as a regular struct, so disable the block qualifier temporarily. + // Otherwise, we will end up emitting layout() qualifiers on naked structs which is not allowed. + auto &block_flags = ir.meta[type.self].decoration.decoration_flags; + bool block_flag = block_flags.get(DecorationBlock); + block_flags.clear(DecorationBlock); + emit_struct(type); + if (block_flag) + block_flags.set(DecorationBlock); + emit_uniform(var); + statement(""); +} + +void CompilerGLSL::emit_buffer_block_native(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + Bitset flags = ir.get_buffer_block_flags(var); + bool ssbo = var.storage == StorageClassStorageBuffer || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + bool is_restrict = ssbo && flags.get(DecorationRestrict); + bool is_writeonly = ssbo && flags.get(DecorationNonReadable); + bool is_readonly = ssbo && flags.get(DecorationNonWritable); + bool is_coherent = ssbo && flags.get(DecorationCoherent); + + // Block names should never alias, but from HLSL input they kind of can because block types are reused for UAVs ... + auto buffer_name = to_name(type.self, false); + + auto &block_namespace = ssbo ? block_ssbo_names : block_ubo_names; + + // Shaders never use the block by interface name, so we don't + // have to track this other than updating name caches. + // If we have a collision for any reason, just fallback immediately. + if (ir.meta[type.self].decoration.alias.empty() || block_namespace.find(buffer_name) != end(block_namespace) || + resource_names.find(buffer_name) != end(resource_names)) + { + buffer_name = get_block_fallback_name(var.self); + } + + // Make sure we get something unique for both global name scope and block name scope. + // See GLSL 4.5 spec: section 4.3.9 for details. + add_variable(block_namespace, resource_names, buffer_name); + + // If for some reason buffer_name is an illegal name, make a final fallback to a workaround name. + // This cannot conflict with anything else, so we're safe now. + // We cannot reuse this fallback name in neither global scope (blocked by block_names) nor block name scope. + if (buffer_name.empty()) + buffer_name = join("_", get(var.basetype).self, "_", var.self); + + block_names.insert(buffer_name); + block_namespace.insert(buffer_name); + + // Save for post-reflection later. + declared_block_names[var.self] = buffer_name; + + statement(layout_for_variable(var), is_coherent ? "coherent " : "", is_restrict ? "restrict " : "", + is_writeonly ? "writeonly " : "", is_readonly ? "readonly " : "", ssbo ? 
"buffer " : "uniform ", + buffer_name); + + begin_scope(); + + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + } + + add_resource_name(var.self); + end_scope_decl(to_name(var.self) + type_to_array_glsl(type)); + statement(""); +} + +void CompilerGLSL::emit_buffer_block_flattened(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + // Block names should never alias. + auto buffer_name = to_name(type.self, false); + size_t buffer_size = (get_declared_struct_size(type) + 15) / 16; + + SPIRType::BaseType basic_type; + if (get_common_basic_type(type, basic_type)) + { + SPIRType tmp; + tmp.basetype = basic_type; + tmp.vecsize = 4; + if (basic_type != SPIRType::Float && basic_type != SPIRType::Int && basic_type != SPIRType::UInt) + SPIRV_CROSS_THROW("Basic types in a flattened UBO must be float, int or uint."); + + auto flags = ir.get_buffer_block_flags(var); + statement("uniform ", flags_to_precision_qualifiers_glsl(tmp, flags), type_to_glsl(tmp), " ", buffer_name, "[", + buffer_size, "];"); + } + else + SPIRV_CROSS_THROW("All basic types in a flattened block must be the same."); +} + +const char *CompilerGLSL::to_storage_qualifiers_glsl(const SPIRVariable &var) +{ + auto &execution = get_entry_point(); + + if (var.storage == StorageClassInput || var.storage == StorageClassOutput) + { + if (is_legacy() && execution.model == ExecutionModelVertex) + return var.storage == StorageClassInput ? "attribute " : "varying "; + else if (is_legacy() && execution.model == ExecutionModelFragment) + return "varying "; // Fragment outputs are renamed so they never hit this case. + else + return var.storage == StorageClassInput ? "in " : "out "; + } + else if (var.storage == StorageClassUniformConstant || var.storage == StorageClassUniform || + var.storage == StorageClassPushConstant) + { + return "uniform "; + } + + return ""; +} + +void CompilerGLSL::emit_flattened_io_block(const SPIRVariable &var, const char *qual) +{ + auto &type = get(var.basetype); + if (!type.array.empty()) + SPIRV_CROSS_THROW("Array of varying structs cannot be flattened to legacy-compatible varyings."); + + auto old_flags = ir.meta[type.self].decoration.decoration_flags; + // Emit the members as if they are part of a block to get all qualifiers. + ir.meta[type.self].decoration.decoration_flags.set(DecorationBlock); + + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + auto &membertype = get(member); + + if (membertype.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Cannot flatten struct inside structs in I/O variables."); + + // Pass in the varying qualifier here so it will appear in the correct declaration order. + // Replace member name while emitting it so it encodes both struct name and member name. + // Sanitize underscores because joining the two identifiers might create more than 1 underscore in a row, + // which is not allowed. + auto backup_name = get_member_name(type.self, i); + auto member_name = to_member_name(type, i); + set_member_name(type.self, i, sanitize_underscores(join(to_name(var.self), "_", member_name))); + emit_struct_member(type, member, i, qual); + // Restore member name. + set_member_name(type.self, i, member_name); + i++; + } + + ir.meta[type.self].decoration.decoration_flags = old_flags; + + // Treat this variable as flattened from now on. 
+ flattened_structs.insert(var.self); +} + +void CompilerGLSL::emit_interface_block(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + // Either make it plain in/out or in/out blocks depending on what shader is doing ... + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + const char *qual = to_storage_qualifiers_glsl(var); + + if (block) + { + // ESSL earlier than 310 and GLSL earlier than 150 did not support + // I/O variables which are struct types. + // To support this, flatten the struct into separate varyings instead. + if ((options.es && options.version < 310) || (!options.es && options.version < 150)) + { + // I/O blocks on ES require version 310 with Android Extension Pack extensions, or core version 320. + // On desktop, I/O blocks were introduced with geometry shaders in GL 3.2 (GLSL 150). + emit_flattened_io_block(var, qual); + } + else + { + if (options.es && options.version < 320) + { + // Geometry and tessellation extensions imply this extension. + if (!has_extension("GL_EXT_geometry_shader") && !has_extension("GL_EXT_tessellation_shader")) + require_extension_internal("GL_EXT_shader_io_blocks"); + } + + // Block names should never alias. + auto block_name = to_name(type.self, false); + + // The namespace for I/O blocks is separate from other variables in GLSL. + auto &block_namespace = type.storage == StorageClassInput ? block_input_names : block_output_names; + + // Shaders never use the block by interface name, so we don't + // have to track this other than updating name caches. + if (block_name.empty() || block_namespace.find(block_name) != end(block_namespace)) + block_name = get_fallback_name(type.self); + else + block_namespace.insert(block_name); + + // If for some reason buffer_name is an illegal name, make a final fallback to a workaround name. + // This cannot conflict with anything else, so we're safe now. + if (block_name.empty()) + block_name = join("_", get(var.basetype).self, "_", var.self); + + // Instance names cannot alias block names. + resource_names.insert(block_name); + + statement(layout_for_variable(var), qual, block_name); + begin_scope(); + + type.member_name_cache.clear(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + emit_struct_member(type, member, i); + i++; + } + + add_resource_name(var.self); + end_scope_decl(join(to_name(var.self), type_to_array_glsl(type))); + statement(""); + } + } + else + { + // ESSL earlier than 310 and GLSL earlier than 150 did not support + // I/O variables which are struct types. + // To support this, flatten the struct into separate varyings instead. 
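+ // Flattening emits one varying per member, named after the instance, so e.g. a struct
+ // output "vout" with a vec4 member "color" becomes roughly "out vec4 vout_color;".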
+ if (type.basetype == SPIRType::Struct && + ((options.es && options.version < 310) || (!options.es && options.version < 150))) + { + emit_flattened_io_block(var, qual); + } + else + { + add_resource_name(var.self); + statement(layout_for_variable(var), to_qualifiers_glsl(var.self), + variable_decl(type, to_name(var.self), var.self), ";"); + } + } +} + +void CompilerGLSL::emit_uniform(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + { + if (!options.es && options.version < 420) + require_extension_internal("GL_ARB_shader_image_load_store"); + else if (options.es && options.version < 310) + SPIRV_CROSS_THROW("At least ESSL 3.10 required for shader image load store."); + } + + add_resource_name(var.self); + statement(layout_for_variable(var), variable_decl(var), ";"); +} + +string CompilerGLSL::constant_value_macro_name(uint32_t id) +{ + return join("SPIRV_CROSS_CONSTANT_ID_", id); +} + +void CompilerGLSL::emit_specialization_constant_op(const SPIRConstantOp &constant) +{ + auto &type = get(constant.basetype); + auto name = to_name(constant.self); + statement("const ", variable_decl(type, name), " = ", constant_op_expression(constant), ";"); +} + +void CompilerGLSL::emit_constant(const SPIRConstant &constant) +{ + auto &type = get(constant.constant_type); + auto name = to_name(constant.self); + + SpecializationConstant wg_x, wg_y, wg_z; + uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + // This specialization constant is implicitly declared by emitting layout() in; + if (constant.self == workgroup_size_id) + return; + + // These specialization constants are implicitly declared by emitting layout() in; + // In legacy GLSL, we will still need to emit macros for these, so a layout() in; declaration + // later can use macro overrides for work group size. + bool is_workgroup_size_constant = constant.self == wg_x.id || constant.self == wg_y.id || constant.self == wg_z.id; + + if (options.vulkan_semantics && is_workgroup_size_constant) + { + // Vulkan GLSL does not need to declare workgroup spec constants explicitly, it is handled in layout(). + return; + } + else if (!options.vulkan_semantics && is_workgroup_size_constant && + !has_decoration(constant.self, DecorationSpecId)) + { + // Only bother declaring a workgroup size if it is actually a specialization constant, because we need macros. + return; + } + + // Only scalars have constant IDs. + if (has_decoration(constant.self, DecorationSpecId)) + { + if (options.vulkan_semantics) + { + statement("layout(constant_id = ", get_decoration(constant.self, DecorationSpecId), ") const ", + variable_decl(type, name), " = ", constant_expression(constant), ";"); + } + else + { + const string ¯o_name = constant.specialization_constant_macro_name; + statement("#ifndef ", macro_name); + statement("#define ", macro_name, " ", constant_expression(constant)); + statement("#endif"); + + // For workgroup size constants, only emit the macros. 
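+ // The macro header emitted above expands to something like:
+ //   #ifndef SPIRV_CROSS_CONSTANT_ID_10
+ //   #define SPIRV_CROSS_CONSTANT_ID_10 1
+ //   #endif
+ // so the API user can override the specialization constant with a plain #define.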
+ if (!is_workgroup_size_constant) + statement("const ", variable_decl(type, name), " = ", macro_name, ";"); + } + } + else + { + statement("const ", variable_decl(type, name), " = ", constant_expression(constant), ";"); + } +} + +void CompilerGLSL::emit_entry_point_declarations() +{ +} + +void CompilerGLSL::replace_illegal_names() +{ + // clang-format off + static const unordered_set keywords = { + "abs", "acos", "acosh", "all", "any", "asin", "asinh", "atan", "atanh", + "atomicAdd", "atomicCompSwap", "atomicCounter", "atomicCounterDecrement", "atomicCounterIncrement", + "atomicExchange", "atomicMax", "atomicMin", "atomicOr", "atomicXor", + "bitCount", "bitfieldExtract", "bitfieldInsert", "bitfieldReverse", + "ceil", "cos", "cosh", "cross", "degrees", + "dFdx", "dFdxCoarse", "dFdxFine", + "dFdy", "dFdyCoarse", "dFdyFine", + "distance", "dot", "EmitStreamVertex", "EmitVertex", "EndPrimitive", "EndStreamPrimitive", "equal", "exp", "exp2", + "faceforward", "findLSB", "findMSB", "float16BitsToInt16", "float16BitsToUint16", "floatBitsToInt", "floatBitsToUint", "floor", "fma", "fract", + "frexp", "fwidth", "fwidthCoarse", "fwidthFine", + "greaterThan", "greaterThanEqual", "groupMemoryBarrier", + "imageAtomicAdd", "imageAtomicAnd", "imageAtomicCompSwap", "imageAtomicExchange", "imageAtomicMax", "imageAtomicMin", "imageAtomicOr", "imageAtomicXor", + "imageLoad", "imageSamples", "imageSize", "imageStore", "imulExtended", "int16BitsToFloat16", "intBitsToFloat", "interpolateAtOffset", "interpolateAtCentroid", "interpolateAtSample", + "inverse", "inversesqrt", "isinf", "isnan", "ldexp", "length", "lessThan", "lessThanEqual", "log", "log2", + "matrixCompMult", "max", "memoryBarrier", "memoryBarrierAtomicCounter", "memoryBarrierBuffer", "memoryBarrierImage", "memoryBarrierShared", + "min", "mix", "mod", "modf", "noise", "noise1", "noise2", "noise3", "noise4", "normalize", "not", "notEqual", + "outerProduct", "packDouble2x32", "packHalf2x16", "packInt2x16", "packInt4x16", "packSnorm2x16", "packSnorm4x8", + "packUint2x16", "packUint4x16", "packUnorm2x16", "packUnorm4x8", "pow", + "radians", "reflect", "refract", "round", "roundEven", "sign", "sin", "sinh", "smoothstep", "sqrt", "step", + "tan", "tanh", "texelFetch", "texelFetchOffset", "texture", "textureGather", "textureGatherOffset", "textureGatherOffsets", + "textureGrad", "textureGradOffset", "textureLod", "textureLodOffset", "textureOffset", "textureProj", "textureProjGrad", + "textureProjGradOffset", "textureProjLod", "textureProjLodOffset", "textureProjOffset", "textureQueryLevels", "textureQueryLod", "textureSamples", "textureSize", + "transpose", "trunc", "uaddCarry", "uint16BitsToFloat16", "uintBitsToFloat", "umulExtended", "unpackDouble2x32", "unpackHalf2x16", "unpackInt2x16", "unpackInt4x16", + "unpackSnorm2x16", "unpackSnorm4x8", "unpackUint2x16", "unpackUint4x16", "unpackUnorm2x16", "unpackUnorm4x8", "usubBorrow", + + "active", "asm", "atomic_uint", "attribute", "bool", "break", "buffer", + "bvec2", "bvec3", "bvec4", "case", "cast", "centroid", "class", "coherent", "common", "const", "continue", "default", "discard", + "dmat2", "dmat2x2", "dmat2x3", "dmat2x4", "dmat3", "dmat3x2", "dmat3x3", "dmat3x4", "dmat4", "dmat4x2", "dmat4x3", "dmat4x4", + "do", "double", "dvec2", "dvec3", "dvec4", "else", "enum", "extern", "external", "false", "filter", "fixed", "flat", "float", + "for", "fvec2", "fvec3", "fvec4", "goto", "half", "highp", "hvec2", "hvec3", "hvec4", "if", "iimage1D", "iimage1DArray", + "iimage2D", "iimage2DArray", "iimage2DMS", 
"iimage2DMSArray", "iimage2DRect", "iimage3D", "iimageBuffer", "iimageCube", + "iimageCubeArray", "image1D", "image1DArray", "image2D", "image2DArray", "image2DMS", "image2DMSArray", "image2DRect", + "image3D", "imageBuffer", "imageCube", "imageCubeArray", "in", "inline", "inout", "input", "int", "interface", "invariant", + "isampler1D", "isampler1DArray", "isampler2D", "isampler2DArray", "isampler2DMS", "isampler2DMSArray", "isampler2DRect", + "isampler3D", "isamplerBuffer", "isamplerCube", "isamplerCubeArray", "ivec2", "ivec3", "ivec4", "layout", "long", "lowp", + "mat2", "mat2x2", "mat2x3", "mat2x4", "mat3", "mat3x2", "mat3x3", "mat3x4", "mat4", "mat4x2", "mat4x3", "mat4x4", "mediump", + "namespace", "noinline", "noperspective", "out", "output", "packed", "partition", "patch", "precise", "precision", "public", "readonly", + "resource", "restrict", "return", "sample", "sampler1D", "sampler1DArray", "sampler1DArrayShadow", + "sampler1DShadow", "sampler2D", "sampler2DArray", "sampler2DArrayShadow", "sampler2DMS", "sampler2DMSArray", + "sampler2DRect", "sampler2DRectShadow", "sampler2DShadow", "sampler3D", "sampler3DRect", "samplerBuffer", + "samplerCube", "samplerCubeArray", "samplerCubeArrayShadow", "samplerCubeShadow", "shared", "short", "sizeof", "smooth", "static", + "struct", "subroutine", "superp", "switch", "template", "this", "true", "typedef", "uimage1D", "uimage1DArray", "uimage2D", + "uimage2DArray", "uimage2DMS", "uimage2DMSArray", "uimage2DRect", "uimage3D", "uimageBuffer", "uimageCube", + "uimageCubeArray", "uint", "uniform", "union", "unsigned", "usampler1D", "usampler1DArray", "usampler2D", "usampler2DArray", + "usampler2DMS", "usampler2DMSArray", "usampler2DRect", "usampler3D", "usamplerBuffer", "usamplerCube", + "usamplerCubeArray", "using", "uvec2", "uvec3", "uvec4", "varying", "vec2", "vec3", "vec4", "void", "volatile", + "while", "writeonly", + }; + // clang-format on + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + if (!is_hidden_variable(var)) + { + auto &m = ir.meta[var.self].decoration; + if (m.alias.compare(0, 3, "gl_") == 0 || keywords.find(m.alias) != end(keywords)) + m.alias = join("_", m.alias); + } + } + } +} + +void CompilerGLSL::replace_fragment_output(SPIRVariable &var) +{ + auto &m = ir.meta[var.self].decoration; + uint32_t location = 0; + if (m.decoration_flags.get(DecorationLocation)) + location = m.location; + + // If our variable is arrayed, we must not emit the array part of this as the SPIR-V will + // do the access chain part of this for us. + auto &type = get(var.basetype); + + if (type.array.empty()) + { + // Redirect the write to a specific render target in legacy GLSL. + m.alias = join("gl_FragData[", location, "]"); + + if (is_legacy_es() && location != 0) + require_extension_internal("GL_EXT_draw_buffers"); + } + else if (type.array.size() == 1) + { + // If location is non-zero, we probably have to add an offset. + // This gets really tricky since we'd have to inject an offset in the access chain. + // FIXME: This seems like an extremely odd-ball case, so it's probably fine to leave it like this for now. + m.alias = "gl_FragData"; + if (location != 0) + SPIRV_CROSS_THROW("Arrayed output variable used, but location is not 0. " + "This is unimplemented in SPIRV-Cross."); + + if (is_legacy_es()) + require_extension_internal("GL_EXT_draw_buffers"); + } + else + SPIRV_CROSS_THROW("Array-of-array output variable used. 
This cannot be implemented in legacy GLSL."); + + var.compat_builtin = true; // We don't want to declare this variable, but use the name as-is. +} + +void CompilerGLSL::replace_fragment_outputs() +{ + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (!is_builtin_variable(var) && !var.remapped_variable && type.pointer && + var.storage == StorageClassOutput) + replace_fragment_output(var); + } + } +} + +string CompilerGLSL::remap_swizzle(const SPIRType &out_type, uint32_t input_components, const string &expr) +{ + if (out_type.vecsize == input_components) + return expr; + else if (input_components == 1 && !backend.can_swizzle_scalar) + return join(type_to_glsl(out_type), "(", expr, ")"); + else + { + // FIXME: This will not work with packed expressions. + auto e = enclose_expression(expr) + "."; + // Just clamp the swizzle index if we have more outputs than inputs. + for (uint32_t c = 0; c < out_type.vecsize; c++) + e += index_to_swizzle(min(c, input_components - 1)); + if (backend.swizzle_is_function && out_type.vecsize > 1) + e += "()"; + + remove_duplicate_swizzle(e); + return e; + } +} + +void CompilerGLSL::emit_pls() +{ + auto &execution = get_entry_point(); + if (execution.model != ExecutionModelFragment) + SPIRV_CROSS_THROW("Pixel local storage only supported in fragment shaders."); + + if (!options.es) + SPIRV_CROSS_THROW("Pixel local storage only supported in OpenGL ES."); + + if (options.version < 300) + SPIRV_CROSS_THROW("Pixel local storage only supported in ESSL 3.0 and above."); + + if (!pls_inputs.empty()) + { + statement("__pixel_local_inEXT _PLSIn"); + begin_scope(); + for (auto &input : pls_inputs) + statement(pls_decl(input), ";"); + end_scope_decl(); + statement(""); + } + + if (!pls_outputs.empty()) + { + statement("__pixel_local_outEXT _PLSOut"); + begin_scope(); + for (auto &output : pls_outputs) + statement(pls_decl(output), ";"); + end_scope_decl(); + statement(""); + } +} + +void CompilerGLSL::fixup_image_load_store_access() +{ + for (auto &id : ir.ids) + { + if (id.get_type() != TypeVariable) + continue; + + uint32_t var = id.get().self; + auto &vartype = expression_type(var); + if (vartype.basetype == SPIRType::Image) + { + // Older glslangValidator does not emit required qualifiers here. + // Solve this by making the image access as restricted as possible and loosen up if we need to. + // If any no-read/no-write flags are actually set, assume that the compiler knows what it's doing. + + auto &flags = ir.meta.at(var).decoration.decoration_flags; + if (!flags.get(DecorationNonWritable) && !flags.get(DecorationNonReadable)) + { + flags.set(DecorationNonWritable); + flags.set(DecorationNonReadable); + } + } + } +} + +void CompilerGLSL::emit_declared_builtin_block(StorageClass storage, ExecutionModel model) +{ + Bitset emitted_builtins; + Bitset global_builtins; + const SPIRVariable *block_var = nullptr; + bool emitted_block = false; + bool builtin_array = false; + + // Need to use declared size in the type. + // These variables might have been declared, but not statically used, so we haven't deduced their size yet. 
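+ // gl_ClipDistance/gl_CullDistance must be redeclared with an explicit array size,
+ // so scan both builtin I/O blocks and loose global builtins below to find it.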
+ uint32_t cull_distance_size = 0; + uint32_t clip_distance_size = 0; + + for (auto &id : ir.ids) + { + if (id.get_type() != TypeVariable) + continue; + + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = has_decoration(type.self, DecorationBlock); + Bitset builtins; + + if (var.storage == storage && block && is_builtin_variable(var)) + { + uint32_t index = 0; + for (auto &m : ir.meta[type.self].members) + { + if (m.builtin) + { + builtins.set(m.builtin_type); + if (m.builtin_type == BuiltInCullDistance) + cull_distance_size = get(type.member_types[index]).array.front(); + else if (m.builtin_type == BuiltInClipDistance) + clip_distance_size = get(type.member_types[index]).array.front(); + } + index++; + } + } + else if (var.storage == storage && !block && is_builtin_variable(var)) + { + // While we're at it, collect all declared global builtins (HLSL mostly ...). + auto &m = ir.meta[var.self].decoration; + if (m.builtin) + { + global_builtins.set(m.builtin_type); + if (m.builtin_type == BuiltInCullDistance) + cull_distance_size = type.array.front(); + else if (m.builtin_type == BuiltInClipDistance) + clip_distance_size = type.array.front(); + } + } + + if (builtins.empty()) + continue; + + if (emitted_block) + SPIRV_CROSS_THROW("Cannot use more than one builtin I/O block."); + + emitted_builtins = builtins; + emitted_block = true; + builtin_array = !type.array.empty(); + block_var = &var; + } + + global_builtins = + Bitset(global_builtins.get_lower() & ((1ull << BuiltInPosition) | (1ull << BuiltInPointSize) | + (1ull << BuiltInClipDistance) | (1ull << BuiltInCullDistance))); + + // Try to collect all other declared builtins. + if (!emitted_block) + emitted_builtins = global_builtins; + + // Can't declare an empty interface block. + if (emitted_builtins.empty()) + return; + + if (storage == StorageClassOutput) + statement("out gl_PerVertex"); + else + statement("in gl_PerVertex"); + + begin_scope(); + if (emitted_builtins.get(BuiltInPosition)) + statement("vec4 gl_Position;"); + if (emitted_builtins.get(BuiltInPointSize)) + statement("float gl_PointSize;"); + if (emitted_builtins.get(BuiltInClipDistance)) + statement("float gl_ClipDistance[", clip_distance_size, "];"); + if (emitted_builtins.get(BuiltInCullDistance)) + statement("float gl_CullDistance[", cull_distance_size, "];"); + + bool tessellation = model == ExecutionModelTessellationEvaluation || model == ExecutionModelTessellationControl; + if (builtin_array) + { + // Make sure the array has a supported name in the code. + if (storage == StorageClassOutput) + set_name(block_var->self, "gl_out"); + else if (storage == StorageClassInput) + set_name(block_var->self, "gl_in"); + + if (model == ExecutionModelTessellationControl && storage == StorageClassOutput) + end_scope_decl(join(to_name(block_var->self), "[", get_entry_point().output_vertices, "]")); + else + end_scope_decl(join(to_name(block_var->self), tessellation ? 
"[gl_MaxPatchVertices]" : "[]")); + } + else + end_scope_decl(); + statement(""); +} + +void CompilerGLSL::declare_undefined_values() +{ + bool emitted = false; + for (auto &id : ir.ids) + { + if (id.get_type() != TypeUndef) + continue; + + auto &undef = id.get(); + statement(variable_decl(get(undef.basetype), to_name(undef.self), undef.self), ";"); + emitted = true; + } + + if (emitted) + statement(""); +} + +void CompilerGLSL::emit_resources() +{ + auto &execution = get_entry_point(); + + replace_illegal_names(); + + // Legacy GL uses gl_FragData[], redeclare all fragment outputs + // with builtins. + if (execution.model == ExecutionModelFragment && is_legacy()) + replace_fragment_outputs(); + + // Emit PLS blocks if we have such variables. + if (!pls_inputs.empty() || !pls_outputs.empty()) + emit_pls(); + + // Emit custom gl_PerVertex for SSO compatibility. + if (options.separate_shader_objects && !options.es && execution.model != ExecutionModelFragment) + { + switch (execution.model) + { + case ExecutionModelGeometry: + case ExecutionModelTessellationControl: + case ExecutionModelTessellationEvaluation: + emit_declared_builtin_block(StorageClassInput, execution.model); + emit_declared_builtin_block(StorageClassOutput, execution.model); + break; + + case ExecutionModelVertex: + emit_declared_builtin_block(StorageClassOutput, execution.model); + break; + + default: + break; + } + } + else + { + // Need to redeclare clip/cull distance with explicit size to use them. + // SPIR-V mandates these builtins have a size declared. + const char *storage = execution.model == ExecutionModelFragment ? "in" : "out"; + if (clip_distance_count != 0) + statement(storage, " float gl_ClipDistance[", clip_distance_count, "];"); + if (cull_distance_count != 0) + statement(storage, " float gl_CullDistance[", cull_distance_count, "];"); + if (clip_distance_count != 0 || cull_distance_count != 0) + statement(""); + } + + if (position_invariant) + { + statement("invariant gl_Position;"); + statement(""); + } + + bool emitted = false; + + // If emitted Vulkan GLSL, + // emit specialization constants as actual floats, + // spec op expressions will redirect to the constant name. + // + // TODO: If we have the fringe case that we create a spec constant which depends on a struct type, + // we'll have to deal with that, but there's currently no known way to express that. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + bool needs_declaration = c.specialization || c.is_used_as_lut; + + if (needs_declaration) + { + if (!options.vulkan_semantics && c.specialization) + { + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + } + emit_constant(c); + emitted = true; + } + } + else if (id.get_type() == TypeConstantOp) + { + emit_specialization_constant_op(id.get()); + emitted = true; + } + } + + if (emitted) + statement(""); + + // If we needed to declare work group size late, check here. + // If the work group size depends on a specialization constant, we need to declare the layout() block + // after constants (and their macros) have been declared. 
+ if (execution.model == ExecutionModelGLCompute && !options.vulkan_semantics && + execution.workgroup_size.constant != 0) + { + SpecializationConstant wg_x, wg_y, wg_z; + get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + if ((wg_x.id != 0) || (wg_y.id != 0) || (wg_z.id != 0)) + { + vector inputs; + build_workgroup_size(inputs, wg_x, wg_y, wg_z); + statement("layout(", merge(inputs), ") in;"); + statement(""); + } + } + + emitted = false; + + // Output all basic struct types which are not Block or BufferBlock as these are declared inplace + // when such variables are instantiated. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer && + (!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) && + !ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + emit_struct(type); + } + } + } + + // Output UBOs and SSBOs + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform; + bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) && + has_block_flags) + { + emit_buffer_block(var); + } + } + } + + // Output push constant blocks + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassPushConstant && + !is_hidden_variable(var)) + { + emit_push_constant_block(var); + } + } + } + + bool skip_separate_image_sampler = !combined_image_samplers.empty() || !options.vulkan_semantics; + + // Output Uniform Constants (values, samplers, images, etc). + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + // If we're remapping separate samplers and images, only emit the combined samplers. + if (skip_separate_image_sampler) + { + // Sampler buffers are always used without a sampler, and they will also work in regular GL. + bool sampler_buffer = type.basetype == SPIRType::Image && type.image.dim == DimBuffer; + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + if (!sampler_buffer && (separate_image || separate_sampler)) + continue; + } + + if (var.storage != StorageClassFunction && type.pointer && + (type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter) && + !is_hidden_variable(var)) + { + emit_uniform(var); + emitted = true; + } + } + } + + if (emitted) + statement(""); + emitted = false; + + // Output in/out interfaces. 
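+ // Only variables which are part of the entry point interface and not hidden are emitted.
+ // The SPIRV_Cross_BaseInstance uniform used for gl_InstanceIndex emulation is also declared here.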
+ for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + if (var.storage != StorageClassFunction && type.pointer && + (var.storage == StorageClassInput || var.storage == StorageClassOutput) && + interface_variable_exists_in_entry_point(var.self) && !is_hidden_variable(var)) + { + emit_interface_block(var); + emitted = true; + } + else if (is_builtin_variable(var)) + { + // For gl_InstanceIndex emulation on GLES, the API user needs to + // supply this uniform. + if (options.vertex.support_nonzero_base_instance && + ir.meta[var.self].decoration.builtin_type == BuiltInInstanceIndex && !options.vulkan_semantics) + { + statement("uniform int SPIRV_Cross_BaseInstance;"); + emitted = true; + } + } + } + } + + // Global variables. + for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage != StorageClassOutput) + { + add_resource_name(var.self); + statement(variable_decl(var), ";"); + emitted = true; + } + } + + if (emitted) + statement(""); + + declare_undefined_values(); +} + +// Returns a string representation of the ID, usable as a function arg. +// Default is to simply return the expression representation fo the arg ID. +// Subclasses may override to modify the return value. +string CompilerGLSL::to_func_call_arg(uint32_t id) +{ + // Make sure that we use the name of the original variable, and not the parameter alias. + uint32_t name_id = id; + auto *var = maybe_get(id); + if (var && var->basevariable) + name_id = var->basevariable; + return to_expression(name_id); +} + +void CompilerGLSL::handle_invalid_expression(uint32_t id) +{ + // We tried to read an invalidated expression. + // This means we need another pass at compilation, but next time, force temporary variables so that they cannot be invalidated. + forced_temporaries.insert(id); + force_recompile = true; +} + +// Converts the format of the current expression from packed to unpacked, +// by wrapping the expression in a constructor of the appropriate type. +// GLSL does not support packed formats, so simply return the expression. +// Subclasses that do will override +string CompilerGLSL::unpack_expression_type(string expr_str, const SPIRType &) +{ + return expr_str; +} + +// Sometimes we proactively enclosed an expression where it turns out we might have not needed it after all. +void CompilerGLSL::strip_enclosed_expression(string &expr) +{ + if (expr.size() < 2 || expr.front() != '(' || expr.back() != ')') + return; + + // Have to make sure that our first and last parens actually enclose everything inside it. + uint32_t paren_count = 0; + for (auto &c : expr) + { + if (c == '(') + paren_count++; + else if (c == ')') + { + paren_count--; + + // If we hit 0 and this is not the final char, our first and final parens actually don't + // enclose the expression, and we cannot strip, e.g.: (a + b) * (c + d). + if (paren_count == 0 && &c != &expr.back()) + return; + } + } + expr.erase(expr.size() - 1, 1); + expr.erase(begin(expr)); +} + +string CompilerGLSL::enclose_expression(const string &expr) +{ + bool need_parens = false; + + // If the expression starts with a unary we need to enclose to deal with cases where we have back-to-back + // unary expressions. + if (!expr.empty()) + { + auto c = expr.front(); + if (c == '-' || c == '+' || c == '!' 
|| c == '~' || c == '&' || c == '*') + need_parens = true; + } + + if (!need_parens) + { + uint32_t paren_count = 0; + for (auto c : expr) + { + if (c == '(' || c == '[') + paren_count++; + else if (c == ')' || c == ']') + { + assert(paren_count); + paren_count--; + } + else if (c == ' ' && paren_count == 0) + { + need_parens = true; + break; + } + } + assert(paren_count == 0); + } + + // If this expression contains any spaces which are not enclosed by parentheses, + // we need to enclose it so we can treat the whole string as an expression. + // This happens when two expressions have been part of a binary op earlier. + if (need_parens) + return join('(', expr, ')'); + else + return expr; +} + +string CompilerGLSL::dereference_expression(const std::string &expr) +{ + // If this expression starts with an address-of operator ('&'), then + // just return the part after the operator. + // TODO: Strip parens if unnecessary? + if (expr.at(0) == '&') + return expr.substr(1); + else + return join('*', expr); +} + +string CompilerGLSL::address_of_expression(const std::string &expr) +{ + // If this expression starts with a dereference operator ('*'), then + // just return the part after the operator. + // TODO: Strip parens if unnecessary? + if (expr.at(0) == '*') + return expr.substr(1); + else + return join('&', expr); +} + +// Just like to_expression except that we enclose the expression inside parentheses if needed. +string CompilerGLSL::to_enclosed_expression(uint32_t id, bool register_expression_read) +{ + return enclose_expression(to_expression(id, register_expression_read)); +} + +string CompilerGLSL::to_unpacked_expression(uint32_t id) +{ + // If we need to transpose, it will also take care of unpacking rules. + auto *e = maybe_get(id); + bool need_transpose = e && e->need_transpose; + if (!need_transpose && has_decoration(id, DecorationCPacked)) + return unpack_expression_type(to_expression(id), expression_type(id)); + else + return to_expression(id); +} + +string CompilerGLSL::to_enclosed_unpacked_expression(uint32_t id) +{ + // If we need to transpose, it will also take care of unpacking rules. 
+ auto *e = maybe_get(id); + bool need_transpose = e && e->need_transpose; + if (!need_transpose && has_decoration(id, DecorationCPacked)) + return unpack_expression_type(to_expression(id), expression_type(id)); + else + return to_enclosed_expression(id); +} + +string CompilerGLSL::to_dereferenced_expression(uint32_t id, bool register_expression_read) +{ + auto &type = expression_type(id); + if (type.pointer && should_dereference(id)) + return dereference_expression(to_enclosed_expression(id, register_expression_read)); + else + return to_expression(id, register_expression_read); +} + +string CompilerGLSL::to_pointer_expression(uint32_t id) +{ + auto &type = expression_type(id); + if (type.pointer && expression_is_lvalue(id) && !should_dereference(id)) + return address_of_expression(to_enclosed_expression(id)); + else + return to_expression(id); +} + +string CompilerGLSL::to_enclosed_pointer_expression(uint32_t id) +{ + auto &type = expression_type(id); + if (type.pointer && expression_is_lvalue(id) && !should_dereference(id)) + return address_of_expression(to_enclosed_expression(id)); + else + return to_enclosed_expression(id); +} + +string CompilerGLSL::to_extract_component_expression(uint32_t id, uint32_t index) +{ + auto expr = to_enclosed_expression(id); + if (has_decoration(id, DecorationCPacked)) + return join(expr, "[", index, "]"); + else + return join(expr, ".", index_to_swizzle(index)); +} + +string CompilerGLSL::to_expression(uint32_t id, bool register_expression_read) +{ + auto itr = invalid_expressions.find(id); + if (itr != end(invalid_expressions)) + handle_invalid_expression(id); + + if (ir.ids[id].get_type() == TypeExpression) + { + // We might have a more complex chain of dependencies. + // A possible scenario is that we + // + // %1 = OpLoad + // %2 = OpDoSomething %1 %1. here %2 will have a dependency on %1. + // %3 = OpDoSomethingAgain %2 %2. Here %3 will lose the link to %1 since we don't propagate the dependencies like that. + // OpStore %1 %foo // Here we can invalidate %1, and hence all expressions which depend on %1. Only %2 will know since it's part of invalid_expressions. + // %4 = OpDoSomethingAnotherTime %3 %3 // If we forward all expressions we will see %1 expression after store, not before. + // + // However, we can propagate up a list of depended expressions when we used %2, so we can check if %2 is invalid when reading %3 after the store, + // and see that we should not forward reads of the original variable. + auto &expr = get(id); + for (uint32_t dep : expr.expression_dependencies) + if (invalid_expressions.find(dep) != end(invalid_expressions)) + handle_invalid_expression(dep); + } + + if (register_expression_read) + track_expression_read(id); + + switch (ir.ids[id].get_type()) + { + case TypeExpression: + { + auto &e = get(id); + if (e.base_expression) + return to_enclosed_expression(e.base_expression) + e.expression; + else if (e.need_transpose) + { + bool is_packed = has_decoration(id, DecorationCPacked); + return convert_row_major_matrix(e.expression, get(e.expression_type), is_packed); + } + else + { + if (force_recompile) + { + // During first compilation phase, certain expression patterns can trigger exponential growth of memory. + // Avoid this by returning dummy expressions during this phase. + // Do not use empty expressions here, because those are sentinels for other cases. 
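+ // "_" is a harmless placeholder; force_recompile triggers another full pass,
+ // so nothing emitted in this pass survives anyway.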
+ return "_"; + } + else + return e.expression; + } + } + + case TypeConstant: + { + auto &c = get(id); + auto &type = get(c.constant_type); + + // WorkGroupSize may be a constant. + auto &dec = ir.meta[c.self].decoration; + if (dec.builtin) + return builtin_to_glsl(dec.builtin_type, StorageClassGeneric); + else if (c.specialization) + return to_name(id); + else if (c.is_used_as_lut) + return to_name(id); + else if (type.basetype == SPIRType::Struct && !backend.can_declare_struct_inline) + return to_name(id); + else if (!type.array.empty() && !backend.can_declare_arrays_inline) + return to_name(id); + else + return constant_expression(c); + } + + case TypeConstantOp: + return to_name(id); + + case TypeVariable: + { + auto &var = get(id); + // If we try to use a loop variable before the loop header, we have to redirect it to the static expression, + // the variable has not been declared yet. + if (var.statically_assigned || (var.loop_variable && !var.loop_variable_enable)) + return to_expression(var.static_expression); + else if (var.deferred_declaration) + { + var.deferred_declaration = false; + return variable_decl(var); + } + else if (flattened_structs.count(id)) + { + return load_flattened_struct(var); + } + else + { + auto &dec = ir.meta[var.self].decoration; + if (dec.builtin) + return builtin_to_glsl(dec.builtin_type, var.storage); + else + return to_name(id); + } + } + + case TypeCombinedImageSampler: + // This type should never be taken the expression of directly. + // The intention is that texture sampling functions will extract the image and samplers + // separately and take their expressions as needed. + // GLSL does not use this type because OpSampledImage immediately creates a combined image sampler + // expression ala sampler2D(texture, sampler). + SPIRV_CROSS_THROW("Combined image samplers have no default expression representation."); + + case TypeAccessChain: + // We cannot express this type. They only have meaning in other OpAccessChains, OpStore or OpLoad. + SPIRV_CROSS_THROW("Access chains have no default expression representation."); + + default: + return to_name(id); + } +} + +string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop) +{ + auto &type = get(cop.basetype); + bool binary = false; + bool unary = false; + string op; + + if (is_legacy() && is_unsigned_opcode(cop.opcode)) + SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets."); + + // TODO: Find a clean way to reuse emit_instruction. 
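+ // OpSpecConstantOp must remain a compile-time constant expression, so each opcode is
+ // lowered to plain operators or constructors, e.g. OpIAdd on spec constants A and B
+ // becomes roughly "(A + B)".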
+ switch (cop.opcode) + { + case OpSConvert: + case OpUConvert: + case OpFConvert: + op = type_to_glsl_constructor(type); + break; + +#define GLSL_BOP(opname, x) \ + case Op##opname: \ + binary = true; \ + op = x; \ + break + +#define GLSL_UOP(opname, x) \ + case Op##opname: \ + unary = true; \ + op = x; \ + break + + GLSL_UOP(SNegate, "-"); + GLSL_UOP(Not, "~"); + GLSL_BOP(IAdd, "+"); + GLSL_BOP(ISub, "-"); + GLSL_BOP(IMul, "*"); + GLSL_BOP(SDiv, "/"); + GLSL_BOP(UDiv, "/"); + GLSL_BOP(UMod, "%"); + GLSL_BOP(SMod, "%"); + GLSL_BOP(ShiftRightLogical, ">>"); + GLSL_BOP(ShiftRightArithmetic, ">>"); + GLSL_BOP(ShiftLeftLogical, "<<"); + GLSL_BOP(BitwiseOr, "|"); + GLSL_BOP(BitwiseXor, "^"); + GLSL_BOP(BitwiseAnd, "&"); + GLSL_BOP(LogicalOr, "||"); + GLSL_BOP(LogicalAnd, "&&"); + GLSL_UOP(LogicalNot, "!"); + GLSL_BOP(LogicalEqual, "=="); + GLSL_BOP(LogicalNotEqual, "!="); + GLSL_BOP(IEqual, "=="); + GLSL_BOP(INotEqual, "!="); + GLSL_BOP(ULessThan, "<"); + GLSL_BOP(SLessThan, "<"); + GLSL_BOP(ULessThanEqual, "<="); + GLSL_BOP(SLessThanEqual, "<="); + GLSL_BOP(UGreaterThan, ">"); + GLSL_BOP(SGreaterThan, ">"); + GLSL_BOP(UGreaterThanEqual, ">="); + GLSL_BOP(SGreaterThanEqual, ">="); + + case OpSelect: + { + if (cop.arguments.size() < 3) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + // This one is pretty annoying. It's triggered from + // uint(bool), int(bool) from spec constants. + // In order to preserve its compile-time constness in Vulkan GLSL, + // we need to reduce the OpSelect expression back to this simplified model. + // If we cannot, fail. + if (to_trivial_mix_op(type, op, cop.arguments[2], cop.arguments[1], cop.arguments[0])) + { + // Implement as a simple cast down below. + } + else + { + // Implement a ternary and pray the compiler understands it :) + return to_ternary_expression(type, cop.arguments[0], cop.arguments[1], cop.arguments[2]); + } + break; + } + + case OpVectorShuffle: + { + string expr = type_to_glsl_constructor(type); + expr += "("; + + uint32_t left_components = expression_type(cop.arguments[0]).vecsize; + string left_arg = to_enclosed_expression(cop.arguments[0]); + string right_arg = to_enclosed_expression(cop.arguments[1]); + + for (uint32_t i = 2; i < uint32_t(cop.arguments.size()); i++) + { + uint32_t index = cop.arguments[i]; + if (index >= left_components) + expr += right_arg + "." + "xyzw"[index - left_components]; + else + expr += left_arg + "." + "xyzw"[index]; + + if (i + 1 < uint32_t(cop.arguments.size())) + expr += ", "; + } + + expr += ")"; + return expr; + } + + case OpCompositeExtract: + { + auto expr = + access_chain_internal(cop.arguments[0], &cop.arguments[1], uint32_t(cop.arguments.size() - 1), true, false); + return expr; + } + + case OpCompositeInsert: + SPIRV_CROSS_THROW("OpCompositeInsert spec constant op is not supported."); + + default: + // Some opcodes are unimplemented here, these are currently not possible to test from glslang. 
+ SPIRV_CROSS_THROW("Unimplemented spec constant op."); + } + + SPIRType::BaseType input_type; + bool skip_cast_if_equal_type = glsl_opcode_is_sign_invariant(cop.opcode); + + switch (cop.opcode) + { + case OpIEqual: + case OpINotEqual: + input_type = SPIRType::Int; + break; + + default: + input_type = type.basetype; + break; + } + +#undef GLSL_BOP +#undef GLSL_UOP + if (binary) + { + if (cop.arguments.size() < 2) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + string cast_op0; + string cast_op1; + auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, cop.arguments[0], + cop.arguments[1], skip_cast_if_equal_type); + + if (type.basetype != input_type && type.basetype != SPIRType::Boolean) + { + expected_type.basetype = input_type; + auto expr = bitcast_glsl_op(type, expected_type); + expr += '('; + expr += join(cast_op0, " ", op, " ", cast_op1); + expr += ')'; + return expr; + } + else + return join("(", cast_op0, " ", op, " ", cast_op1, ")"); + } + else if (unary) + { + if (cop.arguments.size() < 1) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + + // Auto-bitcast to result type as needed. + // Works around various casting scenarios in glslang as there is no OpBitcast for specialization constants. + return join("(", op, bitcast_glsl(type, cop.arguments[0]), ")"); + } + else + { + if (cop.arguments.size() < 1) + SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp."); + return join(op, "(", to_expression(cop.arguments[0]), ")"); + } +} + +string CompilerGLSL::constant_expression(const SPIRConstant &c) +{ + auto &type = get(c.constant_type); + + if (type.pointer) + { + return backend.null_pointer_literal; + } + else if (!c.subconstants.empty()) + { + // Handles Arrays and structures. + string res; + if (backend.use_initializer_list && backend.use_typed_initializer_list && type.basetype == SPIRType::Struct && + type.array.empty()) + { + res = type_to_glsl_constructor(type) + "{ "; + } + else if (backend.use_initializer_list) + { + res = "{ "; + } + else + { + res = type_to_glsl_constructor(type) + "("; + } + + for (auto &elem : c.subconstants) + { + auto &subc = get(elem); + if (subc.specialization) + res += to_name(elem); + else + res += constant_expression(subc); + + if (&elem != &c.subconstants.back()) + res += ", "; + } + + res += backend.use_initializer_list ? " }" : ")"; + return res; + } + else if (c.columns() == 1) + { + return constant_expression_vector(c, 0); + } + else + { + string res = type_to_glsl(get(c.constant_type)) + "("; + for (uint32_t col = 0; col < c.columns(); col++) + { + if (c.specialization_constant_id(col) != 0) + res += to_name(c.specialization_constant_id(col)); + else + res += constant_expression_vector(c, col); + + if (col + 1 < c.columns()) + res += ", "; + } + res += ")"; + return res; + } +} + +#ifdef _MSC_VER +// sprintf warning. +// We cannot rely on snprintf existing because, ..., MSVC. +#pragma warning(push) +#pragma warning(disable : 4996) +#endif + +string CompilerGLSL::convert_half_to_string(const SPIRConstant &c, uint32_t col, uint32_t row) +{ + string res; + float float_value = c.scalar_f16(col, row); + + if (std::isnan(float_value) || std::isinf(float_value)) + { + if (backend.half_literal_suffix) + { + // There is no uintBitsToFloat for 16-bit, so have to rely on legacy fallback here. 
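+ // Divisions such as 1.0 / 0.0 and 0.0 / 0.0, written with the backend's half literal
+ // suffix or constructor, are used to spell +inf, -inf and NaN.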
+ if (float_value == numeric_limits::infinity()) + res = join("(1.0", backend.half_literal_suffix, " / 0.0", backend.half_literal_suffix, ")"); + else if (float_value == -numeric_limits::infinity()) + res = join("(-1.0", backend.half_literal_suffix, " / 0.0", backend.half_literal_suffix, ")"); + else if (std::isnan(float_value)) + res = join("(0.0", backend.half_literal_suffix, " / 0.0", backend.half_literal_suffix, ")"); + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point constant."); + } + else + { + SPIRType type; + type.basetype = SPIRType::Half; + type.vecsize = 1; + type.columns = 1; + + if (float_value == numeric_limits::infinity()) + res = join(type_to_glsl(type), "(1.0 / 0.0)"); + else if (float_value == -numeric_limits::infinity()) + res = join(type_to_glsl(type), "(-1.0 / 0.0)"); + else if (std::isnan(float_value)) + res = join(type_to_glsl(type), "(0.0 / 0.0)"); + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point constant."); + } + } + else + { + if (backend.half_literal_suffix) + res = convert_to_string(float_value) + backend.half_literal_suffix; + else + { + // In HLSL (FXC), it's important to cast the literals to half precision right away. + // There is no literal for it. + SPIRType type; + type.basetype = SPIRType::Half; + type.vecsize = 1; + type.columns = 1; + res = join(type_to_glsl(type), "(", convert_to_string(float_value), ")"); + } + } + + return res; +} + +string CompilerGLSL::convert_float_to_string(const SPIRConstant &c, uint32_t col, uint32_t row) +{ + string res; + float float_value = c.scalar_f32(col, row); + + if (std::isnan(float_value) || std::isinf(float_value)) + { + // Use special representation. + if (!is_legacy()) + { + SPIRType out_type; + SPIRType in_type; + out_type.basetype = SPIRType::Float; + in_type.basetype = SPIRType::UInt; + out_type.vecsize = 1; + in_type.vecsize = 1; + out_type.width = 32; + in_type.width = 32; + + char print_buffer[32]; + sprintf(print_buffer, "0x%xu", c.scalar(col, row)); + res = join(bitcast_glsl_op(out_type, in_type), "(", print_buffer, ")"); + } + else + { + if (float_value == numeric_limits::infinity()) + { + if (backend.float_literal_suffix) + res = "(1.0f / 0.0f)"; + else + res = "(1.0 / 0.0)"; + } + else if (float_value == -numeric_limits::infinity()) + { + if (backend.float_literal_suffix) + res = "(-1.0f / 0.0f)"; + else + res = "(-1.0 / 0.0)"; + } + else if (std::isnan(float_value)) + { + if (backend.float_literal_suffix) + res = "(0.0f / 0.0f)"; + else + res = "(0.0 / 0.0)"; + } + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point constant."); + } + } + else + { + res = convert_to_string(float_value); + if (backend.float_literal_suffix) + res += "f"; + } + + return res; +} + +std::string CompilerGLSL::convert_double_to_string(const SPIRConstant &c, uint32_t col, uint32_t row) +{ + string res; + double double_value = c.scalar_f64(col, row); + + if (std::isnan(double_value) || std::isinf(double_value)) + { + // Use special representation. 
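+ // Non-legacy targets reconstruct the exact bit pattern from a 64-bit hex literal
+ // (hence GL_ARB_gpu_shader_int64 below); legacy targets fall back to 1.0 / 0.0 style
+ // literals and need GL_ARB_gpu_shader_fp64.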
+ if (!is_legacy()) + { + SPIRType out_type; + SPIRType in_type; + out_type.basetype = SPIRType::Double; + in_type.basetype = SPIRType::UInt64; + out_type.vecsize = 1; + in_type.vecsize = 1; + out_type.width = 64; + in_type.width = 64; + + uint64_t u64_value = c.scalar_u64(col, row); + + if (options.es) + SPIRV_CROSS_THROW("64-bit integers/float not supported in ES profile."); + require_extension_internal("GL_ARB_gpu_shader_int64"); + + char print_buffer[64]; + sprintf(print_buffer, "0x%llx%s", static_cast(u64_value), + backend.long_long_literal_suffix ? "ull" : "ul"); + res = join(bitcast_glsl_op(out_type, in_type), "(", print_buffer, ")"); + } + else + { + if (options.es) + SPIRV_CROSS_THROW("FP64 not supported in ES profile."); + if (options.version < 400) + require_extension_internal("GL_ARB_gpu_shader_fp64"); + + if (double_value == numeric_limits::infinity()) + { + if (backend.double_literal_suffix) + res = "(1.0lf / 0.0lf)"; + else + res = "(1.0 / 0.0)"; + } + else if (double_value == -numeric_limits::infinity()) + { + if (backend.double_literal_suffix) + res = "(-1.0lf / 0.0lf)"; + else + res = "(-1.0 / 0.0)"; + } + else if (std::isnan(double_value)) + { + if (backend.double_literal_suffix) + res = "(0.0lf / 0.0lf)"; + else + res = "(0.0 / 0.0)"; + } + else + SPIRV_CROSS_THROW("Cannot represent non-finite floating point constant."); + } + } + else + { + res = convert_to_string(double_value); + if (backend.double_literal_suffix) + res += "lf"; + } + + return res; +} + +#ifdef _MSC_VER +#pragma warning(pop) +#endif + +string CompilerGLSL::constant_expression_vector(const SPIRConstant &c, uint32_t vector) +{ + auto type = get(c.constant_type); + type.columns = 1; + + string res; + bool splat = backend.use_constructor_splatting && c.vector_size() > 1; + bool swizzle_splat = backend.can_swizzle_scalar && c.vector_size() > 1; + + if (!type_is_floating_point(type)) + { + // Cannot swizzle literal integers as a special case. + swizzle_splat = false; + } + + if (splat || swizzle_splat) + { + // Cannot use constant splatting if we have specialization constants somewhere in the vector. 
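+ // A specialization constant component must be referenced by name, so the vector has
+ // to be emitted component by component in that case.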
+ for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.specialization_constant_id(vector, i) != 0) + { + splat = false; + swizzle_splat = false; + break; + } + } + } + + if (splat || swizzle_splat) + { + if (type.width == 64) + { + uint64_t ident = c.scalar_u64(vector, 0); + for (uint32_t i = 1; i < c.vector_size(); i++) + { + if (ident != c.scalar_u64(vector, i)) + { + splat = false; + swizzle_splat = false; + break; + } + } + } + else + { + uint32_t ident = c.scalar(vector, 0); + for (uint32_t i = 1; i < c.vector_size(); i++) + { + if (ident != c.scalar(vector, i)) + { + splat = false; + swizzle_splat = false; + } + } + } + } + + if (c.vector_size() > 1 && !swizzle_splat) + res += type_to_glsl(type) + "("; + + switch (type.basetype) + { + case SPIRType::Half: + if (splat || swizzle_splat) + { + res += convert_half_to_string(c, vector, 0); + if (swizzle_splat) + res = remap_swizzle(get(c.constant_type), 1, res); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_half_to_string(c, vector, i); + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Float: + if (splat || swizzle_splat) + { + res += convert_float_to_string(c, vector, 0); + if (swizzle_splat) + res = remap_swizzle(get(c.constant_type), 1, res); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_float_to_string(c, vector, i); + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Double: + if (splat || swizzle_splat) + { + res += convert_double_to_string(c, vector, 0); + if (swizzle_splat) + res = remap_swizzle(get(c.constant_type), 1, res); + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_double_to_string(c, vector, i); + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Int64: + if (splat) + { + res += convert_to_string(c.scalar_i64(vector, 0)); + if (backend.long_long_literal_suffix) + res += "ll"; + else + res += "l"; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar_i64(vector, i)); + if (backend.long_long_literal_suffix) + res += "ll"; + else + res += "l"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UInt64: + if (splat) + { + res += convert_to_string(c.scalar_u64(vector, 0)); + if (backend.long_long_literal_suffix) + res += "ull"; + else + res += "ul"; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar_u64(vector, i)); + if (backend.long_long_literal_suffix) + res += "ull"; + else + res += "ul"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UInt: + if (splat) + { + res += convert_to_string(c.scalar(vector, 0)); + if (is_legacy()) + { + // 
Fake unsigned constant literals with signed ones if possible. + // Things like array sizes, etc, tend to be unsigned even though they could just as easily be signed. + if (c.scalar_i32(vector, 0) < 0) + SPIRV_CROSS_THROW("Tried to convert uint literal into int, but this made the literal negative."); + } + else if (backend.uint32_t_literal_suffix) + res += "u"; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar(vector, i)); + if (is_legacy()) + { + // Fake unsigned constant literals with signed ones if possible. + // Things like array sizes, etc, tend to be unsigned even though they could just as easily be signed. + if (c.scalar_i32(vector, i) < 0) + SPIRV_CROSS_THROW( + "Tried to convert uint literal into int, but this made the literal negative."); + } + else if (backend.uint32_t_literal_suffix) + res += "u"; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Int: + if (splat) + res += convert_to_string(c.scalar_i32(vector, 0)); + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += convert_to_string(c.scalar_i32(vector, i)); + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::UShort: + if (splat) + { + res += convert_to_string(c.scalar(vector, 0)); + if (is_legacy()) + { + // Fake unsigned constant literals with signed ones if possible. + // Things like array sizes, etc, tend to be unsigned even though they could just as easily be signed. + if (c.scalar_i16(vector, 0) < 0) + SPIRV_CROSS_THROW("Tried to convert uint literal into int, but this made the literal negative."); + } + else if (backend.uint16_t_literal_suffix) + res += backend.uint16_t_literal_suffix; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar(vector, i)); + if (is_legacy()) + { + // Fake unsigned constant literals with signed ones if possible. + // Things like array sizes, etc, tend to be unsigned even though they could just as easily be signed. + if (c.scalar_i16(vector, i) < 0) + SPIRV_CROSS_THROW( + "Tried to convert uint literal into int, but this made the literal negative."); + } + else if (backend.uint16_t_literal_suffix) + res += backend.uint16_t_literal_suffix; + } + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Short: + if (splat) + { + res += convert_to_string(c.scalar_i16(vector, 0)); + if (backend.int16_t_literal_suffix) + res += backend.int16_t_literal_suffix; + } + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + { + res += convert_to_string(c.scalar_i16(vector, i)); + if (backend.int16_t_literal_suffix) + res += backend.int16_t_literal_suffix; + } + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + case SPIRType::Boolean: + if (splat) + res += c.scalar(vector, 0) ? 
"true" : "false"; + else + { + for (uint32_t i = 0; i < c.vector_size(); i++) + { + if (c.vector_size() > 1 && c.specialization_constant_id(vector, i) != 0) + res += to_name(c.specialization_constant_id(vector, i)); + else + res += c.scalar(vector, i) ? "true" : "false"; + + if (i + 1 < c.vector_size()) + res += ", "; + } + } + break; + + default: + SPIRV_CROSS_THROW("Invalid constant expression basetype."); + } + + if (c.vector_size() > 1 && !swizzle_splat) + res += ")"; + + return res; +} + +string CompilerGLSL::declare_temporary(uint32_t result_type, uint32_t result_id) +{ + auto &type = get(result_type); + auto &flags = ir.meta[result_id].decoration.decoration_flags; + + // If we're declaring temporaries inside continue blocks, + // we must declare the temporary in the loop header so that the continue block can avoid declaring new variables. + if (current_continue_block && !hoisted_temporaries.count(result_id)) + { + auto &header = get(current_continue_block->loop_dominator); + if (find_if(begin(header.declare_temporary), end(header.declare_temporary), + [result_type, result_id](const pair &tmp) { + return tmp.first == result_type && tmp.second == result_id; + }) == end(header.declare_temporary)) + { + header.declare_temporary.emplace_back(result_type, result_id); + hoisted_temporaries.insert(result_id); + force_recompile = true; + } + + return join(to_name(result_id), " = "); + } + else if (hoisted_temporaries.count(result_id)) + { + // The temporary has already been declared earlier, so just "declare" the temporary by writing to it. + return join(to_name(result_id), " = "); + } + else + { + // The result_id has not been made into an expression yet, so use flags interface. + add_local_variable_name(result_id); + return join(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), " = "); + } +} + +bool CompilerGLSL::expression_is_forwarded(uint32_t id) +{ + return forwarded_temporaries.find(id) != end(forwarded_temporaries); +} + +SPIRExpression &CompilerGLSL::emit_op(uint32_t result_type, uint32_t result_id, const string &rhs, bool forwarding, + bool suppress_usage_tracking) +{ + if (forwarding && (forced_temporaries.find(result_id) == end(forced_temporaries))) + { + // Just forward it without temporary. + // If the forward is trivial, we do not force flushing to temporary for this expression. + if (!suppress_usage_tracking) + forwarded_temporaries.insert(result_id); + + return set(result_id, rhs, result_type, true); + } + else + { + // If expression isn't immutable, bind it to a temporary and make the new temporary immutable (they always are). 
+ statement(declare_temporary(result_type, result_id), rhs, ";"); + return set(result_id, to_name(result_id), result_type, true); + } +} + +void CompilerGLSL::emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op) +{ + bool forward = should_forward(op0); + emit_op(result_type, result_id, join(op, to_enclosed_unpacked_expression(op0)), forward); + inherit_expression_dependencies(result_id, op0); +} + +void CompilerGLSL::emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1); + emit_op(result_type, result_id, + join(to_enclosed_unpacked_expression(op0), " ", op, " ", to_enclosed_unpacked_expression(op1)), forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerGLSL::emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op) +{ + auto &type = get(result_type); + auto expr = type_to_glsl_constructor(type); + expr += '('; + for (uint32_t i = 0; i < type.vecsize; i++) + { + // Make sure to call to_expression multiple times to ensure + // that these expressions are properly flushed to temporaries if needed. + expr += op; + expr += to_extract_component_expression(operand, i); + + if (i + 1 < type.vecsize) + expr += ", "; + } + expr += ')'; + emit_op(result_type, result_id, expr, should_forward(operand)); + + inherit_expression_dependencies(result_id, operand); +} + +void CompilerGLSL::emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op) +{ + auto &type = get(result_type); + auto expr = type_to_glsl_constructor(type); + expr += '('; + for (uint32_t i = 0; i < type.vecsize; i++) + { + // Make sure to call to_expression multiple times to ensure + // that these expressions are properly flushed to temporaries if needed. + expr += to_extract_component_expression(op0, i); + expr += ' '; + expr += op; + expr += ' '; + expr += to_extract_component_expression(op1, i); + + if (i + 1 < type.vecsize) + expr += ", "; + } + expr += ')'; + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +SPIRType CompilerGLSL::binary_op_bitcast_helper(string &cast_op0, string &cast_op1, SPIRType::BaseType &input_type, + uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type) +{ + auto &type0 = expression_type(op0); + auto &type1 = expression_type(op1); + + // We have to bitcast if our inputs are of different type, or if our types are not equal to expected inputs. + // For some functions like OpIEqual and INotEqual, we don't care if inputs are of different types than expected + // since equality test is exactly the same. + bool cast = (type0.basetype != type1.basetype) || (!skip_cast_if_equal_type && type0.basetype != input_type); + + // Create a fake type so we can bitcast to it. + // We only deal with regular arithmetic types here like int, uints and so on. + SPIRType expected_type; + expected_type.basetype = input_type; + expected_type.vecsize = type0.vecsize; + expected_type.columns = type0.columns; + expected_type.width = type0.width; + + if (cast) + { + cast_op0 = bitcast_glsl(expected_type, op0); + cast_op1 = bitcast_glsl(expected_type, op1); + } + else + { + // If we don't cast, our actual input type is that of the first (or second) argument. 
+		cast_op0 = to_enclosed_unpacked_expression(op0);
+		cast_op1 = to_enclosed_unpacked_expression(op1);
+		input_type = type0.basetype;
+	}
+
+	return expected_type;
+}
+
+void CompilerGLSL::emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
+                                       const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type)
+{
+	string cast_op0, cast_op1;
+	auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type);
+	auto &out_type = get<SPIRType>(result_type);
+
+	// We might have casted away from the result type, so bitcast again.
+	// For example, arithmetic right shift with uint inputs.
+	// Special case boolean outputs since relational opcodes output booleans instead of int/uint.
+	string expr;
+	if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean)
+	{
+		expected_type.basetype = input_type;
+		expr = bitcast_glsl_op(out_type, expected_type);
+		expr += '(';
+		expr += join(cast_op0, " ", op, " ", cast_op1);
+		expr += ')';
+	}
+	else
+		expr += join(cast_op0, " ", op, " ", cast_op1);
+
+	emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1));
+	inherit_expression_dependencies(result_id, op0);
+	inherit_expression_dependencies(result_id, op1);
+}
+
+void CompilerGLSL::emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op)
+{
+	bool forward = should_forward(op0);
+	emit_op(result_type, result_id, join(op, "(", to_unpacked_expression(op0), ")"), forward);
+	inherit_expression_dependencies(result_id, op0);
+}
+
+void CompilerGLSL::emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
+                                       const char *op)
+{
+	bool forward = should_forward(op0) && should_forward(op1);
+	emit_op(result_type, result_id, join(op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), ")"),
+	        forward);
+	inherit_expression_dependencies(result_id, op0);
+	inherit_expression_dependencies(result_id, op1);
+}
+
+void CompilerGLSL::emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
+                                            const char *op, SPIRType::BaseType input_type, bool skip_cast_if_equal_type)
+{
+	string cast_op0, cast_op1;
+	auto expected_type = binary_op_bitcast_helper(cast_op0, cast_op1, input_type, op0, op1, skip_cast_if_equal_type);
+	auto &out_type = get<SPIRType>(result_type);
+
+	// Special case boolean outputs since relational opcodes output booleans instead of int/uint.
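+	// Rough shape of the result when a cast is required (operand names are illustrative):
+	// the operands are bitcast to the expected input type and, unless the output is boolean,
+	// the call is bitcast back to the result type, e.g. "uint(op(int(a), int(b)))" for uint
+	// data driven through a signed opcode.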
+ string expr; + if (out_type.basetype != input_type && out_type.basetype != SPIRType::Boolean) + { + expected_type.basetype = input_type; + expr = bitcast_glsl_op(out_type, expected_type); + expr += '('; + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + expr += ')'; + } + else + { + expr += join(op, "(", cast_op0, ", ", cast_op1, ")"); + } + + emit_op(result_type, result_id, expr, should_forward(op0) && should_forward(op1)); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +void CompilerGLSL::emit_trinary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1) && should_forward(op2); + emit_op(result_type, result_id, + join(op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), ", ", + to_unpacked_expression(op2), ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); +} + +void CompilerGLSL::emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + uint32_t op2, uint32_t op3, const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1) && should_forward(op2) && should_forward(op3); + emit_op(result_type, result_id, + join(op, "(", to_unpacked_expression(op0), ", ", to_unpacked_expression(op1), ", ", + to_unpacked_expression(op2), ", ", to_unpacked_expression(op3), ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + inherit_expression_dependencies(result_id, op2); + inherit_expression_dependencies(result_id, op3); +} + +// EXT_shader_texture_lod only concerns fragment shaders so lod tex functions +// are not allowed in ES 2 vertex shaders. But SPIR-V only supports lod tex +// functions in vertex shaders so we revert those back to plain calls when +// the lod is a constant value of zero. +bool CompilerGLSL::check_explicit_lod_allowed(uint32_t lod) +{ + auto &execution = get_entry_point(); + bool allowed = !is_legacy_es() || execution.model == ExecutionModelFragment; + if (!allowed && lod != 0) + { + auto *lod_constant = maybe_get(lod); + if (!lod_constant || lod_constant->scalar_f32() != 0.0f) + { + SPIRV_CROSS_THROW("Explicit lod not allowed in legacy ES non-fragment shaders."); + } + } + return allowed; +} + +string CompilerGLSL::legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t lod, uint32_t tex) +{ + const char *type; + switch (imgtype.image.dim) + { + case spv::Dim1D: + type = (imgtype.image.arrayed && !options.es) ? "1DArray" : "1D"; + break; + case spv::Dim2D: + type = (imgtype.image.arrayed && !options.es) ? 
"2DArray" : "2D"; + break; + case spv::Dim3D: + type = "3D"; + break; + case spv::DimCube: + type = "Cube"; + break; + case spv::DimRect: + type = "2DRect"; + break; + case spv::DimBuffer: + type = "Buffer"; + break; + case spv::DimSubpassData: + type = "2D"; + break; + default: + type = ""; + break; + } + + bool use_explicit_lod = check_explicit_lod_allowed(lod); + + if (op == "textureLod" || op == "textureProjLod" || op == "textureGrad" || op == "textureProjGrad") + { + if (is_legacy_es()) + { + if (use_explicit_lod) + require_extension_internal("GL_EXT_shader_texture_lod"); + } + else if (is_legacy()) + require_extension_internal("GL_ARB_shader_texture_lod"); + } + + if (op == "textureLodOffset" || op == "textureProjLodOffset") + { + if (is_legacy_es()) + SPIRV_CROSS_THROW(join(op, " not allowed in legacy ES")); + + require_extension_internal("GL_EXT_gpu_shader4"); + } + + // GLES has very limited support for shadow samplers. + // Basically shadow2D and shadow2DProj work through EXT_shadow_samplers, + // everything else can just throw + if (image_is_comparison(imgtype, tex) && is_legacy_es()) + { + if (op == "texture" || op == "textureProj") + require_extension_internal("GL_EXT_shadow_samplers"); + else + SPIRV_CROSS_THROW(join(op, " not allowed on depth samplers in legacy ES")); + } + + bool is_es_and_depth = is_legacy_es() && image_is_comparison(imgtype, tex); + std::string type_prefix = image_is_comparison(imgtype, tex) ? "shadow" : "texture"; + + if (op == "texture") + return is_es_and_depth ? join(type_prefix, type, "EXT") : join(type_prefix, type); + else if (op == "textureLod") + { + if (use_explicit_lod) + return join(type_prefix, type, is_legacy_es() ? "LodEXT" : "Lod"); + else + return join(type_prefix, type); + } + else if (op == "textureProj") + return join(type_prefix, type, is_es_and_depth ? "ProjEXT" : "Proj"); + else if (op == "textureGrad") + return join(type_prefix, type, is_legacy_es() ? "GradEXT" : is_legacy_desktop() ? "GradARB" : "Grad"); + else if (op == "textureProjLod") + { + if (use_explicit_lod) + return join(type_prefix, type, is_legacy_es() ? "ProjLodEXT" : "ProjLod"); + else + return join(type_prefix, type, "Proj"); + } + else if (op == "textureLodOffset") + { + if (use_explicit_lod) + return join(type_prefix, type, "LodOffset"); + else + return join(type_prefix, type); + } + else if (op == "textureProjGrad") + return join(type_prefix, type, + is_legacy_es() ? "ProjGradEXT" : is_legacy_desktop() ? "ProjGradARB" : "ProjGrad"); + else if (op == "textureProjLodOffset") + { + if (use_explicit_lod) + return join(type_prefix, type, "ProjLodOffset"); + else + return join(type_prefix, type, "ProjOffset"); + } + else + { + SPIRV_CROSS_THROW(join("Unsupported legacy texture op: ", op)); + } +} + +bool CompilerGLSL::to_trivial_mix_op(const SPIRType &type, string &op, uint32_t left, uint32_t right, uint32_t lerp) +{ + auto *cleft = maybe_get(left); + auto *cright = maybe_get(right); + auto &lerptype = expression_type(lerp); + + // If our targets aren't constants, we cannot use construction. + if (!cleft || !cright) + return false; + + // If our targets are spec constants, we cannot use construction. + if (cleft->specialization || cright->specialization) + return false; + + // We can only use trivial construction if we have a scalar + // (should be possible to do it for vectors as well, but that is overkill for now). 
+ if (lerptype.basetype != SPIRType::Boolean || lerptype.vecsize > 1) + return false; + + // If our bool selects between 0 and 1, we can cast from bool instead, making our trivial constructor. + bool ret = false; + switch (type.basetype) + { + case SPIRType::Short: + case SPIRType::UShort: + ret = cleft->scalar_u16() == 0 && cright->scalar_u16() == 1; + break; + + case SPIRType::Int: + case SPIRType::UInt: + ret = cleft->scalar() == 0 && cright->scalar() == 1; + break; + + case SPIRType::Half: + ret = cleft->scalar_f16() == 0.0f && cright->scalar_f16() == 1.0f; + break; + + case SPIRType::Float: + ret = cleft->scalar_f32() == 0.0f && cright->scalar_f32() == 1.0f; + break; + + case SPIRType::Double: + ret = cleft->scalar_f64() == 0.0 && cright->scalar_f64() == 1.0; + break; + + case SPIRType::Int64: + case SPIRType::UInt64: + ret = cleft->scalar_u64() == 0 && cright->scalar_u64() == 1; + break; + + default: + break; + } + + if (ret) + op = type_to_glsl_constructor(type); + return ret; +} + +string CompilerGLSL::to_ternary_expression(const SPIRType &restype, uint32_t select, uint32_t true_value, + uint32_t false_value) +{ + string expr; + auto &lerptype = expression_type(select); + + if (lerptype.vecsize == 1) + expr = join(to_enclosed_expression(select), " ? ", to_enclosed_pointer_expression(true_value), " : ", + to_enclosed_pointer_expression(false_value)); + else + { + auto swiz = [this](uint32_t expression, uint32_t i) { return to_extract_component_expression(expression, i); }; + + expr = type_to_glsl_constructor(restype); + expr += "("; + for (uint32_t i = 0; i < restype.vecsize; i++) + { + expr += swiz(select, i); + expr += " ? "; + expr += swiz(true_value, i); + expr += " : "; + expr += swiz(false_value, i); + if (i + 1 < restype.vecsize) + expr += ", "; + } + expr += ")"; + } + + return expr; +} + +void CompilerGLSL::emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp) +{ + auto &lerptype = expression_type(lerp); + auto &restype = get(result_type); + + // If this results in a variable pointer, assume it may be written through. + if (restype.pointer) + { + register_write(left); + register_write(right); + } + + string mix_op; + bool has_boolean_mix = backend.boolean_mix_support && + ((options.es && options.version >= 310) || (!options.es && options.version >= 450)); + bool trivial_mix = to_trivial_mix_op(restype, mix_op, left, right, lerp); + + // Cannot use boolean mix when the lerp argument is just one boolean, + // fall back to regular trinary statements. + if (lerptype.vecsize == 1) + has_boolean_mix = false; + + // If we can reduce the mix to a simple cast, do so. + // This helps for cases like int(bool), uint(bool) which is implemented with + // OpSelect bool 1 0. + if (trivial_mix) + { + emit_unary_func_op(result_type, id, lerp, mix_op.c_str()); + } + else if (!has_boolean_mix && lerptype.basetype == SPIRType::Boolean) + { + // Boolean mix not supported on desktop without extension. + // Was added in OpenGL 4.5 with ES 3.1 compat. + // + // Could use GL_EXT_shader_integer_mix on desktop at least, + // but Apple doesn't support it. :( + // Just implement it as ternary expressions. 
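+		// E.g. a bvec2 selector becomes a component-wise ternary (names are illustrative):
+		//   vec2(sel.x ? b.x : a.x, sel.y ? b.y : a.y)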
+		auto expr = to_ternary_expression(get<SPIRType>(result_type), lerp, right, left);
+		emit_op(result_type, id, expr, should_forward(left) && should_forward(right) && should_forward(lerp));
+		inherit_expression_dependencies(id, left);
+		inherit_expression_dependencies(id, right);
+		inherit_expression_dependencies(id, lerp);
+	}
+	else
+		emit_trinary_func_op(result_type, id, left, right, lerp, "mix");
+}
+
+string CompilerGLSL::to_combined_image_sampler(uint32_t image_id, uint32_t samp_id)
+{
+	// Keep track of the array indices we have used to load the image.
+	// We'll need to use the same array index into the combined image sampler array.
+	auto image_expr = to_expression(image_id);
+	string array_expr;
+	auto array_index = image_expr.find_first_of('[');
+	if (array_index != string::npos)
+		array_expr = image_expr.substr(array_index, string::npos);
+
+	auto &args = current_function->arguments;
+
+	// For GLSL and ESSL targets, we must enumerate all possible combinations for sampler2D(texture2D, sampler) and redirect
+	// all possible combinations into new sampler2D uniforms.
+	auto *image = maybe_get_backing_variable(image_id);
+	auto *samp = maybe_get_backing_variable(samp_id);
+	if (image)
+		image_id = image->self;
+	if (samp)
+		samp_id = samp->self;
+
+	auto image_itr = find_if(begin(args), end(args),
+	                         [image_id](const SPIRFunction::Parameter &param) { return param.id == image_id; });
+
+	auto sampler_itr = find_if(begin(args), end(args),
+	                           [samp_id](const SPIRFunction::Parameter &param) { return param.id == samp_id; });
+
+	if (image_itr != end(args) || sampler_itr != end(args))
+	{
+		// If any parameter originates from a parameter, we will find it in our argument list.
+		bool global_image = image_itr == end(args);
+		bool global_sampler = sampler_itr == end(args);
+		uint32_t iid = global_image ? image_id : uint32_t(image_itr - begin(args));
+		uint32_t sid = global_sampler ? samp_id : uint32_t(sampler_itr - begin(args));
+
+		auto &combined = current_function->combined_parameters;
+		auto itr = find_if(begin(combined), end(combined), [=](const SPIRFunction::CombinedImageSamplerParameter &p) {
+			return p.global_image == global_image && p.global_sampler == global_sampler && p.image_id == iid &&
+			       p.sampler_id == sid;
+		});
+
+		if (itr != end(combined))
+			return to_expression(itr->id) + array_expr;
+		else
+		{
+			SPIRV_CROSS_THROW(
+			    "Cannot find mapping for combined sampler parameter, was build_combined_image_samplers() used "
+			    "before compile() was called?");
+		}
+	}
+	else
+	{
+		// For global sampler2D, look directly at the global remapping table.
+		auto &mapping = combined_image_samplers;
+		auto itr = find_if(begin(mapping), end(mapping), [image_id, samp_id](const CombinedImageSampler &combined) {
+			return combined.image_id == image_id && combined.sampler_id == samp_id;
+		});
+
+		if (itr != end(combined_image_samplers))
+			return to_expression(itr->combined_id) + array_expr;
+		else
+		{
+			SPIRV_CROSS_THROW("Cannot find mapping for combined sampler, was build_combined_image_samplers() used "
+			                  "before compile() was called?");
+		}
+	}
+}
+
+void CompilerGLSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id)
+{
+	if (options.vulkan_semantics && combined_image_samplers.empty())
+	{
+		emit_binary_func_op(result_type, result_id, image_id, samp_id,
+		                    type_to_glsl(get<SPIRType>(result_type), result_id).c_str());
+
+		// Make sure to suppress usage tracking. It is illegal to create temporaries of opaque types.
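+		// In Vulkan GLSL this emits a constructor-style combine such as "sampler2D(uTex, uSampler)"
+		// (uniform names are illustrative); the non-Vulkan path below instead substitutes the
+		// combined sampler created by build_combined_image_samplers().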
+ forwarded_temporaries.erase(result_id); + } + else + { + // Make sure to suppress usage tracking. It is illegal to create temporaries of opaque types. + emit_op(result_type, result_id, to_combined_image_sampler(image_id, samp_id), true, true); + } +} + +void CompilerGLSL::emit_texture_op(const Instruction &i) +{ + auto *ops = stream(i); + auto op = static_cast(i.op); + uint32_t length = i.length; + + vector inherited_expressions; + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t img = ops[2]; + uint32_t coord = ops[3]; + uint32_t dref = 0; + uint32_t comp = 0; + bool gather = false; + bool proj = false; + bool fetch = false; + const uint32_t *opt = nullptr; + + inherited_expressions.push_back(coord); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + break; + + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + proj = true; + break; + + case OpImageDrefGather: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + gather = true; + break; + + case OpImageGather: + comp = ops[4]; + opt = &ops[5]; + length -= 5; + gather = true; + break; + + case OpImageFetch: + case OpImageRead: // Reads == fetches in Metal (other langs will not get here) + opt = &ops[4]; + length -= 4; + fetch = true; + break; + + case OpImageSampleProjImplicitLod: + case OpImageSampleProjExplicitLod: + opt = &ops[4]; + length -= 4; + proj = true; + break; + + default: + opt = &ops[4]; + length -= 4; + break; + } + + // Bypass pointers because we need the real image struct + auto &type = expression_type(img); + auto &imgtype = get(type.self); + + uint32_t coord_components = 0; + switch (imgtype.image.dim) + { + case spv::Dim1D: + coord_components = 1; + break; + case spv::Dim2D: + coord_components = 2; + break; + case spv::Dim3D: + coord_components = 3; + break; + case spv::DimCube: + coord_components = 3; + break; + case spv::DimBuffer: + coord_components = 1; + break; + default: + coord_components = 2; + break; + } + + if (dref) + inherited_expressions.push_back(dref); + + if (proj) + coord_components++; + if (imgtype.image.arrayed) + coord_components++; + + uint32_t bias = 0; + uint32_t lod = 0; + uint32_t grad_x = 0; + uint32_t grad_y = 0; + uint32_t coffset = 0; + uint32_t offset = 0; + uint32_t coffsets = 0; + uint32_t sample = 0; + uint32_t flags = 0; + + if (length) + { + flags = *opt++; + length--; + } + + auto test = [&](uint32_t &v, uint32_t flag) { + if (length && (flags & flag)) + { + v = *opt++; + inherited_expressions.push_back(v); + length--; + } + }; + + test(bias, ImageOperandsBiasMask); + test(lod, ImageOperandsLodMask); + test(grad_x, ImageOperandsGradMask); + test(grad_y, ImageOperandsGradMask); + test(coffset, ImageOperandsConstOffsetMask); + test(offset, ImageOperandsOffsetMask); + test(coffsets, ImageOperandsConstOffsetsMask); + test(sample, ImageOperandsSampleMask); + + string expr; + bool forward = false; + expr += to_function_name(img, imgtype, !!fetch, !!gather, !!proj, !!coffsets, (!!coffset || !!offset), + (!!grad_x || !!grad_y), !!dref, lod); + expr += "("; + expr += to_function_args(img, imgtype, fetch, gather, proj, coord, coord_components, dref, grad_x, grad_y, lod, + coffset, offset, bias, comp, sample, &forward); + expr += ")"; + + // texture(samplerXShadow) returns float. shadowX() returns vec4. Swizzle here. 
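+	// E.g. a depth-compare lookup that would be "texture(uShadow, uvw)" on modern targets comes out
+	// as "shadow2D(uShadow, uvw).r" on legacy GLSL (names are illustrative).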
+ if (is_legacy() && image_is_comparison(imgtype, img)) + expr += ".r"; + + emit_op(result_type, id, expr, forward); + for (auto &inherit : inherited_expressions) + inherit_expression_dependencies(id, inherit); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleProjDrefImplicitLod: + register_control_dependent_expression(id); + break; + + default: + break; + } +} + +bool CompilerGLSL::expression_is_constant_null(uint32_t id) const +{ + auto *c = maybe_get(id); + if (!c) + return false; + return c->constant_is_null(); +} + +// Returns the function name for a texture sampling function for the specified image and sampling characteristics. +// For some subclasses, the function is a method on the specified image. +string CompilerGLSL::to_function_name(uint32_t tex, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, bool has_array_offsets, bool has_offset, bool has_grad, bool, + uint32_t lod) +{ + string fname; + + // textureLod on sampler2DArrayShadow and samplerCubeShadow does not exist in GLSL for some reason. + // To emulate this, we will have to use textureGrad with a constant gradient of 0. + // The workaround will assert that the LOD is in fact constant 0, or we cannot emit correct code. + // This happens for HLSL SampleCmpLevelZero on Texture2DArray and TextureCube. + bool workaround_lod_array_shadow_as_grad = false; + if (((imgtype.image.arrayed && imgtype.image.dim == Dim2D) || imgtype.image.dim == DimCube) && + image_is_comparison(imgtype, tex) && lod) + { + if (!expression_is_constant_null(lod)) + { + SPIRV_CROSS_THROW( + "textureLod on sampler2DArrayShadow is not constant 0.0. This cannot be expressed in GLSL."); + } + workaround_lod_array_shadow_as_grad = true; + } + + if (is_fetch) + fname += "texelFetch"; + else + { + fname += "texture"; + + if (is_gather) + fname += "Gather"; + if (has_array_offsets) + fname += "Offsets"; + if (is_proj) + fname += "Proj"; + if (has_grad || workaround_lod_array_shadow_as_grad) + fname += "Grad"; + if (!!lod && !workaround_lod_array_shadow_as_grad) + fname += "Lod"; + } + + if (has_offset) + fname += "Offset"; + + return is_legacy() ? legacy_tex_op(fname, imgtype, lod, tex) : fname; +} + +std::string CompilerGLSL::convert_separate_image_to_expression(uint32_t id) +{ + auto *var = maybe_get_backing_variable(id); + + // If we are fetching from a plain OpTypeImage, we must combine with a dummy sampler in GLSL. + // In Vulkan GLSL, we can make use of the newer GL_EXT_samplerless_texture_functions. + if (var) + { + auto &type = get(var->basetype); + if (type.basetype == SPIRType::Image && type.image.sampled == 1 && type.image.dim != DimBuffer) + { + if (options.vulkan_semantics) + { + // Newer glslang supports this extension to deal with texture2D as argument to texture functions. + if (dummy_sampler_id) + SPIRV_CROSS_THROW("Vulkan GLSL should not have a dummy sampler for combining."); + require_extension_internal("GL_EXT_samplerless_texture_functions"); + } + else + { + if (!dummy_sampler_id) + SPIRV_CROSS_THROW( + "Cannot find dummy sampler ID. Was build_dummy_sampler_for_combined_images() called?"); + + return to_combined_image_sampler(id, dummy_sampler_id); + } + } + } + + return to_expression(id); +} + +// Returns the function args for a texture sampling function for the specified image and sampling characteristics. 
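+// Together with to_function_name() this assembles the complete call; e.g. a biased 2D sample might
+// come out as "texture(uTex, uv, 1.5)" and an OpImageFetch as "texelFetch(uTex, coord, 0)"
+// (names and literals are illustrative).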
+string CompilerGLSL::to_function_args(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, uint32_t coord, uint32_t coord_components, uint32_t dref, + uint32_t grad_x, uint32_t grad_y, uint32_t lod, uint32_t coffset, uint32_t offset, + uint32_t bias, uint32_t comp, uint32_t sample, bool *p_forward) +{ + string farg_str; + if (is_fetch) + farg_str = convert_separate_image_to_expression(img); + else + farg_str = to_expression(img); + + bool swizz_func = backend.swizzle_is_function; + auto swizzle = [swizz_func](uint32_t comps, uint32_t in_comps) -> const char * { + if (comps == in_comps) + return ""; + + switch (comps) + { + case 1: + return ".x"; + case 2: + return swizz_func ? ".xy()" : ".xy"; + case 3: + return swizz_func ? ".xyz()" : ".xyz"; + default: + return ""; + } + }; + + bool forward = should_forward(coord); + + // The IR can give us more components than we need, so chop them off as needed. + auto swizzle_expr = swizzle(coord_components, expression_type(coord).vecsize); + // Only enclose the UV expression if needed. + auto coord_expr = (*swizzle_expr == '\0') ? to_expression(coord) : (to_enclosed_expression(coord) + swizzle_expr); + + // texelFetch only takes int, not uint. + auto &coord_type = expression_type(coord); + if (coord_type.basetype == SPIRType::UInt) + { + auto expected_type = coord_type; + expected_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(expected_type, coord_type.basetype, coord_expr); + } + + // textureLod on sampler2DArrayShadow and samplerCubeShadow does not exist in GLSL for some reason. + // To emulate this, we will have to use textureGrad with a constant gradient of 0. + // The workaround will assert that the LOD is in fact constant 0, or we cannot emit correct code. + // This happens for HLSL SampleCmpLevelZero on Texture2DArray and TextureCube. + bool workaround_lod_array_shadow_as_grad = + ((imgtype.image.arrayed && imgtype.image.dim == Dim2D) || imgtype.image.dim == DimCube) && + image_is_comparison(imgtype, img) && lod; + + if (dref) + { + forward = forward && should_forward(dref); + + // SPIR-V splits dref and coordinate. + if (is_gather || coord_components == 4) // GLSL also splits the arguments in two. Same for textureGather. + { + farg_str += ", "; + farg_str += to_expression(coord); + farg_str += ", "; + farg_str += to_expression(dref); + } + else if (is_proj) + { + // Have to reshuffle so we get vec4(coord, dref, proj), special case. + // Other shading languages splits up the arguments for coord and compare value like SPIR-V. + // The coordinate type for textureProj shadow is always vec4 even for sampler1DShadow. + farg_str += ", vec4("; + + if (imgtype.image.dim == Dim1D) + { + // Could reuse coord_expr, but we will mess up the temporary usage checking. + farg_str += to_enclosed_expression(coord) + ".x"; + farg_str += ", "; + farg_str += "0.0, "; + farg_str += to_expression(dref); + farg_str += ", "; + farg_str += to_enclosed_expression(coord) + ".y)"; + } + else if (imgtype.image.dim == Dim2D) + { + // Could reuse coord_expr, but we will mess up the temporary usage checking. + farg_str += to_enclosed_expression(coord) + (swizz_func ? ".xy()" : ".xy"); + farg_str += ", "; + farg_str += to_expression(dref); + farg_str += ", "; + farg_str += to_enclosed_expression(coord) + ".z)"; + } + else + SPIRV_CROSS_THROW("Invalid type for textureProj with shadow."); + } + else + { + // Create a composite which merges coord/dref into a single vector. 
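+			// E.g. a sampler2DShadow lookup packs the coordinate and compare value together
+			// (names are illustrative): texture(uShadow, vec3(uv, dref))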
+ auto type = expression_type(coord); + type.vecsize = coord_components + 1; + farg_str += ", "; + farg_str += type_to_glsl_constructor(type); + farg_str += "("; + farg_str += coord_expr; + farg_str += ", "; + farg_str += to_expression(dref); + farg_str += ")"; + } + } + else + { + farg_str += ", "; + farg_str += coord_expr; + } + + if (grad_x || grad_y) + { + forward = forward && should_forward(grad_x); + forward = forward && should_forward(grad_y); + farg_str += ", "; + farg_str += to_expression(grad_x); + farg_str += ", "; + farg_str += to_expression(grad_y); + } + + if (lod) + { + if (workaround_lod_array_shadow_as_grad) + { + // Implement textureGrad() instead. LOD == 0.0 is implemented as gradient of 0.0. + // Implementing this as plain texture() is not safe on some implementations. + if (imgtype.image.dim == Dim2D) + farg_str += ", vec2(0.0), vec2(0.0)"; + else if (imgtype.image.dim == DimCube) + farg_str += ", vec3(0.0), vec3(0.0)"; + } + else + { + if (check_explicit_lod_allowed(lod)) + { + forward = forward && should_forward(lod); + farg_str += ", "; + farg_str += to_expression(lod); + } + } + } + else if (is_fetch && imgtype.image.dim != DimBuffer && !imgtype.image.ms) + { + // Lod argument is optional in OpImageFetch, but we require a LOD value, pick 0 as the default. + farg_str += ", 0"; + } + + if (coffset) + { + forward = forward && should_forward(coffset); + farg_str += ", "; + farg_str += to_expression(coffset); + } + else if (offset) + { + forward = forward && should_forward(offset); + farg_str += ", "; + farg_str += to_expression(offset); + } + + if (bias) + { + forward = forward && should_forward(bias); + farg_str += ", "; + farg_str += to_expression(bias); + } + + if (comp) + { + forward = forward && should_forward(comp); + farg_str += ", "; + farg_str += to_expression(comp); + } + + if (sample) + { + farg_str += ", "; + farg_str += to_expression(sample); + } + + *p_forward = forward; + + return farg_str; +} + +void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t) +{ + auto op = static_cast(eop); + + if (is_legacy() && is_unsigned_glsl_opcode(op)) + SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy GLSL targets."); + + switch (op) + { + // FP fiddling + case GLSLstd450Round: + emit_unary_func_op(result_type, id, args[0], "round"); + break; + + case GLSLstd450RoundEven: + if ((options.es && options.version >= 300) || (!options.es && options.version >= 130)) + emit_unary_func_op(result_type, id, args[0], "roundEven"); + else + SPIRV_CROSS_THROW("roundEven supported only in ESSL 300 and GLSL 130 and up."); + break; + + case GLSLstd450Trunc: + emit_unary_func_op(result_type, id, args[0], "trunc"); + break; + case GLSLstd450SAbs: + case GLSLstd450FAbs: + emit_unary_func_op(result_type, id, args[0], "abs"); + break; + case GLSLstd450SSign: + case GLSLstd450FSign: + emit_unary_func_op(result_type, id, args[0], "sign"); + break; + case GLSLstd450Floor: + emit_unary_func_op(result_type, id, args[0], "floor"); + break; + case GLSLstd450Ceil: + emit_unary_func_op(result_type, id, args[0], "ceil"); + break; + case GLSLstd450Fract: + emit_unary_func_op(result_type, id, args[0], "fract"); + break; + case GLSLstd450Radians: + emit_unary_func_op(result_type, id, args[0], "radians"); + break; + case GLSLstd450Degrees: + emit_unary_func_op(result_type, id, args[0], "degrees"); + break; + case GLSLstd450Fma: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fma"); + break; + case GLSLstd450Modf: + 
register_call_out_argument(args[1]); + forced_temporaries.insert(id); + emit_binary_func_op(result_type, id, args[0], args[1], "modf"); + break; + + case GLSLstd450ModfStruct: + { + forced_temporaries.insert(id); + auto &type = get(result_type); + auto &flags = ir.meta[id].decoration.decoration_flags; + statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(id)), ";"); + set(id, to_name(id), result_type, true); + + statement(to_expression(id), ".", to_member_name(type, 0), " = ", "modf(", to_expression(args[0]), ", ", + to_expression(id), ".", to_member_name(type, 1), ");"); + break; + } + + // Minmax + case GLSLstd450UMin: + case GLSLstd450FMin: + case GLSLstd450SMin: + emit_binary_func_op(result_type, id, args[0], args[1], "min"); + break; + case GLSLstd450FMax: + case GLSLstd450UMax: + case GLSLstd450SMax: + emit_binary_func_op(result_type, id, args[0], args[1], "max"); + break; + case GLSLstd450FClamp: + case GLSLstd450UClamp: + case GLSLstd450SClamp: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp"); + break; + + // Trig + case GLSLstd450Sin: + emit_unary_func_op(result_type, id, args[0], "sin"); + break; + case GLSLstd450Cos: + emit_unary_func_op(result_type, id, args[0], "cos"); + break; + case GLSLstd450Tan: + emit_unary_func_op(result_type, id, args[0], "tan"); + break; + case GLSLstd450Asin: + emit_unary_func_op(result_type, id, args[0], "asin"); + break; + case GLSLstd450Acos: + emit_unary_func_op(result_type, id, args[0], "acos"); + break; + case GLSLstd450Atan: + emit_unary_func_op(result_type, id, args[0], "atan"); + break; + case GLSLstd450Sinh: + emit_unary_func_op(result_type, id, args[0], "sinh"); + break; + case GLSLstd450Cosh: + emit_unary_func_op(result_type, id, args[0], "cosh"); + break; + case GLSLstd450Tanh: + emit_unary_func_op(result_type, id, args[0], "tanh"); + break; + case GLSLstd450Asinh: + emit_unary_func_op(result_type, id, args[0], "asinh"); + break; + case GLSLstd450Acosh: + emit_unary_func_op(result_type, id, args[0], "acosh"); + break; + case GLSLstd450Atanh: + emit_unary_func_op(result_type, id, args[0], "atanh"); + break; + case GLSLstd450Atan2: + emit_binary_func_op(result_type, id, args[0], args[1], "atan"); + break; + + // Exponentials + case GLSLstd450Pow: + emit_binary_func_op(result_type, id, args[0], args[1], "pow"); + break; + case GLSLstd450Exp: + emit_unary_func_op(result_type, id, args[0], "exp"); + break; + case GLSLstd450Log: + emit_unary_func_op(result_type, id, args[0], "log"); + break; + case GLSLstd450Exp2: + emit_unary_func_op(result_type, id, args[0], "exp2"); + break; + case GLSLstd450Log2: + emit_unary_func_op(result_type, id, args[0], "log2"); + break; + case GLSLstd450Sqrt: + emit_unary_func_op(result_type, id, args[0], "sqrt"); + break; + case GLSLstd450InverseSqrt: + emit_unary_func_op(result_type, id, args[0], "inversesqrt"); + break; + + // Matrix math + case GLSLstd450Determinant: + emit_unary_func_op(result_type, id, args[0], "determinant"); + break; + case GLSLstd450MatrixInverse: + emit_unary_func_op(result_type, id, args[0], "inverse"); + break; + + // Lerping + case GLSLstd450FMix: + case GLSLstd450IMix: + { + emit_mix_op(result_type, id, args[0], args[1], args[2]); + break; + } + case GLSLstd450Step: + emit_binary_func_op(result_type, id, args[0], args[1], "step"); + break; + case GLSLstd450SmoothStep: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "smoothstep"); + break; + + // Packing + case GLSLstd450Frexp: + 
register_call_out_argument(args[1]); + forced_temporaries.insert(id); + emit_binary_func_op(result_type, id, args[0], args[1], "frexp"); + break; + + case GLSLstd450FrexpStruct: + { + forced_temporaries.insert(id); + auto &type = get(result_type); + auto &flags = ir.meta[id].decoration.decoration_flags; + statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(id)), ";"); + set(id, to_name(id), result_type, true); + + statement(to_expression(id), ".", to_member_name(type, 0), " = ", "frexp(", to_expression(args[0]), ", ", + to_expression(id), ".", to_member_name(type, 1), ");"); + break; + } + + case GLSLstd450Ldexp: + emit_binary_func_op(result_type, id, args[0], args[1], "ldexp"); + break; + case GLSLstd450PackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "packSnorm4x8"); + break; + case GLSLstd450PackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "packUnorm4x8"); + break; + case GLSLstd450PackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "packSnorm2x16"); + break; + case GLSLstd450PackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "packUnorm2x16"); + break; + case GLSLstd450PackHalf2x16: + emit_unary_func_op(result_type, id, args[0], "packHalf2x16"); + break; + case GLSLstd450UnpackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpackSnorm4x8"); + break; + case GLSLstd450UnpackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpackUnorm4x8"); + break; + case GLSLstd450UnpackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpackSnorm2x16"); + break; + case GLSLstd450UnpackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpackUnorm2x16"); + break; + case GLSLstd450UnpackHalf2x16: + emit_unary_func_op(result_type, id, args[0], "unpackHalf2x16"); + break; + + case GLSLstd450PackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "packDouble2x32"); + break; + case GLSLstd450UnpackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "unpackDouble2x32"); + break; + + // Vector math + case GLSLstd450Length: + emit_unary_func_op(result_type, id, args[0], "length"); + break; + case GLSLstd450Distance: + emit_binary_func_op(result_type, id, args[0], args[1], "distance"); + break; + case GLSLstd450Cross: + emit_binary_func_op(result_type, id, args[0], args[1], "cross"); + break; + case GLSLstd450Normalize: + emit_unary_func_op(result_type, id, args[0], "normalize"); + break; + case GLSLstd450FaceForward: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "faceforward"); + break; + case GLSLstd450Reflect: + emit_binary_func_op(result_type, id, args[0], args[1], "reflect"); + break; + case GLSLstd450Refract: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "refract"); + break; + + // Bit-fiddling + case GLSLstd450FindILsb: + emit_unary_func_op(result_type, id, args[0], "findLSB"); + break; + case GLSLstd450FindSMsb: + case GLSLstd450FindUMsb: + emit_unary_func_op(result_type, id, args[0], "findMSB"); + break; + + // Multisampled varying + case GLSLstd450InterpolateAtCentroid: + emit_unary_func_op(result_type, id, args[0], "interpolateAtCentroid"); + break; + case GLSLstd450InterpolateAtSample: + emit_binary_func_op(result_type, id, args[0], args[1], "interpolateAtSample"); + break; + case GLSLstd450InterpolateAtOffset: + emit_binary_func_op(result_type, id, args[0], args[1], "interpolateAtOffset"); + break; + + case GLSLstd450NMin: + emit_binary_func_op(result_type, id, args[0], args[1], "unsupported_glsl450_nmin"); + break; + case 
GLSLstd450NMax: + emit_binary_func_op(result_type, id, args[0], args[1], "unsupported_glsl450_nmax"); + break; + case GLSLstd450NClamp: + emit_binary_func_op(result_type, id, args[0], args[1], "unsupported_glsl450_nclamp"); + break; + + default: + statement("// unimplemented GLSL op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, + uint32_t) +{ + require_extension_internal("GL_AMD_shader_ballot"); + + enum AMDShaderBallot + { + SwizzleInvocationsAMD = 1, + SwizzleInvocationsMaskedAMD = 2, + WriteInvocationAMD = 3, + MbcntAMD = 4 + }; + + auto op = static_cast(eop); + + switch (op) + { + case SwizzleInvocationsAMD: + emit_binary_func_op(result_type, id, args[0], args[1], "swizzleInvocationsAMD"); + register_control_dependent_expression(id); + break; + + case SwizzleInvocationsMaskedAMD: + emit_binary_func_op(result_type, id, args[0], args[1], "swizzleInvocationsMaskedAMD"); + register_control_dependent_expression(id); + break; + + case WriteInvocationAMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "writeInvocationAMD"); + register_control_dependent_expression(id); + break; + + case MbcntAMD: + emit_unary_func_op(result_type, id, args[0], "mbcntAMD"); + register_control_dependent_expression(id); + break; + + default: + statement("// unimplemented SPV AMD shader ballot op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t id, uint32_t eop, + const uint32_t *args, uint32_t) +{ + require_extension_internal("GL_AMD_shader_explicit_vertex_parameter"); + + enum AMDShaderExplicitVertexParameter + { + InterpolateAtVertexAMD = 1 + }; + + auto op = static_cast(eop); + + switch (op) + { + case InterpolateAtVertexAMD: + emit_binary_func_op(result_type, id, args[0], args[1], "interpolateAtVertexAMD"); + break; + + default: + statement("// unimplemented SPV AMD shader explicit vertex parameter op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t id, uint32_t eop, + const uint32_t *args, uint32_t) +{ + require_extension_internal("GL_AMD_shader_trinary_minmax"); + + enum AMDShaderTrinaryMinMax + { + FMin3AMD = 1, + UMin3AMD = 2, + SMin3AMD = 3, + FMax3AMD = 4, + UMax3AMD = 5, + SMax3AMD = 6, + FMid3AMD = 7, + UMid3AMD = 8, + SMid3AMD = 9 + }; + + auto op = static_cast(eop); + + switch (op) + { + case FMin3AMD: + case UMin3AMD: + case SMin3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "min3"); + break; + + case FMax3AMD: + case UMax3AMD: + case SMax3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "max3"); + break; + + case FMid3AMD: + case UMid3AMD: + case SMid3AMD: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "mid3"); + break; + + default: + statement("// unimplemented SPV AMD shader trinary minmax op ", eop); + break; + } +} + +void CompilerGLSL::emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, + uint32_t) +{ + require_extension_internal("GL_AMD_gcn_shader"); + + enum AMDGCNShader + { + CubeFaceIndexAMD = 1, + CubeFaceCoordAMD = 2, + TimeAMD = 3 + }; + + auto op = static_cast(eop); + + switch (op) + { + case CubeFaceIndexAMD: + emit_unary_func_op(result_type, id, args[0], "cubeFaceIndexAMD"); + break; + case CubeFaceCoordAMD: + emit_unary_func_op(result_type, id, args[0], "cubeFaceCoordAMD"); + break; + case TimeAMD: + { + 
string expr = "timeAMD()"; + emit_op(result_type, id, expr, true); + register_control_dependent_expression(id); + break; + } + + default: + statement("// unimplemented SPV AMD gcn shader op ", eop); + break; + } +} + +void CompilerGLSL::emit_subgroup_op(const Instruction &i) +{ + const uint32_t *ops = stream(i); + auto op = static_cast(i.op); + + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Can only use subgroup operations in Vulkan semantics."); + + switch (op) + { + case OpGroupNonUniformElect: + require_extension_internal("GL_KHR_shader_subgroup_basic"); + break; + + case OpGroupNonUniformBroadcast: + case OpGroupNonUniformBroadcastFirst: + case OpGroupNonUniformBallot: + case OpGroupNonUniformInverseBallot: + case OpGroupNonUniformBallotBitExtract: + case OpGroupNonUniformBallotBitCount: + case OpGroupNonUniformBallotFindLSB: + case OpGroupNonUniformBallotFindMSB: + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + break; + + case OpGroupNonUniformShuffle: + case OpGroupNonUniformShuffleXor: + require_extension_internal("GL_KHR_shader_subgroup_shuffle"); + break; + + case OpGroupNonUniformShuffleUp: + case OpGroupNonUniformShuffleDown: + require_extension_internal("GL_KHR_shader_subgroup_shuffle_relative"); + break; + + case OpGroupNonUniformAll: + case OpGroupNonUniformAny: + case OpGroupNonUniformAllEqual: + require_extension_internal("GL_KHR_shader_subgroup_vote"); + break; + + case OpGroupNonUniformFAdd: + case OpGroupNonUniformFMul: + case OpGroupNonUniformFMin: + case OpGroupNonUniformFMax: + case OpGroupNonUniformIAdd: + case OpGroupNonUniformIMul: + case OpGroupNonUniformSMin: + case OpGroupNonUniformSMax: + case OpGroupNonUniformUMin: + case OpGroupNonUniformUMax: + case OpGroupNonUniformBitwiseAnd: + case OpGroupNonUniformBitwiseOr: + case OpGroupNonUniformBitwiseXor: + { + auto operation = static_cast(ops[3]); + if (operation == GroupOperationClusteredReduce) + { + require_extension_internal("GL_KHR_shader_subgroup_clustered"); + } + else if (operation == GroupOperationExclusiveScan || operation == GroupOperationInclusiveScan || + operation == GroupOperationReduce) + { + require_extension_internal("GL_KHR_shader_subgroup_arithmetic"); + } + else + SPIRV_CROSS_THROW("Invalid group operation."); + break; + } + + case OpGroupNonUniformQuadSwap: + case OpGroupNonUniformQuadBroadcast: + require_extension_internal("GL_KHR_shader_subgroup_quad"); + break; + + default: + SPIRV_CROSS_THROW("Invalid opcode for subgroup."); + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto scope = static_cast(get(ops[2]).scalar()); + if (scope != ScopeSubgroup) + SPIRV_CROSS_THROW("Only subgroup scope is supported."); + + switch (op) + { + case OpGroupNonUniformElect: + emit_op(result_type, id, "subgroupElect()", true); + break; + + case OpGroupNonUniformBroadcast: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupBroadcast"); + break; + + case OpGroupNonUniformBroadcastFirst: + emit_unary_func_op(result_type, id, ops[3], "subgroupBroadcastFirst"); + break; + + case OpGroupNonUniformBallot: + emit_unary_func_op(result_type, id, ops[3], "subgroupBallot"); + break; + + case OpGroupNonUniformInverseBallot: + emit_unary_func_op(result_type, id, ops[3], "subgroupInverseBallot"); + break; + + case OpGroupNonUniformBallotBitExtract: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupBallotBitExtract"); + break; + + case OpGroupNonUniformBallotFindLSB: + emit_unary_func_op(result_type, id, ops[3], "subgroupBallotFindLSB"); + break; + + case 
OpGroupNonUniformBallotFindMSB: + emit_unary_func_op(result_type, id, ops[3], "subgroupBallotFindMSB"); + break; + + case OpGroupNonUniformBallotBitCount: + { + auto operation = static_cast(ops[3]); + if (operation == GroupOperationReduce) + emit_unary_func_op(result_type, id, ops[4], "subgroupBallotBitCount"); + else if (operation == GroupOperationInclusiveScan) + emit_unary_func_op(result_type, id, ops[4], "subgroupBallotInclusiveBitCount"); + else if (operation == GroupOperationExclusiveScan) + emit_unary_func_op(result_type, id, ops[4], "subgroupBallotExclusiveBitCount"); + else + SPIRV_CROSS_THROW("Invalid BitCount operation."); + break; + } + + case OpGroupNonUniformShuffle: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffle"); + break; + + case OpGroupNonUniformShuffleXor: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffleXor"); + break; + + case OpGroupNonUniformShuffleUp: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffleUp"); + break; + + case OpGroupNonUniformShuffleDown: + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupShuffleDown"); + break; + + case OpGroupNonUniformAll: + emit_unary_func_op(result_type, id, ops[3], "subgroupAll"); + break; + + case OpGroupNonUniformAny: + emit_unary_func_op(result_type, id, ops[3], "subgroupAny"); + break; + + case OpGroupNonUniformAllEqual: + emit_unary_func_op(result_type, id, ops[3], "subgroupAllEqual"); + break; + + // clang-format off +#define GLSL_GROUP_OP(op, glsl_op) \ +case OpGroupNonUniform##op: \ + { \ + auto operation = static_cast(ops[3]); \ + if (operation == GroupOperationReduce) \ + emit_unary_func_op(result_type, id, ops[4], "subgroup" #glsl_op); \ + else if (operation == GroupOperationInclusiveScan) \ + emit_unary_func_op(result_type, id, ops[4], "subgroupInclusive" #glsl_op); \ + else if (operation == GroupOperationExclusiveScan) \ + emit_unary_func_op(result_type, id, ops[4], "subgroupExclusive" #glsl_op); \ + else if (operation == GroupOperationClusteredReduce) \ + emit_binary_func_op(result_type, id, ops[4], ops[5], "subgroupClustered" #glsl_op); \ + else \ + SPIRV_CROSS_THROW("Invalid group operation."); \ + break; \ + } + GLSL_GROUP_OP(FAdd, Add) + GLSL_GROUP_OP(FMul, Mul) + GLSL_GROUP_OP(FMin, Min) + GLSL_GROUP_OP(FMax, Max) + GLSL_GROUP_OP(IAdd, Add) + GLSL_GROUP_OP(IMul, Mul) + GLSL_GROUP_OP(SMin, Min) + GLSL_GROUP_OP(SMax, Max) + GLSL_GROUP_OP(UMin, Min) + GLSL_GROUP_OP(UMax, Max) + GLSL_GROUP_OP(BitwiseAnd, And) + GLSL_GROUP_OP(BitwiseOr, Or) + GLSL_GROUP_OP(BitwiseXor, Xor) +#undef GLSL_GROUP_OP + // clang-format on + + case OpGroupNonUniformQuadSwap: + { + uint32_t direction = get(ops[4]).scalar(); + if (direction == 0) + emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapHorizontal"); + else if (direction == 1) + emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapVertical"); + else if (direction == 2) + emit_unary_func_op(result_type, id, ops[3], "subgroupQuadSwapDiagonal"); + else + SPIRV_CROSS_THROW("Invalid quad swap direction."); + break; + } + + case OpGroupNonUniformQuadBroadcast: + { + emit_binary_func_op(result_type, id, ops[3], ops[4], "subgroupQuadBroadcast"); + break; + } + + default: + SPIRV_CROSS_THROW("Invalid opcode for subgroup."); + } + + register_control_dependent_expression(id); +} + +string CompilerGLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) +{ + if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Short) + return type_to_glsl(out_type); + else 
+	else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int)
+		return type_to_glsl(out_type);
+	else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Int64)
+		return type_to_glsl(out_type);
+	else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Float)
+		return "floatBitsToUint";
+	else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::UShort)
+		return type_to_glsl(out_type);
+	else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::UInt)
+		return type_to_glsl(out_type);
+	else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::UInt64)
+		return type_to_glsl(out_type);
+	else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float)
+		return "floatBitsToInt";
+	else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt)
+		return "uintBitsToFloat";
+	else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int)
+		return "intBitsToFloat";
+	else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double)
+		return "doubleBitsToInt64";
+	else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double)
+		return "doubleBitsToUint64";
+	else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64)
+		return "int64BitsToDouble";
+	else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64)
+		return "uint64BitsToDouble";
+	else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Half)
+		return "float16BitsToInt16";
+	else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Half)
+		return "float16BitsToUint16";
+	else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::Short)
+		return "int16BitsToFloat16";
+	else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UShort)
+		return "uint16BitsToFloat16";
+	else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::UInt && in_type.vecsize == 2)
+		return "packUint2x32";
+	else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1)
+		return "unpackFloat2x16";
+	else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half && in_type.vecsize == 2)
+		return "packFloat2x16";
+	else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Short && in_type.vecsize == 2)
+		return "packInt2x16";
+	else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Int && in_type.vecsize == 1)
+		return "unpackInt2x16";
+	else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::UShort && in_type.vecsize == 2)
+		return "packUint2x16";
+	else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1)
+		return "unpackUint2x16";
+	else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Short && in_type.vecsize == 4)
+		return "packInt4x16";
+	else if (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Int64 && in_type.vecsize == 1)
+		return "unpackInt4x16";
+	else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::UShort && in_type.vecsize == 4)
+		return "packUint4x16";
+	else if (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::UInt64 && in_type.vecsize == 1)
+		return "unpackUint4x16";
+	else
+		return "";
+}
+
+string CompilerGLSL::bitcast_glsl(const SPIRType &result_type, uint32_t argument)
+{
+	auto op = bitcast_glsl_op(result_type, expression_type(argument));
+	if (op.empty())
+		return to_enclosed_expression(argument);
+	else
+		return join(op, "(", to_expression(argument), ")");
+}
+
+std::string CompilerGLSL::bitcast_expression(SPIRType::BaseType target_type, uint32_t arg)
+{
+	auto expr = to_expression(arg);
+	auto &src_type = expression_type(arg);
+	if (src_type.basetype != target_type)
+	{
+		auto target = src_type;
+		target.basetype = target_type;
+		expr = join(bitcast_glsl_op(target, src_type), "(", expr, ")");
+	}
+
+	return expr;
+}
+
+std::string CompilerGLSL::bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type,
+                                             const std::string &expr)
+{
+	if (target_type.basetype == expr_type)
+		return expr;
+
+	auto src_type = target_type;
+	src_type.basetype = expr_type;
+	return join(bitcast_glsl_op(target_type, src_type), "(", expr, ")");
+}
+
+string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage)
+{
+	switch (builtin)
+	{
+	case BuiltInPosition:
+		return "gl_Position";
+	case BuiltInPointSize:
+		return "gl_PointSize";
+	case BuiltInClipDistance:
+		return "gl_ClipDistance";
+	case BuiltInCullDistance:
+		return "gl_CullDistance";
+	case BuiltInVertexId:
+		if (options.vulkan_semantics)
+			SPIRV_CROSS_THROW(
+			    "Cannot implement gl_VertexID in Vulkan GLSL. This shader was created with GL semantics.");
+		return "gl_VertexID";
+	case BuiltInInstanceId:
+		if (options.vulkan_semantics)
+			SPIRV_CROSS_THROW(
+			    "Cannot implement gl_InstanceID in Vulkan GLSL. This shader was created with GL semantics.");
+		return "gl_InstanceID";
+	case BuiltInVertexIndex:
+		if (options.vulkan_semantics)
+			return "gl_VertexIndex";
+		else
+			return "gl_VertexID"; // gl_VertexID already has the base offset applied.
+	case BuiltInInstanceIndex:
+		if (options.vulkan_semantics)
+			return "gl_InstanceIndex";
+		else if (options.vertex.support_nonzero_base_instance)
+			return "(gl_InstanceID + SPIRV_Cross_BaseInstance)"; // ... but not gl_InstanceID.
+ else + return "gl_InstanceID"; + case BuiltInPrimitiveId: + if (storage == StorageClassInput && get_entry_point().model == ExecutionModelGeometry) + return "gl_PrimitiveIDIn"; + else + return "gl_PrimitiveID"; + case BuiltInInvocationId: + return "gl_InvocationID"; + case BuiltInLayer: + return "gl_Layer"; + case BuiltInViewportIndex: + return "gl_ViewportIndex"; + case BuiltInTessLevelOuter: + return "gl_TessLevelOuter"; + case BuiltInTessLevelInner: + return "gl_TessLevelInner"; + case BuiltInTessCoord: + return "gl_TessCoord"; + case BuiltInFragCoord: + return "gl_FragCoord"; + case BuiltInPointCoord: + return "gl_PointCoord"; + case BuiltInFrontFacing: + return "gl_FrontFacing"; + case BuiltInFragDepth: + return "gl_FragDepth"; + case BuiltInNumWorkgroups: + return "gl_NumWorkGroups"; + case BuiltInWorkgroupSize: + return "gl_WorkGroupSize"; + case BuiltInWorkgroupId: + return "gl_WorkGroupID"; + case BuiltInLocalInvocationId: + return "gl_LocalInvocationID"; + case BuiltInGlobalInvocationId: + return "gl_GlobalInvocationID"; + case BuiltInLocalInvocationIndex: + return "gl_LocalInvocationIndex"; + case BuiltInHelperInvocation: + return "gl_HelperInvocation"; + case BuiltInBaseVertex: + if (options.es) + SPIRV_CROSS_THROW("BaseVertex not supported in ES profile."); + if (options.version < 460) + { + require_extension_internal("GL_ARB_shader_draw_parameters"); + return "gl_BaseVertexARB"; + } + return "gl_BaseVertex"; + case BuiltInBaseInstance: + if (options.es) + SPIRV_CROSS_THROW("BaseInstance not supported in ES profile."); + if (options.version < 460) + { + require_extension_internal("GL_ARB_shader_draw_parameters"); + return "gl_BaseInstanceARB"; + } + return "gl_BaseInstance"; + case BuiltInDrawIndex: + if (options.es) + SPIRV_CROSS_THROW("DrawIndex not supported in ES profile."); + if (options.version < 460) + { + require_extension_internal("GL_ARB_shader_draw_parameters"); + return "gl_DrawIDARB"; + } + return "gl_DrawID"; + + case BuiltInSampleId: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_sample_variables"); + if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("gl_SampleID not supported before GLSL 400."); + return "gl_SampleID"; + + case BuiltInSampleMask: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_sample_variables"); + if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("gl_SampleMask/gl_SampleMaskIn not supported before GLSL 400."); + + if (storage == StorageClassInput) + return "gl_SampleMaskIn"; + else + return "gl_SampleMask"; + + case BuiltInSamplePosition: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_sample_variables"); + if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("gl_SamplePosition not supported before GLSL 400."); + return "gl_SamplePosition"; + + case BuiltInViewIndex: + if (options.vulkan_semantics) + { + require_extension_internal("GL_EXT_multiview"); + return "gl_ViewIndex"; + } + else + { + require_extension_internal("GL_OVR_multiview2"); + return "gl_ViewID_OVR"; + } + + case BuiltInNumSubgroups: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_NumSubgroups"; + + case BuiltInSubgroupId: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_SubgroupID"; + + case BuiltInSubgroupSize: + 
if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_SubgroupSize"; + + case BuiltInSubgroupLocalInvocationId: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + return "gl_SubgroupInvocationID"; + + case BuiltInSubgroupEqMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupEqMask"; + + case BuiltInSubgroupGeMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupGeMask"; + + case BuiltInSubgroupGtMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupGtMask"; + + case BuiltInSubgroupLeMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupLeMask"; + + case BuiltInSubgroupLtMask: + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Need Vulkan semantics for subgroup."); + require_extension_internal("GL_KHR_shader_subgroup_ballot"); + return "gl_SubgroupLtMask"; + + default: + return join("gl_BuiltIn_", convert_to_string(builtin)); + } +} + +const char *CompilerGLSL::index_to_swizzle(uint32_t index) +{ + switch (index) + { + case 0: + return "x"; + case 1: + return "y"; + case 2: + return "z"; + case 3: + return "w"; + default: + SPIRV_CROSS_THROW("Swizzle index out of range"); + } +} + +string CompilerGLSL::access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, + bool index_is_literal, bool chain_only, bool ptr_chain, + AccessChainMeta *meta, bool register_expression_read) +{ + string expr; + if (!chain_only) + expr = to_enclosed_expression(base, register_expression_read); + + // Start traversing type hierarchy at the proper non-pointer types, + // but keep type_id referencing the original pointer for use below. + uint32_t type_id = expression_type_id(base); + const auto *type = &get_pointee_type(type_id); + + bool access_chain_is_arrayed = expr.find_first_of('[') != string::npos; + bool row_major_matrix_needs_conversion = is_non_native_row_major_matrix(base); + bool is_packed = has_decoration(base, DecorationCPacked); + bool is_invariant = has_decoration(base, DecorationInvariant); + bool pending_array_enclose = false; + bool dimension_flatten = false; + + for (uint32_t i = 0; i < count; i++) + { + uint32_t index = indices[i]; + + const auto append_index = [&]() { + expr += "["; + if (index_is_literal) + expr += convert_to_string(index); + else + expr += to_expression(index, register_expression_read); + expr += "]"; + }; + + // Pointer chains + if (ptr_chain && i == 0) + { + // If we are flattening multidimensional arrays, only create opening bracket on first + // array index. + if (options.flatten_multidimensional_arrays) + { + dimension_flatten = type->array.size() >= 1; + pending_array_enclose = dimension_flatten; + if (pending_array_enclose) + expr += "["; + } + + if (options.flatten_multidimensional_arrays && dimension_flatten) + { + // If we are flattening multidimensional arrays, do manual stride computation. 
+ if (index_is_literal) + expr += convert_to_string(index); + else + expr += to_enclosed_expression(index, register_expression_read); + + for (auto j = uint32_t(type->array.size()); j; j--) + { + expr += " * "; + expr += enclose_expression(to_array_size(*type, j - 1)); + } + + if (type->array.empty()) + pending_array_enclose = false; + else + expr += " + "; + + if (!pending_array_enclose) + expr += "]"; + } + else + { + append_index(); + } + + access_chain_is_arrayed = true; + } + // Arrays + else if (!type->array.empty()) + { + // If we are flattening multidimensional arrays, only create opening bracket on first + // array index. + if (options.flatten_multidimensional_arrays && !pending_array_enclose) + { + dimension_flatten = type->array.size() > 1; + pending_array_enclose = dimension_flatten; + if (pending_array_enclose) + expr += "["; + } + + assert(type->parent_type); + + auto *var = maybe_get(base); + if (backend.force_gl_in_out_block && i == 0 && var && is_builtin_variable(*var) && + !has_decoration(type->self, DecorationBlock)) + { + // This deals with scenarios for tesc/geom where arrays of gl_Position[] are declared. + // Normally, these variables live in blocks when compiled from GLSL, + // but HLSL seems to just emit straight arrays here. + // We must pretend this access goes through gl_in/gl_out arrays + // to be able to access certain builtins as arrays. + auto builtin = ir.meta[base].decoration.builtin_type; + switch (builtin) + { + // case BuiltInCullDistance: // These are already arrays, need to figure out rules for these in tess/geom. + // case BuiltInClipDistance: + case BuiltInPosition: + case BuiltInPointSize: + if (var->storage == StorageClassInput) + expr = join("gl_in[", to_expression(index, register_expression_read), "].", expr); + else if (var->storage == StorageClassOutput) + expr = join("gl_out[", to_expression(index, register_expression_read), "].", expr); + else + append_index(); + break; + + default: + append_index(); + break; + } + } + else if (options.flatten_multidimensional_arrays && dimension_flatten) + { + // If we are flattening multidimensional arrays, do manual stride computation. + auto &parent_type = get(type->parent_type); + + if (index_is_literal) + expr += convert_to_string(index); + else + expr += to_enclosed_expression(index, register_expression_read); + + for (auto j = uint32_t(parent_type.array.size()); j; j--) + { + expr += " * "; + expr += enclose_expression(to_array_size(parent_type, j - 1)); + } + + if (parent_type.array.empty()) + pending_array_enclose = false; + else + expr += " + "; + + if (!pending_array_enclose) + expr += "]"; + } + else + { + append_index(); + } + + type_id = type->parent_type; + type = &get(type_id); + + access_chain_is_arrayed = true; + } + // For structs, the index refers to a constant, which indexes into the members. + // We also check if this member is a builtin, since we then replace the entire expression with the builtin one. + else if (type->basetype == SPIRType::Struct) + { + if (!index_is_literal) + index = get(index).scalar(); + + if (index >= type->member_types.size()) + SPIRV_CROSS_THROW("Member index is out of bounds!"); + + BuiltIn builtin; + if (is_member_builtin(*type, index, &builtin)) + { + // FIXME: We rely here on OpName on gl_in/gl_out to make this work properly. + // To make this properly work by omitting all OpName opcodes, + // we need to infer gl_in or gl_out based on the builtin, and stage. 
+ if (access_chain_is_arrayed) + { + expr += "."; + expr += builtin_to_glsl(builtin, type->storage); + } + else + expr = builtin_to_glsl(builtin, type->storage); + } + else + { + // If the member has a qualified name, use it as the entire chain + string qual_mbr_name = get_member_qualified_name(type_id, index); + if (!qual_mbr_name.empty()) + expr = qual_mbr_name; + else + expr += to_member_reference(base, *type, index, ptr_chain); + } + + if (has_member_decoration(type->self, index, DecorationInvariant)) + is_invariant = true; + + is_packed = member_is_packed_type(*type, index); + row_major_matrix_needs_conversion = member_is_non_native_row_major_matrix(*type, index); + type = &get(type->member_types[index]); + } + // Matrix -> Vector + else if (type->columns > 1) + { + if (row_major_matrix_needs_conversion) + { + expr = convert_row_major_matrix(expr, *type, is_packed); + row_major_matrix_needs_conversion = false; + is_packed = false; + } + + expr += "["; + if (index_is_literal) + expr += convert_to_string(index); + else + expr += to_expression(index, register_expression_read); + expr += "]"; + + type_id = type->parent_type; + type = &get(type_id); + } + // Vector -> Scalar + else if (type->vecsize > 1) + { + if (index_is_literal && !is_packed) + { + expr += "."; + expr += index_to_swizzle(index); + } + else if (ir.ids[index].get_type() == TypeConstant && !is_packed) + { + auto &c = get(index); + expr += "."; + expr += index_to_swizzle(c.scalar()); + } + else if (index_is_literal) + { + // For packed vectors, we can only access them as an array, not by swizzle. + expr += join("[", index, "]"); + } + else + { + expr += "["; + expr += to_expression(index, register_expression_read); + expr += "]"; + } + + is_packed = false; + type_id = type->parent_type; + type = &get(type_id); + } + else if (!backend.allow_truncated_access_chain) + SPIRV_CROSS_THROW("Cannot subdivide a scalar value!"); + } + + if (pending_array_enclose) + { + SPIRV_CROSS_THROW("Flattening of multidimensional arrays were enabled, " + "but the access chain was terminated in the middle of a multidimensional array. 
" + "This is not supported."); + } + + if (meta) + { + meta->need_transpose = row_major_matrix_needs_conversion; + meta->storage_is_packed = is_packed; + meta->storage_is_invariant = is_invariant; + } + + return expr; +} + +string CompilerGLSL::to_flattened_struct_member(const SPIRVariable &var, uint32_t index) +{ + auto &type = get(var.basetype); + return sanitize_underscores(join(to_name(var.self), "_", to_member_name(type, index))); +} + +string CompilerGLSL::access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, + AccessChainMeta *meta, bool ptr_chain) +{ + if (flattened_buffer_blocks.count(base)) + { + uint32_t matrix_stride = 0; + bool need_transpose = false; + flattened_access_chain_offset(expression_type(base), indices, count, 0, 16, &need_transpose, &matrix_stride, + ptr_chain); + + if (meta) + { + meta->need_transpose = target_type.columns > 1 && need_transpose; + meta->storage_is_packed = false; + } + + return flattened_access_chain(base, indices, count, target_type, 0, matrix_stride, need_transpose); + } + else if (flattened_structs.count(base) && count > 0) + { + auto chain = access_chain_internal(base, indices, count, false, true, ptr_chain, nullptr, false).substr(1); + if (meta) + { + meta->need_transpose = false; + meta->storage_is_packed = false; + } + return sanitize_underscores(join(to_name(base), "_", chain)); + } + else + { + return access_chain_internal(base, indices, count, false, false, ptr_chain, meta, false); + } +} + +string CompilerGLSL::load_flattened_struct(SPIRVariable &var) +{ + auto expr = type_to_glsl_constructor(get(var.basetype)); + expr += '('; + + auto &type = get(var.basetype); + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + if (i) + expr += ", "; + + // Flatten the varyings. + // Apply name transformation for flattened I/O blocks. + expr += to_flattened_struct_member(var, i); + } + expr += ')'; + return expr; +} + +void CompilerGLSL::store_flattened_struct(SPIRVariable &var, uint32_t value) +{ + // We're trying to store a structure which has been flattened. + // Need to copy members one by one. + auto rhs = to_expression(value); + + // Store result locally. + // Since we're declaring a variable potentially multiple times here, + // store the variable in an isolated scope. + begin_scope(); + statement(variable_decl_function_local(var), " = ", rhs, ";"); + + auto &type = get(var.basetype); + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + // Flatten the varyings. + // Apply name transformation for flattened I/O blocks. 
+
+		auto lhs = sanitize_underscores(join(to_name(var.self), "_", to_member_name(type, i)));
+		rhs = join(to_name(var.self), ".", to_member_name(type, i));
+		statement(lhs, " = ", rhs, ";");
+	}
+	end_scope();
+}
+
+std::string CompilerGLSL::flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count,
+                                                 const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride,
+                                                 bool need_transpose)
+{
+	if (!target_type.array.empty())
+		SPIRV_CROSS_THROW("Access chains that result in an array can not be flattened");
+	else if (target_type.basetype == SPIRType::Struct)
+		return flattened_access_chain_struct(base, indices, count, target_type, offset);
+	else if (target_type.columns > 1)
+		return flattened_access_chain_matrix(base, indices, count, target_type, offset, matrix_stride, need_transpose);
+	else
+		return flattened_access_chain_vector(base, indices, count, target_type, offset, matrix_stride, need_transpose);
+}
+
+std::string CompilerGLSL::flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count,
+                                                        const SPIRType &target_type, uint32_t offset)
+{
+	std::string expr;
+
+	expr += type_to_glsl_constructor(target_type);
+	expr += "(";
+
+	for (uint32_t i = 0; i < uint32_t(target_type.member_types.size()); ++i)
+	{
+		if (i != 0)
+			expr += ", ";
+
+		const SPIRType &member_type = get<SPIRType>(target_type.member_types[i]);
+		uint32_t member_offset = type_struct_member_offset(target_type, i);
+
+		// The access chain terminates at the struct, so we need to find matrix strides and row-major information
+		// ahead of time.
+		bool need_transpose = false;
+		uint32_t matrix_stride = 0;
+		if (member_type.columns > 1)
+		{
+			need_transpose = combined_decoration_for_member(target_type, i).get(DecorationRowMajor);
+			matrix_stride = type_struct_member_matrix_stride(target_type, i);
+		}
+
+		auto tmp = flattened_access_chain(base, indices, count, member_type, offset + member_offset, matrix_stride,
+		                                  need_transpose);
+
+		// Cannot forward transpositions, so resolve them here.
+		if (need_transpose)
+			expr += convert_row_major_matrix(tmp, member_type, false);
+		else
+			expr += tmp;
+	}
+
+	expr += ")";
+
+	return expr;
+}
+
+std::string CompilerGLSL::flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count,
+                                                        const SPIRType &target_type, uint32_t offset,
+                                                        uint32_t matrix_stride, bool need_transpose)
+{
+	assert(matrix_stride);
+	SPIRType tmp_type = target_type;
+	if (need_transpose)
+		swap(tmp_type.vecsize, tmp_type.columns);
+
+	std::string expr;
+
+	expr += type_to_glsl_constructor(tmp_type);
+	expr += "(";
+
+	for (uint32_t i = 0; i < tmp_type.columns; i++)
+	{
+		if (i != 0)
+			expr += ", ";
+
+		expr += flattened_access_chain_vector(base, indices, count, tmp_type, offset + i * matrix_stride, matrix_stride,
+		                                      /* need_transpose= */ false);
+	}
+
+	expr += ")";
+
+	return expr;
+}
+
+std::string CompilerGLSL::flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count,
+                                                        const SPIRType &target_type, uint32_t offset,
+                                                        uint32_t matrix_stride, bool need_transpose)
+{
+	auto result = flattened_access_chain_offset(expression_type(base), indices, count, offset, 16);
+
+	auto buffer_name = to_name(expression_type(base).self);
+
+	if (need_transpose)
+	{
+		std::string expr;
+
+		if (target_type.vecsize > 1)
+		{
+			expr += type_to_glsl_constructor(target_type);
+			expr += "(";
+		}
+
+		for (uint32_t i = 0; i < target_type.vecsize; ++i)
+		{
+			if (i != 0)
+				expr += ", ";
+
+			uint32_t component_offset = result.second + i * matrix_stride;
+
+			assert(component_offset % (target_type.width / 8) == 0);
+			uint32_t index = component_offset / (target_type.width / 8);
+
+			expr += buffer_name;
+			expr += "[";
+			expr += result.first; // this is a series of N1 * k1 + N2 * k2 + ... that is either empty or ends with a +
+			expr += convert_to_string(index / 4);
+			expr += "]";
+
+			expr += vector_swizzle(1, index % 4);
+		}
+
+		if (target_type.vecsize > 1)
+		{
+			expr += ")";
+		}
+
+		return expr;
+	}
+	else
+	{
+		assert(result.second % (target_type.width / 8) == 0);
+		uint32_t index = result.second / (target_type.width / 8);
+
+		std::string expr;
+
+		expr += buffer_name;
+		expr += "[";
+		expr += result.first; // this is a series of N1 * k1 + N2 * k2 + ... that is either empty or ends with a +
+		expr += convert_to_string(index / 4);
+		expr += "]";
+
+		expr += vector_swizzle(target_type.vecsize, index % 4);
+
+		return expr;
+	}
+}
+
+std::pair<std::string, uint32_t> CompilerGLSL::flattened_access_chain_offset(
+    const SPIRType &basetype, const uint32_t *indices, uint32_t count, uint32_t offset, uint32_t word_stride,
+    bool *need_transpose, uint32_t *out_matrix_stride, bool ptr_chain)
+{
+	// Start traversing type hierarchy at the proper non-pointer types.
+	const auto *type = &get_pointee_type(basetype);
+
+	// This holds the type of the current pointer which we are traversing through.
+	// We always start out from a struct type which is the block.
+	// This is primarily used to reflect the array strides and matrix strides later.
+	// For the first access chain index, type_id won't be needed, so just keep it as 0, it will be set
+	// accordingly as members of structs are accessed.
+	assert(type->basetype == SPIRType::Struct);
+	uint32_t type_id = 0;
+
+	std::string expr;
+
+	// Inherit matrix information in case we are access chaining a vector which might have come from a row major layout.
+	bool row_major_matrix_needs_conversion = need_transpose ? *need_transpose : false;
+	uint32_t matrix_stride = out_matrix_stride ?
*out_matrix_stride : 0; + + for (uint32_t i = 0; i < count; i++) + { + uint32_t index = indices[i]; + + // Pointers + if (ptr_chain && i == 0) + { + // Here, the pointer type will be decorated with an array stride. + uint32_t array_stride = get_decoration(basetype.self, DecorationArrayStride); + if (!array_stride) + SPIRV_CROSS_THROW("SPIR-V does not define ArrayStride for buffer block."); + + auto *constant = maybe_get(index); + if (constant) + { + // Constant array access. + offset += constant->scalar() * array_stride; + } + else + { + // Dynamic array access. + if (array_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Array stride for dynamic indexing must be divisible by the size of a 4-component vector. " + "Likely culprit here is a float or vec2 array inside a push constant block which is std430. " + "This cannot be flattened. Try using std140 layout instead."); + } + + expr += to_enclosed_expression(index); + expr += " * "; + expr += convert_to_string(array_stride / word_stride); + expr += " + "; + } + // Type ID is unchanged. + } + // Arrays + else if (!type->array.empty()) + { + // Here, the type_id will be a type ID for the array type itself. + uint32_t array_stride = get_decoration(type_id, DecorationArrayStride); + if (!array_stride) + SPIRV_CROSS_THROW("SPIR-V does not define ArrayStride for buffer block."); + + auto *constant = maybe_get(index); + if (constant) + { + // Constant array access. + offset += constant->scalar() * array_stride; + } + else + { + // Dynamic array access. + if (array_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Array stride for dynamic indexing must be divisible by the size of a 4-component vector. " + "Likely culprit here is a float or vec2 array inside a push constant block which is std430. " + "This cannot be flattened. Try using std140 layout instead."); + } + + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(array_stride / word_stride); + expr += " + "; + } + + uint32_t parent_type = type->parent_type; + type = &get(parent_type); + type_id = parent_type; + + // Type ID now refers to the array type with one less dimension. + } + // For structs, the index refers to a constant, which indexes into the members. + // We also check if this member is a builtin, since we then replace the entire expression with the builtin one. + else if (type->basetype == SPIRType::Struct) + { + index = get(index).scalar(); + + if (index >= type->member_types.size()) + SPIRV_CROSS_THROW("Member index is out of bounds!"); + + offset += type_struct_member_offset(*type, index); + type_id = type->member_types[index]; + + auto &struct_type = *type; + type = &get(type->member_types[index]); + + if (type->columns > 1) + { + matrix_stride = type_struct_member_matrix_stride(struct_type, index); + row_major_matrix_needs_conversion = + combined_decoration_for_member(struct_type, index).get(DecorationRowMajor); + } + else + row_major_matrix_needs_conversion = false; + } + // Matrix -> Vector + else if (type->columns > 1) + { + auto *constant = maybe_get(index); + if (constant) + { + index = get(index).scalar(); + offset += index * (row_major_matrix_needs_conversion ? (type->width / 8) : matrix_stride); + } + else + { + uint32_t indexing_stride = row_major_matrix_needs_conversion ? (type->width / 8) : matrix_stride; + // Dynamic array access. + if (indexing_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Matrix stride for dynamic indexing must be divisible by the size of a 4-component vector. 
" + "Likely culprit here is a row-major matrix being accessed dynamically. " + "This cannot be flattened. Try using std140 layout instead."); + } + + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(indexing_stride / word_stride); + expr += " + "; + } + + uint32_t parent_type = type->parent_type; + type = &get(type->parent_type); + type_id = parent_type; + } + // Vector -> Scalar + else if (type->vecsize > 1) + { + auto *constant = maybe_get(index); + if (constant) + { + index = get(index).scalar(); + offset += index * (row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8)); + } + else + { + uint32_t indexing_stride = row_major_matrix_needs_conversion ? matrix_stride : (type->width / 8); + + // Dynamic array access. + if (indexing_stride % word_stride) + { + SPIRV_CROSS_THROW( + "Stride for dynamic vector indexing must be divisible by the size of a 4-component vector. " + "This cannot be flattened in legacy targets."); + } + + expr += to_enclosed_expression(index, false); + expr += " * "; + expr += convert_to_string(indexing_stride / word_stride); + expr += " + "; + } + + uint32_t parent_type = type->parent_type; + type = &get(type->parent_type); + type_id = parent_type; + } + else + SPIRV_CROSS_THROW("Cannot subdivide a scalar value!"); + } + + if (need_transpose) + *need_transpose = row_major_matrix_needs_conversion; + if (out_matrix_stride) + *out_matrix_stride = matrix_stride; + + return std::make_pair(expr, offset); +} + +bool CompilerGLSL::should_dereference(uint32_t id) +{ + const auto &type = expression_type(id); + // Non-pointer expressions don't need to be dereferenced. + if (!type.pointer) + return false; + + // Handles shouldn't be dereferenced either. + if (!expression_is_lvalue(id)) + return false; + + // If id is a variable but not a phi variable, we should not dereference it. + if (auto *var = maybe_get(id)) + return var->phi_variable; + + // If id is an access chain, we should not dereference it. + if (auto *expr = maybe_get(id)) + return !expr->access_chain; + + // Otherwise, we should dereference this pointer expression. + return true; +} + +bool CompilerGLSL::should_forward(uint32_t id) +{ + // If id is a variable we will try to forward it regardless of force_temporary check below + // This is important because otherwise we'll get local sampler copies (highp sampler2D foo = bar) that are invalid in OpenGL GLSL + auto *var = maybe_get(id); + if (var && var->forwardable) + return true; + + // For debugging emit temporary variables for all expressions + if (options.force_temporary) + return false; + + // Immutable expression can always be forwarded. + if (is_immutable(id)) + return true; + + return false; +} + +void CompilerGLSL::track_expression_read(uint32_t id) +{ + switch (ir.ids[id].get_type()) + { + case TypeExpression: + { + auto &e = get(id); + for (auto implied_read : e.implied_read_expressions) + track_expression_read(implied_read); + break; + } + + case TypeAccessChain: + { + auto &e = get(id); + for (auto implied_read : e.implied_read_expressions) + track_expression_read(implied_read); + break; + } + + default: + break; + } + + // If we try to read a forwarded temporary more than once we will stamp out possibly complex code twice. + // In this case, it's better to just bind the complex expression to the temporary and read that temporary twice. 
+ if (expression_is_forwarded(id)) + { + auto &v = expression_usage_counts[id]; + v++; + + if (v >= 2) + { + //if (v == 2) + // fprintf(stderr, "ID %u was forced to temporary due to more than 1 expression use!\n", id); + + forced_temporaries.insert(id); + // Force a recompile after this pass to avoid forwarding this variable. + force_recompile = true; + } + } +} + +bool CompilerGLSL::args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure) +{ + if (forced_temporaries.find(id) != end(forced_temporaries)) + return false; + + for (uint32_t i = 0; i < num_args; i++) + if (!should_forward(args[i])) + return false; + + // We need to forward globals as well. + if (!pure) + { + for (auto global : global_variables) + if (!should_forward(global)) + return false; + for (auto aliased : aliased_variables) + if (!should_forward(aliased)) + return false; + } + + return true; +} + +void CompilerGLSL::register_impure_function_call() +{ + // Impure functions can modify globals and aliased variables, so invalidate them as well. + for (auto global : global_variables) + flush_dependees(get(global)); + for (auto aliased : aliased_variables) + flush_dependees(get(aliased)); +} + +void CompilerGLSL::register_call_out_argument(uint32_t id) +{ + register_write(id); + + auto *var = maybe_get(id); + if (var) + flush_variable_declaration(var->self); +} + +string CompilerGLSL::variable_decl_function_local(SPIRVariable &var) +{ + // These variables are always function local, + // so make sure we emit the variable without storage qualifiers. + // Some backends will inject custom variables locally in a function + // with a storage qualifier which is not function-local. + auto old_storage = var.storage; + var.storage = StorageClassFunction; + auto expr = variable_decl(var); + var.storage = old_storage; + return expr; +} + +void CompilerGLSL::flush_variable_declaration(uint32_t id) +{ + auto *var = maybe_get(id); + if (var && var->deferred_declaration) + { + statement(variable_decl_function_local(*var), ";"); + if (var->allocate_temporary_copy) + { + auto &type = get(var->basetype); + auto &flags = ir.meta[id].decoration.decoration_flags; + statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, join("_", id, "_copy")), + ";"); + } + var->deferred_declaration = false; + } +} + +bool CompilerGLSL::remove_duplicate_swizzle(string &op) +{ + auto pos = op.find_last_of('.'); + if (pos == string::npos || pos == 0) + return false; + + string final_swiz = op.substr(pos + 1, string::npos); + + if (backend.swizzle_is_function) + { + if (final_swiz.size() < 2) + return false; + + if (final_swiz.substr(final_swiz.size() - 2, string::npos) == "()") + final_swiz.erase(final_swiz.size() - 2, string::npos); + else + return false; + } + + // Check if final swizzle is of form .x, .xy, .xyz, .xyzw or similar. + // If so, and previous swizzle is of same length, + // we can drop the final swizzle altogether. + for (uint32_t i = 0; i < final_swiz.size(); i++) + { + static const char expected[] = { 'x', 'y', 'z', 'w' }; + if (i >= 4 || final_swiz[i] != expected[i]) + return false; + } + + auto prevpos = op.find_last_of('.', pos - 1); + if (prevpos == string::npos) + return false; + + prevpos++; + + // Make sure there are only swizzles here ... + for (auto i = prevpos; i < pos; i++) + { + if (op[i] < 'w' || op[i] > 'z') + { + // If swizzles are foo.xyz() like in C++ backend for example, check for that. 
+ if (backend.swizzle_is_function && i + 2 == pos && op[i] == '(' && op[i + 1] == ')') + break; + return false; + } + } + + // If original swizzle is large enough, just carve out the components we need. + // E.g. foobar.wyx.xy will turn into foobar.wy. + if (pos - prevpos >= final_swiz.size()) + { + op.erase(prevpos + final_swiz.size(), string::npos); + + // Add back the function call ... + if (backend.swizzle_is_function) + op += "()"; + } + return true; +} + +// Optimizes away vector swizzles where we have something like +// vec3 foo; +// foo.xyz <-- swizzle expression does nothing. +// This is a very common pattern after OpCompositeCombine. +bool CompilerGLSL::remove_unity_swizzle(uint32_t base, string &op) +{ + auto pos = op.find_last_of('.'); + if (pos == string::npos || pos == 0) + return false; + + string final_swiz = op.substr(pos + 1, string::npos); + + if (backend.swizzle_is_function) + { + if (final_swiz.size() < 2) + return false; + + if (final_swiz.substr(final_swiz.size() - 2, string::npos) == "()") + final_swiz.erase(final_swiz.size() - 2, string::npos); + else + return false; + } + + // Check if final swizzle is of form .x, .xy, .xyz, .xyzw or similar. + // If so, and previous swizzle is of same length, + // we can drop the final swizzle altogether. + for (uint32_t i = 0; i < final_swiz.size(); i++) + { + static const char expected[] = { 'x', 'y', 'z', 'w' }; + if (i >= 4 || final_swiz[i] != expected[i]) + return false; + } + + auto &type = expression_type(base); + + // Sanity checking ... + assert(type.columns == 1 && type.array.empty()); + + if (type.vecsize == final_swiz.size()) + op.erase(pos, string::npos); + return true; +} + +string CompilerGLSL::build_composite_combiner(uint32_t return_type, const uint32_t *elems, uint32_t length) +{ + uint32_t base = 0; + string op; + string subop; + + // Can only merge swizzles for vectors. + auto &type = get(return_type); + bool can_apply_swizzle_opt = type.basetype != SPIRType::Struct && type.array.empty() && type.columns == 1; + bool swizzle_optimization = false; + + for (uint32_t i = 0; i < length; i++) + { + auto *e = maybe_get(elems[i]); + + // If we're merging another scalar which belongs to the same base + // object, just merge the swizzles to avoid triggering more than 1 expression read as much as possible! + if (can_apply_swizzle_opt && e && e->base_expression && e->base_expression == base) + { + // Only supposed to be used for vector swizzle -> scalar. + assert(!e->expression.empty() && e->expression.front() == '.'); + subop += e->expression.substr(1, string::npos); + swizzle_optimization = true; + } + else + { + // We'll likely end up with duplicated swizzles, e.g. + // foobar.xyz.xyz from patterns like + // OpVectorShuffle + // OpCompositeExtract x 3 + // OpCompositeConstruct 3x + other scalar. + // Just modify op in-place. + if (swizzle_optimization) + { + if (backend.swizzle_is_function) + subop += "()"; + + // Don't attempt to remove unity swizzling if we managed to remove duplicate swizzles. + // The base "foo" might be vec4, while foo.xyz is vec3 (OpVectorShuffle) and looks like a vec3 due to the .xyz tacked on. + // We only want to remove the swizzles if we're certain that the resulting base will be the same vecsize. + // Essentially, we can only remove one set of swizzles, since that's what we have control over ... + // Case 1: + // foo.yxz.xyz: Duplicate swizzle kicks in, giving foo.yxz, we are done. + // foo.yxz was the result of OpVectorShuffle and we don't know the type of foo. 
+ // Case 2: + // foo.xyz: Duplicate swizzle won't kick in. + // If foo is vec3, we can remove xyz, giving just foo. + if (!remove_duplicate_swizzle(subop)) + remove_unity_swizzle(base, subop); + + // Strips away redundant parens if we created them during component extraction. + strip_enclosed_expression(subop); + swizzle_optimization = false; + op += subop; + } + else + op += subop; + + if (i) + op += ", "; + subop = to_expression(elems[i]); + } + + base = e ? e->base_expression : 0; + } + + if (swizzle_optimization) + { + if (backend.swizzle_is_function) + subop += "()"; + + if (!remove_duplicate_swizzle(subop)) + remove_unity_swizzle(base, subop); + // Strips away redundant parens if we created them during component extraction. + strip_enclosed_expression(subop); + } + + op += subop; + return op; +} + +bool CompilerGLSL::skip_argument(uint32_t id) const +{ + if (!combined_image_samplers.empty() || !options.vulkan_semantics) + { + auto &type = expression_type(id); + if (type.basetype == SPIRType::Sampler || (type.basetype == SPIRType::Image && type.image.sampled == 1)) + return true; + } + return false; +} + +bool CompilerGLSL::optimize_read_modify_write(const SPIRType &type, const string &lhs, const string &rhs) +{ + // Do this with strings because we have a very clear pattern we can check for and it avoids + // adding lots of special cases to the code emission. + if (rhs.size() < lhs.size() + 3) + return false; + + // Do not optimize matrices. They are a bit awkward to reason about in general + // (in which order does operation happen?), and it does not work on MSL anyways. + if (type.vecsize > 1 && type.columns > 1) + return false; + + auto index = rhs.find(lhs); + if (index != 0) + return false; + + // TODO: Shift operators, but it's not important for now. + auto op = rhs.find_first_of("+-/*%|&^", lhs.size() + 1); + if (op != lhs.size() + 1) + return false; + + // Check that the op is followed by space. This excludes && and ||. + if (rhs[op + 1] != ' ') + return false; + + char bop = rhs[op]; + auto expr = rhs.substr(lhs.size() + 3); + // Try to find increments and decrements. Makes it look neater as += 1, -= 1 is fairly rare to see in real code. + // Find some common patterns which are equivalent. + if ((bop == '+' || bop == '-') && (expr == "1" || expr == "uint(1)" || expr == "1u" || expr == "int(1u)")) + statement(lhs, bop, bop, ";"); + else + statement(lhs, " ", bop, "= ", expr, ";"); + return true; +} + +void CompilerGLSL::register_control_dependent_expression(uint32_t expr) +{ + if (forwarded_temporaries.find(expr) == end(forwarded_temporaries)) + return; + + assert(current_emitting_block); + current_emitting_block->invalidate_expressions.push_back(expr); +} + +void CompilerGLSL::emit_block_instructions(SPIRBlock &block) +{ + current_emitting_block = █ + for (auto &op : block.ops) + emit_instruction(op); + current_emitting_block = nullptr; +} + +void CompilerGLSL::disallow_forwarding_in_expression_chain(const SPIRExpression &expr) +{ + if (forwarded_temporaries.count(expr.self)) + { + forced_temporaries.insert(expr.self); + force_recompile = true; + } + + for (auto &dependent : expr.expression_dependencies) + disallow_forwarding_in_expression_chain(get(dependent)); +} + +void CompilerGLSL::handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id) +{ + // Variables or access chains marked invariant are complicated. We will need to make sure the code-gen leading up to + // this variable is consistent. 
The failure case for SPIRV-Cross is when an expression is forced to a temporary + // in one translation unit, but not another, e.g. due to multiple use of an expression. + // This causes variance despite the output variable being marked invariant, so the solution here is to force all dependent + // expressions to be temporaries. + // It is uncertain if this is enough to support invariant in all possible cases, but it should be good enough + // for all reasonable uses of invariant. + if (!has_decoration(store_id, DecorationInvariant)) + return; + + auto *expr = maybe_get(value_id); + if (!expr) + return; + + disallow_forwarding_in_expression_chain(*expr); +} + +void CompilerGLSL::emit_instruction(const Instruction &instruction) +{ + auto ops = stream(instruction); + auto opcode = static_cast(instruction.op); + uint32_t length = instruction.length; + +#define GLSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op) +#define GLSL_BOP_CAST(op, type) \ + emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, glsl_opcode_is_sign_invariant(opcode)) +#define GLSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op) +#define GLSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op) +#define GLSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op) +#define GLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define GLSL_BFOP_CAST(op, type) \ + emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, glsl_opcode_is_sign_invariant(opcode)) +#define GLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define GLSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], #op) + + switch (opcode) + { + // Dealing with memory + case OpLoad: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + + flush_variable_declaration(ptr); + + // If we're loading from memory that cannot be changed by the shader, + // just forward the expression directly to avoid needless temporaries. + // If an expression is mutable and forwardable, we speculate that it is immutable. + bool forward = should_forward(ptr) && forced_temporaries.find(id) == end(forced_temporaries); + + // If loading a non-native row-major matrix, mark the expression as need_transpose. + bool need_transpose = false; + bool old_need_transpose = false; + + auto *ptr_expression = maybe_get(ptr); + if (ptr_expression && ptr_expression->need_transpose) + { + old_need_transpose = true; + ptr_expression->need_transpose = false; + need_transpose = true; + } + else if (is_non_native_row_major_matrix(ptr)) + need_transpose = true; + + // If we are forwarding this load, + // don't register the read to access chain here, defer that to when we actually use the expression, + // using the add_implied_read_expression mechanism. + auto expr = to_dereferenced_expression(ptr, !forward); + + // We might need to bitcast in order to load from a builtin. + bitcast_from_builtin_load(ptr, expr, get(result_type)); + + if (ptr_expression) + ptr_expression->need_transpose = old_need_transpose; + + // By default, suppress usage tracking since using same expression multiple times does not imply any extra work. + // However, if we try to load a complex, composite object from a flattened buffer, + // we should avoid emitting the same code over and over and lower the result to a temporary. 
+ auto &type = get(result_type); + bool usage_tracking = ptr_expression && flattened_buffer_blocks.count(ptr_expression->loaded_from) != 0 && + (type.basetype == SPIRType::Struct || (type.columns > 1)); + + auto &e = emit_op(result_type, id, expr, forward, !usage_tracking); + e.need_transpose = need_transpose; + register_read(id, ptr, forward); + + // Pass through whether the result is of a packed type. + if (has_decoration(ptr, DecorationCPacked)) + set_decoration(id, DecorationCPacked); + + inherit_expression_dependencies(id, ptr); + if (forward) + add_implied_read_expression(e, ptr); + break; + } + + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + auto *var = maybe_get(ops[2]); + if (var) + flush_variable_declaration(var->self); + + // If the base is immutable, the access chain pointer must also be. + // If an expression is mutable and forwardable, we speculate that it is immutable. + AccessChainMeta meta; + bool ptr_chain = opcode == OpPtrAccessChain; + auto e = access_chain(ops[2], &ops[3], length - 3, get(ops[0]), &meta, ptr_chain); + + auto &expr = set(ops[1], move(e), ops[0], should_forward(ops[2])); + + auto *backing_variable = maybe_get_backing_variable(ops[2]); + expr.loaded_from = backing_variable ? backing_variable->self : ops[2]; + expr.need_transpose = meta.need_transpose; + expr.access_chain = true; + + // Mark the result as being packed. Some platforms handled packed vectors differently than non-packed. + if (meta.storage_is_packed) + set_decoration(ops[1], DecorationCPacked); + else + unset_decoration(ops[1], DecorationCPacked); + + if (meta.storage_is_invariant) + set_decoration(ops[1], DecorationInvariant); + else + unset_decoration(ops[1], DecorationInvariant); + + for (uint32_t i = 2; i < length; i++) + { + inherit_expression_dependencies(ops[1], ops[i]); + add_implied_read_expression(expr, ops[i]); + } + + break; + } + + case OpStore: + { + auto *var = maybe_get(ops[0]); + + if (var && var->statically_assigned) + var->static_expression = ops[1]; + else if (var && var->loop_variable && !var->loop_variable_enable) + var->static_expression = ops[1]; + else if (var && var->remapped_variable) + { + // Skip the write. + } + else if (var && flattened_structs.count(ops[0])) + { + store_flattened_struct(*var, ops[1]); + register_write(ops[0]); + } + else + { + auto rhs = to_pointer_expression(ops[1]); + + // Statements to OpStore may be empty if it is a struct with zero members. Just forward the store to /dev/null. + if (!rhs.empty()) + { + handle_store_to_invariant_variable(ops[0], ops[1]); + + auto lhs = to_dereferenced_expression(ops[0]); + + // We might need to bitcast in order to store to a builtin. + bitcast_to_builtin_store(ops[0], rhs, expression_type(ops[1])); + + // Tries to optimize assignments like " = op expr". + // While this is purely cosmetic, this is important for legacy ESSL where loop + // variable increments must be in either i++ or i += const-expr. + // Without this, we end up with i = i + 1, which is correct GLSL, but not correct GLES 2.0. + if (!optimize_read_modify_write(expression_type(ops[1]), lhs, rhs)) + statement(lhs, " = ", rhs, ";"); + register_write(ops[0]); + } + } + // Storing a pointer results in a variable pointer, so we must conservatively assume + // we can write through it. 
+ if (expression_type(ops[1]).pointer) + register_write(ops[1]); + break; + } + + case OpArrayLength: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto e = access_chain_internal(ops[2], &ops[3], length - 3, true); + set(id, e + ".length()", result_type, true); + break; + } + + // Function calls + case OpFunctionCall: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t func = ops[2]; + const auto *arg = &ops[3]; + length -= 3; + + auto &callee = get(func); + auto &return_type = get(callee.return_type); + bool pure = function_is_pure(callee); + + bool callee_has_out_variables = false; + bool emit_return_value_as_argument = false; + + // Invalidate out variables passed to functions since they can be OpStore'd to. + for (uint32_t i = 0; i < length; i++) + { + if (callee.arguments[i].write_count) + { + register_call_out_argument(arg[i]); + callee_has_out_variables = true; + } + + flush_variable_declaration(arg[i]); + } + + if (!return_type.array.empty() && !backend.can_return_array) + { + callee_has_out_variables = true; + emit_return_value_as_argument = true; + } + + if (!pure) + register_impure_function_call(); + + string funexpr; + vector arglist; + funexpr += to_name(func) + "("; + + if (emit_return_value_as_argument) + { + statement(type_to_glsl(return_type), " ", to_name(id), type_to_array_glsl(return_type), ";"); + arglist.push_back(to_name(id)); + } + + for (uint32_t i = 0; i < length; i++) + { + // Do not pass in separate images or samplers if we're remapping + // to combined image samplers. + if (skip_argument(arg[i])) + continue; + + arglist.push_back(to_func_call_arg(arg[i])); + } + + for (auto &combined : callee.combined_parameters) + { + uint32_t image_id = combined.global_image ? combined.image_id : arg[combined.image_id]; + uint32_t sampler_id = combined.global_sampler ? combined.sampler_id : arg[combined.sampler_id]; + arglist.push_back(to_combined_image_sampler(image_id, sampler_id)); + } + + append_global_func_args(callee, length, arglist); + + funexpr += merge(arglist); + funexpr += ")"; + + // Check for function call constraints. + check_function_call_constraints(arg, length); + + if (return_type.basetype != SPIRType::Void) + { + // If the function actually writes to an out variable, + // take the conservative route and do not forward. + // The problem is that we might not read the function + // result (and emit the function) before an out variable + // is read (common case when return value is ignored! + // In order to avoid start tracking invalid variables, + // just avoid the forwarding problem altogether. + bool forward = args_will_forward(id, arg, length, pure) && !callee_has_out_variables && pure && + (forced_temporaries.find(id) == end(forced_temporaries)); + + if (emit_return_value_as_argument) + { + statement(funexpr, ";"); + set(id, to_name(id), result_type, true); + } + else + emit_op(result_type, id, funexpr, forward); + + // Function calls are implicit loads from all variables in question. + // Set dependencies for them. + for (uint32_t i = 0; i < length; i++) + register_read(id, arg[i], forward); + + // If we're going to forward the temporary result, + // put dependencies on every variable that must not change. 
+ if (forward) + register_global_read_dependencies(callee, id); + } + else + statement(funexpr, ";"); + + break; + } + + // Composite munging + case OpCompositeConstruct: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + const auto *const elems = &ops[2]; + length -= 2; + + bool forward = true; + for (uint32_t i = 0; i < length; i++) + forward = forward && should_forward(elems[i]); + + auto &out_type = get(result_type); + auto *in_type = length > 0 ? &expression_type(elems[0]) : nullptr; + + // Only splat if we have vector constructors. + // Arrays and structs must be initialized properly in full. + bool composite = !out_type.array.empty() || out_type.basetype == SPIRType::Struct; + + bool splat = false; + bool swizzle_splat = false; + + if (in_type) + { + splat = in_type->vecsize == 1 && in_type->columns == 1 && !composite && backend.use_constructor_splatting; + swizzle_splat = in_type->vecsize == 1 && in_type->columns == 1 && backend.can_swizzle_scalar; + + if (ir.ids[elems[0]].get_type() == TypeConstant && !type_is_floating_point(*in_type)) + { + // Cannot swizzle literal integers as a special case. + swizzle_splat = false; + } + } + + if (splat || swizzle_splat) + { + uint32_t input = elems[0]; + for (uint32_t i = 0; i < length; i++) + { + if (input != elems[i]) + { + splat = false; + swizzle_splat = false; + } + } + } + + if (out_type.basetype == SPIRType::Struct && !backend.can_declare_struct_inline) + forward = false; + if (!out_type.array.empty() && !backend.can_declare_arrays_inline) + forward = false; + if (type_is_empty(out_type) && !backend.supports_empty_struct) + forward = false; + + string constructor_op; + if (!backend.array_is_value_type && out_type.array.size() > 1) + { + // We cannot construct array of arrays because we cannot treat the inputs + // as value types. Need to declare the array-of-arrays, and copy in elements one by one. + forced_temporaries.insert(id); + auto &flags = ir.meta[id].decoration.decoration_flags; + statement(flags_to_precision_qualifiers_glsl(out_type, flags), variable_decl(out_type, to_name(id)), ";"); + set(id, to_name(id), result_type, true); + for (uint32_t i = 0; i < length; i++) + emit_array_copy(join(to_expression(id), "[", i, "]"), elems[i]); + } + else if (backend.use_initializer_list && composite) + { + // Only use this path if we are building composites. + // This path cannot be used for arithmetic. 
+ if (backend.use_typed_initializer_list && out_type.basetype == SPIRType::Struct && out_type.array.empty()) + constructor_op += type_to_glsl_constructor(get(result_type)); + constructor_op += "{ "; + if (type_is_empty(out_type) && !backend.supports_empty_struct) + constructor_op += "0"; + else if (splat) + constructor_op += to_expression(elems[0]); + else + constructor_op += build_composite_combiner(result_type, elems, length); + constructor_op += " }"; + } + else if (swizzle_splat && !composite) + { + constructor_op = remap_swizzle(get(result_type), 1, to_expression(elems[0])); + } + else + { + constructor_op = type_to_glsl_constructor(get(result_type)) + "("; + if (type_is_empty(out_type) && !backend.supports_empty_struct) + constructor_op += "0"; + else if (splat) + constructor_op += to_expression(elems[0]); + else + constructor_op += build_composite_combiner(result_type, elems, length); + constructor_op += ")"; + } + + if (!constructor_op.empty()) + { + emit_op(result_type, id, constructor_op, forward); + for (uint32_t i = 0; i < length; i++) + inherit_expression_dependencies(id, elems[i]); + } + break; + } + + case OpVectorInsertDynamic: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t vec = ops[2]; + uint32_t comp = ops[3]; + uint32_t index = ops[4]; + + flush_variable_declaration(vec); + + // Make a copy, then use access chain to store the variable. + statement(declare_temporary(result_type, id), to_expression(vec), ";"); + set(id, to_name(id), result_type, true); + auto chain = access_chain_internal(id, &index, 1, false); + statement(chain, " = ", to_expression(comp), ";"); + break; + } + + case OpVectorExtractDynamic: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto expr = access_chain_internal(ops[2], &ops[3], 1, false); + emit_op(result_type, id, expr, should_forward(ops[2])); + inherit_expression_dependencies(id, ops[2]); + inherit_expression_dependencies(id, ops[3]); + break; + } + + case OpCompositeExtract: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + length -= 3; + + auto &type = get(result_type); + + // We can only split the expression here if our expression is forwarded as a temporary. + bool allow_base_expression = forced_temporaries.find(id) == end(forced_temporaries); + + // Do not allow base expression for struct members. We risk doing "swizzle" optimizations in this case. + auto &composite_type = expression_type(ops[2]); + if (composite_type.basetype == SPIRType::Struct || !composite_type.array.empty()) + allow_base_expression = false; + + // Packed expressions cannot be split up. + if (has_decoration(ops[2], DecorationCPacked)) + allow_base_expression = false; + + AccessChainMeta meta; + SPIRExpression *e = nullptr; + + // Only apply this optimization if result is scalar. + if (allow_base_expression && should_forward(ops[2]) && type.vecsize == 1 && type.columns == 1 && length == 1) + { + // We want to split the access chain from the base. + // This is so we can later combine different CompositeExtract results + // with CompositeConstruct without emitting code like + // + // vec3 temp = texture(...).xyz + // vec4(temp.x, temp.y, temp.z, 1.0). + // + // when we actually wanted to emit this + // vec4(texture(...).xyz, 1.0). + // + // Including the base will prevent this and would trigger multiple reads + // from expression causing it to be forced to an actual temporary in GLSL. 
+ auto expr = access_chain_internal(ops[2], &ops[3], length, true, true, false, &meta); + e = &emit_op(result_type, id, expr, true, !expression_is_forwarded(ops[2])); + inherit_expression_dependencies(id, ops[2]); + e->base_expression = ops[2]; + } + else + { + auto expr = access_chain_internal(ops[2], &ops[3], length, true, false, false, &meta); + e = &emit_op(result_type, id, expr, should_forward(ops[2]), !expression_is_forwarded(ops[2])); + inherit_expression_dependencies(id, ops[2]); + } + + // Pass through some meta information to the loaded expression. + // We can still end up loading a buffer type to a variable, then CompositeExtract from it + // instead of loading everything through an access chain. + e->need_transpose = meta.need_transpose; + if (meta.storage_is_packed) + set_decoration(id, DecorationCPacked); + if (meta.storage_is_invariant) + set_decoration(id, DecorationInvariant); + + break; + } + + case OpCompositeInsert: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t obj = ops[2]; + uint32_t composite = ops[3]; + const auto *elems = &ops[4]; + length -= 4; + + flush_variable_declaration(composite); + + // Make a copy, then use access chain to store the variable. + statement(declare_temporary(result_type, id), to_expression(composite), ";"); + set(id, to_name(id), result_type, true); + auto chain = access_chain_internal(id, elems, length, true); + statement(chain, " = ", to_expression(obj), ";"); + + break; + } + + case OpCopyMemory: + { + uint32_t lhs = ops[0]; + uint32_t rhs = ops[1]; + if (lhs != rhs) + { + flush_variable_declaration(lhs); + flush_variable_declaration(rhs); + statement(to_expression(lhs), " = ", to_expression(rhs), ";"); + register_write(lhs); + } + break; + } + + case OpCopyObject: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t rhs = ops[2]; + bool pointer = get(result_type).pointer; + + if (expression_is_lvalue(rhs) && !pointer) + { + // Need a copy. + // For pointer types, we copy the pointer itself. + statement(declare_temporary(result_type, id), to_expression(rhs), ";"); + set(id, to_name(id), result_type, true); + inherit_expression_dependencies(id, rhs); + } + else + { + // RHS expression is immutable, so just forward it. + // Copying these things really make no sense, but + // seems to be allowed anyways. + auto &e = set(id, to_expression(rhs), result_type, true); + if (pointer) + { + auto *var = maybe_get_backing_variable(rhs); + e.loaded_from = var ? var->self : 0; + } + } + break; + } + + case OpVectorShuffle: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t vec0 = ops[2]; + uint32_t vec1 = ops[3]; + const auto *elems = &ops[4]; + length -= 4; + + auto &type0 = expression_type(vec0); + + bool shuffle = false; + for (uint32_t i = 0; i < length; i++) + if (elems[i] >= type0.vecsize) + shuffle = true; + + // Cannot use swizzles with packed expressions, force shuffle path. + if (!shuffle && has_decoration(vec0, DecorationCPacked)) + shuffle = true; + + string expr; + bool should_fwd, trivial_forward; + + if (shuffle) + { + should_fwd = should_forward(vec0) && should_forward(vec1); + trivial_forward = !expression_is_forwarded(vec0) && !expression_is_forwarded(vec1); + + // Constructor style and shuffling from two different vectors. 
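// Illustrative aside (hypothetical operands): if the shuffle picks { v0.y, v1.x, v0.z }, this
// branch builds a constructor expression such as  vec3(v0.y, v1.x, v0.z),  whereas the
// single-source branch below collapses to a plain swizzle like  v0.yzx.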
+ vector args; + for (uint32_t i = 0; i < length; i++) + { + if (elems[i] >= type0.vecsize) + args.push_back(to_extract_component_expression(vec1, elems[i] - type0.vecsize)); + else + args.push_back(to_extract_component_expression(vec0, elems[i])); + } + expr += join(type_to_glsl_constructor(get(result_type)), "(", merge(args), ")"); + } + else + { + should_fwd = should_forward(vec0); + trivial_forward = !expression_is_forwarded(vec0); + + // We only source from first vector, so can use swizzle. + // If the vector is packed, unpack it before applying a swizzle (needed for MSL) + expr += to_enclosed_unpacked_expression(vec0); + expr += "."; + for (uint32_t i = 0; i < length; i++) + expr += index_to_swizzle(elems[i]); + + if (backend.swizzle_is_function && length > 1) + expr += "()"; + } + + // A shuffle is trivial in that it doesn't actually *do* anything. + // We inherit the forwardedness from our arguments to avoid flushing out to temporaries when it's not really needed. + + emit_op(result_type, id, expr, should_fwd, trivial_forward); + inherit_expression_dependencies(id, vec0); + inherit_expression_dependencies(id, vec1); + break; + } + + // ALU + case OpIsNan: + GLSL_UFOP(isnan); + break; + + case OpIsInf: + GLSL_UFOP(isinf); + break; + + case OpSNegate: + case OpFNegate: + GLSL_UOP(-); + break; + + case OpIAdd: + { + // For simple arith ops, prefer the output type if there's a mismatch to avoid extra bitcasts. + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(+, type); + break; + } + + case OpFAdd: + GLSL_BOP(+); + break; + + case OpISub: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(-, type); + break; + } + + case OpFSub: + GLSL_BOP(-); + break; + + case OpIMul: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(*, type); + break; + } + + case OpVectorTimesMatrix: + case OpMatrixTimesVector: + { + // If the matrix needs transpose, just flip the multiply order. + auto *e = maybe_get(ops[opcode == OpMatrixTimesVector ? 2 : 3]); + if (e && e->need_transpose) + { + e->need_transpose = false; + emit_binary_op(ops[0], ops[1], ops[3], ops[2], "*"); + e->need_transpose = true; + } + else + GLSL_BOP(*); + break; + } + + case OpFMul: + case OpMatrixTimesScalar: + case OpVectorTimesScalar: + case OpMatrixTimesMatrix: + GLSL_BOP(*); + break; + + case OpOuterProduct: + GLSL_BFOP(outerProduct); + break; + + case OpDot: + GLSL_BFOP(dot); + break; + + case OpTranspose: + GLSL_UFOP(transpose); + break; + + case OpSRem: + { + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + + // Needs special handling. 
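// Illustrative aside: GLSL's '%' is not reliably defined for negative signed operands, so the code
// below spells OpSRem out via the truncated-division identity  a - b * (a / b).  A standalone C++
// check of the same identity (C++ '/' also truncates toward zero):
//   int srem_ref(int a, int b) { return a - b * (a / b); }
//   srem_ref(-7, 3) == -1,  srem_ref(7, -3) == 1   // the remainder takes the sign of the dividend.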
+ bool forward = should_forward(op0) && should_forward(op1); + auto expr = join(to_enclosed_expression(op0), " - ", to_enclosed_expression(op1), " * ", "(", + to_enclosed_expression(op0), " / ", to_enclosed_expression(op1), ")"); + + emit_op(result_type, result_id, expr, forward); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + break; + } + + case OpSDiv: + GLSL_BOP_CAST(/, SPIRType::Int); + break; + + case OpUDiv: + GLSL_BOP_CAST(/, SPIRType::UInt); + break; + + case OpIAddCarry: + case OpISubBorrow: + { + if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Extended arithmetic is only available from ESSL 310."); + else if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("Extended arithmetic is only available from GLSL 400."); + + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + forced_temporaries.insert(result_id); + auto &type = get(result_type); + auto &flags = ir.meta[result_id].decoration.decoration_flags; + statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";"); + set(result_id, to_name(result_id), result_type, true); + + const char *op = opcode == OpIAddCarry ? "uaddCarry" : "usubBorrow"; + + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", op, "(", to_expression(op0), ", ", + to_expression(op1), ", ", to_expression(result_id), ".", to_member_name(type, 1), ");"); + break; + } + + case OpUMulExtended: + case OpSMulExtended: + { + if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Extended arithmetic is only available from ESSL 310."); + else if (!options.es && options.version < 400) + SPIRV_CROSS_THROW("Extended arithmetic is only available from GLSL 4000."); + + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + forced_temporaries.insert(result_id); + auto &type = get(result_type); + auto &flags = ir.meta[result_id].decoration.decoration_flags; + statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";"); + set(result_id, to_name(result_id), result_type, true); + + const char *op = opcode == OpUMulExtended ? "umulExtended" : "imulExtended"; + + statement(op, "(", to_expression(op0), ", ", to_expression(op1), ", ", to_expression(result_id), ".", + to_member_name(type, 1), ", ", to_expression(result_id), ".", to_member_name(type, 0), ");"); + break; + } + + case OpFDiv: + GLSL_BOP(/); + break; + + case OpShiftRightLogical: + GLSL_BOP_CAST(>>, SPIRType::UInt); + break; + + case OpShiftRightArithmetic: + GLSL_BOP_CAST(>>, SPIRType::Int); + break; + + case OpShiftLeftLogical: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(<<, type); + break; + } + + case OpBitwiseOr: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(|, type); + break; + } + + case OpBitwiseXor: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(^, type); + break; + } + + case OpBitwiseAnd: + { + auto type = get(ops[0]).basetype; + GLSL_BOP_CAST(&, type); + break; + } + + case OpNot: + GLSL_UOP(~); + break; + + case OpUMod: + GLSL_BOP_CAST(%, SPIRType::UInt); + break; + + case OpSMod: + GLSL_BOP_CAST(%, SPIRType::Int); + break; + + case OpFMod: + GLSL_BFOP(mod); + break; + + case OpFRem: + { + if (is_legacy()) + SPIRV_CROSS_THROW("OpFRem requires trunc() and is only supported on non-legacy targets. 
A workaround is " + "needed for legacy."); + + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + + // Needs special handling. + bool forward = should_forward(op0) && should_forward(op1); + auto expr = join(to_enclosed_expression(op0), " - ", to_enclosed_expression(op1), " * ", "trunc(", + to_enclosed_expression(op0), " / ", to_enclosed_expression(op1), ")"); + + emit_op(result_type, result_id, expr, forward); + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); + break; + } + + // Relational + case OpAny: + GLSL_UFOP(any); + break; + + case OpAll: + GLSL_UFOP(all); + break; + + case OpSelect: + emit_mix_op(ops[0], ops[1], ops[4], ops[3], ops[2]); + break; + + case OpLogicalOr: + { + // No vector variant in GLSL for logical OR. + auto result_type = ops[0]; + auto id = ops[1]; + auto &type = get(result_type); + + if (type.vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "||"); + else + GLSL_BOP(||); + break; + } + + case OpLogicalAnd: + { + // No vector variant in GLSL for logical AND. + auto result_type = ops[0]; + auto id = ops[1]; + auto &type = get(result_type); + + if (type.vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "&&"); + else + GLSL_BOP(&&); + break; + } + + case OpLogicalNot: + { + auto &type = get(ops[0]); + if (type.vecsize > 1) + GLSL_UFOP(not); + else + GLSL_UOP(!); + break; + } + + case OpIEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(equal, SPIRType::Int); + else + GLSL_BOP_CAST(==, SPIRType::Int); + break; + } + + case OpLogicalEqual: + case OpFOrdEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(equal); + else + GLSL_BOP(==); + break; + } + + case OpINotEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(notEqual, SPIRType::Int); + else + GLSL_BOP_CAST(!=, SPIRType::Int); + break; + } + + case OpLogicalNotEqual: + case OpFOrdNotEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(notEqual); + else + GLSL_BOP(!=); + break; + } + + case OpUGreaterThan: + case OpSGreaterThan: + { + auto type = opcode == OpUGreaterThan ? SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(greaterThan, type); + else + GLSL_BOP_CAST(>, type); + break; + } + + case OpFOrdGreaterThan: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(greaterThan); + else + GLSL_BOP(>); + break; + } + + case OpUGreaterThanEqual: + case OpSGreaterThanEqual: + { + auto type = opcode == OpUGreaterThanEqual ? SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(greaterThanEqual, type); + else + GLSL_BOP_CAST(>=, type); + break; + } + + case OpFOrdGreaterThanEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(greaterThanEqual); + else + GLSL_BOP(>=); + break; + } + + case OpULessThan: + case OpSLessThan: + { + auto type = opcode == OpULessThan ? SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(lessThan, type); + else + GLSL_BOP_CAST(<, type); + break; + } + + case OpFOrdLessThan: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(lessThan); + else + GLSL_BOP(<); + break; + } + + case OpULessThanEqual: + case OpSLessThanEqual: + { + auto type = opcode == OpULessThanEqual ? 
SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP_CAST(lessThanEqual, type); + else + GLSL_BOP_CAST(<=, type); + break; + } + + case OpFOrdLessThanEqual: + { + if (expression_type(ops[2]).vecsize > 1) + GLSL_BFOP(lessThanEqual); + else + GLSL_BOP(<=); + break; + } + + // Conversion + case OpConvertFToU: + case OpConvertFToS: + case OpConvertSToF: + case OpConvertUToF: + case OpUConvert: + case OpSConvert: + case OpFConvert: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto func = type_to_glsl_constructor(get(result_type)); + emit_unary_func_op(result_type, id, ops[2], func.c_str()); + break; + } + + case OpBitcast: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t arg = ops[2]; + + auto op = bitcast_glsl_op(get(result_type), expression_type(arg)); + emit_unary_func_op(result_type, id, arg, op.c_str()); + break; + } + + case OpQuantizeToF16: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t arg = ops[2]; + + string op; + auto &type = get(result_type); + + switch (type.vecsize) + { + case 1: + op = join("unpackHalf2x16(packHalf2x16(vec2(", to_expression(arg), "))).x"); + break; + case 2: + op = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), "))"); + break; + case 3: + { + auto op0 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".xy))"); + auto op1 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".zz)).x"); + op = join("vec3(", op0, ", ", op1, ")"); + break; + } + case 4: + { + auto op0 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".xy))"); + auto op1 = join("unpackHalf2x16(packHalf2x16(", to_expression(arg), ".zw))"); + op = join("vec4(", op0, ", ", op1, ")"); + break; + } + default: + SPIRV_CROSS_THROW("Illegal argument to OpQuantizeToF16."); + } + + emit_op(result_type, id, op, should_forward(arg)); + inherit_expression_dependencies(id, arg); + break; + } + + // Derivatives + case OpDPdx: + GLSL_UFOP(dFdx); + if (is_legacy_es()) + require_extension_internal("GL_OES_standard_derivatives"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdy: + GLSL_UFOP(dFdy); + if (is_legacy_es()) + require_extension_internal("GL_OES_standard_derivatives"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxFine: + GLSL_UFOP(dFdxFine); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyFine: + GLSL_UFOP(dFdyFine); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxCoarse: + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + GLSL_UFOP(dFdxCoarse); + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyCoarse: + GLSL_UFOP(dFdyCoarse); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidth: + GLSL_UFOP(fwidth); + if (is_legacy_es()) 
+ require_extension_internal("GL_OES_standard_derivatives"); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidthCoarse: + GLSL_UFOP(fwidthCoarse); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidthFine: + GLSL_UFOP(fwidthFine); + if (options.es) + { + SPIRV_CROSS_THROW("GL_ARB_derivative_control is unavailable in OpenGL ES."); + } + if (options.version < 450) + require_extension_internal("GL_ARB_derivative_control"); + register_control_dependent_expression(ops[1]); + break; + + // Bitfield + case OpBitFieldInsert: + // TODO: The signedness of inputs is strict in GLSL, but not in SPIR-V, bitcast if necessary. + GLSL_QFOP(bitfieldInsert); + break; + + case OpBitFieldSExtract: + case OpBitFieldUExtract: + // TODO: The signedness of inputs is strict in GLSL, but not in SPIR-V, bitcast if necessary. + GLSL_TFOP(bitfieldExtract); + break; + + case OpBitReverse: + GLSL_UFOP(bitfieldReverse); + break; + + case OpBitCount: + GLSL_UFOP(bitCount); + break; + + // Atomics + case OpAtomicExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + // Ignore semantics for now, probably only relevant to CL. + uint32_t val = ops[5]; + const char *op = check_atomic_image(ptr) ? "imageAtomicExchange" : "atomicExchange"; + forced_temporaries.insert(id); + emit_binary_func_op(result_type, id, ptr, val, op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicCompareExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t val = ops[6]; + uint32_t comp = ops[7]; + const char *op = check_atomic_image(ptr) ? "imageAtomicCompSwap" : "atomicCompSwap"; + + forced_temporaries.insert(id); + emit_trinary_func_op(result_type, id, ptr, comp, val, op); + flush_all_atomic_capable_variables(); + break; + } + + case OpAtomicLoad: + flush_all_atomic_capable_variables(); + // FIXME: Image? + // OpAtomicLoad seems to only be relevant for atomic counters. + GLSL_UFOP(atomicCounter); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + + case OpAtomicStore: + SPIRV_CROSS_THROW("Unsupported opcode OpAtomicStore."); + + case OpAtomicIIncrement: + case OpAtomicIDecrement: + { + forced_temporaries.insert(ops[1]); + auto &type = expression_type(ops[2]); + if (type.storage == StorageClassAtomicCounter) + { + // Legacy GLSL stuff, not sure if this is relevant to support. + if (opcode == OpAtomicIIncrement) + GLSL_UFOP(atomicCounterIncrement); + else + GLSL_UFOP(atomicCounterDecrement); + } + else + { + bool atomic_image = check_atomic_image(ops[2]); + bool unsigned_type = (type.basetype == SPIRType::UInt) || + (atomic_image && get(type.image.type).basetype == SPIRType::UInt); + const char *op = atomic_image ? "imageAtomicAdd" : "atomicAdd"; + + const char *increment = nullptr; + if (opcode == OpAtomicIIncrement && unsigned_type) + increment = "1u"; + else if (opcode == OpAtomicIIncrement) + increment = "1"; + else if (unsigned_type) + increment = "uint(-1)"; + else + increment = "-1"; + + emit_op(ops[0], ops[1], join(op, "(", to_expression(ops[2]), ", ", increment, ")"), false); + } + + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicIAdd: + { + const char *op = check_atomic_image(ops[2]) ? 
"imageAtomicAdd" : "atomicAdd"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicISub: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicAdd" : "atomicAdd"; + forced_temporaries.insert(ops[1]); + auto expr = join(op, "(", to_expression(ops[2]), ", -", to_enclosed_expression(ops[5]), ")"); + emit_op(ops[0], ops[1], expr, should_forward(ops[2]) && should_forward(ops[5])); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicSMin: + case OpAtomicUMin: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicMin" : "atomicMin"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicSMax: + case OpAtomicUMax: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicMax" : "atomicMax"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicAnd: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicAnd" : "atomicAnd"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicOr: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicOr" : "atomicOr"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + case OpAtomicXor: + { + const char *op = check_atomic_image(ops[2]) ? "imageAtomicXor" : "atomicXor"; + forced_temporaries.insert(ops[1]); + emit_binary_func_op(ops[0], ops[1], ops[2], ops[5], op); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); + break; + } + + // Geometry shaders + case OpEmitVertex: + statement("EmitVertex();"); + break; + + case OpEndPrimitive: + statement("EndPrimitive();"); + break; + + case OpEmitStreamVertex: + statement("EmitStreamVertex();"); + break; + + case OpEndStreamPrimitive: + statement("EndStreamPrimitive();"); + break; + + // Textures + case OpImageSampleExplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageFetch: + case OpImageGather: + case OpImageDrefGather: + // Gets a bit hairy, so move this to a separate instruction. + emit_texture_op(instruction); + break; + + case OpImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + // Suppress usage tracking. + auto &e = emit_op(result_type, id, to_expression(ops[2]), true, true); + + // When using the image, we need to know which variable it is actually loaded from. + auto *var = maybe_get_backing_variable(ops[2]); + e.loaded_from = var ? 
var->self : 0; + break; + } + + case OpImageQueryLod: + { + if (!options.es && options.version < 400) + { + require_extension_internal("GL_ARB_texture_query_lod"); + // For some reason, the ARB spec is all-caps. + GLSL_BFOP(textureQueryLOD); + } + else if (options.es) + SPIRV_CROSS_THROW("textureQueryLod not supported in ES profile."); + else + GLSL_BFOP(textureQueryLod); + register_control_dependent_expression(ops[1]); + break; + } + + case OpImageQueryLevels: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_texture_query_levels"); + if (options.es) + SPIRV_CROSS_THROW("textureQueryLevels not supported in ES profile."); + + auto expr = join("textureQueryLevels(", convert_separate_image_to_expression(ops[2]), ")"); + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpImageQuerySamples: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + string expr; + if (type.image.sampled == 2) + expr = join("imageSamples(", to_expression(ops[2]), ")"); + else + expr = join("textureSamples(", convert_separate_image_to_expression(ops[2]), ")"); + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpSampledImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_sampled_image_op(result_type, id, ops[2], ops[3]); + break; + } + + case OpImageQuerySizeLod: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto expr = join("textureSize(", convert_separate_image_to_expression(ops[2]), ", ", + bitcast_expression(SPIRType::Int, ops[3]), ")"); + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + break; + } + + // Image load/store + case OpImageRead: + { + // We added Nonreadable speculatively to the OpImage variable due to glslangValidator + // not adding the proper qualifiers. + // If it turns out we need to read the image after all, remove the qualifier and recompile. + auto *var = maybe_get_backing_variable(ops[2]); + if (var) + { + auto &flags = ir.meta.at(var->self).decoration.decoration_flags; + if (flags.get(DecorationNonReadable)) + { + flags.clear(DecorationNonReadable); + force_recompile = true; + } + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + bool pure; + string imgexpr; + auto &type = expression_type(ops[2]); + + if (var && var->remapped_variable) // Remapped input, just read as-is without any op-code + { + if (type.image.ms) + SPIRV_CROSS_THROW("Trying to remap multisampled image to variable, this is not possible."); + + auto itr = + find_if(begin(pls_inputs), end(pls_inputs), [var](const PlsRemap &pls) { return pls.id == var->self; }); + + if (itr == end(pls_inputs)) + { + // For non-PLS inputs, we rely on subpass type remapping information to get it right + // since ImageRead always returns 4-component vectors and the backing type is opaque. + if (!var->remapped_components) + SPIRV_CROSS_THROW("subpassInput was remapped, but remap_components is not set correctly."); + imgexpr = remap_swizzle(get(result_type), var->remapped_components, to_expression(ops[2])); + } + else + { + // PLS input could have different number of components than what the SPIR expects, swizzle to + // the appropriate vector size. 
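// Illustrative aside (hypothetical name, assumption about remap_swizzle): if the PLS format carries
// 3 components and the result type is vec4, and remap_swizzle pads by repeating the last component
// as in upstream SPIRV-Cross, the read comes out roughly as:  pls_color.xyzz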
+ uint32_t components = pls_format_to_components(itr->format); + imgexpr = remap_swizzle(get(result_type), components, to_expression(ops[2])); + } + pure = true; + } + else if (type.image.dim == DimSubpassData) + { + if (options.vulkan_semantics) + { + // With Vulkan semantics, use the proper Vulkan GLSL construct. + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || length != 6) + SPIRV_CROSS_THROW( + "Multisampled image used in OpImageRead, but unexpected operand mask was used."); + + uint32_t samples = ops[5]; + imgexpr = join("subpassLoad(", to_expression(ops[2]), ", ", to_expression(samples), ")"); + } + else + imgexpr = join("subpassLoad(", to_expression(ops[2]), ")"); + } + else + { + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || length != 6) + SPIRV_CROSS_THROW( + "Multisampled image used in OpImageRead, but unexpected operand mask was used."); + + uint32_t samples = ops[5]; + imgexpr = join("texelFetch(", to_expression(ops[2]), ", ivec2(gl_FragCoord.xy), ", + to_expression(samples), ")"); + } + else + { + // Implement subpass loads via texture barrier style sampling. + imgexpr = join("texelFetch(", to_expression(ops[2]), ", ivec2(gl_FragCoord.xy), 0)"); + } + } + imgexpr = remap_swizzle(get(result_type), 4, imgexpr); + pure = true; + } + else + { + // imageLoad only accepts int coords, not uint. + auto coord_expr = to_expression(ops[3]); + auto target_coord_type = expression_type(ops[3]); + target_coord_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(target_coord_type, expression_type(ops[3]).basetype, coord_expr); + + // Plain image load/store. + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || length != 6) + SPIRV_CROSS_THROW("Multisampled image used in OpImageRead, but unexpected operand mask was used."); + + uint32_t samples = ops[5]; + imgexpr = + join("imageLoad(", to_expression(ops[2]), ", ", coord_expr, ", ", to_expression(samples), ")"); + } + else + imgexpr = join("imageLoad(", to_expression(ops[2]), ", ", coord_expr, ")"); + + imgexpr = remap_swizzle(get(result_type), 4, imgexpr); + pure = false; + } + + if (var && var->forwardable) + { + bool forward = forced_temporaries.find(id) == end(forced_temporaries); + auto &e = emit_op(result_type, id, imgexpr, forward); + + // We only need to track dependencies if we're reading from image load/store. + if (!pure) + { + e.loaded_from = var->self; + if (forward) + var->dependees.push_back(id); + } + } + else + emit_op(result_type, id, imgexpr, false); + + inherit_expression_dependencies(id, ops[2]); + if (type.image.ms) + inherit_expression_dependencies(id, ops[5]); + break; + } + + case OpImageTexelPointer: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto &e = set(id, join(to_expression(ops[2]), ", ", to_expression(ops[3])), result_type, true); + + // When using the pointer, we need to know which variable it is actually loaded from. + auto *var = maybe_get_backing_variable(ops[2]); + e.loaded_from = var ? var->self : 0; + break; + } + + case OpImageWrite: + { + // We added Nonwritable speculatively to the OpImage variable due to glslangValidator + // not adding the proper qualifiers. + // If it turns out we need to write to the image after all, remove the qualifier and recompile. 
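// Illustrative aside (hypothetical names): net effect of this block -- an image that was
// speculatively emitted as "readonly" (DecorationNonWritable) loses that qualifier here once a
// real write is seen, and the shader is recompiled. The write itself is emitted below as, e.g.:
//   imageStore(img, ivec2(coord), vec4(value));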
+ auto *var = maybe_get_backing_variable(ops[0]); + if (var) + { + auto &flags = ir.meta.at(var->self).decoration.decoration_flags; + if (flags.get(DecorationNonWritable)) + { + flags.clear(DecorationNonWritable); + force_recompile = true; + } + } + + auto &type = expression_type(ops[0]); + auto &value_type = expression_type(ops[2]); + auto store_type = value_type; + store_type.vecsize = 4; + + // imageStore only accepts int coords, not uint. + auto coord_expr = to_expression(ops[1]); + auto target_coord_type = expression_type(ops[1]); + target_coord_type.basetype = SPIRType::Int; + coord_expr = bitcast_expression(target_coord_type, expression_type(ops[1]).basetype, coord_expr); + + if (type.image.ms) + { + uint32_t operands = ops[3]; + if (operands != ImageOperandsSampleMask || length != 5) + SPIRV_CROSS_THROW("Multisampled image used in OpImageWrite, but unexpected operand mask was used."); + uint32_t samples = ops[4]; + statement("imageStore(", to_expression(ops[0]), ", ", coord_expr, ", ", to_expression(samples), ", ", + remap_swizzle(store_type, value_type.vecsize, to_expression(ops[2])), ");"); + } + else + statement("imageStore(", to_expression(ops[0]), ", ", coord_expr, ", ", + remap_swizzle(store_type, value_type.vecsize, to_expression(ops[2])), ");"); + + if (var && variable_storage_is_aliased(*var)) + flush_all_aliased_variables(); + break; + } + + case OpImageQuerySize: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (type.basetype == SPIRType::Image) + { + string expr; + if (type.image.sampled == 2) + { + // The size of an image is always constant. + expr = join("imageSize(", to_expression(ops[2]), ")"); + } + else + { + // This path is hit for samplerBuffers and multisampled images which do not have LOD. + expr = join("textureSize(", convert_separate_image_to_expression(ops[2]), ")"); + } + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::Int, expr); + emit_op(result_type, id, expr, true); + } + else + SPIRV_CROSS_THROW("Invalid type for OpImageQuerySize."); + break; + } + + // Compute + case OpControlBarrier: + case OpMemoryBarrier: + { + uint32_t execution_scope = 0; + uint32_t memory; + uint32_t semantics; + + if (opcode == OpMemoryBarrier) + { + memory = get(ops[0]).scalar(); + semantics = get(ops[1]).scalar(); + } + else + { + execution_scope = get(ops[0]).scalar(); + memory = get(ops[1]).scalar(); + semantics = get(ops[2]).scalar(); + } + + if (execution_scope == ScopeSubgroup || memory == ScopeSubgroup) + { + if (!options.vulkan_semantics) + SPIRV_CROSS_THROW("Can only use subgroup operations in Vulkan semantics."); + require_extension_internal("GL_KHR_shader_subgroup_basic"); + } + + if (execution_scope != ScopeSubgroup && get_entry_point().model == ExecutionModelTessellationControl) + { + // Control shaders only have barriers, and it implies memory barriers. + if (opcode == OpControlBarrier) + statement("barrier();"); + break; + } + + // We only care about these flags, acquire/release and friends are not relevant to GLSL. + semantics = mask_relevant_memory_semantics(semantics); + + if (opcode == OpMemoryBarrier) + { + // If we are a memory barrier, and the next instruction is a control barrier, check if that memory barrier + // does what we need, so we avoid redundant barriers. 
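// Illustrative aside: example of the elision -- an OpMemoryBarrier(Workgroup, WorkgroupMemory)
// immediately followed by OpControlBarrier(Workgroup, Workgroup, WorkgroupMemory) is dropped here,
// since the control barrier's own emission already covers the same scope and semantics.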
+ const Instruction *next = get_next_instruction_in_block(instruction); + if (next && next->op == OpControlBarrier) + { + auto *next_ops = stream(*next); + uint32_t next_memory = get(next_ops[1]).scalar(); + uint32_t next_semantics = get(next_ops[2]).scalar(); + next_semantics = mask_relevant_memory_semantics(next_semantics); + + bool memory_scope_covered = false; + if (next_memory == memory) + memory_scope_covered = true; + else if (next_semantics == MemorySemanticsWorkgroupMemoryMask) + { + // If we only care about workgroup memory, either Device or Workgroup scope is fine, + // scope does not have to match. + if ((next_memory == ScopeDevice || next_memory == ScopeWorkgroup) && + (memory == ScopeDevice || memory == ScopeWorkgroup)) + { + memory_scope_covered = true; + } + } + else if (memory == ScopeWorkgroup && next_memory == ScopeDevice) + { + // The control barrier has device scope, but the memory barrier just has workgroup scope. + memory_scope_covered = true; + } + + // If we have the same memory scope, and all memory types are covered, we're good. + if (memory_scope_covered && (semantics & next_semantics) == semantics) + break; + } + } + + // We are synchronizing some memory or syncing execution, + // so we cannot forward any loads beyond the memory barrier. + if (semantics || opcode == OpControlBarrier) + { + assert(current_emitting_block); + flush_control_dependent_expressions(current_emitting_block->self); + flush_all_active_variables(); + } + + if (memory == ScopeWorkgroup) // Only need to consider memory within a group + { + if (semantics == MemorySemanticsWorkgroupMemoryMask) + statement("memoryBarrierShared();"); + else if (semantics != 0) + statement("groupMemoryBarrier();"); + } + else if (memory == ScopeSubgroup) + { + const uint32_t all_barriers = + MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | MemorySemanticsImageMemoryMask; + + if (semantics & (MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask)) + { + // These are not relevant for GLSL, but assume it means memoryBarrier(). + // memoryBarrier() does everything, so no need to test anything else. + statement("subgroupMemoryBarrier();"); + } + else if ((semantics & all_barriers) == all_barriers) + { + // Short-hand instead of emitting 3 barriers. + statement("subgroupMemoryBarrier();"); + } + else + { + // Pick out individual barriers. + if (semantics & MemorySemanticsWorkgroupMemoryMask) + statement("subgroupMemoryBarrierShared();"); + if (semantics & MemorySemanticsUniformMemoryMask) + statement("subgroupMemoryBarrierBuffer();"); + if (semantics & MemorySemanticsImageMemoryMask) + statement("subgroupMemoryBarrierImage();"); + } + } + else + { + const uint32_t all_barriers = MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask | + MemorySemanticsImageMemoryMask | MemorySemanticsAtomicCounterMemoryMask; + + if (semantics & (MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask)) + { + // These are not relevant for GLSL, but assume it means memoryBarrier(). + // memoryBarrier() does everything, so no need to test anything else. + statement("memoryBarrier();"); + } + else if ((semantics & all_barriers) == all_barriers) + { + // Short-hand instead of emitting 4 barriers. + statement("memoryBarrier();"); + } + else + { + // Pick out individual barriers. 
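// Illustrative aside: e.g. at device scope, semantics == (UniformMemory | ImageMemory) emits:
//   memoryBarrierBuffer();
//   memoryBarrierImage();
// while only the full mask collapses to the single memoryBarrier() shorthand above.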
+ if (semantics & MemorySemanticsWorkgroupMemoryMask) + statement("memoryBarrierShared();"); + if (semantics & MemorySemanticsUniformMemoryMask) + statement("memoryBarrierBuffer();"); + if (semantics & MemorySemanticsImageMemoryMask) + statement("memoryBarrierImage();"); + if (semantics & MemorySemanticsAtomicCounterMemoryMask) + statement("memoryBarrierAtomicCounter();"); + } + } + + if (opcode == OpControlBarrier) + { + if (execution_scope == ScopeSubgroup) + statement("subgroupBarrier();"); + else + statement("barrier();"); + } + break; + } + + case OpExtInst: + { + uint32_t extension_set = ops[2]; + + if (get(extension_set).ext == SPIRExtension::GLSL) + { + emit_glsl_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_shader_ballot) + { + emit_spv_amd_shader_ballot_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter) + { + emit_spv_amd_shader_explicit_vertex_parameter_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_shader_trinary_minmax) + { + emit_spv_amd_shader_trinary_minmax_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else if (get(extension_set).ext == SPIRExtension::SPV_AMD_gcn_shader) + { + emit_spv_amd_gcn_shader_op(ops[0], ops[1], ops[3], &ops[4], length - 4); + } + else + { + statement("// unimplemented ext op ", instruction.op); + break; + } + + break; + } + + // Legacy sub-group stuff ... + case OpSubgroupBallotKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + string expr; + expr = join("uvec4(unpackUint2x32(ballotARB(" + to_expression(ops[2]) + ")), 0u, 0u)"); + emit_op(result_type, id, expr, should_forward(ops[2])); + + require_extension_internal("GL_ARB_shader_ballot"); + inherit_expression_dependencies(id, ops[2]); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupFirstInvocationKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "readFirstInvocationARB"); + + require_extension_internal("GL_ARB_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupReadInvocationKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_binary_func_op(result_type, id, ops[2], ops[3], "readInvocationARB"); + + require_extension_internal("GL_ARB_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupAllKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "allInvocationsARB"); + + require_extension_internal("GL_ARB_shader_group_vote"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupAnyKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "anyInvocationARB"); + + require_extension_internal("GL_ARB_shader_group_vote"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpSubgroupAllEqualKHR: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[2], "allInvocationsEqualARB"); + + require_extension_internal("GL_ARB_shader_group_vote"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpGroupIAddNonUniformAMD: + case OpGroupFAddNonUniformAMD: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + 
emit_unary_func_op(result_type, id, ops[4], "addInvocationsNonUniformAMD"); + + require_extension_internal("GL_AMD_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpGroupFMinNonUniformAMD: + case OpGroupUMinNonUniformAMD: + case OpGroupSMinNonUniformAMD: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[4], "minInvocationsNonUniformAMD"); + + require_extension_internal("GL_AMD_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpGroupFMaxNonUniformAMD: + case OpGroupUMaxNonUniformAMD: + case OpGroupSMaxNonUniformAMD: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + emit_unary_func_op(result_type, id, ops[4], "maxInvocationsNonUniformAMD"); + + require_extension_internal("GL_AMD_shader_ballot"); + register_control_dependent_expression(ops[1]); + break; + } + + case OpFragmentMaskFetchAMD: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (type.image.dim == spv::DimSubpassData) + { + emit_unary_func_op(result_type, id, ops[2], "fragmentMaskFetchAMD"); + } + else + { + emit_binary_func_op(result_type, id, ops[2], ops[3], "fragmentMaskFetchAMD"); + } + + require_extension_internal("GL_AMD_shader_fragment_mask"); + break; + } + + case OpFragmentFetchAMD: + { + auto &type = expression_type(ops[2]); + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + if (type.image.dim == spv::DimSubpassData) + { + emit_binary_func_op(result_type, id, ops[2], ops[4], "fragmentFetchAMD"); + } + else + { + emit_trinary_func_op(result_type, id, ops[2], ops[3], ops[4], "fragmentFetchAMD"); + } + + require_extension_internal("GL_AMD_shader_fragment_mask"); + break; + } + + // Vulkan 1.1 sub-group stuff ... 
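// Aside (assumption, emit_subgroup_op is not shown in this hunk): these opcodes all funnel into
// emit_subgroup_op(), which under Vulkan semantics is expected to emit GL_KHR_shader_subgroup_*
// builtins such as subgroupBallot(cond) or subgroupBroadcastFirst(value), in the spirit of the
// ARB/AMD fallbacks handled above.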
+ case OpGroupNonUniformElect: + case OpGroupNonUniformBroadcast: + case OpGroupNonUniformBroadcastFirst: + case OpGroupNonUniformBallot: + case OpGroupNonUniformInverseBallot: + case OpGroupNonUniformBallotBitExtract: + case OpGroupNonUniformBallotBitCount: + case OpGroupNonUniformBallotFindLSB: + case OpGroupNonUniformBallotFindMSB: + case OpGroupNonUniformShuffle: + case OpGroupNonUniformShuffleXor: + case OpGroupNonUniformShuffleUp: + case OpGroupNonUniformShuffleDown: + case OpGroupNonUniformAll: + case OpGroupNonUniformAny: + case OpGroupNonUniformAllEqual: + case OpGroupNonUniformFAdd: + case OpGroupNonUniformIAdd: + case OpGroupNonUniformFMul: + case OpGroupNonUniformIMul: + case OpGroupNonUniformFMin: + case OpGroupNonUniformFMax: + case OpGroupNonUniformSMin: + case OpGroupNonUniformSMax: + case OpGroupNonUniformUMin: + case OpGroupNonUniformUMax: + case OpGroupNonUniformBitwiseAnd: + case OpGroupNonUniformBitwiseOr: + case OpGroupNonUniformBitwiseXor: + case OpGroupNonUniformQuadSwap: + case OpGroupNonUniformQuadBroadcast: + emit_subgroup_op(instruction); + break; + + case OpFUnordEqual: + GLSL_BFOP(unsupported_FUnordEqual); + break; + + case OpFUnordNotEqual: + GLSL_BFOP(unsupported_FUnordNotEqual); + break; + + case OpFUnordLessThan: + GLSL_BFOP(unsupported_FUnordLessThan); + break; + + case OpFUnordGreaterThan: + GLSL_BFOP(unsupported_FUnordGreaterThan); + break; + + case OpFUnordLessThanEqual: + GLSL_BFOP(unsupported_FUnordLessThanEqual); + break; + + case OpFUnordGreaterThanEqual: + GLSL_BFOP(unsupported_FUnordGreaterThanEqual); + break; + + default: + statement("// unimplemented op ", instruction.op); + break; + } +} + +// Appends function arguments, mapped from global variables, beyond the specified arg index. +// This is used when a function call uses fewer arguments than the function defines. +// This situation may occur if the function signature has been dynamically modified to +// extract global variables referenced from within the function, and convert them to +// function arguments. This is necessary for shader languages that do not support global +// access to shader input content from within a function (eg. Metal). Each additional +// function args uses the name of the global variable. Function nesting will modify the +// functions and function calls all the way up the nesting chain. +void CompilerGLSL::append_global_func_args(const SPIRFunction &func, uint32_t index, vector &arglist) +{ + auto &args = func.arguments; + uint32_t arg_cnt = uint32_t(args.size()); + for (uint32_t arg_idx = index; arg_idx < arg_cnt; arg_idx++) + { + auto &arg = args[arg_idx]; + assert(arg.alias_global_variable); + + // If the underlying variable needs to be declared + // (ie. a local variable with deferred declaration), do so now. 
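// Illustrative aside (hypothetical names): if a callee was rewritten from  foo()  to
// foo(inout vec4 vColor)  because it touches a global, a call site that was emitted with only its
// original arguments gets "vColor" appended here, so the emitted call becomes  foo(vColor);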
+ uint32_t var_id = get(arg.id).basevariable; + if (var_id) + flush_variable_declaration(var_id); + + arglist.push_back(to_func_call_arg(arg.id)); + } +} + +string CompilerGLSL::to_member_name(const SPIRType &type, uint32_t index) +{ + auto &memb = ir.meta[type.self].members; + if (index < memb.size() && !memb[index].alias.empty()) + return memb[index].alias; + else + return join("_m", index); +} + +string CompilerGLSL::to_member_reference(uint32_t, const SPIRType &type, uint32_t index, bool) +{ + return join(".", to_member_name(type, index)); +} + +void CompilerGLSL::add_member_name(SPIRType &type, uint32_t index) +{ + auto &memb = ir.meta[type.self].members; + if (index < memb.size() && !memb[index].alias.empty()) + { + auto &name = memb[index].alias; + if (name.empty()) + return; + + // Reserved for temporaries. + if (name[0] == '_' && name.size() >= 2 && isdigit(name[1])) + { + name.clear(); + return; + } + + update_name_cache(type.member_name_cache, name); + } +} + +// Checks whether the ID is a row_major matrix that requires conversion before use +bool CompilerGLSL::is_non_native_row_major_matrix(uint32_t id) +{ + // Natively supported row-major matrices do not need to be converted. + // Legacy targets do not support row major. + if (backend.native_row_major_matrix && !is_legacy()) + return false; + + // Non-matrix or column-major matrix types do not need to be converted. + if (!ir.meta[id].decoration.decoration_flags.get(DecorationRowMajor)) + return false; + + // Only square row-major matrices can be converted at this time. + // Converting non-square matrices will require defining custom GLSL function that + // swaps matrix elements while retaining the original dimensional form of the matrix. + const auto type = expression_type(id); + if (type.columns != type.vecsize) + SPIRV_CROSS_THROW("Row-major matrices must be square on this platform."); + + return true; +} + +// Checks whether the member is a row_major matrix that requires conversion before use +bool CompilerGLSL::member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) +{ + // Natively supported row-major matrices do not need to be converted. + if (backend.native_row_major_matrix && !is_legacy()) + return false; + + // Non-matrix or column-major matrix types do not need to be converted. + if (!combined_decoration_for_member(type, index).get(DecorationRowMajor)) + return false; + + // Only square row-major matrices can be converted at this time. + // Converting non-square matrices will require defining custom GLSL function that + // swaps matrix elements while retaining the original dimensional form of the matrix. + const auto mbr_type = get(type.member_types[index]); + if (mbr_type.columns != mbr_type.vecsize) + SPIRV_CROSS_THROW("Row-major matrices must be square on this platform."); + + return true; +} + +// Checks whether the member is in packed data type, that might need to be unpacked. +// GLSL does not define packed data types, but certain subclasses do. +bool CompilerGLSL::member_is_packed_type(const SPIRType &type, uint32_t index) const +{ + return has_member_decoration(type.self, index, DecorationCPacked); +} + +// Wraps the expression string in a function call that converts the +// row_major matrix result of the expression to a column_major matrix. +// Base implementation uses the standard library transpose() function. +// Subclasses may override to use a different function. 
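// Illustrative aside (hypothetical name): e.g. an input expression "(UBO.worldMatrix)" is returned
// as "transpose(UBO.worldMatrix)" -- strip_enclosed_expression() drops the redundant outer
// parentheses before wrapping the transpose() call.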
+string CompilerGLSL::convert_row_major_matrix(string exp_str, const SPIRType & /*exp_type*/, bool /*is_packed*/) +{ + strip_enclosed_expression(exp_str); + return join("transpose(", exp_str, ")"); +} + +string CompilerGLSL::variable_decl(const SPIRType &type, const string &name, uint32_t id) +{ + string type_name = type_to_glsl(type, id); + remap_variable_type_name(type, name, type_name); + return join(type_name, " ", name, type_to_array_glsl(type)); +} + +// Emit a structure member. Subclasses may override to modify output, +// or to dynamically add a padding member if needed. +void CompilerGLSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier, uint32_t) +{ + auto &membertype = get(member_type_id); + + Bitset memberflags; + auto &memb = ir.meta[type.self].members; + if (index < memb.size()) + memberflags = memb[index].decoration_flags; + + string qualifiers; + bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (is_block) + qualifiers = to_interpolation_qualifiers(memberflags); + + statement(layout_for_member(type, index), qualifiers, qualifier, + flags_to_precision_qualifiers_glsl(membertype, memberflags), + variable_decl(membertype, to_member_name(type, index)), ";"); +} + +const char *CompilerGLSL::flags_to_precision_qualifiers_glsl(const SPIRType &type, const Bitset &flags) +{ + // Structs do not have precision qualifiers, neither do doubles (desktop only anyways, so no mediump/highp). + if (type.basetype != SPIRType::Float && type.basetype != SPIRType::Int && type.basetype != SPIRType::UInt && + type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage && + type.basetype != SPIRType::Sampler) + return ""; + + if (options.es) + { + auto &execution = get_entry_point(); + + if (flags.get(DecorationRelaxedPrecision)) + { + bool implied_fmediump = type.basetype == SPIRType::Float && + options.fragment.default_float_precision == Options::Mediump && + execution.model == ExecutionModelFragment; + + bool implied_imediump = (type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt) && + options.fragment.default_int_precision == Options::Mediump && + execution.model == ExecutionModelFragment; + + return implied_fmediump || implied_imediump ? "" : "mediump "; + } + else + { + bool implied_fhighp = + type.basetype == SPIRType::Float && ((options.fragment.default_float_precision == Options::Highp && + execution.model == ExecutionModelFragment) || + (execution.model != ExecutionModelFragment)); + + bool implied_ihighp = (type.basetype == SPIRType::Int || type.basetype == SPIRType::UInt) && + ((options.fragment.default_int_precision == Options::Highp && + execution.model == ExecutionModelFragment) || + (execution.model != ExecutionModelFragment)); + + return implied_fhighp || implied_ihighp ? "" : "highp "; + } + } + else if (backend.allow_precision_qualifiers) + { + // Vulkan GLSL supports precision qualifiers, even in desktop profiles, which is convenient. + // The default is highp however, so only emit mediump in the rare case that a shader has these. 
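// Illustrative aside (hypothetical name): e.g. a RelaxedPrecision float temporary is declared as
//   mediump float _42 = ...;
// under Vulkan GLSL, while everything else keeps the implicit highp default.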
+ if (flags.get(DecorationRelaxedPrecision)) + return "mediump "; + else + return ""; + } + else + return ""; +} + +const char *CompilerGLSL::to_precision_qualifiers_glsl(uint32_t id) +{ + return flags_to_precision_qualifiers_glsl(expression_type(id), ir.meta[id].decoration.decoration_flags); +} + +string CompilerGLSL::to_qualifiers_glsl(uint32_t id) +{ + auto &flags = ir.meta[id].decoration.decoration_flags; + string res; + + auto *var = maybe_get(id); + + if (var && var->storage == StorageClassWorkgroup && !backend.shared_is_implied) + res += "shared "; + + res += to_interpolation_qualifiers(flags); + if (var) + res += to_storage_qualifiers_glsl(*var); + + auto &type = expression_type(id); + if (type.image.dim != DimSubpassData && type.image.sampled == 2) + { + if (flags.get(DecorationCoherent)) + res += "coherent "; + if (flags.get(DecorationRestrict)) + res += "restrict "; + if (flags.get(DecorationNonWritable)) + res += "readonly "; + if (flags.get(DecorationNonReadable)) + res += "writeonly "; + } + + res += to_precision_qualifiers_glsl(id); + + return res; +} + +string CompilerGLSL::argument_decl(const SPIRFunction::Parameter &arg) +{ + // glslangValidator seems to make all arguments pointer no matter what which is rather bizarre ... + auto &type = expression_type(arg.id); + const char *direction = ""; + + if (type.pointer) + { + if (arg.write_count && arg.read_count) + direction = "inout "; + else if (arg.write_count) + direction = "out "; + } + + return join(direction, to_qualifiers_glsl(arg.id), variable_decl(type, to_name(arg.id), arg.id)); +} + +string CompilerGLSL::to_initializer_expression(const SPIRVariable &var) +{ + return to_expression(var.initializer); +} + +string CompilerGLSL::variable_decl(const SPIRVariable &variable) +{ + // Ignore the pointer type since GLSL doesn't have pointers. 
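// Illustrative aside (hypothetical names): e.g. a loop counter with a static_expression is
// declared as  int i = 0;  and an initialized variable as  vec4 color = vec4(0.0);  via
// to_initializer_expression() below.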
+ auto &type = get_variable_data_type(variable); + + if (type.pointer_depth > 1) + SPIRV_CROSS_THROW("Cannot declare pointer-to-pointer types."); + + auto res = join(to_qualifiers_glsl(variable.self), variable_decl(type, to_name(variable.self), variable.self)); + + if (variable.loop_variable && variable.static_expression) + { + uint32_t expr = variable.static_expression; + if (ir.ids[expr].get_type() != TypeUndef) + res += join(" = ", to_expression(variable.static_expression)); + } + else if (variable.initializer) + { + uint32_t expr = variable.initializer; + if (ir.ids[expr].get_type() != TypeUndef) + res += join(" = ", to_initializer_expression(variable)); + } + return res; +} + +const char *CompilerGLSL::to_pls_qualifiers_glsl(const SPIRVariable &variable) +{ + auto &flags = ir.meta[variable.self].decoration.decoration_flags; + if (flags.get(DecorationRelaxedPrecision)) + return "mediump "; + else + return "highp "; +} + +string CompilerGLSL::pls_decl(const PlsRemap &var) +{ + auto &variable = get(var.id); + + SPIRType type; + type.vecsize = pls_format_to_components(var.format); + type.basetype = pls_format_to_basetype(var.format); + + return join(to_pls_layout(var.format), to_pls_qualifiers_glsl(variable), type_to_glsl(type), " ", + to_name(variable.self)); +} + +uint32_t CompilerGLSL::to_array_size_literal(const SPIRType &type) const +{ + return to_array_size_literal(type, uint32_t(type.array.size() - 1)); +} + +uint32_t CompilerGLSL::to_array_size_literal(const SPIRType &type, uint32_t index) const +{ + assert(type.array.size() == type.array_size_literal.size()); + + if (type.array_size_literal[index]) + { + return type.array[index]; + } + else + { + // Use the default spec constant value. + // This is the best we can do. + uint32_t array_size_id = type.array[index]; + + // Explicitly check for this case. The error message you would get (bad cast) makes no sense otherwise. + if (ir.ids[array_size_id].get_type() == TypeConstantOp) + SPIRV_CROSS_THROW("An array size was found to be an OpSpecConstantOp. This is not supported since " + "SPIRV-Cross cannot deduce the actual size here."); + + uint32_t array_size = get(array_size_id).scalar(); + return array_size; + } +} + +string CompilerGLSL::to_array_size(const SPIRType &type, uint32_t index) +{ + assert(type.array.size() == type.array_size_literal.size()); + + // Tessellation control and evaluation shaders must have either gl_MaxPatchVertices or unsized arrays for input arrays. + // Opt for unsized as it's the more "correct" variant to use. + if (type.storage == StorageClassInput && (get_entry_point().model == ExecutionModelTessellationControl || + get_entry_point().model == ExecutionModelTessellationEvaluation)) + return ""; + + auto &size = type.array[index]; + if (!type.array_size_literal[index]) + return to_expression(size); + else if (size) + return convert_to_string(size); + else if (!backend.flexible_member_array_supported) + { + // For runtime-sized arrays, we can work around + // lack of standard support for this by simply having + // a single element array. + // + // Runtime length arrays must always be the last element + // in an interface block. 
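// Illustrative aside (hypothetical name): e.g. a runtime-sized SSBO member "float data[];" is
// emitted as  float data[1]  on targets without flexible array member support.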
+ return "1"; + } + else + return ""; +} + +string CompilerGLSL::type_to_array_glsl(const SPIRType &type) +{ + if (type.array.empty()) + return ""; + + if (options.flatten_multidimensional_arrays) + { + string res; + res += "["; + for (auto i = uint32_t(type.array.size()); i; i--) + { + res += enclose_expression(to_array_size(type, i - 1)); + if (i > 1) + res += " * "; + } + res += "]"; + return res; + } + else + { + if (type.array.size() > 1) + { + if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_arrays_of_arrays"); + else if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Arrays of arrays not supported before ESSL version 310. " + "Try using --flatten-multidimensional-arrays or set " + "options.flatten_multidimensional_arrays to true."); + } + + string res; + for (auto i = uint32_t(type.array.size()); i; i--) + { + res += "["; + res += to_array_size(type, i - 1); + res += "]"; + } + return res; + } +} + +string CompilerGLSL::image_type_glsl(const SPIRType &type, uint32_t id) +{ + auto &imagetype = get(type.image.type); + string res; + + switch (imagetype.basetype) + { + case SPIRType::Int: + res = "i"; + break; + case SPIRType::UInt: + res = "u"; + break; + default: + break; + } + + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData && options.vulkan_semantics) + return res + "subpassInput" + (type.image.ms ? "MS" : ""); + + // If we're emulating subpassInput with samplers, force sampler2D + // so we don't have to specify format. + if (type.basetype == SPIRType::Image && type.image.dim != DimSubpassData) + { + // Sampler buffers are always declared as samplerBuffer even though they might be separate images in the SPIR-V. + if (type.image.dim == DimBuffer && type.image.sampled == 1) + res += "sampler"; + else + res += type.image.sampled == 2 ? "image" : "texture"; + } + else + res += "sampler"; + + switch (type.image.dim) + { + case Dim1D: + res += "1D"; + break; + case Dim2D: + res += "2D"; + break; + case Dim3D: + res += "3D"; + break; + case DimCube: + res += "Cube"; + break; + case DimRect: + if (options.es) + SPIRV_CROSS_THROW("Rectangle textures are not supported on OpenGL ES."); + + if (is_legacy_desktop()) + require_extension_internal("GL_ARB_texture_rectangle"); + + res += "2DRect"; + break; + + case DimBuffer: + if (options.es && options.version < 320) + require_extension_internal("GL_OES_texture_buffer"); + else if (!options.es && options.version < 300) + require_extension_internal("GL_EXT_texture_buffer_object"); + res += "Buffer"; + break; + + case DimSubpassData: + res += "2D"; + break; + default: + SPIRV_CROSS_THROW("Only 1D, 2D, 2DRect, 3D, Buffer, InputTarget and Cube textures supported."); + } + + if (type.image.ms) + res += "MS"; + if (type.image.arrayed) + { + if (is_legacy_desktop()) + require_extension_internal("GL_EXT_texture_array"); + res += "Array"; + } + + // "Shadow" state in GLSL only exists for samplers and combined image samplers. + if (((type.basetype == SPIRType::SampledImage) || (type.basetype == SPIRType::Sampler)) && + image_is_comparison(type, id)) + { + res += "Shadow"; + } + + return res; +} + +string CompilerGLSL::type_to_glsl_constructor(const SPIRType &type) +{ + if (type.array.size() > 1) + { + if (options.flatten_multidimensional_arrays) + SPIRV_CROSS_THROW("Cannot flatten constructors of multidimensional array constructors, e.g. 
float[][]()."); + else if (!options.es && options.version < 430) + require_extension_internal("GL_ARB_arrays_of_arrays"); + else if (options.es && options.version < 310) + SPIRV_CROSS_THROW("Arrays of arrays not supported before ESSL version 310."); + } + + auto e = type_to_glsl(type); + for (uint32_t i = 0; i < type.array.size(); i++) + e += "[]"; + return e; +} + +// The optional id parameter indicates the object whose type we are trying +// to find the description for. It is optional. Most type descriptions do not +// depend on a specific object's use of that type. +string CompilerGLSL::type_to_glsl(const SPIRType &type, uint32_t id) +{ + // Ignore the pointer type since GLSL doesn't have pointers. + + switch (type.basetype) + { + case SPIRType::Struct: + // Need OpName lookup here to get a "sensible" name for a struct. + if (backend.explicit_struct_type) + return join("struct ", to_name(type.self)); + else + return to_name(type.self); + + case SPIRType::Image: + case SPIRType::SampledImage: + return image_type_glsl(type, id); + + case SPIRType::Sampler: + // The depth field is set by calling code based on the variable ID of the sampler, effectively reintroducing + // this distinction into the type system. + return comparison_ids.count(id) ? "samplerShadow" : "sampler"; + + case SPIRType::Void: + return "void"; + + default: + break; + } + + if (type.basetype == SPIRType::UInt && is_legacy()) + SPIRV_CROSS_THROW("Unsigned integers are not supported on legacy targets."); + + if (type.vecsize == 1 && type.columns == 1) // Scalar builtin + { + switch (type.basetype) + { + case SPIRType::Boolean: + return "bool"; + case SPIRType::SByte: + return backend.basic_int8_type; + case SPIRType::UByte: + return backend.basic_uint8_type; + case SPIRType::Short: + return backend.basic_int16_type; + case SPIRType::UShort: + return backend.basic_uint16_type; + case SPIRType::Int: + return backend.basic_int_type; + case SPIRType::UInt: + return backend.basic_uint_type; + case SPIRType::AtomicCounter: + return "atomic_uint"; + case SPIRType::Half: + return "float16_t"; + case SPIRType::Float: + return "float"; + case SPIRType::Double: + return "double"; + case SPIRType::Int64: + return "int64_t"; + case SPIRType::UInt64: + return "uint64_t"; + default: + return "???"; + } + } + else if (type.vecsize > 1 && type.columns == 1) // Vector builtin + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bvec", type.vecsize); + case SPIRType::SByte: + return join("i8vec", type.vecsize); + case SPIRType::UByte: + return join("u8vec", type.vecsize); + case SPIRType::Short: + return join("i16vec", type.vecsize); + case SPIRType::UShort: + return join("u16vec", type.vecsize); + case SPIRType::Int: + return join("ivec", type.vecsize); + case SPIRType::UInt: + return join("uvec", type.vecsize); + case SPIRType::Half: + return join("f16vec", type.vecsize); + case SPIRType::Float: + return join("vec", type.vecsize); + case SPIRType::Double: + return join("dvec", type.vecsize); + case SPIRType::Int64: + return join("i64vec", type.vecsize); + case SPIRType::UInt64: + return join("u64vec", type.vecsize); + default: + return "???"; + } + } + else if (type.vecsize == type.columns) // Simple Matrix builtin + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bmat", type.vecsize); + case SPIRType::Int: + return join("imat", type.vecsize); + case SPIRType::UInt: + return join("umat", type.vecsize); + case SPIRType::Half: + return join("f16mat", type.vecsize); + case SPIRType::Float: + 
return join("mat", type.vecsize); + case SPIRType::Double: + return join("dmat", type.vecsize); + // Matrix types not supported for int64/uint64. + default: + return "???"; + } + } + else + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bmat", type.columns, "x", type.vecsize); + case SPIRType::Int: + return join("imat", type.columns, "x", type.vecsize); + case SPIRType::UInt: + return join("umat", type.columns, "x", type.vecsize); + case SPIRType::Half: + return join("f16mat", type.columns, "x", type.vecsize); + case SPIRType::Float: + return join("mat", type.columns, "x", type.vecsize); + case SPIRType::Double: + return join("dmat", type.columns, "x", type.vecsize); + // Matrix types not supported for int64/uint64. + default: + return "???"; + } + } +} + +void CompilerGLSL::add_variable(unordered_set &variables_primary, + const unordered_set &variables_secondary, string &name) +{ + if (name.empty()) + return; + + // Reserved for temporaries. + if (name[0] == '_' && name.size() >= 2 && isdigit(name[1])) + { + name.clear(); + return; + } + + // Avoid double underscores. + name = sanitize_underscores(name); + + update_name_cache(variables_primary, variables_secondary, name); +} + +void CompilerGLSL::add_local_variable_name(uint32_t id) +{ + add_variable(local_variable_names, block_names, ir.meta[id].decoration.alias); +} + +void CompilerGLSL::add_resource_name(uint32_t id) +{ + add_variable(resource_names, block_names, ir.meta[id].decoration.alias); +} + +void CompilerGLSL::add_header_line(const std::string &line) +{ + header_lines.push_back(line); +} + +bool CompilerGLSL::has_extension(const std::string &ext) const +{ + auto itr = find(begin(forced_extensions), end(forced_extensions), ext); + return itr != end(forced_extensions); +} + +void CompilerGLSL::require_extension(const std::string &ext) +{ + if (!has_extension(ext)) + forced_extensions.push_back(ext); +} + +void CompilerGLSL::require_extension_internal(const string &ext) +{ + if (backend.supports_extensions && !has_extension(ext)) + { + forced_extensions.push_back(ext); + force_recompile = true; + } +} + +void CompilerGLSL::flatten_buffer_block(uint32_t id) +{ + auto &var = get(id); + auto &type = get(var.basetype); + auto name = to_name(type.self, false); + auto &flags = ir.meta.at(type.self).decoration.decoration_flags; + + if (!type.array.empty()) + SPIRV_CROSS_THROW(name + " is an array of UBOs."); + if (type.basetype != SPIRType::Struct) + SPIRV_CROSS_THROW(name + " is not a struct."); + if (!flags.get(DecorationBlock)) + SPIRV_CROSS_THROW(name + " is not a block."); + if (type.member_types.empty()) + SPIRV_CROSS_THROW(name + " is an empty struct."); + + flattened_buffer_blocks.insert(id); +} + +bool CompilerGLSL::check_atomic_image(uint32_t id) +{ + auto &type = expression_type(id); + if (type.storage == StorageClassImage) + { + if (options.es && options.version < 320) + require_extension_internal("GL_OES_shader_image_atomic"); + + auto *var = maybe_get_backing_variable(id); + if (var) + { + auto &flags = ir.meta.at(var->self).decoration.decoration_flags; + if (flags.get(DecorationNonWritable) || flags.get(DecorationNonReadable)) + { + flags.clear(DecorationNonWritable); + flags.clear(DecorationNonReadable); + force_recompile = true; + } + } + return true; + } + else + return false; +} + +void CompilerGLSL::add_function_overload(const SPIRFunction &func) +{ + Hasher hasher; + for (auto &arg : func.arguments) + { + // Parameters can vary with pointer type or not, + // but that will not change the 
signature in GLSL/HLSL, + // so strip the pointer type before hashing. + uint32_t type_id = get_pointee_type_id(arg.type); + auto &type = get(type_id); + + if (!combined_image_samplers.empty()) + { + // If we have combined image samplers, we cannot really trust the image and sampler arguments + // we pass down to callees, because they may be shuffled around. + // Ignore these arguments, to make sure that functions need to differ in some other way + // to be considered different overloads. + if (type.basetype == SPIRType::SampledImage || + (type.basetype == SPIRType::Image && type.image.sampled == 1) || type.basetype == SPIRType::Sampler) + { + continue; + } + } + + hasher.u32(type_id); + } + uint64_t types_hash = hasher.get(); + + auto function_name = to_name(func.self); + auto itr = function_overloads.find(function_name); + if (itr != end(function_overloads)) + { + // There exists a function with this name already. + auto &overloads = itr->second; + if (overloads.count(types_hash) != 0) + { + // Overload conflict, assign a new name. + add_resource_name(func.self); + function_overloads[to_name(func.self)].insert(types_hash); + } + else + { + // Can reuse the name. + overloads.insert(types_hash); + } + } + else + { + // First time we see this function name. + add_resource_name(func.self); + function_overloads[to_name(func.self)].insert(types_hash); + } +} + +void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + // Avoid shadow declarations. + local_variable_names = resource_names; + + string decl; + + auto &type = get(func.return_type); + decl += flags_to_precision_qualifiers_glsl(type, return_flags); + decl += type_to_glsl(type); + decl += type_to_array_glsl(type); + decl += " "; + + if (func.self == ir.default_entry_point) + { + decl += "main"; + processing_entry_point = true; + } + else + decl += to_name(func.self); + + decl += "("; + vector arglist; + for (auto &arg : func.arguments) + { + // Do not pass in separate images or samplers if we're remapping + // to combined image samplers. + if (skip_argument(arg.id)) + continue; + + // Might change the variable name if it already exists in this function. + // SPIRV OpName doesn't have any semantic effect, so it's valid for an implementation + // to use same name for variables. + // Since we want to make the GLSL debuggable and somewhat sane, use fallback names for variables which are duplicates. + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + // Hold a pointer to the parameter so we can invalidate the readonly field if needed. + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + for (auto &arg : func.shadow_arguments) + { + // Might change the variable name if it already exists in this function. + // SPIRV OpName doesn't have any semantic effect, so it's valid for an implementation + // to use same name for variables. + // Since we want to make the GLSL debuggable and somewhat sane, use fallback names for variables which are duplicates. + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + // Hold a pointer to the parameter so we can invalidate the readonly field if needed. + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + decl += merge(arglist); + decl += ")"; + statement(decl); +} + +void CompilerGLSL::emit_function(SPIRFunction &func, const Bitset &return_flags) +{ + // Avoid potential cycles. 
+ if (func.active) + return; + func.active = true; + + // If we depend on a function, emit that function before we emit our own function. + for (auto block : func.blocks) + { + auto &b = get(block); + for (auto &i : b.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + if (op == OpFunctionCall) + { + // Recursively emit functions which are called. + uint32_t id = ops[2]; + emit_function(get(id), ir.meta[ops[1]].decoration.decoration_flags); + } + } + } + + emit_function_prototype(func, return_flags); + begin_scope(); + + if (func.self == ir.default_entry_point) + emit_entry_point_declarations(); + + current_function = &func; + auto &entry_block = get(func.entry_block); + + for (auto &v : func.local_variables) + { + auto &var = get(v); + if (var.storage == StorageClassWorkgroup) + { + // Special variable type which cannot have initializer, + // need to be declared as standalone variables. + // Comes from MSL which can push global variables as local variables in main function. + add_local_variable_name(var.self); + statement(variable_decl(var), ";"); + var.deferred_declaration = false; + } + else if (var.storage == StorageClassPrivate) + { + // These variables will not have had their CFG usage analyzed, so move it to the entry block. + // Comes from MSL which can push global variables as local variables in main function. + // We could just declare them right now, but we would miss out on an important initialization case which is + // LUT declaration in MSL. + // If we don't declare the variable when it is assigned we're forced to go through a helper function + // which copies elements one by one. + add_local_variable_name(var.self); + auto &dominated = entry_block.dominated_variables; + if (find(begin(dominated), end(dominated), var.self) == end(dominated)) + entry_block.dominated_variables.push_back(var.self); + var.deferred_declaration = true; + } + else if (var.storage == StorageClassFunction && var.remapped_variable && var.static_expression) + { + // No need to declare this variable, it has a static expression. + var.deferred_declaration = false; + } + else if (expression_is_lvalue(v)) + { + add_local_variable_name(var.self); + + if (var.initializer) + statement(variable_decl_function_local(var), ";"); + else + { + // Don't declare variable until first use to declutter the GLSL output quite a lot. + // If we don't touch the variable before first branch, + // declare it then since we need variable declaration to be in top scope. + var.deferred_declaration = true; + } + } + else + { + // HACK: SPIR-V in older glslang output likes to use samplers and images as local variables, but GLSL does not allow this. + // For these types (non-lvalue), we enforce forwarding through a shadowed variable. + // This means that when we OpStore to these variables, we just write in the expression ID directly. + // This breaks any kind of branching, since the variable must be statically assigned. + // Branching on samplers and images would be pretty much impossible to fake in GLSL. + var.statically_assigned = true; + } + + var.loop_variable_enable = false; + + // Loop variables are never declared outside their for-loop, so block any implicit declaration. 
+ if (var.loop_variable) + var.deferred_declaration = false; + } + + for (auto &line : current_function->fixup_hooks_in) + line(); + + entry_block.loop_dominator = SPIRBlock::NoDominator; + emit_block_chain(entry_block); + + end_scope(); + processing_entry_point = false; + statement(""); +} + +void CompilerGLSL::emit_fixup() +{ + auto &execution = get_entry_point(); + if (execution.model == ExecutionModelVertex) + { + if (options.vertex.fixup_clipspace) + { + const char *suffix = backend.float_literal_suffix ? "f" : ""; + statement("gl_Position.z = 2.0", suffix, " * gl_Position.z - gl_Position.w;"); + } + + if (options.vertex.flip_vert_y) + statement("gl_Position.y = -gl_Position.y;"); + } +} + +bool CompilerGLSL::flush_phi_required(uint32_t from, uint32_t to) +{ + auto &child = get(to); + for (auto &phi : child.phi_variables) + if (phi.parent == from) + return true; + return false; +} + +void CompilerGLSL::flush_phi(uint32_t from, uint32_t to) +{ + auto &child = get(to); + + unordered_set temporary_phi_variables; + + for (auto itr = begin(child.phi_variables); itr != end(child.phi_variables); ++itr) + { + auto &phi = *itr; + + if (phi.parent == from) + { + auto &var = get(phi.function_variable); + + // A Phi variable might be a loop variable, so flush to static expression. + if (var.loop_variable && !var.loop_variable_enable) + var.static_expression = phi.local_variable; + else + { + flush_variable_declaration(phi.function_variable); + + // Check if we are going to write to a Phi variable that another statement will read from + // as part of another Phi node in our target block. + // For this case, we will need to copy phi.function_variable to a temporary, and use that for future reads. + // This is judged to be extremely rare, so deal with it here using a simple, but suboptimal algorithm. + bool need_saved_temporary = + find_if(itr + 1, end(child.phi_variables), [&](const SPIRBlock::Phi &future_phi) -> bool { + return future_phi.local_variable == phi.function_variable && future_phi.parent == from; + }) != end(child.phi_variables); + + if (need_saved_temporary) + { + // Need to make sure we declare the phi variable with a copy at the right scope. + // We cannot safely declare a temporary here since we might be inside a continue block. + if (!var.allocate_temporary_copy) + { + var.allocate_temporary_copy = true; + force_recompile = true; + } + statement("_", phi.function_variable, "_copy", " = ", to_name(phi.function_variable), ";"); + temporary_phi_variables.insert(phi.function_variable); + } + + // This might be called in continue block, so make sure we + // use this to emit ESSL 1.0 compliant increments/decrements. + auto lhs = to_expression(phi.function_variable); + + string rhs; + if (temporary_phi_variables.count(phi.local_variable)) + rhs = join("_", phi.local_variable, "_copy"); + else + rhs = to_pointer_expression(phi.local_variable); + + if (!optimize_read_modify_write(get(var.basetype), lhs, rhs)) + statement(lhs, " = ", rhs, ";"); + } + + register_write(phi.function_variable); + } + } +} + +void CompilerGLSL::branch_to_continue(uint32_t from, uint32_t to) +{ + auto &to_block = get(to); + if (from == to) + return; + + assert(is_continue(to)); + if (to_block.complex_continue) + { + // Just emit the whole block chain as is. + auto usage_counts = expression_usage_counts; + auto invalid = invalid_expressions; + + emit_block_chain(to_block); + + // Expression usage counts and invalid expressions + // are moot after returning from the continue block. 
+ // Since we emit the same block multiple times, + // we don't want to invalidate ourselves. + expression_usage_counts = usage_counts; + invalid_expressions = invalid; + } + else + { + auto &from_block = get(from); + bool outside_control_flow = false; + uint32_t loop_dominator = 0; + + // FIXME: Refactor this to not use the old loop_dominator tracking. + if (from_block.merge_block) + { + // If we are a loop header, we don't set the loop dominator, + // so just use "self" here. + loop_dominator = from; + } + else if (from_block.loop_dominator != SPIRBlock::NoDominator) + { + loop_dominator = from_block.loop_dominator; + } + + if (loop_dominator != 0) + { + auto &dominator = get(loop_dominator); + + // For non-complex continue blocks, we implicitly branch to the continue block + // by having the continue block be part of the loop header in for (; ; continue-block). + outside_control_flow = block_is_outside_flow_control_from_block(dominator, from_block); + } + + // Some simplification for for-loops. We always end up with a useless continue; + // statement since we branch to a loop block. + // Walk the CFG, if we uncoditionally execute the block calling continue assuming we're in the loop block, + // we can avoid writing out an explicit continue statement. + // Similar optimization to return statements if we know we're outside flow control. + if (!outside_control_flow) + statement("continue;"); + } +} + +void CompilerGLSL::branch(uint32_t from, uint32_t to) +{ + flush_phi(from, to); + flush_control_dependent_expressions(from); + flush_all_active_variables(); + + // This is only a continue if we branch to our loop dominator. + if ((ir.block_meta[to] & ParsedIR::BLOCK_META_LOOP_HEADER_BIT) != 0 && get(from).loop_dominator == to) + { + // This can happen if we had a complex continue block which was emitted. + // Once the continue block tries to branch to the loop header, just emit continue; + // and end the chain here. + statement("continue;"); + } + else if (is_break(to)) + { + // Very dirty workaround. + // Switch constructs are able to break, but they cannot break out of a loop at the same time. + // Only sensible solution is to make a ladder variable, which we declare at the top of the switch block, + // write to the ladder here, and defer the break. + // The loop we're breaking out of must dominate the switch block, or there is no ladder breaking case. + if (current_emitting_switch && is_loop_break(to) && current_emitting_switch->loop_dominator != ~0u && + get(current_emitting_switch->loop_dominator).merge_block == to) + { + if (!current_emitting_switch->need_ladder_break) + { + force_recompile = true; + current_emitting_switch->need_ladder_break = true; + } + + statement("_", current_emitting_switch->self, "_ladder_break = true;"); + } + statement("break;"); + } + else if (is_continue(to) || (from == to)) + { + // For from == to case can happen for a do-while loop which branches into itself. + // We don't mark these cases as continue blocks, but the only possible way to branch into + // ourselves is through means of continue blocks. + branch_to_continue(from, to); + } + else if (!is_conditional(to)) + emit_block_chain(get(to)); + + // It is important that we check for break before continue. + // A block might serve two purposes, a break block for the inner scope, and + // a continue block in the outer scope. + // Inner scope always takes precedence. 
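+ //
+ // A sketch of the ladder emitted by the is_break() path above, assuming
+ // a switch block with ID 5 nested inside a loop (the ID and selector
+ // name are illustrative):
+ //
+ //   bool _5_ladder_break = false;
+ //   switch (sel)
+ //   {
+ //   case 0:
+ //       _5_ladder_break = true;
+ //       break;
+ //   }
+ //   if (_5_ladder_break)
+ //   {
+ //       break;
+ //   }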
+} + +void CompilerGLSL::branch(uint32_t from, uint32_t cond, uint32_t true_block, uint32_t false_block) +{ + // If we branch directly to a selection merge target, we don't really need a code path. + bool true_sub = !is_conditional(true_block); + bool false_sub = !is_conditional(false_block); + + if (true_sub) + { + emit_block_hints(get(from)); + statement("if (", to_expression(cond), ")"); + begin_scope(); + branch(from, true_block); + end_scope(); + + if (false_sub || is_continue(false_block) || is_break(false_block)) + { + statement("else"); + begin_scope(); + branch(from, false_block); + end_scope(); + } + else if (flush_phi_required(from, false_block)) + { + statement("else"); + begin_scope(); + flush_phi(from, false_block); + end_scope(); + } + } + else if (false_sub && !true_sub) + { + // Only need false path, use negative conditional. + emit_block_hints(get(from)); + statement("if (!", to_enclosed_expression(cond), ")"); + begin_scope(); + branch(from, false_block); + end_scope(); + + if (is_continue(true_block) || is_break(true_block)) + { + statement("else"); + begin_scope(); + branch(from, true_block); + end_scope(); + } + else if (flush_phi_required(from, true_block)) + { + statement("else"); + begin_scope(); + flush_phi(from, true_block); + end_scope(); + } + } +} + +void CompilerGLSL::propagate_loop_dominators(const SPIRBlock &block) +{ + // Propagate down the loop dominator block, so that dominated blocks can back trace. + if (block.merge == SPIRBlock::MergeLoop || block.loop_dominator) + { + uint32_t dominator = block.merge == SPIRBlock::MergeLoop ? block.self : block.loop_dominator; + + auto set_dominator = [this](uint32_t self, uint32_t new_dominator) { + auto &dominated_block = this->get(self); + + // If we already have a loop dominator, we're trying to break out to merge targets + // which should not update the loop dominator. + if (!dominated_block.loop_dominator) + dominated_block.loop_dominator = new_dominator; + }; + + // After merging a loop, we inherit the loop dominator always. + if (block.merge_block) + set_dominator(block.merge_block, block.loop_dominator); + + if (block.true_block) + set_dominator(block.true_block, dominator); + if (block.false_block) + set_dominator(block.false_block, dominator); + if (block.next_block) + set_dominator(block.next_block, dominator); + if (block.default_block) + set_dominator(block.default_block, dominator); + + for (auto &c : block.cases) + set_dominator(c.block, dominator); + + // In older glslang output continue_block can be == loop header. + if (block.continue_block && block.continue_block != block.self) + set_dominator(block.continue_block, dominator); + } +} + +// FIXME: This currently cannot handle complex continue blocks +// as in do-while. +// This should be seen as a "trivial" continue block. +string CompilerGLSL::emit_continue_block(uint32_t continue_block) +{ + auto *block = &get(continue_block); + + // While emitting the continue block, declare_temporary will check this + // if we have to emit temporaries. + current_continue_block = block; + + vector statements; + + // Capture all statements into our list. + auto *old = redirect_statement; + redirect_statement = &statements; + + // Stamp out all blocks one after each other. + while ((ir.block_meta[block->self] & ParsedIR::BLOCK_META_LOOP_HEADER_BIT) == 0) + { + propagate_loop_dominators(*block); + // Write out all instructions we have in this block. + emit_block_instructions(*block); + + // For plain branchless for/while continue blocks. 
+ if (block->next_block) + { + flush_phi(continue_block, block->next_block); + block = &get(block->next_block); + } + // For do while blocks. The last block will be a select block. + else if (block->true_block) + { + flush_phi(continue_block, block->true_block); + block = &get(block->true_block); + } + } + + // Restore old pointer. + redirect_statement = old; + + // Somewhat ugly, strip off the last ';' since we use ',' instead. + // Ideally, we should select this behavior in statement(). + for (auto &s : statements) + { + if (!s.empty() && s.back() == ';') + s.erase(s.size() - 1, 1); + } + + current_continue_block = nullptr; + return merge(statements); +} + +void CompilerGLSL::emit_while_loop_initializers(const SPIRBlock &block) +{ + // While loops do not take initializers, so declare all of them outside. + for (auto &loop_var : block.loop_variables) + { + auto &var = get(loop_var); + statement(variable_decl(var), ";"); + } +} + +string CompilerGLSL::emit_for_loop_initializers(const SPIRBlock &block) +{ + if (block.loop_variables.empty()) + return ""; + + bool same_types = for_loop_initializers_are_same_type(block); + // We can only declare for loop initializers if all variables are of same type. + // If we cannot do this, declare individual variables before the loop header. + + // We might have a loop variable candidate which was not assigned to for some reason. + uint32_t missing_initializers = 0; + for (auto &variable : block.loop_variables) + { + uint32_t expr = get(variable).static_expression; + + // Sometimes loop variables are initialized with OpUndef, but we can just declare + // a plain variable without initializer in this case. + if (expr == 0 || ir.ids[expr].get_type() == TypeUndef) + missing_initializers++; + } + + if (block.loop_variables.size() == 1 && missing_initializers == 0) + { + return variable_decl(get(block.loop_variables.front())); + } + else if (!same_types || missing_initializers == uint32_t(block.loop_variables.size())) + { + for (auto &loop_var : block.loop_variables) + statement(variable_decl(get(loop_var)), ";"); + return ""; + } + else + { + // We have a mix of loop variables, either ones with a clear initializer, or ones without. + // Separate the two streams. + string expr; + + for (auto &loop_var : block.loop_variables) + { + uint32_t static_expr = get(loop_var).static_expression; + if (static_expr == 0 || ir.ids[static_expr].get_type() == TypeUndef) + { + statement(variable_decl(get(loop_var)), ";"); + } + else + { + auto &var = get(loop_var); + auto &type = get_variable_data_type(var); + if (expr.empty()) + { + // For loop initializers are of the form (block.next_block); + + // This block may be a dominating block, so make sure we flush undeclared variables before building the for loop header. + flush_undeclared_variables(child); + + uint32_t current_count = statement_count; + + // If we're trying to create a true for loop, + // we need to make sure that all opcodes before branch statement do not actually emit any code. + // We can then take the condition expression and create a for (; cond ; ) { body; } structure instead. + emit_block_instructions(child); + + bool condition_is_temporary = forced_temporaries.find(child.condition) == end(forced_temporaries); + + if (current_count == statement_count && condition_is_temporary) + { + propagate_loop_dominators(child); + + switch (continue_type) + { + case SPIRBlock::ForLoop: + { + // Important that we do this in this order because + // emitting the continue block can invalidate the condition expression. 
+ auto initializer = emit_for_loop_initializers(block); + auto condition = to_expression(child.condition); + auto continue_block = emit_continue_block(block.continue_block); + emit_block_hints(block); + statement("for (", initializer, "; ", condition, "; ", continue_block, ")"); + break; + } + + case SPIRBlock::WhileLoop: + emit_while_loop_initializers(block); + emit_block_hints(block); + statement("while (", to_expression(child.condition), ")"); + break; + + default: + SPIRV_CROSS_THROW("For/while loop detected, but need while/for loop semantics."); + } + + begin_scope(); + branch(child.self, child.true_block); + return true; + } + else + { + block.disable_block_optimization = true; + force_recompile = true; + begin_scope(); // We'll see an end_scope() later. + return false; + } + } + else + return false; +} + +void CompilerGLSL::flush_undeclared_variables(SPIRBlock &block) +{ + // Enforce declaration order for regression testing purposes. + sort(begin(block.dominated_variables), end(block.dominated_variables)); + for (auto &v : block.dominated_variables) + flush_variable_declaration(v); +} + +void CompilerGLSL::emit_hoisted_temporaries(vector> &temporaries) +{ + // If we need to force temporaries for certain IDs due to continue blocks, do it before starting loop header. + // Need to sort these to ensure that reference output is stable. + sort(begin(temporaries), end(temporaries), + [](const pair &a, const pair &b) { return a.second < b.second; }); + + for (auto &tmp : temporaries) + { + add_local_variable_name(tmp.second); + auto &flags = ir.meta[tmp.second].decoration.decoration_flags; + auto &type = get(tmp.first); + statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(tmp.second)), ";"); + + hoisted_temporaries.insert(tmp.second); + forced_temporaries.insert(tmp.second); + + // The temporary might be read from before it's assigned, set up the expression now. + set(tmp.second, to_name(tmp.second), tmp.first, true); + } +} + +void CompilerGLSL::emit_block_chain(SPIRBlock &block) +{ + propagate_loop_dominators(block); + + bool select_branch_to_true_block = false; + bool skip_direct_branch = false; + bool emitted_for_loop_header = false; + bool force_complex_continue_block = false; + + emit_hoisted_temporaries(block.declare_temporary); + + SPIRBlock::ContinueBlockType continue_type = SPIRBlock::ContinueNone; + if (block.continue_block) + continue_type = continue_block_type(get(block.continue_block)); + + // If we have loop variables, stop masking out access to the variable now. + for (auto var : block.loop_variables) + get(var).loop_variable_enable = true; + + // This is the method often used by spirv-opt to implement loops. + // The loop header goes straight into the continue block. + // However, don't attempt this on ESSL 1.0, because if a loop variable is used in a continue block, + // it *MUST* be used in the continue block. This loop method will not work. + if (!is_legacy_es() && block_is_loop_candidate(block, SPIRBlock::MergeToSelectContinueForLoop)) + { + flush_undeclared_variables(block); + if (attempt_emit_loop_header(block, SPIRBlock::MergeToSelectContinueForLoop)) + { + select_branch_to_true_block = true; + emitted_for_loop_header = true; + force_complex_continue_block = true; + } + } + // This is the older loop behavior in glslang which branches to loop body directly from the loop header. 
+ else if (block_is_loop_candidate(block, SPIRBlock::MergeToSelectForLoop)) + { + flush_undeclared_variables(block); + if (attempt_emit_loop_header(block, SPIRBlock::MergeToSelectForLoop)) + { + // The body of while, is actually just the true block, so always branch there unconditionally. + select_branch_to_true_block = true; + emitted_for_loop_header = true; + } + } + // This is the newer loop behavior in glslang which branches from Loop header directly to + // a new block, which in turn has a OpBranchSelection without a selection merge. + else if (block_is_loop_candidate(block, SPIRBlock::MergeToDirectForLoop)) + { + flush_undeclared_variables(block); + if (attempt_emit_loop_header(block, SPIRBlock::MergeToDirectForLoop)) + { + skip_direct_branch = true; + emitted_for_loop_header = true; + } + } + else if (continue_type == SPIRBlock::DoWhileLoop) + { + flush_undeclared_variables(block); + emit_while_loop_initializers(block); + // We have some temporaries where the loop header is the dominator. + // We risk a case where we have code like: + // for (;;) { create-temporary; break; } consume-temporary; + // so force-declare temporaries here. + emit_hoisted_temporaries(block.potential_declare_temporary); + statement("do"); + begin_scope(); + + emit_block_instructions(block); + } + else if (block.merge == SPIRBlock::MergeLoop) + { + flush_undeclared_variables(block); + emit_while_loop_initializers(block); + + // We have a generic loop without any distinguishable pattern like for, while or do while. + get(block.continue_block).complex_continue = true; + continue_type = SPIRBlock::ComplexLoop; + + // We have some temporaries where the loop header is the dominator. + // We risk a case where we have code like: + // for (;;) { create-temporary; break; } consume-temporary; + // so force-declare temporaries here. + emit_hoisted_temporaries(block.potential_declare_temporary); + statement("for (;;)"); + begin_scope(); + + emit_block_instructions(block); + } + else + { + emit_block_instructions(block); + } + + // If we didn't successfully emit a loop header and we had loop variable candidates, we have a problem + // as writes to said loop variables might have been masked out, we need a recompile. + if (!emitted_for_loop_header && !block.loop_variables.empty()) + { + force_recompile = true; + for (auto var : block.loop_variables) + get(var).loop_variable = false; + block.loop_variables.clear(); + } + + flush_undeclared_variables(block); + bool emit_next_block = true; + + // Handle end of block. + switch (block.terminator) + { + case SPIRBlock::Direct: + // True when emitting complex continue block. + if (block.loop_dominator == block.next_block) + { + branch(block.self, block.next_block); + emit_next_block = false; + } + // True if MergeToDirectForLoop succeeded. + else if (skip_direct_branch) + emit_next_block = false; + else if (is_continue(block.next_block) || is_break(block.next_block) || is_conditional(block.next_block)) + { + branch(block.self, block.next_block); + emit_next_block = false; + } + break; + + case SPIRBlock::Select: + // True if MergeToSelectForLoop or MergeToSelectContinueForLoop succeeded. + if (select_branch_to_true_block) + { + if (force_complex_continue_block) + { + assert(block.true_block == block.continue_block); + + // We're going to emit a continue block directly here, so make sure it's marked as complex. 
+ auto &complex_continue = get(block.continue_block).complex_continue; + bool old_complex = complex_continue; + complex_continue = true; + branch(block.self, block.true_block); + complex_continue = old_complex; + } + else + branch(block.self, block.true_block); + } + else + branch(block.self, block.condition, block.true_block, block.false_block); + break; + + case SPIRBlock::MultiSelect: + { + auto &type = expression_type(block.condition); + bool unsigned_case = type.basetype == SPIRType::UInt || type.basetype == SPIRType::UShort; + + if (type.basetype == SPIRType::UInt64 || type.basetype == SPIRType::Int64) + { + // SPIR-V spec suggests this is allowed, but we cannot support it in higher level languages. + SPIRV_CROSS_THROW("Cannot use 64-bit switch selectors."); + } + + const char *label_suffix = ""; + if (type.basetype == SPIRType::UInt && backend.uint32_t_literal_suffix) + label_suffix = "u"; + else if (type.basetype == SPIRType::UShort) + label_suffix = backend.uint16_t_literal_suffix; + else if (type.basetype == SPIRType::Short) + label_suffix = backend.int16_t_literal_suffix; + + SPIRBlock *old_emitting_switch = current_emitting_switch; + current_emitting_switch = █ + + if (block.need_ladder_break) + statement("bool _", block.self, "_ladder_break = false;"); + + emit_block_hints(block); + statement("switch (", to_expression(block.condition), ")"); + begin_scope(); + + // Multiple case labels can branch to same block, so find all unique blocks. + bool emitted_default = false; + unordered_set emitted_blocks; + + for (auto &c : block.cases) + { + if (emitted_blocks.count(c.block) != 0) + continue; + + // Emit all case labels which branch to our target. + // FIXME: O(n^2), revisit if we hit shaders with 100++ case labels ... + for (auto &other_case : block.cases) + { + if (other_case.block == c.block) + { + // The case label value must be sign-extended properly in SPIR-V, so we can assume 32-bit values here. + auto case_value = unsigned_case ? convert_to_string(uint32_t(other_case.value)) : + convert_to_string(int32_t(other_case.value)); + statement("case ", case_value, label_suffix, ":"); + } + } + + // Maybe we share with default block? + if (block.default_block == c.block) + { + statement("default:"); + emitted_default = true; + } + + // Complete the target. + emitted_blocks.insert(c.block); + + begin_scope(); + branch(block.self, c.block); + end_scope(); + } + + if (!emitted_default) + { + if (block.default_block != block.next_block) + { + statement("default:"); + begin_scope(); + if (is_break(block.default_block)) + SPIRV_CROSS_THROW("Cannot break; out of a switch statement and out of a loop at the same time ..."); + branch(block.self, block.default_block); + end_scope(); + } + else if (flush_phi_required(block.self, block.next_block)) + { + statement("default:"); + begin_scope(); + flush_phi(block.self, block.next_block); + statement("break;"); + end_scope(); + } + } + + end_scope(); + + if (block.need_ladder_break) + { + statement("if (_", block.self, "_ladder_break)"); + begin_scope(); + statement("break;"); + end_scope(); + } + + current_emitting_switch = old_emitting_switch; + break; + } + + case SPIRBlock::Return: + for (auto &line : current_function->fixup_hooks_out) + line(); + + if (processing_entry_point) + emit_fixup(); + + if (block.return_value) + { + auto &type = expression_type(block.return_value); + if (!type.array.empty() && !backend.can_return_array) + { + // If we cannot return arrays, we will have a special out argument we can write to instead. 
+ // The backend is responsible for setting this up, and redirection the return values as appropriate. + if (ir.ids.at(block.return_value).get_type() != TypeUndef) + emit_array_copy("SPIRV_Cross_return_value", block.return_value); + + if (!block_is_outside_flow_control_from_block(get(current_function->entry_block), block) || + block.loop_dominator != SPIRBlock::NoDominator) + { + statement("return;"); + } + } + else + { + // OpReturnValue can return Undef, so don't emit anything for this case. + if (ir.ids.at(block.return_value).get_type() != TypeUndef) + statement("return ", to_expression(block.return_value), ";"); + } + } + // If this block is the very final block and not called from control flow, + // we do not need an explicit return which looks out of place. Just end the function here. + // In the very weird case of for(;;) { return; } executing return is unconditional, + // but we actually need a return here ... + else if (!block_is_outside_flow_control_from_block(get(current_function->entry_block), block) || + block.loop_dominator != SPIRBlock::NoDominator) + { + statement("return;"); + } + break; + + case SPIRBlock::Kill: + statement(backend.discard_literal, ";"); + break; + + case SPIRBlock::Unreachable: + emit_next_block = false; + break; + + default: + SPIRV_CROSS_THROW("Unimplemented block terminator."); + } + + if (block.next_block && emit_next_block) + { + // If we hit this case, we're dealing with an unconditional branch, which means we will output + // that block after this. If we had selection merge, we already flushed phi variables. + if (block.merge != SPIRBlock::MergeSelection) + flush_phi(block.self, block.next_block); + + // For merge selects we might have ignored the fact that a merge target + // could have been a break; or continue; + // We will need to deal with it here. + if (is_loop_break(block.next_block)) + { + // Cannot check for just break, because switch statements will also use break. + assert(block.merge == SPIRBlock::MergeSelection); + statement("break;"); + } + else if (is_continue(block.next_block)) + { + assert(block.merge == SPIRBlock::MergeSelection); + branch_to_continue(block.self, block.next_block); + } + else + emit_block_chain(get(block.next_block)); + } + + if (block.merge == SPIRBlock::MergeLoop) + { + if (continue_type == SPIRBlock::DoWhileLoop) + { + // Make sure that we run the continue block to get the expressions set, but this + // should become an empty string. + // We have no fallbacks if we cannot forward everything to temporaries ... + auto statements = emit_continue_block(block.continue_block); + if (!statements.empty()) + { + // The DoWhile block has side effects, force ComplexLoop pattern next pass. + get(block.continue_block).complex_continue = true; + force_recompile = true; + } + + end_scope_decl(join("while (", to_expression(get(block.continue_block).condition), ")")); + } + else + end_scope(); + + // We cannot break out of two loops at once, so don't check for break; here. + // Using block.self as the "from" block isn't quite right, but it has the same scope + // and dominance structure, so it's fine. + if (is_continue(block.merge_block)) + branch_to_continue(block.self, block.merge_block); + else + emit_block_chain(get(block.merge_block)); + } + + // Forget about control dependent expressions now. 
+ block.invalidate_expressions.clear();
+}
+
+void CompilerGLSL::begin_scope()
+{
+ statement("{");
+ indent++;
+}
+
+void CompilerGLSL::end_scope()
+{
+ if (!indent)
+ SPIRV_CROSS_THROW("Popping empty indent stack.");
+ indent--;
+ statement("}");
+}
+
+void CompilerGLSL::end_scope_decl()
+{
+ if (!indent)
+ SPIRV_CROSS_THROW("Popping empty indent stack.");
+ indent--;
+ statement("};");
+}
+
+void CompilerGLSL::end_scope_decl(const string &decl)
+{
+ if (!indent)
+ SPIRV_CROSS_THROW("Popping empty indent stack.");
+ indent--;
+ statement("} ", decl, ";");
+}
+
+void CompilerGLSL::check_function_call_constraints(const uint32_t *args, uint32_t length)
+{
+ // If our variable is remapped, and we rely on type-remapping information as
+ // well, then we cannot pass the variable as a function parameter.
+ // Fixing this is non-trivial without stamping out variants of the same function,
+ // so for now warn about this and suggest workarounds instead.
+ for (uint32_t i = 0; i < length; i++)
+ {
+ auto *var = maybe_get<SPIRVariable>(args[i]);
+ if (!var || !var->remapped_variable)
+ continue;
+
+ auto &type = get<SPIRType>(var->basetype);
+ if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData)
+ {
+ SPIRV_CROSS_THROW("Tried passing a remapped subpassInput variable to a function. "
+ "This will not work correctly because type-remapping information is lost. "
+ "To workaround, please consider not passing the subpass input as a function parameter, "
+ "or use in/out variables instead which do not need type remapping information.");
+ }
+ }
+}
+
+const Instruction *CompilerGLSL::get_next_instruction_in_block(const Instruction &instr)
+{
+ // FIXME: This is kind of hacky. There should be a cleaner way.
+ auto offset = uint32_t(&instr - current_emitting_block->ops.data());
+ if ((offset + 1) < current_emitting_block->ops.size())
+ return &current_emitting_block->ops[offset + 1];
+ else
+ return nullptr;
+}
+
+uint32_t CompilerGLSL::mask_relevant_memory_semantics(uint32_t semantics)
+{
+ return semantics & (MemorySemanticsAtomicCounterMemoryMask | MemorySemanticsImageMemoryMask |
+ MemorySemanticsWorkgroupMemoryMask | MemorySemanticsUniformMemoryMask |
+ MemorySemanticsCrossWorkgroupMemoryMask | MemorySemanticsSubgroupMemoryMask);
+}
+
+void CompilerGLSL::emit_array_copy(const string &lhs, uint32_t rhs_id)
+{
+ statement(lhs, " = ", to_expression(rhs_id), ";");
+}
+
+void CompilerGLSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr,
+ const spirv_cross::SPIRType &expr_type)
+{
+ auto *var = maybe_get_backing_variable(source_id);
+ if (var)
+ source_id = var->self;
+
+ // Only interested in standalone builtin variables.
+ if (!has_decoration(source_id, DecorationBuiltIn))
+ return;
+
+ auto builtin = static_cast<BuiltIn>(get_decoration(source_id, DecorationBuiltIn));
+ auto expected_type = expr_type.basetype;
+
+ // TODO: Fill in for more builtins.
+ switch (builtin) + { + case BuiltInLayer: + case BuiltInPrimitiveId: + case BuiltInViewportIndex: + case BuiltInInstanceId: + case BuiltInInstanceIndex: + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInSampleId: + case BuiltInBaseVertex: + case BuiltInBaseInstance: + case BuiltInDrawIndex: + expected_type = SPIRType::Int; + break; + + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInWorkgroupId: + case BuiltInLocalInvocationIndex: + case BuiltInWorkgroupSize: + case BuiltInNumWorkgroups: + expected_type = SPIRType::UInt; + break; + + default: + break; + } + + if (expected_type != expr_type.basetype) + expr = bitcast_expression(expr_type, expected_type, expr); +} + +void CompilerGLSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr, + const spirv_cross::SPIRType &expr_type) +{ + // Only interested in standalone builtin variables. + if (!has_decoration(target_id, DecorationBuiltIn)) + return; + + auto builtin = static_cast(get_decoration(target_id, DecorationBuiltIn)); + auto expected_type = expr_type.basetype; + + // TODO: Fill in for more builtins. + switch (builtin) + { + case BuiltInLayer: + case BuiltInPrimitiveId: + case BuiltInViewportIndex: + expected_type = SPIRType::Int; + break; + + default: + break; + } + + if (expected_type != expr_type.basetype) + { + auto type = expr_type; + type.basetype = expected_type; + expr = bitcast_expression(type, expr_type.basetype, expr); + } +} + +void CompilerGLSL::emit_block_hints(const SPIRBlock &) +{ +} diff --git a/3rdparty/spirv-cross/spirv_glsl.hpp b/3rdparty/spirv-cross/spirv_glsl.hpp new file mode 100644 index 000000000..f42e98d37 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_glsl.hpp @@ -0,0 +1,639 @@ +/* + * Copyright 2015-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_GLSL_HPP +#define SPIRV_CROSS_GLSL_HPP + +#include "spirv_cross.hpp" +#include +#include +#include +#include + +namespace spirv_cross +{ +enum PlsFormat +{ + PlsNone = 0, + + PlsR11FG11FB10F, + PlsR32F, + PlsRG16F, + PlsRGB10A2, + PlsRGBA8, + PlsRG16, + + PlsRGBA8I, + PlsRG16I, + + PlsRGB10A2UI, + PlsRGBA8UI, + PlsRG16UI, + PlsR32UI +}; + +struct PlsRemap +{ + uint32_t id; + PlsFormat format; +}; + +class CompilerGLSL : public Compiler +{ +public: + struct Options + { + // The shading language version. Corresponds to #version $VALUE. + uint32_t version = 450; + + // Emit the OpenGL ES shading language instead of desktop OpenGL. + bool es = false; + + // Debug option to always emit temporary variables for all expressions. + bool force_temporary = false; + + // If true, Vulkan GLSL features are used instead of GL-compatible features. + // Mostly useful for debugging SPIR-V files. + bool vulkan_semantics = false; + + // If true, gl_PerVertex is explicitly redeclared in vertex, geometry and tessellation shaders. + // The members of gl_PerVertex is determined by which built-ins are declared by the shader. 
+ // This option is ignored in ES versions, as redeclaration in ES is not required, and it depends on a different extension + // (EXT_shader_io_blocks) which makes things a bit more fuzzy. + bool separate_shader_objects = false; + + // Flattens multidimensional arrays, e.g. float foo[a][b][c] into single-dimensional arrays, + // e.g. float foo[a * b * c]. + // This function does not change the actual SPIRType of any object. + // Only the generated code, including declarations of interface variables are changed to be single array dimension. + bool flatten_multidimensional_arrays = false; + + // For older desktop GLSL targets than version 420, the + // GL_ARB_shading_language_420pack extensions is used to be able to support + // layout(binding) on UBOs and samplers. + // If disabled on older targets, binding decorations will be stripped. + bool enable_420pack_extension = true; + + enum Precision + { + DontCare, + Lowp, + Mediump, + Highp + }; + + struct + { + // GLSL: In vertex shaders, rewrite [0, w] depth (Vulkan/D3D style) to [-w, w] depth (GL style). + // MSL: In vertex shaders, rewrite [-w, w] depth (GL style) to [0, w] depth. + // HLSL: In vertex shaders, rewrite [-w, w] depth (GL style) to [0, w] depth. + bool fixup_clipspace = false; + + // Inverts gl_Position.y or equivalent. + bool flip_vert_y = false; + + // If true, the backend will assume that InstanceIndex will need to apply + // a base instance offset. Set to false if you know you will never use base instance + // functionality as it might remove some internal uniforms. + bool support_nonzero_base_instance = true; + } vertex; + + struct + { + // Add precision mediump float in ES targets when emitting GLES source. + // Add precision highp int in ES targets when emitting GLES source. + Precision default_float_precision = Mediump; + Precision default_int_precision = Highp; + } fragment; + }; + + void remap_pixel_local_storage(std::vector inputs, std::vector outputs) + { + pls_inputs = std::move(inputs); + pls_outputs = std::move(outputs); + remap_pls_variables(); + } + + explicit CompilerGLSL(std::vector spirv_) + : Compiler(move(spirv_)) + { + init(); + } + + CompilerGLSL(const uint32_t *ir_, size_t word_count) + : Compiler(ir_, word_count) + { + init(); + } + + explicit CompilerGLSL(const ParsedIR &ir_) + : Compiler(ir_) + { + init(); + } + + explicit CompilerGLSL(ParsedIR &&ir_) + : Compiler(std::move(ir_)) + { + init(); + } + + // Deprecate this interface because it doesn't overload properly with subclasses. + // Requires awkward static casting, which was a mistake. + SPIRV_CROSS_DEPRECATED("get_options() is obsolete, use get_common_options() instead.") + const Options &get_options() const + { + return options; + } + + const Options &get_common_options() const + { + return options; + } + + // Deprecate this interface because it doesn't overload properly with subclasses. + // Requires awkward static casting, which was a mistake. + SPIRV_CROSS_DEPRECATED("set_options() is obsolete, use set_common_options() instead.") + void set_options(Options &opts) + { + options = opts; + } + + void set_common_options(const Options &opts) + { + options = opts; + } + + std::string compile() override; + + // Returns the current string held in the conversion buffer. Useful for + // capturing what has been converted so far when compile() throws an error. + std::string get_partial_source(); + + // Adds a line to be added right after #version in GLSL backend. 
+ // This is useful for enabling custom extensions which are outside the scope of SPIRV-Cross. + // This can be combined with variable remapping. + // A new-line will be added. + // + // While add_header_line() is a more generic way of adding arbitrary text to the header + // of a GLSL file, require_extension() should be used when adding extensions since it will + // avoid creating collisions with SPIRV-Cross generated extensions. + // + // Code added via add_header_line() is typically backend-specific. + void add_header_line(const std::string &str); + + // Adds an extension which is required to run this shader, e.g. + // require_extension("GL_KHR_my_extension"); + void require_extension(const std::string &ext); + + // Legacy GLSL compatibility method. + // Takes a uniform or push constant variable and flattens it into a (i|u)vec4 array[N]; array instead. + // For this to work, all types in the block must be the same basic type, e.g. mixing vec2 and vec4 is fine, but + // mixing int and float is not. + // The name of the uniform array will be the same as the interface block name. + void flatten_buffer_block(uint32_t id); + +protected: + void reset(); + void emit_function(SPIRFunction &func, const Bitset &return_flags); + + bool has_extension(const std::string &ext) const; + void require_extension_internal(const std::string &ext); + + // Virtualize methods which need to be overridden by subclass targets like C++ and such. + virtual void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags); + + SPIRBlock *current_emitting_block = nullptr; + SPIRBlock *current_emitting_switch = nullptr; + + virtual void emit_instruction(const Instruction &instr); + void emit_block_instructions(SPIRBlock &block); + virtual void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count); + virtual void emit_spv_amd_shader_ballot_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_shader_explicit_vertex_parameter_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_shader_trinary_minmax_op(uint32_t result_type, uint32_t result_id, uint32_t op, + const uint32_t *args, uint32_t count); + virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count); + virtual void emit_header(); + void build_workgroup_size(std::vector &arguments, const SpecializationConstant &x, + const SpecializationConstant &y, const SpecializationConstant &z); + + virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id); + virtual void emit_texture_op(const Instruction &i); + virtual void emit_subgroup_op(const Instruction &i); + virtual std::string type_to_glsl(const SPIRType &type, uint32_t id = 0); + virtual std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage); + virtual void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = "", uint32_t base_offset = 0); + virtual std::string image_type_glsl(const SPIRType &type, uint32_t id = 0); + std::string constant_expression(const SPIRConstant &c); + std::string constant_op_expression(const SPIRConstantOp &cop); + virtual std::string constant_expression_vector(const SPIRConstant &c, uint32_t vector); + virtual void emit_fixup(); + virtual std::string variable_decl(const 
SPIRType &type, const std::string &name, uint32_t id = 0); + virtual std::string to_func_call_arg(uint32_t id); + virtual std::string to_function_name(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, bool has_array_offsets, bool has_offset, bool has_grad, + bool has_dref, uint32_t lod); + virtual std::string to_function_args(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, + bool is_proj, uint32_t coord, uint32_t coord_components, uint32_t dref, + uint32_t grad_x, uint32_t grad_y, uint32_t lod, uint32_t coffset, + uint32_t offset, uint32_t bias, uint32_t comp, uint32_t sample, + bool *p_forward); + virtual void emit_buffer_block(const SPIRVariable &type); + virtual void emit_push_constant_block(const SPIRVariable &var); + virtual void emit_uniform(const SPIRVariable &var); + virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type); + + std::unique_ptr buffer; + + template + inline void statement_inner(T &&t) + { + (*buffer) << std::forward(t); + statement_count++; + } + + template + inline void statement_inner(T &&t, Ts &&... ts) + { + (*buffer) << std::forward(t); + statement_count++; + statement_inner(std::forward(ts)...); + } + + template + inline void statement(Ts &&... ts) + { + if (force_recompile) + { + // Do not bother emitting code while force_recompile is active. + // We will compile again. + statement_count++; + return; + } + + if (redirect_statement) + redirect_statement->push_back(join(std::forward(ts)...)); + else + { + for (uint32_t i = 0; i < indent; i++) + (*buffer) << " "; + statement_inner(std::forward(ts)...); + (*buffer) << '\n'; + } + } + + template + inline void statement_no_indent(Ts &&... ts) + { + auto old_indent = indent; + indent = 0; + statement(std::forward(ts)...); + indent = old_indent; + } + + // Used for implementing continue blocks where + // we want to obtain a list of statements we can merge + // on a single line separated by comma. + std::vector *redirect_statement = nullptr; + const SPIRBlock *current_continue_block = nullptr; + + void begin_scope(); + void end_scope(); + void end_scope_decl(); + void end_scope_decl(const std::string &decl); + + Options options; + + std::string type_to_array_glsl(const SPIRType &type); + std::string to_array_size(const SPIRType &type, uint32_t index); + uint32_t to_array_size_literal(const SPIRType &type, uint32_t index) const; + uint32_t to_array_size_literal(const SPIRType &type) const; + std::string variable_decl(const SPIRVariable &variable); + std::string variable_decl_function_local(SPIRVariable &variable); + + void add_local_variable_name(uint32_t id); + void add_resource_name(uint32_t id); + void add_member_name(SPIRType &type, uint32_t name); + void add_function_overload(const SPIRFunction &func); + + virtual bool is_non_native_row_major_matrix(uint32_t id); + virtual bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index); + bool member_is_packed_type(const SPIRType &type, uint32_t index) const; + virtual std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, bool is_packed); + + std::unordered_set local_variable_names; + std::unordered_set resource_names; + std::unordered_set block_input_names; + std::unordered_set block_output_names; + std::unordered_set block_ubo_names; + std::unordered_set block_ssbo_names; + std::unordered_set block_names; // A union of all block_*_names. 
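+ //
+ // Note: when a name added via add_local_variable_name() or
+ // add_resource_name() collides with an entry in these caches, it is
+ // disambiguated by update_name_cache(), typically by appending a
+ // numeric suffix (e.g. a second "foo" becoming something like "foo_1").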
+ std::unordered_map> function_overloads; + + bool processing_entry_point = false; + + // Can be overriden by subclass backends for trivial things which + // shouldn't need polymorphism. + struct BackendVariations + { + std::string discard_literal = "discard"; + std::string null_pointer_literal = ""; + bool float_literal_suffix = false; + bool double_literal_suffix = true; + bool uint32_t_literal_suffix = true; + bool long_long_literal_suffix = false; + const char *basic_int_type = "int"; + const char *basic_uint_type = "uint"; + const char *basic_int8_type = "int8_t"; + const char *basic_uint8_type = "uint8_t"; + const char *basic_int16_type = "int16_t"; + const char *basic_uint16_type = "uint16_t"; + const char *half_literal_suffix = "hf"; + const char *int16_t_literal_suffix = "s"; + const char *uint16_t_literal_suffix = "us"; + bool swizzle_is_function = false; + bool shared_is_implied = false; + bool flexible_member_array_supported = true; + bool explicit_struct_type = false; + bool use_initializer_list = false; + bool use_typed_initializer_list = false; + bool can_declare_struct_inline = true; + bool can_declare_arrays_inline = true; + bool native_row_major_matrix = true; + bool use_constructor_splatting = true; + bool boolean_mix_support = true; + bool allow_precision_qualifiers = false; + bool can_swizzle_scalar = false; + bool force_gl_in_out_block = false; + bool can_return_array = true; + bool allow_truncated_access_chain = false; + bool supports_extensions = false; + bool supports_empty_struct = false; + bool array_is_value_type = true; + } backend; + + void emit_struct(SPIRType &type); + void emit_resources(); + void emit_buffer_block_native(const SPIRVariable &var); + void emit_buffer_block_legacy(const SPIRVariable &var); + void emit_buffer_block_flattened(const SPIRVariable &type); + void emit_declared_builtin_block(spv::StorageClass storage, spv::ExecutionModel model); + void emit_push_constant_block_vulkan(const SPIRVariable &var); + void emit_push_constant_block_glsl(const SPIRVariable &var); + void emit_interface_block(const SPIRVariable &type); + void emit_flattened_io_block(const SPIRVariable &var, const char *qual); + void emit_block_chain(SPIRBlock &block); + void emit_hoisted_temporaries(std::vector> &temporaries); + std::string constant_value_macro_name(uint32_t id); + void emit_constant(const SPIRConstant &constant); + void emit_specialization_constant_op(const SPIRConstantOp &constant); + std::string emit_continue_block(uint32_t continue_block); + bool attempt_emit_loop_header(SPIRBlock &block, SPIRBlock::Method method); + void propagate_loop_dominators(const SPIRBlock &block); + + void branch(uint32_t from, uint32_t to); + void branch_to_continue(uint32_t from, uint32_t to); + void branch(uint32_t from, uint32_t cond, uint32_t true_block, uint32_t false_block); + void flush_phi(uint32_t from, uint32_t to); + bool flush_phi_required(uint32_t from, uint32_t to); + void flush_variable_declaration(uint32_t id); + void flush_undeclared_variables(SPIRBlock &block); + + bool should_dereference(uint32_t id); + bool should_forward(uint32_t id); + void emit_mix_op(uint32_t result_type, uint32_t id, uint32_t left, uint32_t right, uint32_t lerp); + bool to_trivial_mix_op(const SPIRType &type, std::string &op, uint32_t left, uint32_t right, uint32_t lerp); + void emit_quaternary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, + uint32_t op3, const char *op); + void emit_trinary_func_op(uint32_t result_type, uint32_t result_id, 
uint32_t op0, uint32_t op1, uint32_t op2, + const char *op); + void emit_binary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + SPIRType::BaseType input_type, bool skip_cast_if_equal_type); + void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op); + void emit_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_unrolled_binary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_binary_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op, + SPIRType::BaseType input_type, bool skip_cast_if_equal_type); + + SPIRType binary_op_bitcast_helper(std::string &cast_op0, std::string &cast_op1, SPIRType::BaseType &input_type, + uint32_t op0, uint32_t op1, bool skip_cast_if_equal_type); + + std::string to_ternary_expression(const SPIRType &result_type, uint32_t select, uint32_t true_value, + uint32_t false_value); + + void emit_unary_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op); + bool expression_is_forwarded(uint32_t id); + SPIRExpression &emit_op(uint32_t result_type, uint32_t result_id, const std::string &rhs, bool forward_rhs, + bool suppress_usage_tracking = false); + std::string access_chain_internal(uint32_t base, const uint32_t *indices, uint32_t count, bool index_is_literal, + bool chain_only = false, bool ptr_chain = false, AccessChainMeta *meta = nullptr, + bool register_expression_read = true); + std::string access_chain(uint32_t base, const uint32_t *indices, uint32_t count, const SPIRType &target_type, + AccessChainMeta *meta = nullptr, bool ptr_chain = false); + + std::string flattened_access_chain(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::string flattened_access_chain_struct(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset); + std::string flattened_access_chain_matrix(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::string flattened_access_chain_vector(uint32_t base, const uint32_t *indices, uint32_t count, + const SPIRType &target_type, uint32_t offset, uint32_t matrix_stride, + bool need_transpose); + std::pair flattened_access_chain_offset(const SPIRType &basetype, const uint32_t *indices, + uint32_t count, uint32_t offset, + uint32_t word_stride, bool *need_transpose = nullptr, + uint32_t *matrix_stride = nullptr, + bool ptr_chain = false); + + const char *index_to_swizzle(uint32_t index); + std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr); + std::string declare_temporary(uint32_t type, uint32_t id); + void append_global_func_args(const SPIRFunction &func, uint32_t index, std::vector &arglist); + std::string to_expression(uint32_t id, bool register_expression_read = true); + std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true); + std::string to_unpacked_expression(uint32_t id); + std::string to_enclosed_unpacked_expression(uint32_t id); 
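+	// For illustration (hedged note): to_expression() returns the GLSL expression text
+	// for an SSA id, while to_enclosed_expression() additionally parenthesizes it when
+	// it is not a trivial term (see enclose_expression() below), so that, e.g., a binary
+	// expression "a + b" can be safely composed into "-(a + b)" or "(a + b).x".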
+ std::string to_dereferenced_expression(uint32_t id, bool register_expression_read = true); + std::string to_pointer_expression(uint32_t id); + std::string to_enclosed_pointer_expression(uint32_t id); + std::string to_extract_component_expression(uint32_t id, uint32_t index); + std::string enclose_expression(const std::string &expr); + std::string dereference_expression(const std::string &expr); + std::string address_of_expression(const std::string &expr); + void strip_enclosed_expression(std::string &expr); + std::string to_member_name(const SPIRType &type, uint32_t index); + virtual std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain); + std::string type_to_glsl_constructor(const SPIRType &type); + std::string argument_decl(const SPIRFunction::Parameter &arg); + virtual std::string to_qualifiers_glsl(uint32_t id); + const char *to_precision_qualifiers_glsl(uint32_t id); + virtual const char *to_storage_qualifiers_glsl(const SPIRVariable &var); + const char *flags_to_precision_qualifiers_glsl(const SPIRType &type, const Bitset &flags); + const char *format_to_glsl(spv::ImageFormat format); + virtual std::string layout_for_member(const SPIRType &type, uint32_t index); + virtual std::string to_interpolation_qualifiers(const Bitset &flags); + std::string layout_for_variable(const SPIRVariable &variable); + std::string to_combined_image_sampler(uint32_t image_id, uint32_t samp_id); + virtual bool skip_argument(uint32_t id) const; + virtual void emit_array_copy(const std::string &lhs, uint32_t rhs_id); + virtual void emit_block_hints(const SPIRBlock &block); + virtual std::string to_initializer_expression(const SPIRVariable &var); + + bool buffer_is_packing_standard(const SPIRType &type, BufferPackingStandard packing, uint32_t start_offset = 0, + uint32_t end_offset = ~(0u)); + uint32_t type_to_packed_base_size(const SPIRType &type, BufferPackingStandard packing); + uint32_t type_to_packed_alignment(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_packed_array_stride(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + uint32_t type_to_packed_size(const SPIRType &type, const Bitset &flags, BufferPackingStandard packing); + + std::string bitcast_glsl(const SPIRType &result_type, uint32_t arg); + virtual std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type); + + std::string bitcast_expression(SPIRType::BaseType target_type, uint32_t arg); + std::string bitcast_expression(const SPIRType &target_type, SPIRType::BaseType expr_type, const std::string &expr); + + std::string build_composite_combiner(uint32_t result_type, const uint32_t *elems, uint32_t length); + bool remove_duplicate_swizzle(std::string &op); + bool remove_unity_swizzle(uint32_t base, std::string &op); + + // Can modify flags to remote readonly/writeonly if image type + // and force recompile. 
+	bool check_atomic_image(uint32_t id);
+
+	virtual void replace_illegal_names();
+	virtual void emit_entry_point_declarations();
+
+	void replace_fragment_output(SPIRVariable &var);
+	void replace_fragment_outputs();
+	bool check_explicit_lod_allowed(uint32_t lod);
+	std::string legacy_tex_op(const std::string &op, const SPIRType &imgtype, uint32_t lod, uint32_t id);
+
+	uint32_t indent = 0;
+
+	std::unordered_set<uint32_t> emitted_functions;
+
+	std::unordered_set<uint32_t> flattened_buffer_blocks;
+	std::unordered_set<uint32_t> flattened_structs;
+
+	std::string load_flattened_struct(SPIRVariable &var);
+	std::string to_flattened_struct_member(const SPIRVariable &var, uint32_t index);
+	void store_flattened_struct(SPIRVariable &var, uint32_t value);
+
+	// Usage tracking. If a temporary is used more than once, use the temporary instead to
+	// avoid AST explosion when SPIRV is generated with pure SSA and doesn't write stuff to variables.
+	std::unordered_map<uint32_t, uint32_t> expression_usage_counts;
+	void track_expression_read(uint32_t id);
+
+	std::vector<std::string> forced_extensions;
+	std::vector<std::string> header_lines;
+
+	uint32_t statement_count;
+
+	inline bool is_legacy() const
+	{
+		return (options.es && options.version < 300) || (!options.es && options.version < 130);
+	}
+
+	inline bool is_legacy_es() const
+	{
+		return options.es && options.version < 300;
+	}
+
+	inline bool is_legacy_desktop() const
+	{
+		return !options.es && options.version < 130;
+	}
+
+	bool args_will_forward(uint32_t id, const uint32_t *args, uint32_t num_args, bool pure);
+	void register_call_out_argument(uint32_t id);
+	void register_impure_function_call();
+	void register_control_dependent_expression(uint32_t expr);
+
+	// GL_EXT_shader_pixel_local_storage support.
+	std::vector<PlsRemap> pls_inputs;
+	std::vector<PlsRemap> pls_outputs;
+	std::string pls_decl(const PlsRemap &variable);
+	const char *to_pls_qualifiers_glsl(const SPIRVariable &variable);
+	void emit_pls();
+	void remap_pls_variables();
+
+	// A variant which takes two sets of names. The secondary set is only used to verify there are no collisions,
+	// but the set is not updated when we have found a new name.
+	// Used primarily when adding block interface names.
+ void add_variable(std::unordered_set &variables_primary, + const std::unordered_set &variables_secondary, std::string &name); + + void check_function_call_constraints(const uint32_t *args, uint32_t length); + void handle_invalid_expression(uint32_t id); + void find_static_extensions(); + + std::string emit_for_loop_initializers(const SPIRBlock &block); + void emit_while_loop_initializers(const SPIRBlock &block); + bool for_loop_initializers_are_same_type(const SPIRBlock &block); + bool optimize_read_modify_write(const SPIRType &type, const std::string &lhs, const std::string &rhs); + void fixup_image_load_store_access(); + + bool type_is_empty(const SPIRType &type); + + virtual void declare_undefined_values(); + + static std::string sanitize_underscores(const std::string &str); + + bool can_use_io_location(spv::StorageClass storage, bool block); + const Instruction *get_next_instruction_in_block(const Instruction &instr); + static uint32_t mask_relevant_memory_semantics(uint32_t semantics); + + std::string convert_half_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + std::string convert_float_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + std::string convert_double_to_string(const SPIRConstant &value, uint32_t col, uint32_t row); + + std::string convert_separate_image_to_expression(uint32_t id); + + // Builtins in GLSL are always specific signedness, but the SPIR-V can declare them + // as either unsigned or signed. + // Sometimes we will need to automatically perform bitcasts on load and store to make this work. + virtual void bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type); + virtual void bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type); + + void handle_store_to_invariant_variable(uint32_t store_id, uint32_t value_id); + void disallow_forwarding_in_expression_chain(const SPIRExpression &expr); + + bool expression_is_constant_null(uint32_t id) const; + +private: + void init() + { + if (ir.source.known) + { + options.es = ir.source.es; + options.version = ir.source.version; + } + } +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_hlsl.cpp b/3rdparty/spirv-cross/spirv_hlsl.cpp new file mode 100644 index 000000000..486fec540 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_hlsl.cpp @@ -0,0 +1,4762 @@ +/* + * Copyright 2016-2019 Robert Konrad + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "spirv_hlsl.hpp" +#include "GLSL.std.450.h" +#include +#include + +using namespace spv; +using namespace spirv_cross; +using namespace std; + +static unsigned image_format_to_components(ImageFormat fmt) +{ + switch (fmt) + { + case ImageFormatR8: + case ImageFormatR16: + case ImageFormatR8Snorm: + case ImageFormatR16Snorm: + case ImageFormatR16f: + case ImageFormatR32f: + case ImageFormatR8i: + case ImageFormatR16i: + case ImageFormatR32i: + case ImageFormatR8ui: + case ImageFormatR16ui: + case ImageFormatR32ui: + return 1; + + case ImageFormatRg8: + case ImageFormatRg16: + case ImageFormatRg8Snorm: + case ImageFormatRg16Snorm: + case ImageFormatRg16f: + case ImageFormatRg32f: + case ImageFormatRg8i: + case ImageFormatRg16i: + case ImageFormatRg32i: + case ImageFormatRg8ui: + case ImageFormatRg16ui: + case ImageFormatRg32ui: + return 2; + + case ImageFormatR11fG11fB10f: + return 3; + + case ImageFormatRgba8: + case ImageFormatRgba16: + case ImageFormatRgb10A2: + case ImageFormatRgba8Snorm: + case ImageFormatRgba16Snorm: + case ImageFormatRgba16f: + case ImageFormatRgba32f: + case ImageFormatRgba8i: + case ImageFormatRgba16i: + case ImageFormatRgba32i: + case ImageFormatRgba8ui: + case ImageFormatRgba16ui: + case ImageFormatRgba32ui: + case ImageFormatRgb10a2ui: + return 4; + + case ImageFormatUnknown: + return 4; // Assume 4. + + default: + SPIRV_CROSS_THROW("Unrecognized typed image format."); + } +} + +static string image_format_to_type(ImageFormat fmt, SPIRType::BaseType basetype) +{ + switch (fmt) + { + case ImageFormatR8: + case ImageFormatR16: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float"; + case ImageFormatRg8: + case ImageFormatRg16: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float2"; + case ImageFormatRgba8: + case ImageFormatRgba16: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float4"; + case ImageFormatRgb10A2: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "unorm float4"; + + case ImageFormatR8Snorm: + case ImageFormatR16Snorm: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "snorm float"; + case ImageFormatRg8Snorm: + case ImageFormatRg16Snorm: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "snorm float2"; + case ImageFormatRgba8Snorm: + case ImageFormatRgba16Snorm: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "snorm float4"; + + case ImageFormatR16f: + case ImageFormatR32f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float"; + case ImageFormatRg16f: + case ImageFormatRg32f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float2"; + case ImageFormatRgba16f: + case ImageFormatRgba32f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float4"; + + case ImageFormatR11fG11fB10f: + if (basetype != SPIRType::Float) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "float3"; + + case ImageFormatR8i: + case ImageFormatR16i: + case 
ImageFormatR32i: + if (basetype != SPIRType::Int) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "int"; + case ImageFormatRg8i: + case ImageFormatRg16i: + case ImageFormatRg32i: + if (basetype != SPIRType::Int) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "int2"; + case ImageFormatRgba8i: + case ImageFormatRgba16i: + case ImageFormatRgba32i: + if (basetype != SPIRType::Int) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "int4"; + + case ImageFormatR8ui: + case ImageFormatR16ui: + case ImageFormatR32ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint"; + case ImageFormatRg8ui: + case ImageFormatRg16ui: + case ImageFormatRg32ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint2"; + case ImageFormatRgba8ui: + case ImageFormatRgba16ui: + case ImageFormatRgba32ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint4"; + case ImageFormatRgb10a2ui: + if (basetype != SPIRType::UInt) + SPIRV_CROSS_THROW("Mismatch in image type and base type of image."); + return "uint4"; + + case ImageFormatUnknown: + switch (basetype) + { + case SPIRType::Float: + return "float4"; + case SPIRType::Int: + return "int4"; + case SPIRType::UInt: + return "uint4"; + default: + SPIRV_CROSS_THROW("Unsupported base type for image."); + } + + default: + SPIRV_CROSS_THROW("Unrecognized typed image format."); + } +} + +// Returns true if an arithmetic operation does not change behavior depending on signedness. +static bool hlsl_opcode_is_sign_invariant(Op opcode) +{ + switch (opcode) + { + case OpIEqual: + case OpINotEqual: + case OpISub: + case OpIAdd: + case OpIMul: + case OpShiftLeftLogical: + case OpBitwiseOr: + case OpBitwiseXor: + case OpBitwiseAnd: + return true; + + default: + return false; + } +} + +string CompilerHLSL::image_type_hlsl_modern(const SPIRType &type, uint32_t) +{ + auto &imagetype = get(type.image.type); + const char *dim = nullptr; + bool typed_load = false; + uint32_t components = 4; + + switch (type.image.dim) + { + case Dim1D: + typed_load = type.image.sampled == 2; + dim = "1D"; + break; + case Dim2D: + typed_load = type.image.sampled == 2; + dim = "2D"; + break; + case Dim3D: + typed_load = type.image.sampled == 2; + dim = "3D"; + break; + case DimCube: + if (type.image.sampled == 2) + SPIRV_CROSS_THROW("RWTextureCube does not exist in HLSL."); + dim = "Cube"; + break; + case DimRect: + SPIRV_CROSS_THROW("Rectangle texture support is not yet implemented for HLSL."); // TODO + case DimBuffer: + if (type.image.sampled == 1) + return join("Buffer<", type_to_glsl(imagetype), components, ">"); + else if (type.image.sampled == 2) + return join("RWBuffer<", image_format_to_type(type.image.format, imagetype.basetype), ">"); + else + SPIRV_CROSS_THROW("Sampler buffers must be either sampled or unsampled. Cannot deduce in runtime."); + case DimSubpassData: + dim = "2D"; + typed_load = false; + break; + default: + SPIRV_CROSS_THROW("Invalid dimension."); + } + const char *arrayed = type.image.arrayed ? "Array" : ""; + const char *ms = type.image.ms ? "MS" : ""; + const char *rw = typed_load ? "RW" : ""; + return join(rw, "Texture", dim, ms, arrayed, "<", + typed_load ? 
image_format_to_type(type.image.format, imagetype.basetype) : + join(type_to_glsl(imagetype), components), + ">"); +} + +string CompilerHLSL::image_type_hlsl_legacy(const SPIRType &type, uint32_t id) +{ + auto &imagetype = get(type.image.type); + string res; + + switch (imagetype.basetype) + { + case SPIRType::Int: + res = "i"; + break; + case SPIRType::UInt: + res = "u"; + break; + default: + break; + } + + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData) + return res + "subpassInput" + (type.image.ms ? "MS" : ""); + + // If we're emulating subpassInput with samplers, force sampler2D + // so we don't have to specify format. + if (type.basetype == SPIRType::Image && type.image.dim != DimSubpassData) + { + // Sampler buffers are always declared as samplerBuffer even though they might be separate images in the SPIR-V. + if (type.image.dim == DimBuffer && type.image.sampled == 1) + res += "sampler"; + else + res += type.image.sampled == 2 ? "image" : "texture"; + } + else + res += "sampler"; + + switch (type.image.dim) + { + case Dim1D: + res += "1D"; + break; + case Dim2D: + res += "2D"; + break; + case Dim3D: + res += "3D"; + break; + case DimCube: + res += "CUBE"; + break; + + case DimBuffer: + res += "Buffer"; + break; + + case DimSubpassData: + res += "2D"; + break; + default: + SPIRV_CROSS_THROW("Only 1D, 2D, 3D, Buffer, InputTarget and Cube textures supported."); + } + + if (type.image.ms) + res += "MS"; + if (type.image.arrayed) + res += "Array"; + if (image_is_comparison(type, id)) + res += "Shadow"; + + return res; +} + +string CompilerHLSL::image_type_hlsl(const SPIRType &type, uint32_t id) +{ + if (hlsl_options.shader_model <= 30) + return image_type_hlsl_legacy(type, id); + else + return image_type_hlsl_modern(type, id); +} + +// The optional id parameter indicates the object whose type we are trying +// to find the description for. It is optional. Most type descriptions do not +// depend on a specific object's use of that type. +string CompilerHLSL::type_to_glsl(const SPIRType &type, uint32_t id) +{ + // Ignore the pointer type since GLSL doesn't have pointers. + + switch (type.basetype) + { + case SPIRType::Struct: + // Need OpName lookup here to get a "sensible" name for a struct. + if (backend.explicit_struct_type) + return join("struct ", to_name(type.self)); + else + return to_name(type.self); + + case SPIRType::Image: + case SPIRType::SampledImage: + return image_type_hlsl(type, id); + + case SPIRType::Sampler: + return comparison_ids.count(id) ? 
"SamplerComparisonState" : "SamplerState"; + + case SPIRType::Void: + return "void"; + + default: + break; + } + + if (type.vecsize == 1 && type.columns == 1) // Scalar builtin + { + switch (type.basetype) + { + case SPIRType::Boolean: + return "bool"; + case SPIRType::Int: + return backend.basic_int_type; + case SPIRType::UInt: + return backend.basic_uint_type; + case SPIRType::AtomicCounter: + return "atomic_uint"; + case SPIRType::Half: + return "min16float"; + case SPIRType::Float: + return "float"; + case SPIRType::Double: + return "double"; + case SPIRType::Int64: + return "int64_t"; + case SPIRType::UInt64: + return "uint64_t"; + default: + return "???"; + } + } + else if (type.vecsize > 1 && type.columns == 1) // Vector builtin + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bool", type.vecsize); + case SPIRType::Int: + return join("int", type.vecsize); + case SPIRType::UInt: + return join("uint", type.vecsize); + case SPIRType::Half: + return join("min16float", type.vecsize); + case SPIRType::Float: + return join("float", type.vecsize); + case SPIRType::Double: + return join("double", type.vecsize); + case SPIRType::Int64: + return join("i64vec", type.vecsize); + case SPIRType::UInt64: + return join("u64vec", type.vecsize); + default: + return "???"; + } + } + else + { + switch (type.basetype) + { + case SPIRType::Boolean: + return join("bool", type.columns, "x", type.vecsize); + case SPIRType::Int: + return join("int", type.columns, "x", type.vecsize); + case SPIRType::UInt: + return join("uint", type.columns, "x", type.vecsize); + case SPIRType::Half: + return join("min16float", type.columns, "x", type.vecsize); + case SPIRType::Float: + return join("float", type.columns, "x", type.vecsize); + case SPIRType::Double: + return join("double", type.columns, "x", type.vecsize); + // Matrix types not supported for int64/uint64. + default: + return "???"; + } + } +} + +void CompilerHLSL::emit_header() +{ + for (auto &header : header_lines) + statement(header); + + if (header_lines.size() > 0) + { + statement(""); + } +} + +void CompilerHLSL::emit_interface_block_globally(const SPIRVariable &var) +{ + add_resource_name(var.self); + + // The global copies of I/O variables should not contain interpolation qualifiers. + // These are emitted inside the interface structs. + auto &flags = ir.meta[var.self].decoration.decoration_flags; + auto old_flags = flags; + flags.reset(); + statement("static ", variable_decl(var), ";"); + flags = old_flags; +} + +const char *CompilerHLSL::to_storage_qualifiers_glsl(const SPIRVariable &var) +{ + // Input and output variables are handled specially in HLSL backend. + // The variables are declared as global, private variables, and do not need any qualifiers. + if (var.storage == StorageClassUniformConstant || var.storage == StorageClassUniform || + var.storage == StorageClassPushConstant) + { + return "uniform "; + } + + return ""; +} + +void CompilerHLSL::emit_builtin_outputs_in_struct() +{ + auto &execution = get_entry_point(); + + bool legacy = hlsl_options.shader_model <= 30; + active_output_builtins.for_each_bit([&](uint32_t i) { + const char *type = nullptr; + const char *semantic = nullptr; + auto builtin = static_cast(i); + switch (builtin) + { + case BuiltInPosition: + type = "float4"; + semantic = legacy ? 
"POSITION" : "SV_Position"; + break; + + case BuiltInFragDepth: + type = "float"; + if (legacy) + { + semantic = "DEPTH"; + } + else + { + if (hlsl_options.shader_model >= 50 && execution.flags.get(ExecutionModeDepthGreater)) + semantic = "SV_DepthGreaterEqual"; + else if (hlsl_options.shader_model >= 50 && execution.flags.get(ExecutionModeDepthLess)) + semantic = "SV_DepthLessEqual"; + else + semantic = "SV_Depth"; + } + break; + + case BuiltInClipDistance: + // HLSL is a bit weird here, use SV_ClipDistance0, SV_ClipDistance1 and so on with vectors. + for (uint32_t clip = 0; clip < clip_distance_count; clip += 4) + { + uint32_t to_declare = clip_distance_count - clip; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = clip / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassOutput), semantic_index, + " : SV_ClipDistance", semantic_index, ";"); + } + break; + + case BuiltInCullDistance: + // HLSL is a bit weird here, use SV_CullDistance0, SV_CullDistance1 and so on with vectors. + for (uint32_t cull = 0; cull < cull_distance_count; cull += 4) + { + uint32_t to_declare = cull_distance_count - cull; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = cull / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassOutput), semantic_index, + " : SV_CullDistance", semantic_index, ";"); + } + break; + + case BuiltInPointSize: + // If point_size_compat is enabled, just ignore PointSize. + // PointSize does not exist in HLSL, but some code bases might want to be able to use these shaders, + // even if it means working around the missing feature. + if (hlsl_options.point_size_compat) + break; + else + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + + default: + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + break; + } + + if (type && semantic) + statement(type, " ", builtin_to_glsl(builtin, StorageClassOutput), " : ", semantic, ";"); + }); +} + +void CompilerHLSL::emit_builtin_inputs_in_struct() +{ + bool legacy = hlsl_options.shader_model <= 30; + active_input_builtins.for_each_bit([&](uint32_t i) { + const char *type = nullptr; + const char *semantic = nullptr; + auto builtin = static_cast(i); + switch (builtin) + { + case BuiltInFragCoord: + type = "float4"; + semantic = legacy ? 
"VPOS" : "SV_Position"; + break; + + case BuiltInVertexId: + case BuiltInVertexIndex: + if (legacy) + SPIRV_CROSS_THROW("Vertex index not supported in SM 3.0 or lower."); + type = "uint"; + semantic = "SV_VertexID"; + break; + + case BuiltInInstanceId: + case BuiltInInstanceIndex: + if (legacy) + SPIRV_CROSS_THROW("Instance index not supported in SM 3.0 or lower."); + type = "uint"; + semantic = "SV_InstanceID"; + break; + + case BuiltInSampleId: + if (legacy) + SPIRV_CROSS_THROW("Sample ID not supported in SM 3.0 or lower."); + type = "uint"; + semantic = "SV_SampleIndex"; + break; + + case BuiltInGlobalInvocationId: + type = "uint3"; + semantic = "SV_DispatchThreadID"; + break; + + case BuiltInLocalInvocationId: + type = "uint3"; + semantic = "SV_GroupThreadID"; + break; + + case BuiltInLocalInvocationIndex: + type = "uint"; + semantic = "SV_GroupIndex"; + break; + + case BuiltInWorkgroupId: + type = "uint3"; + semantic = "SV_GroupID"; + break; + + case BuiltInFrontFacing: + type = "bool"; + semantic = "SV_IsFrontFace"; + break; + + case BuiltInNumWorkgroups: + case BuiltInSubgroupSize: + case BuiltInSubgroupLocalInvocationId: + case BuiltInSubgroupEqMask: + case BuiltInSubgroupLtMask: + case BuiltInSubgroupLeMask: + case BuiltInSubgroupGtMask: + case BuiltInSubgroupGeMask: + // Handled specially. + break; + + case BuiltInClipDistance: + // HLSL is a bit weird here, use SV_ClipDistance0, SV_ClipDistance1 and so on with vectors. + for (uint32_t clip = 0; clip < clip_distance_count; clip += 4) + { + uint32_t to_declare = clip_distance_count - clip; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = clip / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassInput), semantic_index, + " : SV_ClipDistance", semantic_index, ";"); + } + break; + + case BuiltInCullDistance: + // HLSL is a bit weird here, use SV_CullDistance0, SV_CullDistance1 and so on with vectors. + for (uint32_t cull = 0; cull < cull_distance_count; cull += 4) + { + uint32_t to_declare = cull_distance_count - cull; + if (to_declare > 4) + to_declare = 4; + + uint32_t semantic_index = cull / 4; + + static const char *types[] = { "float", "float2", "float3", "float4" }; + statement(types[to_declare - 1], " ", builtin_to_glsl(builtin, StorageClassInput), semantic_index, + " : SV_CullDistance", semantic_index, ";"); + } + break; + + case BuiltInPointCoord: + // PointCoord is not supported, but provide a way to just ignore that, similar to PointSize. + if (hlsl_options.point_coord_compat) + break; + else + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + + default: + SPIRV_CROSS_THROW("Unsupported builtin in HLSL."); + break; + } + + if (type && semantic) + statement(type, " ", builtin_to_glsl(builtin, StorageClassInput), " : ", semantic, ";"); + }); +} + +uint32_t CompilerHLSL::type_to_consumed_locations(const SPIRType &type) const +{ + // TODO: Need to verify correctness. 
+ uint32_t elements = 0; + + if (type.basetype == SPIRType::Struct) + { + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + elements += type_to_consumed_locations(get(type.member_types[i])); + } + else + { + uint32_t array_multiplier = 1; + for (uint32_t i = 0; i < uint32_t(type.array.size()); i++) + { + if (type.array_size_literal[i]) + array_multiplier *= type.array[i]; + else + array_multiplier *= get(type.array[i]).scalar(); + } + elements += array_multiplier * type.columns; + } + return elements; +} + +string CompilerHLSL::to_interpolation_qualifiers(const Bitset &flags) +{ + string res; + //if (flags & (1ull << DecorationSmooth)) + // res += "linear "; + if (flags.get(DecorationFlat)) + res += "nointerpolation "; + if (flags.get(DecorationNoPerspective)) + res += "noperspective "; + if (flags.get(DecorationCentroid)) + res += "centroid "; + if (flags.get(DecorationPatch)) + res += "patch "; // Seems to be different in actual HLSL. + if (flags.get(DecorationSample)) + res += "sample "; + if (flags.get(DecorationInvariant)) + res += "invariant "; // Not supported? + + return res; +} + +std::string CompilerHLSL::to_semantic(uint32_t vertex_location) +{ + for (auto &attribute : remap_vertex_attributes) + if (attribute.location == vertex_location) + return attribute.semantic; + + return join("TEXCOORD", vertex_location); +} + +void CompilerHLSL::emit_io_block(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + add_resource_name(type.self); + + statement("struct ", to_name(type.self)); + begin_scope(); + type.member_name_cache.clear(); + + uint32_t base_location = get_decoration(var.self, DecorationLocation); + + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + string semantic; + if (has_member_decoration(type.self, i, DecorationLocation)) + { + uint32_t location = get_member_decoration(type.self, i, DecorationLocation); + semantic = join(" : ", to_semantic(location)); + } + else + { + // If the block itself has a location, but not its members, use the implicit location. + // There could be a conflict if the block members partially specialize the locations. + // It is unclear how SPIR-V deals with this. Assume this does not happen for now. + uint32_t location = base_location + i; + semantic = join(" : ", to_semantic(location)); + } + + add_member_name(type, i); + + auto &membertype = get(type.member_types[i]); + statement(to_interpolation_qualifiers(get_member_decoration_bitset(type.self, i)), + variable_decl(membertype, to_member_name(type, i)), semantic, ";"); + } + + end_scope_decl(); + statement(""); + + statement("static ", variable_decl(var), ";"); + statement(""); +} + +void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unordered_set &active_locations) +{ + auto &execution = get_entry_point(); + auto type = get(var.basetype); + + string binding; + bool use_location_number = true; + bool legacy = hlsl_options.shader_model <= 30; + if (execution.model == ExecutionModelFragment && var.storage == StorageClassOutput) + { + binding = join(legacy ? 
"COLOR" : "SV_Target", get_decoration(var.self, DecorationLocation)); + use_location_number = false; + if (legacy) // COLOR must be a four-component vector on legacy shader model targets (HLSL ERR_COLOR_4COMP) + type.vecsize = 4; + } + + const auto get_vacant_location = [&]() -> uint32_t { + for (uint32_t i = 0; i < 64; i++) + if (!active_locations.count(i)) + return i; + SPIRV_CROSS_THROW("All locations from 0 to 63 are exhausted."); + }; + + bool need_matrix_unroll = var.storage == StorageClassInput && execution.model == ExecutionModelVertex; + + auto &m = ir.meta[var.self].decoration; + auto name = to_name(var.self); + if (use_location_number) + { + uint32_t location_number; + + // If an explicit location exists, use it with TEXCOORD[N] semantic. + // Otherwise, pick a vacant location. + if (m.decoration_flags.get(DecorationLocation)) + location_number = m.location; + else + location_number = get_vacant_location(); + + // Allow semantic remap if specified. + auto semantic = to_semantic(location_number); + + if (need_matrix_unroll && type.columns > 1) + { + if (!type.array.empty()) + SPIRV_CROSS_THROW("Arrays of matrices used as input/output. This is not supported."); + + // Unroll matrices. + for (uint32_t i = 0; i < type.columns; i++) + { + SPIRType newtype = type; + newtype.columns = 1; + statement(to_interpolation_qualifiers(get_decoration_bitset(var.self)), + variable_decl(newtype, join(name, "_", i)), " : ", semantic, "_", i, ";"); + active_locations.insert(location_number++); + } + } + else + { + statement(to_interpolation_qualifiers(get_decoration_bitset(var.self)), variable_decl(type, name), " : ", + semantic, ";"); + + // Structs and arrays should consume more locations. + uint32_t consumed_locations = type_to_consumed_locations(type); + for (uint32_t i = 0; i < consumed_locations; i++) + active_locations.insert(location_number + i); + } + } + else + statement(variable_decl(type, name), " : ", binding, ";"); +} + +std::string CompilerHLSL::builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) +{ + switch (builtin) + { + case BuiltInVertexId: + return "gl_VertexID"; + case BuiltInInstanceId: + return "gl_InstanceID"; + case BuiltInNumWorkgroups: + { + if (!num_workgroups_builtin) + SPIRV_CROSS_THROW("NumWorkgroups builtin is used, but remap_num_workgroups_builtin() was not called. " + "Cannot emit code for this builtin."); + + auto &var = get(num_workgroups_builtin); + auto &type = get(var.basetype); + return sanitize_underscores(join(to_name(num_workgroups_builtin), "_", get_member_name(type.self, 0))); + } + case BuiltInPointCoord: + // Crude hack, but there is no real alternative. This path is only enabled if point_coord_compat is set. + return "float2(0.5f, 0.5f)"; + case BuiltInSubgroupLocalInvocationId: + return "WaveGetLaneIndex()"; + case BuiltInSubgroupSize: + return "WaveGetLaneCount()"; + + default: + return CompilerGLSL::builtin_to_glsl(builtin, storage); + } +} + +void CompilerHLSL::emit_builtin_variables() +{ + Bitset builtins = active_input_builtins; + builtins.merge_or(active_output_builtins); + + // Emit global variables for the interface variables which are statically used by the shader. 
+ builtins.for_each_bit([&](uint32_t i) { + const char *type = nullptr; + auto builtin = static_cast(i); + uint32_t array_size = 0; + + switch (builtin) + { + case BuiltInFragCoord: + case BuiltInPosition: + type = "float4"; + break; + + case BuiltInFragDepth: + type = "float"; + break; + + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInInstanceId: + case BuiltInInstanceIndex: + case BuiltInSampleId: + type = "int"; + break; + + case BuiltInPointSize: + if (hlsl_options.point_size_compat) + { + // Just emit the global variable, it will be ignored. + type = "float"; + break; + } + else + SPIRV_CROSS_THROW(join("Unsupported builtin in HLSL: ", unsigned(builtin))); + + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInWorkgroupId: + type = "uint3"; + break; + + case BuiltInLocalInvocationIndex: + type = "uint"; + break; + + case BuiltInFrontFacing: + type = "bool"; + break; + + case BuiltInNumWorkgroups: + case BuiltInPointCoord: + // Handled specially. + break; + + case BuiltInSubgroupLocalInvocationId: + case BuiltInSubgroupSize: + if (hlsl_options.shader_model < 60) + SPIRV_CROSS_THROW("Need SM 6.0 for Wave ops."); + break; + + case BuiltInSubgroupEqMask: + case BuiltInSubgroupLtMask: + case BuiltInSubgroupLeMask: + case BuiltInSubgroupGtMask: + case BuiltInSubgroupGeMask: + if (hlsl_options.shader_model < 60) + SPIRV_CROSS_THROW("Need SM 6.0 for Wave ops."); + type = "uint4"; + break; + + case BuiltInClipDistance: + array_size = clip_distance_count; + type = "float"; + break; + + case BuiltInCullDistance: + array_size = cull_distance_count; + type = "float"; + break; + + default: + SPIRV_CROSS_THROW(join("Unsupported builtin in HLSL: ", unsigned(builtin))); + } + + StorageClass storage = active_input_builtins.get(i) ? StorageClassInput : StorageClassOutput; + // FIXME: SampleMask can be both in and out with sample builtin, + // need to distinguish that when we add support for that. + + if (type) + { + if (array_size) + statement("static ", type, " ", builtin_to_glsl(builtin, storage), "[", array_size, "];"); + else + statement("static ", type, " ", builtin_to_glsl(builtin, storage), ";"); + } + }); +} + +void CompilerHLSL::emit_composite_constants() +{ + // HLSL cannot declare structs or arrays inline, so we must move them out to + // global constants directly. + bool emitted = false; + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + if (c.specialization) + continue; + + auto &type = get(c.constant_type); + if (type.basetype == SPIRType::Struct || !type.array.empty()) + { + auto name = to_name(c.self); + statement("static const ", variable_decl(type, name), " = ", constant_expression(c), ";"); + emitted = true; + } + } + } + + if (emitted) + statement(""); +} + +void CompilerHLSL::emit_specialization_constants() +{ + bool emitted = false; + SpecializationConstant wg_x, wg_y, wg_z; + uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + if (c.self == workgroup_size_id) + { + statement("static const uint3 gl_WorkGroupSize = ", + constant_expression(get(workgroup_size_id)), ";"); + emitted = true; + } + else if (c.specialization) + { + auto &type = get(c.constant_type); + auto name = to_name(c.self); + + // HLSL does not support specialization constants, so fallback to macros. 
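+				// Illustrative sketch (hedged; exact macro name comes from constant_value_macro_name()):
+				// a specialization constant with SpecId 10 and default value 4 ends up roughly as
+				//   #ifndef SPIRV_CROSS_CONSTANT_ID_10
+				//   #define SPIRV_CROSS_CONSTANT_ID_10 4
+				//   #endif
+				//   static const int <name> = SPIRV_CROSS_CONSTANT_ID_10;
+				// so the value can still be overridden when the HLSL is compiled, e.g. via -D.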
+ c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + + statement("#ifndef ", c.specialization_constant_macro_name); + statement("#define ", c.specialization_constant_macro_name, " ", constant_expression(c)); + statement("#endif"); + statement("static const ", variable_decl(type, name), " = ", c.specialization_constant_macro_name, ";"); + emitted = true; + } + } + else if (id.get_type() == TypeConstantOp) + { + auto &c = id.get(); + auto &type = get(c.basetype); + auto name = to_name(c.self); + statement("static const ", variable_decl(type, name), " = ", constant_op_expression(c), ";"); + emitted = true; + } + } + + if (emitted) + statement(""); +} + +void CompilerHLSL::replace_illegal_names() +{ + static const unordered_set keywords = { + // Additional HLSL specific keywords. + "line", "linear", "matrix", "point", "row_major", "sampler", + }; + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + if (!is_hidden_variable(var)) + { + auto &m = ir.meta[var.self].decoration; + if (keywords.find(m.alias) != end(keywords)) + m.alias = join("_", m.alias); + } + } + } + + CompilerGLSL::replace_illegal_names(); +} + +void CompilerHLSL::emit_resources() +{ + auto &execution = get_entry_point(); + + replace_illegal_names(); + + emit_specialization_constants(); + + // Output all basic struct types which are not Block or BufferBlock as these are declared inplace + // when such variables are instantiated. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer && + (!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) && + !ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock))) + { + emit_struct(type); + } + } + } + + emit_composite_constants(); + + bool emitted = false; + + // Output UBOs and SSBOs + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + bool is_block_storage = type.storage == StorageClassStorageBuffer || type.storage == StorageClassUniform; + bool has_block_flags = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (var.storage != StorageClassFunction && type.pointer && is_block_storage && !is_hidden_variable(var) && + has_block_flags) + { + emit_buffer_block(var); + emitted = true; + } + } + } + + // Output push constant blocks + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassPushConstant && + !is_hidden_variable(var)) + { + emit_push_constant_block(var); + emitted = true; + } + } + } + + if (execution.model == ExecutionModelVertex && hlsl_options.shader_model <= 30) + { + statement("uniform float4 gl_HalfPixel;"); + emitted = true; + } + + bool skip_separate_image_sampler = !combined_image_samplers.empty() || hlsl_options.shader_model <= 30; + + // Output Uniform Constants (values, samplers, images, etc). + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + + // If we're remapping separate samplers and images, only emit the combined samplers. 
+ if (skip_separate_image_sampler) + { + // Sampler buffers are always used without a sampler, and they will also work in regular D3D. + bool sampler_buffer = type.basetype == SPIRType::Image && type.image.dim == DimBuffer; + bool separate_image = type.basetype == SPIRType::Image && type.image.sampled == 1; + bool separate_sampler = type.basetype == SPIRType::Sampler; + if (!sampler_buffer && (separate_image || separate_sampler)) + continue; + } + + if (var.storage != StorageClassFunction && !is_builtin_variable(var) && !var.remapped_variable && + type.pointer && + (type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter)) + { + emit_uniform(var); + emitted = true; + } + } + } + + if (emitted) + statement(""); + emitted = false; + + // Emit builtin input and output variables here. + emit_builtin_variables(); + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + // Do not emit I/O blocks here. + // I/O blocks can be arrayed, so we must deal with them separately to support geometry shaders + // and tessellation down the line. + if (!block && var.storage != StorageClassFunction && !var.remapped_variable && type.pointer && + (var.storage == StorageClassInput || var.storage == StorageClassOutput) && !is_builtin_variable(var) && + interface_variable_exists_in_entry_point(var.self)) + { + // Only emit non-builtins which are not blocks here. Builtin variables are handled separately. + emit_interface_block_globally(var); + emitted = true; + } + } + } + + if (emitted) + statement(""); + emitted = false; + + require_input = false; + require_output = false; + unordered_set active_inputs; + unordered_set active_outputs; + vector input_variables; + vector output_variables; + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassInput && var.storage != StorageClassOutput) + continue; + + // Do not emit I/O blocks here. + // I/O blocks can be arrayed, so we must deal with them separately to support geometry shaders + // and tessellation down the line. + if (!block && !var.remapped_variable && type.pointer && !is_builtin_variable(var) && + interface_variable_exists_in_entry_point(var.self)) + { + if (var.storage == StorageClassInput) + input_variables.push_back(&var); + else + output_variables.push_back(&var); + } + + // Reserve input and output locations for block variables as necessary. + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto &active = var.storage == StorageClassInput ? active_inputs : active_outputs; + for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++) + { + if (has_member_decoration(type.self, i, DecorationLocation)) + { + uint32_t location = get_member_decoration(type.self, i, DecorationLocation); + active.insert(location); + } + } + + // Emit the block struct and a global variable here. 
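+				// Illustrative sketch (hedged; names are hypothetical): for an I/O block such as
+				// "out VSOutput { vec4 color; }", emit_io_block() produces roughly
+				//   struct VSOutput { float4 color : TEXCOORD0; };
+				//   static VSOutput <variable name>;
+				// with each member's semantic derived from its Location decoration (or the
+				// block's base location) through to_semantic().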
+ emit_io_block(var); + } + } + } + + const auto variable_compare = [&](const SPIRVariable *a, const SPIRVariable *b) -> bool { + // Sort input and output variables based on, from more robust to less robust: + // - Location + // - Variable has a location + // - Name comparison + // - Variable has a name + // - Fallback: ID + bool has_location_a = has_decoration(a->self, DecorationLocation); + bool has_location_b = has_decoration(b->self, DecorationLocation); + + if (has_location_a && has_location_b) + { + return get_decoration(a->self, DecorationLocation) < get_decoration(b->self, DecorationLocation); + } + else if (has_location_a && !has_location_b) + return true; + else if (!has_location_a && has_location_b) + return false; + + const auto &name1 = to_name(a->self); + const auto &name2 = to_name(b->self); + + if (name1.empty() && name2.empty()) + return a->self < b->self; + else if (name1.empty()) + return true; + else if (name2.empty()) + return false; + + return name1.compare(name2) < 0; + }; + + auto input_builtins = active_input_builtins; + input_builtins.clear(BuiltInNumWorkgroups); + input_builtins.clear(BuiltInPointCoord); + input_builtins.clear(BuiltInSubgroupSize); + input_builtins.clear(BuiltInSubgroupLocalInvocationId); + input_builtins.clear(BuiltInSubgroupEqMask); + input_builtins.clear(BuiltInSubgroupLtMask); + input_builtins.clear(BuiltInSubgroupLeMask); + input_builtins.clear(BuiltInSubgroupGtMask); + input_builtins.clear(BuiltInSubgroupGeMask); + + if (!input_variables.empty() || !input_builtins.empty()) + { + require_input = true; + statement("struct SPIRV_Cross_Input"); + + begin_scope(); + sort(input_variables.begin(), input_variables.end(), variable_compare); + for (auto var : input_variables) + emit_interface_block_in_struct(*var, active_inputs); + emit_builtin_inputs_in_struct(); + end_scope_decl(); + statement(""); + } + + if (!output_variables.empty() || !active_output_builtins.empty()) + { + require_output = true; + statement("struct SPIRV_Cross_Output"); + + begin_scope(); + // FIXME: Use locations properly if they exist. + sort(output_variables.begin(), output_variables.end(), variable_compare); + for (auto var : output_variables) + emit_interface_block_in_struct(*var, active_outputs); + emit_builtin_outputs_in_struct(); + end_scope_decl(); + statement(""); + } + + // Global variables. 
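+	// For illustration (per the switch below): Workgroup-storage variables are declared
+	// "groupshared" (e.g. a hypothetical "groupshared float4 sdata[64];"), while every
+	// other global that reaches this point is declared "static".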
+ for (auto global : global_variables) + { + auto &var = get(global); + if (var.storage != StorageClassOutput) + { + add_resource_name(var.self); + + const char *storage = nullptr; + switch (var.storage) + { + case StorageClassWorkgroup: + storage = "groupshared"; + break; + + default: + storage = "static"; + break; + } + statement(storage, " ", variable_decl(var), ";"); + emitted = true; + } + } + + if (emitted) + statement(""); + + declare_undefined_values(); + + if (requires_op_fmod) + { + static const char *types[] = { + "float", + "float2", + "float3", + "float4", + }; + + for (auto &type : types) + { + statement(type, " mod(", type, " x, ", type, " y)"); + begin_scope(); + statement("return x - y * floor(x / y);"); + end_scope(); + statement(""); + } + } + + if (requires_textureProj) + { + if (hlsl_options.shader_model >= 40) + { + statement("float SPIRV_Cross_projectTextureCoordinate(float2 coord)"); + begin_scope(); + statement("return coord.x / coord.y;"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_projectTextureCoordinate(float3 coord)"); + begin_scope(); + statement("return float2(coord.x, coord.y) / coord.z;"); + end_scope(); + statement(""); + + statement("float3 SPIRV_Cross_projectTextureCoordinate(float4 coord)"); + begin_scope(); + statement("return float3(coord.x, coord.y, coord.z) / coord.w;"); + end_scope(); + statement(""); + } + else + { + statement("float4 SPIRV_Cross_projectTextureCoordinate(float2 coord)"); + begin_scope(); + statement("return float4(coord.x, 0.0, 0.0, coord.y);"); + end_scope(); + statement(""); + + statement("float4 SPIRV_Cross_projectTextureCoordinate(float3 coord)"); + begin_scope(); + statement("return float4(coord.x, coord.y, 0.0, coord.z);"); + end_scope(); + statement(""); + + statement("float4 SPIRV_Cross_projectTextureCoordinate(float4 coord)"); + begin_scope(); + statement("return coord;"); + end_scope(); + statement(""); + } + } + + if (required_textureSizeVariants != 0) + { + static const char *types[QueryTypeCount] = { "float4", "int4", "uint4" }; + static const char *dims[QueryDimCount] = { "Texture1D", "Texture1DArray", "Texture2D", "Texture2DArray", + "Texture3D", "Buffer", "TextureCube", "TextureCubeArray", + "Texture2DMS", "Texture2DMSArray" }; + + static const bool has_lod[QueryDimCount] = { true, true, true, true, true, false, true, true, false, false }; + + static const char *ret_types[QueryDimCount] = { + "uint", "uint2", "uint2", "uint3", "uint3", "uint", "uint2", "uint3", "uint2", "uint3", + }; + + static const uint32_t return_arguments[QueryDimCount] = { + 1, 2, 2, 3, 3, 1, 2, 3, 2, 3, + }; + + for (uint32_t index = 0; index < QueryDimCount; index++) + { + for (uint32_t type_index = 0; type_index < QueryTypeCount; type_index++) + { + uint32_t bit = 16 * type_index + index; + uint64_t mask = 1ull << bit; + + if ((required_textureSizeVariants & mask) == 0) + continue; + + statement(ret_types[index], " SPIRV_Cross_textureSize(", dims[index], "<", types[type_index], + "> Tex, uint Level, out uint Param)"); + begin_scope(); + statement(ret_types[index], " ret;"); + switch (return_arguments[index]) + { + case 1: + if (has_lod[index]) + statement("Tex.GetDimensions(Level, ret.x, Param);"); + else + { + statement("Tex.GetDimensions(ret.x);"); + statement("Param = 0u;"); + } + break; + case 2: + if (has_lod[index]) + statement("Tex.GetDimensions(Level, ret.x, ret.y, Param);"); + else + statement("Tex.GetDimensions(ret.x, ret.y, Param);"); + break; + case 3: + if (has_lod[index]) + 
statement("Tex.GetDimensions(Level, ret.x, ret.y, ret.z, Param);"); + else + statement("Tex.GetDimensions(ret.x, ret.y, ret.z, Param);"); + break; + } + + statement("return ret;"); + end_scope(); + statement(""); + } + } + } + + if (requires_fp16_packing) + { + // HLSL does not pack into a single word sadly :( + statement("uint SPIRV_Cross_packHalf2x16(float2 value)"); + begin_scope(); + statement("uint2 Packed = f32tof16(value);"); + statement("return Packed.x | (Packed.y << 16);"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_unpackHalf2x16(uint value)"); + begin_scope(); + statement("return f16tof32(uint2(value & 0xffff, value >> 16));"); + end_scope(); + statement(""); + } + + if (requires_explicit_fp16_packing) + { + // HLSL does not pack into a single word sadly :( + statement("uint SPIRV_Cross_packFloat2x16(min16float2 value)"); + begin_scope(); + statement("uint2 Packed = f32tof16(value);"); + statement("return Packed.x | (Packed.y << 16);"); + end_scope(); + statement(""); + + statement("min16float2 SPIRV_Cross_unpackFloat2x16(uint value)"); + begin_scope(); + statement("return min16float2(f16tof32(uint2(value & 0xffff, value >> 16)));"); + end_scope(); + statement(""); + } + + // HLSL does not seem to have builtins for these operation, so roll them by hand ... + if (requires_unorm8_packing) + { + statement("uint SPIRV_Cross_packUnorm4x8(float4 value)"); + begin_scope(); + statement("uint4 Packed = uint4(round(saturate(value) * 255.0));"); + statement("return Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24);"); + end_scope(); + statement(""); + + statement("float4 SPIRV_Cross_unpackUnorm4x8(uint value)"); + begin_scope(); + statement("uint4 Packed = uint4(value & 0xff, (value >> 8) & 0xff, (value >> 16) & 0xff, value >> 24);"); + statement("return float4(Packed) / 255.0;"); + end_scope(); + statement(""); + } + + if (requires_snorm8_packing) + { + statement("uint SPIRV_Cross_packSnorm4x8(float4 value)"); + begin_scope(); + statement("int4 Packed = int4(round(clamp(value, -1.0, 1.0) * 127.0)) & 0xff;"); + statement("return uint(Packed.x | (Packed.y << 8) | (Packed.z << 16) | (Packed.w << 24));"); + end_scope(); + statement(""); + + statement("float4 SPIRV_Cross_unpackSnorm4x8(uint value)"); + begin_scope(); + statement("int SignedValue = int(value);"); + statement("int4 Packed = int4(SignedValue << 24, SignedValue << 16, SignedValue << 8, SignedValue) >> 24;"); + statement("return clamp(float4(Packed) / 127.0, -1.0, 1.0);"); + end_scope(); + statement(""); + } + + if (requires_unorm16_packing) + { + statement("uint SPIRV_Cross_packUnorm2x16(float2 value)"); + begin_scope(); + statement("uint2 Packed = uint2(round(saturate(value) * 65535.0));"); + statement("return Packed.x | (Packed.y << 16);"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_unpackUnorm2x16(uint value)"); + begin_scope(); + statement("uint2 Packed = uint2(value & 0xffff, value >> 16);"); + statement("return float2(Packed) / 65535.0;"); + end_scope(); + statement(""); + } + + if (requires_snorm16_packing) + { + statement("uint SPIRV_Cross_packSnorm2x16(float2 value)"); + begin_scope(); + statement("int2 Packed = int2(round(clamp(value, -1.0, 1.0) * 32767.0)) & 0xffff;"); + statement("return uint(Packed.x | (Packed.y << 16));"); + end_scope(); + statement(""); + + statement("float2 SPIRV_Cross_unpackSnorm2x16(uint value)"); + begin_scope(); + statement("int SignedValue = int(value);"); + statement("int2 Packed = int2(SignedValue << 16, SignedValue) >> 16;"); + 
statement("return clamp(float2(Packed) / 32767.0, -1.0, 1.0);"); + end_scope(); + statement(""); + } + + if (requires_bitfield_insert) + { + static const char *types[] = { "uint", "uint2", "uint3", "uint4" }; + for (auto &type : types) + { + statement(type, " SPIRV_Cross_bitfieldInsert(", type, " Base, ", type, " Insert, uint Offset, uint Count)"); + begin_scope(); + statement("uint Mask = Count == 32 ? 0xffffffff : (((1u << Count) - 1) << (Offset & 31));"); + statement("return (Base & ~Mask) | ((Insert << Offset) & Mask);"); + end_scope(); + statement(""); + } + } + + if (requires_bitfield_extract) + { + static const char *unsigned_types[] = { "uint", "uint2", "uint3", "uint4" }; + for (auto &type : unsigned_types) + { + statement(type, " SPIRV_Cross_bitfieldUExtract(", type, " Base, uint Offset, uint Count)"); + begin_scope(); + statement("uint Mask = Count == 32 ? 0xffffffff : ((1 << Count) - 1);"); + statement("return (Base >> Offset) & Mask;"); + end_scope(); + statement(""); + } + + // In this overload, we will have to do sign-extension, which we will emulate by shifting up and down. + static const char *signed_types[] = { "int", "int2", "int3", "int4" }; + for (auto &type : signed_types) + { + statement(type, " SPIRV_Cross_bitfieldSExtract(", type, " Base, int Offset, int Count)"); + begin_scope(); + statement("int Mask = Count == 32 ? -1 : ((1 << Count) - 1);"); + statement(type, " Masked = (Base >> Offset) & Mask;"); + statement("int ExtendShift = (32 - Count) & 31;"); + statement("return (Masked << ExtendShift) >> ExtendShift;"); + end_scope(); + statement(""); + } + } + + if (requires_inverse_2x2) + { + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. The contents of the matrix are changed."); + statement("float2x2 SPIRV_Cross_Inverse(float2x2 m)"); + begin_scope(); + statement("float2x2 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement("adj[0][0] = m[1][1];"); + statement("adj[0][1] = -m[0][1];"); + statement_no_indent(""); + statement("adj[1][0] = -m[1][0];"); + statement("adj[1][1] = m[0][0];"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + } + + if (requires_inverse_3x3) + { + statement("// Returns the determinant of a 2x2 matrix."); + statement("float SPIRV_Cross_Det2x2(float a1, float a2, float b1, float b2)"); + begin_scope(); + statement("return a1 * b2 - b1 * a2;"); + end_scope(); + statement_no_indent(""); + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. 
The contents of the matrix are changed."); + statement("float3x3 SPIRV_Cross_Inverse(float3x3 m)"); + begin_scope(); + statement("float3x3 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement("adj[0][0] = SPIRV_Cross_Det2x2(m[1][1], m[1][2], m[2][1], m[2][2]);"); + statement("adj[0][1] = -SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[2][1], m[2][2]);"); + statement("adj[0][2] = SPIRV_Cross_Det2x2(m[0][1], m[0][2], m[1][1], m[1][2]);"); + statement_no_indent(""); + statement("adj[1][0] = -SPIRV_Cross_Det2x2(m[1][0], m[1][2], m[2][0], m[2][2]);"); + statement("adj[1][1] = SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[2][0], m[2][2]);"); + statement("adj[1][2] = -SPIRV_Cross_Det2x2(m[0][0], m[0][2], m[1][0], m[1][2]);"); + statement_no_indent(""); + statement("adj[2][0] = SPIRV_Cross_Det2x2(m[1][0], m[1][1], m[2][0], m[2][1]);"); + statement("adj[2][1] = -SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[2][0], m[2][1]);"); + statement("adj[2][2] = SPIRV_Cross_Det2x2(m[0][0], m[0][1], m[1][0], m[1][1]);"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + } + + if (requires_inverse_4x4) + { + if (!requires_inverse_3x3) + { + statement("// Returns the determinant of a 2x2 matrix."); + statement("float SPIRV_Cross_Det2x2(float a1, float a2, float b1, float b2)"); + begin_scope(); + statement("return a1 * b2 - b1 * a2;"); + end_scope(); + statement(""); + } + + statement("// Returns the determinant of a 3x3 matrix."); + statement("float SPIRV_Cross_Det3x3(float a1, float a2, float a3, float b1, float b2, float b3, float c1, " + "float c2, float c3)"); + begin_scope(); + statement("return a1 * SPIRV_Cross_Det2x2(b2, b3, c2, c3) - b1 * SPIRV_Cross_Det2x2(a2, a3, c2, c3) + c1 * " + "SPIRV_Cross_Det2x2(a2, a3, " + "b2, b3);"); + end_scope(); + statement_no_indent(""); + statement("// Returns the inverse of a matrix, by using the algorithm of calculating the classical"); + statement("// adjoint and dividing by the determinant. 
The contents of the matrix are changed."); + statement("float4x4 SPIRV_Cross_Inverse(float4x4 m)"); + begin_scope(); + statement("float4x4 adj; // The adjoint matrix (inverse after dividing by determinant)"); + statement_no_indent(""); + statement("// Create the transpose of the cofactors, as the classical adjoint of the matrix."); + statement( + "adj[0][0] = SPIRV_Cross_Det3x3(m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement( + "adj[0][1] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[2][1], m[2][2], m[2][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement( + "adj[0][2] = SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[3][1], m[3][2], " + "m[3][3]);"); + statement( + "adj[0][3] = -SPIRV_Cross_Det3x3(m[0][1], m[0][2], m[0][3], m[1][1], m[1][2], m[1][3], m[2][1], m[2][2], " + "m[2][3]);"); + statement_no_indent(""); + statement( + "adj[1][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement( + "adj[1][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[2][0], m[2][2], m[2][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement( + "adj[1][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[3][0], m[3][2], " + "m[3][3]);"); + statement( + "adj[1][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][2], m[0][3], m[1][0], m[1][2], m[1][3], m[2][0], m[2][2], " + "m[2][3]);"); + statement_no_indent(""); + statement( + "adj[2][0] = SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement( + "adj[2][1] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[2][0], m[2][1], m[2][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement( + "adj[2][2] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[3][0], m[3][1], " + "m[3][3]);"); + statement( + "adj[2][3] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][3], m[1][0], m[1][1], m[1][3], m[2][0], m[2][1], " + "m[2][3]);"); + statement_no_indent(""); + statement( + "adj[3][0] = -SPIRV_Cross_Det3x3(m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement( + "adj[3][1] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[2][0], m[2][1], m[2][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement( + "adj[3][2] = -SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[3][0], m[3][1], " + "m[3][2]);"); + statement( + "adj[3][3] = SPIRV_Cross_Det3x3(m[0][0], m[0][1], m[0][2], m[1][0], m[1][1], m[1][2], m[2][0], m[2][1], " + "m[2][2]);"); + statement_no_indent(""); + statement("// Calculate the determinant as a combination of the cofactors of the first row."); + statement("float det = (adj[0][0] * m[0][0]) + (adj[0][1] * m[1][0]) + (adj[0][2] * m[2][0]) + (adj[0][3] " + "* m[3][0]);"); + statement_no_indent(""); + statement("// Divide the classical adjoint matrix by the determinant."); + statement("// If determinant is zero, matrix is not invertable, so leave it unchanged."); + statement("return (det != 0.0f) ? (adj * (1.0f / det)) : m;"); + end_scope(); + statement(""); + } +} + +string CompilerHLSL::layout_for_member(const SPIRType &type, uint32_t index) +{ + auto &flags = get_member_decoration_bitset(type.self, index); + + // HLSL can emit row_major or column_major decoration in any struct. + // Do not try to merge combined decorations for children like in GLSL. + + // Flip the convention. HLSL is a bit odd in that the memory layout is column major ... 
but the language API is "row-major". + // The way to deal with this is to multiply everything in inverse order, and reverse the memory layout. + if (flags.get(DecorationColMajor)) + return "row_major "; + else if (flags.get(DecorationRowMajor)) + return "column_major "; + + return ""; +} + +void CompilerHLSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier, uint32_t base_offset) +{ + auto &membertype = get(member_type_id); + + Bitset memberflags; + auto &memb = ir.meta[type.self].members; + if (index < memb.size()) + memberflags = memb[index].decoration_flags; + + string qualifiers; + bool is_block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + + if (is_block) + qualifiers = to_interpolation_qualifiers(memberflags); + + string packing_offset; + bool is_push_constant = type.storage == StorageClassPushConstant; + + if ((has_decoration(type.self, DecorationCPacked) || is_push_constant) && + has_member_decoration(type.self, index, DecorationOffset)) + { + uint32_t offset = memb[index].offset - base_offset; + if (offset & 3) + SPIRV_CROSS_THROW("Cannot pack on tighter bounds than 4 bytes in HLSL."); + + static const char *packing_swizzle[] = { "", ".y", ".z", ".w" }; + packing_offset = join(" : packoffset(c", offset / 16, packing_swizzle[(offset & 15) >> 2], ")"); + } + + statement(layout_for_member(type, index), qualifiers, qualifier, + variable_decl(membertype, to_member_name(type, index)), packing_offset, ";"); +} + +void CompilerHLSL::emit_buffer_block(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + + bool is_uav = var.storage == StorageClassStorageBuffer || has_decoration(type.self, DecorationBufferBlock); + + if (is_uav) + { + Bitset flags = ir.get_buffer_block_flags(var); + bool is_readonly = flags.get(DecorationNonWritable); + bool is_coherent = flags.get(DecorationCoherent); + add_resource_name(var.self); + statement(is_coherent ? "globallycoherent " : "", is_readonly ? "ByteAddressBuffer " : "RWByteAddressBuffer ", + to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), ";"); + } + else + { + if (type.array.empty()) + { + if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset)) + set_decoration(type.self, DecorationCPacked); + else + SPIRV_CROSS_THROW("cbuffer cannot be expressed with either HLSL packing layout or packoffset."); + + // Flatten the top-level struct so we can use packoffset, + // this restriction is similar to GLSL where layout(offset) is not possible on sub-structs. + flattened_structs.insert(var.self); + + // Prefer the block name if possible. + auto buffer_name = to_name(type.self, false); + if (ir.meta[type.self].decoration.alias.empty() || + resource_names.find(buffer_name) != end(resource_names) || + block_names.find(buffer_name) != end(block_names)) + { + buffer_name = get_block_fallback_name(var.self); + } + + add_variable(block_names, resource_names, buffer_name); + + // If for some reason buffer_name is an illegal name, make a final fallback to a workaround name. + // This cannot conflict with anything else, so we're safe now. + if (buffer_name.empty()) + buffer_name = join("_", get(var.basetype).self, "_", var.self); + + block_names.insert(buffer_name); + + // Save for post-reflection later. 
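+            // The block is flattened into a plain cbuffer whose members carry packoffset
+            // qualifiers. The emitted shape is roughly (names and binding illustrative):
+            //   cbuffer MyUBO : register(b0)
+            //   {
+            //       float4 myubo_colour : packoffset(c0);
+            //   };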
+ declared_block_names[var.self] = buffer_name; + + type.member_name_cache.clear(); + add_resource_name(var.self); + statement("cbuffer ", buffer_name, to_resource_binding(var)); + begin_scope(); + + uint32_t i = 0; + for (auto &member : type.member_types) + { + add_member_name(type, i); + auto backup_name = get_member_name(type.self, i); + auto member_name = to_member_name(type, i); + set_member_name(type.self, i, sanitize_underscores(join(to_name(var.self), "_", member_name))); + emit_struct_member(type, member, i, ""); + set_member_name(type.self, i, backup_name); + i++; + } + + end_scope_decl(); + statement(""); + } + else + { + if (hlsl_options.shader_model < 51) + SPIRV_CROSS_THROW( + "Need ConstantBuffer to use arrays of UBOs, but this is only supported in SM 5.1."); + + // ConstantBuffer does not support packoffset, so it is unuseable unless everything aligns as we expect. + if (!buffer_is_packing_standard(type, BufferPackingHLSLCbuffer)) + SPIRV_CROSS_THROW("HLSL ConstantBuffer cannot be expressed with normal HLSL packing rules."); + + add_resource_name(type.self); + add_resource_name(var.self); + + emit_struct(get(type.self)); + statement("ConstantBuffer<", to_name(type.self), "> ", to_name(var.self), type_to_array_glsl(type), + to_resource_binding(var), ";"); + } + } +} + +void CompilerHLSL::emit_push_constant_block(const SPIRVariable &var) +{ + if (root_constants_layout.empty()) + { + emit_buffer_block(var); + } + else + { + for (const auto &layout : root_constants_layout) + { + auto &type = get(var.basetype); + + if (buffer_is_packing_standard(type, BufferPackingHLSLCbufferPackOffset, layout.start, layout.end)) + set_decoration(type.self, DecorationCPacked); + else + SPIRV_CROSS_THROW( + "root constant cbuffer cannot be expressed with either HLSL packing layout or packoffset."); + + flattened_structs.insert(var.self); + type.member_name_cache.clear(); + add_resource_name(var.self); + auto &memb = ir.meta[type.self].members; + + statement("cbuffer SPIRV_CROSS_RootConstant_", to_name(var.self), + to_resource_register('b', layout.binding, layout.space)); + begin_scope(); + + // Index of the next field in the generated root constant constant buffer + auto constant_index = 0u; + + // Iterate over all member of the push constant and check which of the fields + // fit into the given root constant layout. + for (auto i = 0u; i < memb.size(); i++) + { + const auto offset = memb[i].offset; + if (layout.start <= offset && offset < layout.end) + { + const auto &member = type.member_types[i]; + + add_member_name(type, constant_index); + auto backup_name = get_member_name(type.self, i); + auto member_name = to_member_name(type, i); + set_member_name(type.self, constant_index, + sanitize_underscores(join(to_name(var.self), "_", member_name))); + emit_struct_member(type, member, i, "", layout.start); + set_member_name(type.self, constant_index, backup_name); + + constant_index++; + } + } + + end_scope_decl(); + } + } +} + +string CompilerHLSL::to_sampler_expression(uint32_t id) +{ + auto expr = join("_", to_expression(id)); + auto index = expr.find_first_of('['); + if (index == string::npos) + { + return expr + "_sampler"; + } + else + { + // We have an expression like _ident[array], so we cannot tack on _sampler, insert it inside the string instead. 
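+        // E.g. "_MyTextures[2]" becomes "_MyTextures_sampler[2]".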
+ return expr.insert(index, "_sampler"); + } +} + +void CompilerHLSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) +{ + if (hlsl_options.shader_model >= 40 && combined_image_samplers.empty()) + { + set(result_id, result_type, image_id, samp_id); + } + else + { + // Make sure to suppress usage tracking. It is illegal to create temporaries of opaque types. + emit_op(result_type, result_id, to_combined_image_sampler(image_id, samp_id), true, true); + } +} + +string CompilerHLSL::to_func_call_arg(uint32_t id) +{ + string arg_str = CompilerGLSL::to_func_call_arg(id); + + if (hlsl_options.shader_model <= 30) + return arg_str; + + // Manufacture automatic sampler arg if the arg is a SampledImage texture and we're in modern HLSL. + auto &type = expression_type(id); + + // We don't have to consider combined image samplers here via OpSampledImage because + // those variables cannot be passed as arguments to functions. + // Only global SampledImage variables may be used as arguments. + if (type.basetype == SPIRType::SampledImage && type.image.dim != DimBuffer) + arg_str += ", " + to_sampler_expression(id); + + return arg_str; +} + +void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + auto &execution = get_entry_point(); + // Avoid shadow declarations. + local_variable_names = resource_names; + + string decl; + + auto &type = get(func.return_type); + if (type.array.empty()) + { + decl += flags_to_precision_qualifiers_glsl(type, return_flags); + decl += type_to_glsl(type); + decl += " "; + } + else + { + // We cannot return arrays in HLSL, so "return" through an out variable. + decl = "void "; + } + + if (func.self == ir.default_entry_point) + { + if (execution.model == ExecutionModelVertex) + decl += "vert_main"; + else if (execution.model == ExecutionModelFragment) + decl += "frag_main"; + else if (execution.model == ExecutionModelGLCompute) + decl += "comp_main"; + else + SPIRV_CROSS_THROW("Unsupported execution model."); + processing_entry_point = true; + } + else + decl += to_name(func.self); + + decl += "("; + vector arglist; + + if (!type.array.empty()) + { + // Fake array returns by writing to an out array instead. + string out_argument; + out_argument += "out "; + out_argument += type_to_glsl(type); + out_argument += " "; + out_argument += "SPIRV_Cross_return_value"; + out_argument += type_to_array_glsl(type); + arglist.push_back(move(out_argument)); + } + + for (auto &arg : func.arguments) + { + // Do not pass in separate images or samplers if we're remapping + // to combined image samplers. + if (skip_argument(arg.id)) + continue; + + // Might change the variable name if it already exists in this function. + // SPIRV OpName doesn't have any semantic effect, so it's valid for an implementation + // to use same name for variables. + // Since we want to make the GLSL debuggable and somewhat sane, use fallback names for variables which are duplicates. + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + // Flatten a combined sampler to two separate arguments in modern HLSL. + auto &arg_type = get(arg.type); + if (hlsl_options.shader_model > 30 && arg_type.basetype == SPIRType::SampledImage && + arg_type.image.dim != DimBuffer) + { + // Manufacture automatic sampler arg for SampledImage texture + arglist.push_back(join(image_is_comparison(arg_type, arg.id) ? 
"SamplerComparisonState " : "SamplerState ", + to_sampler_expression(arg.id), type_to_array_glsl(arg_type))); + } + + // Hold a pointer to the parameter so we can invalidate the readonly field if needed. + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + for (auto &arg : func.shadow_arguments) + { + // Might change the variable name if it already exists in this function. + // SPIRV OpName doesn't have any semantic effect, so it's valid for an implementation + // to use same name for variables. + // Since we want to make the GLSL debuggable and somewhat sane, use fallback names for variables which are duplicates. + add_local_variable_name(arg.id); + + arglist.push_back(argument_decl(arg)); + + // Hold a pointer to the parameter so we can invalidate the readonly field if needed. + auto *var = maybe_get(arg.id); + if (var) + var->parameter = &arg; + } + + decl += merge(arglist); + decl += ")"; + statement(decl); +} + +void CompilerHLSL::emit_hlsl_entry_point() +{ + vector arguments; + + if (require_input) + arguments.push_back("SPIRV_Cross_Input stage_input"); + + // Add I/O blocks as separate arguments with appropriate storage qualifier. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassInput && var.storage != StorageClassOutput) + continue; + + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + if (var.storage == StorageClassInput) + { + arguments.push_back(join("in ", variable_decl(type, join("stage_input", to_name(var.self))))); + } + else if (var.storage == StorageClassOutput) + { + arguments.push_back(join("out ", variable_decl(type, join("stage_output", to_name(var.self))))); + } + } + } + } + + auto &execution = get_entry_point(); + + switch (execution.model) + { + case ExecutionModelGLCompute: + { + SpecializationConstant wg_x, wg_y, wg_z; + get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + + uint32_t x = execution.workgroup_size.x; + uint32_t y = execution.workgroup_size.y; + uint32_t z = execution.workgroup_size.z; + + auto x_expr = wg_x.id ? get(wg_x.id).specialization_constant_macro_name : to_string(x); + auto y_expr = wg_y.id ? get(wg_y.id).specialization_constant_macro_name : to_string(y); + auto z_expr = wg_z.id ? get(wg_z.id).specialization_constant_macro_name : to_string(z); + + statement("[numthreads(", x_expr, ", ", y_expr, ", ", z_expr, ")]"); + break; + } + case ExecutionModelFragment: + if (execution.flags.get(ExecutionModeEarlyFragmentTests)) + statement("[earlydepthstencil]"); + break; + default: + break; + } + + statement(require_output ? "SPIRV_Cross_Output " : "void ", "main(", merge(arguments), ")"); + begin_scope(); + bool legacy = hlsl_options.shader_model <= 30; + + // Copy builtins from entry point arguments to globals. + active_input_builtins.for_each_bit([&](uint32_t i) { + auto builtin = builtin_to_glsl(static_cast(i), StorageClassInput); + switch (static_cast(i)) + { + case BuiltInFragCoord: + // VPOS in D3D9 is sampled at integer locations, apply half-pixel offset to be consistent. + // TODO: Do we need an option here? Any reason why a D3D9 shader would be used + // on a D3D10+ system with a different rasterization config? 
+ if (legacy) + statement(builtin, " = stage_input.", builtin, " + float4(0.5f, 0.5f, 0.0f, 0.0f);"); + else + statement(builtin, " = stage_input.", builtin, ";"); + break; + + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInInstanceId: + case BuiltInInstanceIndex: + // D3D semantics are uint, but shader wants int. + statement(builtin, " = int(stage_input.", builtin, ");"); + break; + + case BuiltInNumWorkgroups: + case BuiltInPointCoord: + case BuiltInSubgroupSize: + case BuiltInSubgroupLocalInvocationId: + break; + + case BuiltInSubgroupEqMask: + // Emulate these ... + // No 64-bit in HLSL, so have to do it in 32-bit and unroll. + statement("gl_SubgroupEqMask = 1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96));"); + statement("if (WaveGetLaneIndex() >= 32) gl_SubgroupEqMask.x = 0;"); + statement("if (WaveGetLaneIndex() >= 64 || WaveGetLaneIndex() < 32) gl_SubgroupEqMask.y = 0;"); + statement("if (WaveGetLaneIndex() >= 96 || WaveGetLaneIndex() < 64) gl_SubgroupEqMask.z = 0;"); + statement("if (WaveGetLaneIndex() < 96) gl_SubgroupEqMask.w = 0;"); + break; + + case BuiltInSubgroupGeMask: + // Emulate these ... + // No 64-bit in HLSL, so have to do it in 32-bit and unroll. + statement("gl_SubgroupGeMask = ~((1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u);"); + statement("if (WaveGetLaneIndex() >= 32) gl_SubgroupGeMask.x = 0u;"); + statement("if (WaveGetLaneIndex() >= 64) gl_SubgroupGeMask.y = 0u;"); + statement("if (WaveGetLaneIndex() >= 96) gl_SubgroupGeMask.z = 0u;"); + statement("if (WaveGetLaneIndex() < 32) gl_SubgroupGeMask.y = ~0u;"); + statement("if (WaveGetLaneIndex() < 64) gl_SubgroupGeMask.z = ~0u;"); + statement("if (WaveGetLaneIndex() < 96) gl_SubgroupGeMask.w = ~0u;"); + break; + + case BuiltInSubgroupGtMask: + // Emulate these ... + // No 64-bit in HLSL, so have to do it in 32-bit and unroll. + statement("uint gt_lane_index = WaveGetLaneIndex() + 1;"); + statement("gl_SubgroupGtMask = ~((1u << (gt_lane_index - uint4(0, 32, 64, 96))) - 1u);"); + statement("if (gt_lane_index >= 32) gl_SubgroupGtMask.x = 0u;"); + statement("if (gt_lane_index >= 64) gl_SubgroupGtMask.y = 0u;"); + statement("if (gt_lane_index >= 96) gl_SubgroupGtMask.z = 0u;"); + statement("if (gt_lane_index >= 128) gl_SubgroupGtMask.w = 0u;"); + statement("if (gt_lane_index < 32) gl_SubgroupGtMask.y = ~0u;"); + statement("if (gt_lane_index < 64) gl_SubgroupGtMask.z = ~0u;"); + statement("if (gt_lane_index < 96) gl_SubgroupGtMask.w = ~0u;"); + break; + + case BuiltInSubgroupLeMask: + // Emulate these ... + // No 64-bit in HLSL, so have to do it in 32-bit and unroll. + statement("uint le_lane_index = WaveGetLaneIndex() + 1;"); + statement("gl_SubgroupLeMask = (1u << (le_lane_index - uint4(0, 32, 64, 96))) - 1u;"); + statement("if (le_lane_index >= 32) gl_SubgroupLeMask.x = ~0u;"); + statement("if (le_lane_index >= 64) gl_SubgroupLeMask.y = ~0u;"); + statement("if (le_lane_index >= 96) gl_SubgroupLeMask.z = ~0u;"); + statement("if (le_lane_index >= 128) gl_SubgroupLeMask.w = ~0u;"); + statement("if (le_lane_index < 32) gl_SubgroupLeMask.y = 0u;"); + statement("if (le_lane_index < 64) gl_SubgroupLeMask.z = 0u;"); + statement("if (le_lane_index < 96) gl_SubgroupLeMask.w = 0u;"); + break; + + case BuiltInSubgroupLtMask: + // Emulate these ... + // No 64-bit in HLSL, so have to do it in 32-bit and unroll. 
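+        // Each uint4 component covers 32 lanes (x: 0-31, y: 32-63, z: 64-95, w: 96-127).
+        // The shift builds a partial mask per component, and the branches below force the
+        // components lying entirely below or above the current lane to all-ones or zero.
+        // E.g. for lane 33 the emitted code yields gl_SubgroupLtMask = (0xffffffff, 0x1, 0, 0).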
+ statement("gl_SubgroupLtMask = (1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u;"); + statement("if (WaveGetLaneIndex() >= 32) gl_SubgroupLtMask.x = ~0u;"); + statement("if (WaveGetLaneIndex() >= 64) gl_SubgroupLtMask.y = ~0u;"); + statement("if (WaveGetLaneIndex() >= 96) gl_SubgroupLtMask.z = ~0u;"); + statement("if (WaveGetLaneIndex() < 32) gl_SubgroupLtMask.y = 0u;"); + statement("if (WaveGetLaneIndex() < 64) gl_SubgroupLtMask.z = 0u;"); + statement("if (WaveGetLaneIndex() < 96) gl_SubgroupLtMask.w = 0u;"); + break; + + case BuiltInClipDistance: + for (uint32_t clip = 0; clip < clip_distance_count; clip++) + statement("gl_ClipDistance[", clip, "] = stage_input.gl_ClipDistance", clip / 4, ".", "xyzw"[clip & 3], + ";"); + break; + + case BuiltInCullDistance: + for (uint32_t cull = 0; cull < cull_distance_count; cull++) + statement("gl_CullDistance[", cull, "] = stage_input.gl_CullDistance", cull / 4, ".", "xyzw"[cull & 3], + ";"); + break; + + default: + statement(builtin, " = stage_input.", builtin, ";"); + break; + } + }); + + // Copy from stage input struct to globals. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassInput) + continue; + + bool need_matrix_unroll = var.storage == StorageClassInput && execution.model == ExecutionModelVertex; + + if (!block && !var.remapped_variable && type.pointer && !is_builtin_variable(var) && + interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + auto &mtype = get(var.basetype); + if (need_matrix_unroll && mtype.columns > 1) + { + // Unroll matrices. + for (uint32_t col = 0; col < mtype.columns; col++) + statement(name, "[", col, "] = stage_input.", name, "_", col, ";"); + } + else + { + statement(name, " = stage_input.", name, ";"); + } + } + + // I/O blocks don't use the common stage input/output struct, but separate outputs. + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + statement(name, " = stage_input", name, ";"); + } + } + } + + // Run the shader. + if (execution.model == ExecutionModelVertex) + statement("vert_main();"); + else if (execution.model == ExecutionModelFragment) + statement("frag_main();"); + else if (execution.model == ExecutionModelGLCompute) + statement("comp_main();"); + else + SPIRV_CROSS_THROW("Unsupported shader stage."); + + // Copy block outputs. + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassOutput) + continue; + + // I/O blocks don't use the common stage input/output struct, but separate outputs. + if (block && !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + statement("stage_output", name, " = ", name, ";"); + } + } + } + + // Copy stage outputs. + if (require_output) + { + statement("SPIRV_Cross_Output stage_output;"); + + // Copy builtins from globals to return struct. + active_output_builtins.for_each_bit([&](uint32_t i) { + // PointSize doesn't exist in HLSL. 
+ if (i == BuiltInPointSize) + return; + + switch (static_cast(i)) + { + case BuiltInClipDistance: + for (uint32_t clip = 0; clip < clip_distance_count; clip++) + statement("stage_output.gl_ClipDistance", clip / 4, ".", "xyzw"[clip & 3], " = gl_ClipDistance[", + clip, "];"); + break; + + case BuiltInCullDistance: + for (uint32_t cull = 0; cull < cull_distance_count; cull++) + statement("stage_output.gl_CullDistance", cull / 4, ".", "xyzw"[cull & 3], " = gl_CullDistance[", + cull, "];"); + break; + + default: + { + auto builtin_expr = builtin_to_glsl(static_cast(i), StorageClassOutput); + statement("stage_output.", builtin_expr, " = ", builtin_expr, ";"); + break; + } + } + }); + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock); + + if (var.storage != StorageClassOutput) + continue; + + if (!block && var.storage != StorageClassFunction && !var.remapped_variable && type.pointer && + !is_builtin_variable(var) && interface_variable_exists_in_entry_point(var.self)) + { + auto name = to_name(var.self); + + if (legacy && execution.model == ExecutionModelFragment) + { + string output_filler; + for (uint32_t size = type.vecsize; size < 4; ++size) + output_filler += ", 0.0"; + + statement("stage_output.", name, " = float4(", name, output_filler, ");"); + } + else + { + statement("stage_output.", name, " = ", name, ";"); + } + } + } + } + + statement("return stage_output;"); + } + + end_scope(); +} + +void CompilerHLSL::emit_fixup() +{ + if (get_entry_point().model == ExecutionModelVertex) + { + // Do various mangling on the gl_Position. + if (hlsl_options.shader_model <= 30) + { + statement("gl_Position.x = gl_Position.x - gl_HalfPixel.x * " + "gl_Position.w;"); + statement("gl_Position.y = gl_Position.y + gl_HalfPixel.y * " + "gl_Position.w;"); + } + + if (options.vertex.flip_vert_y) + statement("gl_Position.y = -gl_Position.y;"); + if (options.vertex.fixup_clipspace) + statement("gl_Position.z = (gl_Position.z + gl_Position.w) * 0.5;"); + } +} + +void CompilerHLSL::emit_texture_op(const Instruction &i) +{ + auto *ops = stream(i); + auto op = static_cast(i.op); + uint32_t length = i.length; + + vector inherited_expressions; + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t img = ops[2]; + uint32_t coord = ops[3]; + uint32_t dref = 0; + uint32_t comp = 0; + bool gather = false; + bool proj = false; + const uint32_t *opt = nullptr; + auto *combined_image = maybe_get(img); + auto img_expr = to_expression(combined_image ? 
combined_image->image : img); + + inherited_expressions.push_back(coord); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleDrefExplicitLod: + dref = ops[4]; + opt = &ops[5]; + length -= 5; + break; + + case OpImageSampleProjDrefImplicitLod: + case OpImageSampleProjDrefExplicitLod: + dref = ops[4]; + proj = true; + opt = &ops[5]; + length -= 5; + break; + + case OpImageDrefGather: + dref = ops[4]; + opt = &ops[5]; + gather = true; + length -= 5; + break; + + case OpImageGather: + comp = ops[4]; + opt = &ops[5]; + gather = true; + length -= 5; + break; + + case OpImageSampleProjImplicitLod: + case OpImageSampleProjExplicitLod: + opt = &ops[4]; + length -= 4; + proj = true; + break; + + case OpImageQueryLod: + opt = &ops[4]; + length -= 4; + break; + + default: + opt = &ops[4]; + length -= 4; + break; + } + + auto &imgtype = expression_type(img); + uint32_t coord_components = 0; + switch (imgtype.image.dim) + { + case spv::Dim1D: + coord_components = 1; + break; + case spv::Dim2D: + coord_components = 2; + break; + case spv::Dim3D: + coord_components = 3; + break; + case spv::DimCube: + coord_components = 3; + break; + case spv::DimBuffer: + coord_components = 1; + break; + default: + coord_components = 2; + break; + } + + if (dref) + inherited_expressions.push_back(dref); + + if (proj) + coord_components++; + if (imgtype.image.arrayed) + coord_components++; + + uint32_t bias = 0; + uint32_t lod = 0; + uint32_t grad_x = 0; + uint32_t grad_y = 0; + uint32_t coffset = 0; + uint32_t offset = 0; + uint32_t coffsets = 0; + uint32_t sample = 0; + uint32_t flags = 0; + + if (length) + { + flags = opt[0]; + opt++; + length--; + } + + auto test = [&](uint32_t &v, uint32_t flag) { + if (length && (flags & flag)) + { + v = *opt++; + inherited_expressions.push_back(v); + length--; + } + }; + + test(bias, ImageOperandsBiasMask); + test(lod, ImageOperandsLodMask); + test(grad_x, ImageOperandsGradMask); + test(grad_y, ImageOperandsGradMask); + test(coffset, ImageOperandsConstOffsetMask); + test(offset, ImageOperandsOffsetMask); + test(coffsets, ImageOperandsConstOffsetsMask); + test(sample, ImageOperandsSampleMask); + + string expr; + string texop; + + if (op == OpImageFetch) + { + if (hlsl_options.shader_model < 40) + { + SPIRV_CROSS_THROW("texelFetch is not supported in HLSL shader model 2/3."); + } + texop += img_expr; + texop += ".Load"; + } + else if (op == OpImageQueryLod) + { + texop += img_expr; + texop += ".CalculateLevelOfDetail"; + } + else + { + auto &imgformat = get(imgtype.image.type); + if (imgformat.basetype != SPIRType::Float) + { + SPIRV_CROSS_THROW("Sampling non-float textures is not supported in HLSL."); + } + + if (hlsl_options.shader_model >= 40) + { + texop += img_expr; + + if (image_is_comparison(imgtype, img)) + { + if (gather) + { + SPIRV_CROSS_THROW("GatherCmp does not exist in HLSL."); + } + else if (lod || grad_x || grad_y) + { + // Assume we want a fixed level, and the only thing we can get in HLSL is SampleCmpLevelZero. 
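+                    // Explicit LOD or gradient sampling combined with depth comparison has no
+                    // direct HLSL equivalent, so the requested level is effectively ignored.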
+ texop += ".SampleCmpLevelZero"; + } + else + texop += ".SampleCmp"; + } + else if (gather) + { + uint32_t comp_num = get(comp).scalar(); + if (hlsl_options.shader_model >= 50) + { + switch (comp_num) + { + case 0: + texop += ".GatherRed"; + break; + case 1: + texop += ".GatherGreen"; + break; + case 2: + texop += ".GatherBlue"; + break; + case 3: + texop += ".GatherAlpha"; + break; + default: + SPIRV_CROSS_THROW("Invalid component."); + } + } + else + { + if (comp_num == 0) + texop += ".Gather"; + else + SPIRV_CROSS_THROW("HLSL shader model 4 can only gather from the red component."); + } + } + else if (bias) + texop += ".SampleBias"; + else if (grad_x || grad_y) + texop += ".SampleGrad"; + else if (lod) + texop += ".SampleLevel"; + else + texop += ".Sample"; + } + else + { + switch (imgtype.image.dim) + { + case Dim1D: + texop += "tex1D"; + break; + case Dim2D: + texop += "tex2D"; + break; + case Dim3D: + texop += "tex3D"; + break; + case DimCube: + texop += "texCUBE"; + break; + case DimRect: + case DimBuffer: + case DimSubpassData: + SPIRV_CROSS_THROW("Buffer texture support is not yet implemented for HLSL"); // TODO + default: + SPIRV_CROSS_THROW("Invalid dimension."); + } + + if (gather) + SPIRV_CROSS_THROW("textureGather is not supported in HLSL shader model 2/3."); + if (offset || coffset) + SPIRV_CROSS_THROW("textureOffset is not supported in HLSL shader model 2/3."); + if (proj) + texop += "proj"; + if (grad_x || grad_y) + texop += "grad"; + if (lod) + texop += "lod"; + if (bias) + texop += "bias"; + } + } + + expr += texop; + expr += "("; + if (hlsl_options.shader_model < 40) + { + if (combined_image) + SPIRV_CROSS_THROW("Separate images/samplers are not supported in HLSL shader model 2/3."); + expr += to_expression(img); + } + else if (op != OpImageFetch) + { + string sampler_expr; + if (combined_image) + sampler_expr = to_expression(combined_image->sampler); + else + sampler_expr = to_sampler_expression(img); + expr += sampler_expr; + } + + auto swizzle = [](uint32_t comps, uint32_t in_comps) -> const char * { + if (comps == in_comps) + return ""; + + switch (comps) + { + case 1: + return ".x"; + case 2: + return ".xy"; + case 3: + return ".xyz"; + default: + return ""; + } + }; + + bool forward = should_forward(coord); + + // The IR can give us more components than we need, so chop them off as needed. + auto coord_expr = to_expression(coord) + swizzle(coord_components, expression_type(coord).vecsize); + + if (proj) + { + if (!requires_textureProj) + { + requires_textureProj = true; + force_recompile = true; + } + coord_expr = "SPIRV_Cross_projectTextureCoordinate(" + coord_expr + ")"; + } + + if (hlsl_options.shader_model < 40 && lod) + { + auto &coordtype = expression_type(coord); + string coord_filler; + for (uint32_t size = coordtype.vecsize; size < 3; ++size) + { + coord_filler += ", 0.0"; + } + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(lod) + ")"; + } + + if (hlsl_options.shader_model < 40 && bias) + { + auto &coordtype = expression_type(coord); + string coord_filler; + for (uint32_t size = coordtype.vecsize; size < 3; ++size) + { + coord_filler += ", 0.0"; + } + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(bias) + ")"; + } + + if (op == OpImageFetch) + { + auto &coordtype = expression_type(coord); + if (imgtype.image.dim != DimBuffer && !imgtype.image.ms) + coord_expr = + join("int", coordtype.vecsize + 1, "(", coord_expr, ", ", lod ? 
to_expression(lod) : string("0"), ")"); + } + else + expr += ", "; + expr += coord_expr; + + if (dref) + { + forward = forward && should_forward(dref); + expr += ", "; + expr += to_expression(dref); + } + + if (!dref && (grad_x || grad_y)) + { + forward = forward && should_forward(grad_x); + forward = forward && should_forward(grad_y); + expr += ", "; + expr += to_expression(grad_x); + expr += ", "; + expr += to_expression(grad_y); + } + + if (!dref && lod && hlsl_options.shader_model >= 40 && op != OpImageFetch) + { + forward = forward && should_forward(lod); + expr += ", "; + expr += to_expression(lod); + } + + if (!dref && bias && hlsl_options.shader_model >= 40) + { + forward = forward && should_forward(bias); + expr += ", "; + expr += to_expression(bias); + } + + if (coffset) + { + forward = forward && should_forward(coffset); + expr += ", "; + expr += to_expression(coffset); + } + else if (offset) + { + forward = forward && should_forward(offset); + expr += ", "; + expr += to_expression(offset); + } + + if (sample) + { + expr += ", "; + expr += to_expression(sample); + } + + expr += ")"; + + if (op == OpImageQueryLod) + { + // This is rather awkward. + // textureQueryLod returns two values, the "accessed level", + // as well as the actual LOD lambda. + // As far as I can tell, there is no way to get the .x component + // according to GLSL spec, and it depends on the sampler itself. + // Just assume X == Y, so we will need to splat the result to a float2. + statement("float _", id, "_tmp = ", expr, ";"); + emit_op(result_type, id, join("float2(_", id, "_tmp, _", id, "_tmp)"), true, true); + } + else + { + emit_op(result_type, id, expr, forward, false); + } + + for (auto &inherit : inherited_expressions) + inherit_expression_dependencies(id, inherit); + + switch (op) + { + case OpImageSampleDrefImplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageQueryLod: + register_control_dependent_expression(id); + break; + + default: + break; + } +} + +string CompilerHLSL::to_resource_binding(const SPIRVariable &var) +{ + // TODO: Basic implementation, might need special consideration for RW/RO structured buffers, + // RW/RO images, and so on. + + if (!has_decoration(var.self, DecorationBinding)) + return ""; + + const auto &type = get(var.basetype); + char space = '\0'; + + switch (type.basetype) + { + case SPIRType::SampledImage: + space = 't'; // SRV + break; + + case SPIRType::Image: + if (type.image.sampled == 2 && type.image.dim != DimSubpassData) + space = 'u'; // UAV + else + space = 't'; // SRV + break; + + case SPIRType::Sampler: + space = 's'; + break; + + case SPIRType::Struct: + { + auto storage = type.storage; + if (storage == StorageClassUniform) + { + if (has_decoration(type.self, DecorationBufferBlock)) + { + Bitset flags = ir.get_buffer_block_flags(var); + bool is_readonly = flags.get(DecorationNonWritable); + space = is_readonly ? 
't' : 'u'; // UAV + } + else if (has_decoration(type.self, DecorationBlock)) + space = 'b'; // Constant buffers + } + else if (storage == StorageClassPushConstant) + space = 'b'; // Constant buffers + else if (storage == StorageClassStorageBuffer) + space = 'u'; // UAV + + break; + } + default: + break; + } + + if (!space) + return ""; + + return to_resource_register(space, get_decoration(var.self, DecorationBinding), + get_decoration(var.self, DecorationDescriptorSet)); +} + +string CompilerHLSL::to_resource_binding_sampler(const SPIRVariable &var) +{ + // For combined image samplers. + if (!has_decoration(var.self, DecorationBinding)) + return ""; + + return to_resource_register('s', get_decoration(var.self, DecorationBinding), + get_decoration(var.self, DecorationDescriptorSet)); +} + +string CompilerHLSL::to_resource_register(char space, uint32_t binding, uint32_t space_set) +{ + if (hlsl_options.shader_model >= 51) + return join(" : register(", space, binding, ", space", space_set, ")"); + else + return join(" : register(", space, binding, ")"); +} + +void CompilerHLSL::emit_modern_uniform(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + switch (type.basetype) + { + case SPIRType::SampledImage: + case SPIRType::Image: + { + bool is_coherent = false; + if (type.basetype == SPIRType::Image && type.image.sampled == 2) + is_coherent = has_decoration(var.self, DecorationCoherent); + + statement(is_coherent ? "globallycoherent " : "", image_type_hlsl_modern(type, var.self), " ", + to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), ";"); + + if (type.basetype == SPIRType::SampledImage && type.image.dim != DimBuffer) + { + // For combined image samplers, also emit a combined image sampler. + if (image_is_comparison(type, var.self)) + statement("SamplerComparisonState ", to_sampler_expression(var.self), type_to_array_glsl(type), + to_resource_binding_sampler(var), ";"); + else + statement("SamplerState ", to_sampler_expression(var.self), type_to_array_glsl(type), + to_resource_binding_sampler(var), ";"); + } + break; + } + + case SPIRType::Sampler: + if (comparison_ids.count(var.self)) + statement("SamplerComparisonState ", to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), + ";"); + else + statement("SamplerState ", to_name(var.self), type_to_array_glsl(type), to_resource_binding(var), ";"); + break; + + default: + statement(variable_decl(var), to_resource_binding(var), ";"); + break; + } +} + +void CompilerHLSL::emit_legacy_uniform(const SPIRVariable &var) +{ + auto &type = get(var.basetype); + switch (type.basetype) + { + case SPIRType::Sampler: + case SPIRType::Image: + SPIRV_CROSS_THROW("Separate image and samplers not supported in legacy HLSL."); + + default: + statement(variable_decl(var), ";"); + break; + } +} + +void CompilerHLSL::emit_uniform(const SPIRVariable &var) +{ + add_resource_name(var.self); + if (hlsl_options.shader_model >= 40) + emit_modern_uniform(var); + else + emit_legacy_uniform(var); +} + +string CompilerHLSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) +{ + if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Int64) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Float) + return "asuint"; + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::UInt) + return 
type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::UInt64) + return type_to_glsl(out_type); + else if (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float) + return "asint"; + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt) + return "asfloat"; + else if (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int) + return "asfloat"; + else if (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double) + SPIRV_CROSS_THROW("Double to Int64 is not supported in HLSL."); + else if (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double) + SPIRV_CROSS_THROW("Double to UInt64 is not supported in HLSL."); + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64) + return "asdouble"; + else if (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64) + return "asdouble"; + else if (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) + { + if (!requires_explicit_fp16_packing) + { + requires_explicit_fp16_packing = true; + force_recompile = true; + } + return "SPIRV_Cross_unpackFloat2x16"; + } + else if (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half && in_type.vecsize == 2) + { + if (!requires_explicit_fp16_packing) + { + requires_explicit_fp16_packing = true; + force_recompile = true; + } + return "SPIRV_Cross_packFloat2x16"; + } + else + return ""; +} + +void CompilerHLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t count) +{ + GLSLstd450 op = static_cast(eop); + + switch (op) + { + case GLSLstd450InverseSqrt: + emit_unary_func_op(result_type, id, args[0], "rsqrt"); + break; + + case GLSLstd450Fract: + emit_unary_func_op(result_type, id, args[0], "frac"); + break; + + case GLSLstd450RoundEven: + SPIRV_CROSS_THROW("roundEven is not supported on HLSL."); + + case GLSLstd450Acosh: + case GLSLstd450Asinh: + case GLSLstd450Atanh: + SPIRV_CROSS_THROW("Inverse hyperbolics are not supported on HLSL."); + + case GLSLstd450FMix: + case GLSLstd450IMix: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "lerp"); + break; + + case GLSLstd450Atan2: + emit_binary_func_op(result_type, id, args[0], args[1], "atan2"); + break; + + case GLSLstd450Fma: + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "mad"); + break; + + case GLSLstd450InterpolateAtCentroid: + emit_unary_func_op(result_type, id, args[0], "EvaluateAttributeAtCentroid"); + break; + case GLSLstd450InterpolateAtSample: + emit_binary_func_op(result_type, id, args[0], args[1], "EvaluateAttributeAtSample"); + break; + case GLSLstd450InterpolateAtOffset: + emit_binary_func_op(result_type, id, args[0], args[1], "EvaluateAttributeSnapped"); + break; + + case GLSLstd450PackHalf2x16: + if (!requires_fp16_packing) + { + requires_fp16_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packHalf2x16"); + break; + + case GLSLstd450UnpackHalf2x16: + if (!requires_fp16_packing) + { + requires_fp16_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackHalf2x16"); + break; + + case GLSLstd450PackSnorm4x8: + if (!requires_snorm8_packing) + { + requires_snorm8_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packSnorm4x8"); + break; 
+ + case GLSLstd450UnpackSnorm4x8: + if (!requires_snorm8_packing) + { + requires_snorm8_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackSnorm4x8"); + break; + + case GLSLstd450PackUnorm4x8: + if (!requires_unorm8_packing) + { + requires_unorm8_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packUnorm4x8"); + break; + + case GLSLstd450UnpackUnorm4x8: + if (!requires_unorm8_packing) + { + requires_unorm8_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackUnorm4x8"); + break; + + case GLSLstd450PackSnorm2x16: + if (!requires_snorm16_packing) + { + requires_snorm16_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packSnorm2x16"); + break; + + case GLSLstd450UnpackSnorm2x16: + if (!requires_snorm16_packing) + { + requires_snorm16_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackSnorm2x16"); + break; + + case GLSLstd450PackUnorm2x16: + if (!requires_unorm16_packing) + { + requires_unorm16_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_packUnorm2x16"); + break; + + case GLSLstd450UnpackUnorm2x16: + if (!requires_unorm16_packing) + { + requires_unorm16_packing = true; + force_recompile = true; + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_unpackUnorm2x16"); + break; + + case GLSLstd450PackDouble2x32: + case GLSLstd450UnpackDouble2x32: + SPIRV_CROSS_THROW("packDouble2x32/unpackDouble2x32 not supported in HLSL."); + + case GLSLstd450FindILsb: + emit_unary_func_op(result_type, id, args[0], "firstbitlow"); + break; + case GLSLstd450FindSMsb: + case GLSLstd450FindUMsb: + emit_unary_func_op(result_type, id, args[0], "firstbithigh"); + break; + + case GLSLstd450MatrixInverse: + { + auto &type = get(result_type); + if (type.vecsize == 2 && type.columns == 2) + { + if (!requires_inverse_2x2) + { + requires_inverse_2x2 = true; + force_recompile = true; + } + } + else if (type.vecsize == 3 && type.columns == 3) + { + if (!requires_inverse_3x3) + { + requires_inverse_3x3 = true; + force_recompile = true; + } + } + else if (type.vecsize == 4 && type.columns == 4) + { + if (!requires_inverse_4x4) + { + requires_inverse_4x4 = true; + force_recompile = true; + } + } + emit_unary_func_op(result_type, id, args[0], "SPIRV_Cross_Inverse"); + break; + } + + default: + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + } +} + +string CompilerHLSL::read_access_chain(const SPIRAccessChain &chain) +{ + auto &type = get(chain.basetype); + + SPIRType target_type; + target_type.basetype = SPIRType::UInt; + target_type.vecsize = type.vecsize; + target_type.columns = type.columns; + + if (type.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Reading structs from ByteAddressBuffer not yet supported."); + + if (type.width != 32) + SPIRV_CROSS_THROW("Reading types other than 32-bit from ByteAddressBuffer not yet supported."); + + if (!type.array.empty()) + SPIRV_CROSS_THROW("Reading arrays from ByteAddressBuffer not yet supported."); + + string load_expr; + + // Load a vector or scalar. 
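+    // A straight vector load becomes one ByteAddressBuffer Load/Load2/Load3/Load4 call plus a
+    // bitcast, e.g. a float4 member at byte offset 16 emits roughly: asfloat(buf.Load4(16)).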
+ if (type.columns == 1 && !chain.row_major_matrix) + { + const char *load_op = nullptr; + switch (type.vecsize) + { + case 1: + load_op = "Load"; + break; + case 2: + load_op = "Load2"; + break; + case 3: + load_op = "Load3"; + break; + case 4: + load_op = "Load4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + load_expr = join(chain.base, ".", load_op, "(", chain.dynamic_index, chain.static_index, ")"); + } + else if (type.columns == 1) + { + // Strided load since we are loading a column from a row-major matrix. + if (type.vecsize > 1) + { + load_expr = type_to_glsl(target_type); + load_expr += "("; + } + + for (uint32_t r = 0; r < type.vecsize; r++) + { + load_expr += + join(chain.base, ".Load(", chain.dynamic_index, chain.static_index + r * chain.matrix_stride, ")"); + if (r + 1 < type.vecsize) + load_expr += ", "; + } + + if (type.vecsize > 1) + load_expr += ")"; + } + else if (!chain.row_major_matrix) + { + // Load a matrix, column-major, the easy case. + const char *load_op = nullptr; + switch (type.vecsize) + { + case 1: + load_op = "Load"; + break; + case 2: + load_op = "Load2"; + break; + case 3: + load_op = "Load3"; + break; + case 4: + load_op = "Load4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + // Note, this loading style in HLSL is *actually* row-major, but we always treat matrices as transposed in this backend, + // so row-major is technically column-major ... + load_expr = type_to_glsl(target_type); + load_expr += "("; + for (uint32_t c = 0; c < type.columns; c++) + { + load_expr += join(chain.base, ".", load_op, "(", chain.dynamic_index, + chain.static_index + c * chain.matrix_stride, ")"); + if (c + 1 < type.columns) + load_expr += ", "; + } + load_expr += ")"; + } + else + { + // Pick out elements one by one ... Hopefully compilers are smart enough to recognize this pattern + // considering HLSL is "row-major decl", but "column-major" memory layout (basically implicit transpose model, ugh) ... + + load_expr = type_to_glsl(target_type); + load_expr += "("; + for (uint32_t c = 0; c < type.columns; c++) + { + for (uint32_t r = 0; r < type.vecsize; r++) + { + load_expr += join(chain.base, ".Load(", chain.dynamic_index, + chain.static_index + c * (type.width / 8) + r * chain.matrix_stride, ")"); + + if ((r + 1 < type.vecsize) || (c + 1 < type.columns)) + load_expr += ", "; + } + } + load_expr += ")"; + } + + auto bitcast_op = bitcast_glsl_op(type, target_type); + if (!bitcast_op.empty()) + load_expr = join(bitcast_op, "(", load_expr, ")"); + + return load_expr; +} + +void CompilerHLSL::emit_load(const Instruction &instruction) +{ + auto ops = stream(instruction); + + auto *chain = maybe_get(ops[2]); + if (chain) + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + + auto load_expr = read_access_chain(*chain); + + bool forward = should_forward(ptr) && forced_temporaries.find(id) == end(forced_temporaries); + + // If we are forwarding this load, + // don't register the read to access chain here, defer that to when we actually use the expression, + // using the add_implied_read_expression mechanism. + if (!forward) + track_expression_read(chain->self); + + // Do not forward complex load sequences like matrices, structs and arrays. 
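+        // Forwarding would splice the whole multi-Load expression into every use site, so such
+        // loads are materialized into a temporary instead.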
+ auto &type = get(result_type); + if (type.columns > 1 || !type.array.empty() || type.basetype == SPIRType::Struct) + forward = false; + + auto &e = emit_op(result_type, id, load_expr, forward, true); + e.need_transpose = false; + register_read(id, ptr, forward); + inherit_expression_dependencies(id, ptr); + if (forward) + add_implied_read_expression(e, chain->self); + } + else + CompilerGLSL::emit_instruction(instruction); +} + +void CompilerHLSL::write_access_chain(const SPIRAccessChain &chain, uint32_t value) +{ + auto &type = get(chain.basetype); + + // Make sure we trigger a read of the constituents in the access chain. + track_expression_read(chain.self); + + SPIRType target_type; + target_type.basetype = SPIRType::UInt; + target_type.vecsize = type.vecsize; + target_type.columns = type.columns; + + if (type.basetype == SPIRType::Struct) + SPIRV_CROSS_THROW("Writing structs to RWByteAddressBuffer not yet supported."); + if (type.width != 32) + SPIRV_CROSS_THROW("Writing types other than 32-bit to RWByteAddressBuffer not yet supported."); + if (!type.array.empty()) + SPIRV_CROSS_THROW("Reading arrays from ByteAddressBuffer not yet supported."); + + if (type.columns == 1 && !chain.row_major_matrix) + { + const char *store_op = nullptr; + switch (type.vecsize) + { + case 1: + store_op = "Store"; + break; + case 2: + store_op = "Store2"; + break; + case 3: + store_op = "Store3"; + break; + case 4: + store_op = "Store4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + auto store_expr = to_expression(value); + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".", store_op, "(", chain.dynamic_index, chain.static_index, ", ", store_expr, ");"); + } + else if (type.columns == 1) + { + // Strided store. 
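+        // Storing one column of a row-major matrix: the components sit matrix_stride bytes
+        // apart in memory, so emit a separate 32-bit Store per component.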
+ for (uint32_t r = 0; r < type.vecsize; r++) + { + auto store_expr = to_enclosed_expression(value); + if (type.vecsize > 1) + { + store_expr += "."; + store_expr += index_to_swizzle(r); + } + remove_duplicate_swizzle(store_expr); + + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".Store(", chain.dynamic_index, chain.static_index + chain.matrix_stride * r, ", ", + store_expr, ");"); + } + } + else if (!chain.row_major_matrix) + { + const char *store_op = nullptr; + switch (type.vecsize) + { + case 1: + store_op = "Store"; + break; + case 2: + store_op = "Store2"; + break; + case 3: + store_op = "Store3"; + break; + case 4: + store_op = "Store4"; + break; + default: + SPIRV_CROSS_THROW("Unknown vector size."); + } + + for (uint32_t c = 0; c < type.columns; c++) + { + auto store_expr = join(to_enclosed_expression(value), "[", c, "]"); + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".", store_op, "(", chain.dynamic_index, chain.static_index + c * chain.matrix_stride, + ", ", store_expr, ");"); + } + } + else + { + for (uint32_t r = 0; r < type.vecsize; r++) + { + for (uint32_t c = 0; c < type.columns; c++) + { + auto store_expr = join(to_enclosed_expression(value), "[", c, "].", index_to_swizzle(r)); + remove_duplicate_swizzle(store_expr); + auto bitcast_op = bitcast_glsl_op(target_type, type); + if (!bitcast_op.empty()) + store_expr = join(bitcast_op, "(", store_expr, ")"); + statement(chain.base, ".Store(", chain.dynamic_index, + chain.static_index + c * (type.width / 8) + r * chain.matrix_stride, ", ", store_expr, ");"); + } + } + } + + register_write(chain.self); +} + +void CompilerHLSL::emit_store(const Instruction &instruction) +{ + auto ops = stream(instruction); + auto *chain = maybe_get(ops[0]); + if (chain) + write_access_chain(*chain, ops[1]); + else + CompilerGLSL::emit_instruction(instruction); +} + +void CompilerHLSL::emit_access_chain(const Instruction &instruction) +{ + auto ops = stream(instruction); + uint32_t length = instruction.length; + + bool need_byte_access_chain = false; + auto &type = expression_type(ops[2]); + const SPIRAccessChain *chain = nullptr; + if (type.storage == StorageClassStorageBuffer || has_decoration(type.self, DecorationBufferBlock)) + { + // If we are starting to poke into an SSBO, we are dealing with ByteAddressBuffers, and we need + // to emit SPIRAccessChain rather than a plain SPIRExpression. + uint32_t chain_arguments = length - 3; + if (chain_arguments > type.array.size()) + need_byte_access_chain = true; + } + else + { + // Keep tacking on an existing access chain. + chain = maybe_get(ops[2]); + if (chain) + need_byte_access_chain = true; + } + + if (need_byte_access_chain) + { + uint32_t to_plain_buffer_length = static_cast(type.array.size()); + auto *backing_variable = maybe_get_backing_variable(ops[2]); + + string base; + if (to_plain_buffer_length != 0) + base = access_chain(ops[2], &ops[3], to_plain_buffer_length, get(ops[0])); + else if (chain) + base = chain->base; + else + base = to_expression(ops[2]); + + // Start traversing type hierarchy at the proper non-pointer types. + auto *basetype = &get_pointee_type(type); + + // Traverse the type hierarchy down to the actual buffer types. 
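// Illustrative only: for an access like ssbo.verts[i], where verts is a float4 array with a
// 16-byte ArrayStride at member offset 0 (names and layout assumed), the SPIRAccessChain built
// below would carry roughly base = "_buf", dynamic_index = "i * 16 + ", static_index = 0, so a
// later OpLoad of that pointer becomes something like:
//
//   asfloat(_buf.Load4(i * 16 + 0))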
+ for (uint32_t i = 0; i < to_plain_buffer_length; i++) + { + assert(basetype->parent_type); + basetype = &get(basetype->parent_type); + } + + uint32_t matrix_stride = 0; + bool row_major_matrix = false; + + // Inherit matrix information. + if (chain) + { + matrix_stride = chain->matrix_stride; + row_major_matrix = chain->row_major_matrix; + } + + auto offsets = + flattened_access_chain_offset(*basetype, &ops[3 + to_plain_buffer_length], + length - 3 - to_plain_buffer_length, 0, 1, &row_major_matrix, &matrix_stride); + + auto &e = set(ops[1], ops[0], type.storage, base, offsets.first, offsets.second); + e.row_major_matrix = row_major_matrix; + e.matrix_stride = matrix_stride; + e.immutable = should_forward(ops[2]); + e.loaded_from = backing_variable ? backing_variable->self : 0; + + if (chain) + { + e.dynamic_index += chain->dynamic_index; + e.static_index += chain->static_index; + } + + for (uint32_t i = 2; i < length; i++) + { + inherit_expression_dependencies(ops[1], ops[i]); + add_implied_read_expression(e, ops[i]); + } + } + else + { + CompilerGLSL::emit_instruction(instruction); + } +} + +void CompilerHLSL::emit_atomic(const uint32_t *ops, uint32_t length, spv::Op op) +{ + const char *atomic_op = nullptr; + + string value_expr; + if (op != OpAtomicIDecrement && op != OpAtomicIIncrement) + value_expr = to_expression(ops[op == OpAtomicCompareExchange ? 6 : 5]); + + switch (op) + { + case OpAtomicIIncrement: + atomic_op = "InterlockedAdd"; + value_expr = "1"; + break; + + case OpAtomicIDecrement: + atomic_op = "InterlockedAdd"; + value_expr = "-1"; + break; + + case OpAtomicISub: + atomic_op = "InterlockedAdd"; + value_expr = join("-", enclose_expression(value_expr)); + break; + + case OpAtomicSMin: + case OpAtomicUMin: + atomic_op = "InterlockedMin"; + break; + + case OpAtomicSMax: + case OpAtomicUMax: + atomic_op = "InterlockedMax"; + break; + + case OpAtomicAnd: + atomic_op = "InterlockedAnd"; + break; + + case OpAtomicOr: + atomic_op = "InterlockedOr"; + break; + + case OpAtomicXor: + atomic_op = "InterlockedXor"; + break; + + case OpAtomicIAdd: + atomic_op = "InterlockedAdd"; + break; + + case OpAtomicExchange: + atomic_op = "InterlockedExchange"; + break; + + case OpAtomicCompareExchange: + if (length < 8) + SPIRV_CROSS_THROW("Not enough data for opcode."); + atomic_op = "InterlockedCompareExchange"; + value_expr = join(to_expression(ops[7]), ", ", value_expr); + break; + + default: + SPIRV_CROSS_THROW("Unknown atomic opcode."); + } + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + forced_temporaries.insert(ops[1]); + + auto &type = get(result_type); + statement(variable_decl(type, to_name(id)), ";"); + + auto &data_type = expression_type(ops[2]); + auto *chain = maybe_get(ops[2]); + SPIRType::BaseType expr_type; + if (data_type.storage == StorageClassImage || !chain) + { + statement(atomic_op, "(", to_expression(ops[2]), ", ", value_expr, ", ", to_name(id), ");"); + expr_type = data_type.basetype; + } + else + { + // RWByteAddress buffer is always uint in its underlying type. 
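// Illustrative only: for an OpAtomicIAdd of a value v on such a chain (hypothetical _buf,
// static offset 0, result temporary named _45), the emitted HLSL is roughly:
//
//   int _45;
//   _buf.InterlockedAdd(0, v, _45);
//
// followed by a bitcast of _45 back to the SPIR-V result type whenever that type is not uint,
// since the ByteAddressBuffer interface itself only deals in uint.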
+ expr_type = SPIRType::UInt; + statement(chain->base, ".", atomic_op, "(", chain->dynamic_index, chain->static_index, ", ", value_expr, ", ", + to_name(id), ");"); + } + + auto expr = bitcast_expression(type, expr_type, to_name(id)); + set(id, expr, result_type, true); + flush_all_atomic_capable_variables(); + register_read(ops[1], ops[2], should_forward(ops[2])); +} + +void CompilerHLSL::emit_subgroup_op(const Instruction &i) +{ + if (hlsl_options.shader_model < 60) + SPIRV_CROSS_THROW("Wave ops requires SM 6.0 or higher."); + + const uint32_t *ops = stream(i); + auto op = static_cast(i.op); + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + auto scope = static_cast(get(ops[2]).scalar()); + if (scope != ScopeSubgroup) + SPIRV_CROSS_THROW("Only subgroup scope is supported."); + + const auto make_inclusive_Sum = [&](const string &expr) -> string { + return join(expr, " + ", to_expression(ops[4])); + }; + + const auto make_inclusive_Product = [&](const string &expr) -> string { + return join(expr, " * ", to_expression(ops[4])); + }; + +#define make_inclusive_BitAnd(expr) "" +#define make_inclusive_BitOr(expr) "" +#define make_inclusive_BitXor(expr) "" +#define make_inclusive_Min(expr) "" +#define make_inclusive_Max(expr) "" + + switch (op) + { + case OpGroupNonUniformElect: + emit_op(result_type, id, "WaveIsFirstLane()", true); + break; + + case OpGroupNonUniformBroadcast: + emit_binary_func_op(result_type, id, ops[3], ops[4], "WaveReadLaneAt"); + break; + + case OpGroupNonUniformBroadcastFirst: + emit_unary_func_op(result_type, id, ops[3], "WaveReadLaneFirst"); + break; + + case OpGroupNonUniformBallot: + emit_unary_func_op(result_type, id, ops[3], "WaveActiveBallot"); + break; + + case OpGroupNonUniformInverseBallot: + SPIRV_CROSS_THROW("Cannot trivially implement InverseBallot in HLSL."); + break; + + case OpGroupNonUniformBallotBitExtract: + SPIRV_CROSS_THROW("Cannot trivially implement BallotBitExtract in HLSL."); + break; + + case OpGroupNonUniformBallotFindLSB: + SPIRV_CROSS_THROW("Cannot trivially implement BallotFindLSB in HLSL."); + break; + + case OpGroupNonUniformBallotFindMSB: + SPIRV_CROSS_THROW("Cannot trivially implement BallotFindMSB in HLSL."); + break; + + case OpGroupNonUniformBallotBitCount: + { + auto operation = static_cast(ops[3]); + if (operation == GroupOperationReduce) + { + bool forward = should_forward(ops[4]); + auto left = join("countbits(", to_enclosed_expression(ops[4]), ".x) + countbits(", + to_enclosed_expression(ops[4]), ".y)"); + auto right = join("countbits(", to_enclosed_expression(ops[4]), ".z) + countbits(", + to_enclosed_expression(ops[4]), ".w)"); + emit_op(result_type, id, join(left, " + ", right), forward); + inherit_expression_dependencies(id, ops[4]); + } + else if (operation == GroupOperationInclusiveScan) + SPIRV_CROSS_THROW("Cannot trivially implement BallotBitCount Inclusive Scan in HLSL."); + else if (operation == GroupOperationExclusiveScan) + SPIRV_CROSS_THROW("Cannot trivially implement BallotBitCount Exclusive Scan in HLSL."); + else + SPIRV_CROSS_THROW("Invalid BitCount operation."); + break; + } + + case OpGroupNonUniformShuffle: + SPIRV_CROSS_THROW("Cannot trivially implement Shuffle in HLSL."); + case OpGroupNonUniformShuffleXor: + SPIRV_CROSS_THROW("Cannot trivially implement ShuffleXor in HLSL."); + case OpGroupNonUniformShuffleUp: + SPIRV_CROSS_THROW("Cannot trivially implement ShuffleUp in HLSL."); + case OpGroupNonUniformShuffleDown: + SPIRV_CROSS_THROW("Cannot trivially implement ShuffleDown in HLSL."); + + case 
OpGroupNonUniformAll: + emit_unary_func_op(result_type, id, ops[3], "WaveActiveAllTrue"); + break; + + case OpGroupNonUniformAny: + emit_unary_func_op(result_type, id, ops[3], "WaveActiveAnyTrue"); + break; + + case OpGroupNonUniformAllEqual: + { + auto &type = get(result_type); + emit_unary_func_op(result_type, id, ops[3], + type.basetype == SPIRType::Boolean ? "WaveActiveAllEqualBool" : "WaveActiveAllEqual"); + break; + } + + // clang-format off +#define HLSL_GROUP_OP(op, hlsl_op, supports_scan) \ +case OpGroupNonUniform##op: \ + { \ + auto operation = static_cast(ops[3]); \ + if (operation == GroupOperationReduce) \ + emit_unary_func_op(result_type, id, ops[4], "WaveActive" #hlsl_op); \ + else if (operation == GroupOperationInclusiveScan && supports_scan) \ + { \ + bool forward = should_forward(ops[4]); \ + emit_op(result_type, id, make_inclusive_##hlsl_op (join("WavePrefix" #hlsl_op, "(", to_expression(ops[4]), ")")), forward); \ + inherit_expression_dependencies(id, ops[4]); \ + } \ + else if (operation == GroupOperationExclusiveScan && supports_scan) \ + emit_unary_func_op(result_type, id, ops[4], "WavePrefix" #hlsl_op); \ + else if (operation == GroupOperationClusteredReduce) \ + SPIRV_CROSS_THROW("Cannot trivially implement ClusteredReduce in HLSL."); \ + else \ + SPIRV_CROSS_THROW("Invalid group operation."); \ + break; \ + } + HLSL_GROUP_OP(FAdd, Sum, true) + HLSL_GROUP_OP(FMul, Product, true) + HLSL_GROUP_OP(FMin, Min, false) + HLSL_GROUP_OP(FMax, Max, false) + HLSL_GROUP_OP(IAdd, Sum, true) + HLSL_GROUP_OP(IMul, Product, true) + HLSL_GROUP_OP(SMin, Min, false) + HLSL_GROUP_OP(SMax, Max, false) + HLSL_GROUP_OP(UMin, Min, false) + HLSL_GROUP_OP(UMax, Max, false) + HLSL_GROUP_OP(BitwiseAnd, BitAnd, false) + HLSL_GROUP_OP(BitwiseOr, BitOr, false) + HLSL_GROUP_OP(BitwiseXor, BitXor, false) +#undef HLSL_GROUP_OP + // clang-format on + + case OpGroupNonUniformQuadSwap: + { + uint32_t direction = get(ops[4]).scalar(); + if (direction == 0) + emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossX"); + else if (direction == 1) + emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossY"); + else if (direction == 2) + emit_unary_func_op(result_type, id, ops[3], "QuadReadAcrossDiagonal"); + else + SPIRV_CROSS_THROW("Invalid quad swap direction."); + break; + } + + case OpGroupNonUniformQuadBroadcast: + { + emit_binary_func_op(result_type, id, ops[3], ops[4], "QuadReadLaneAt"); + break; + } + + default: + SPIRV_CROSS_THROW("Invalid opcode for subgroup."); + } + + register_control_dependent_expression(id); +} + +void CompilerHLSL::emit_instruction(const Instruction &instruction) +{ + auto ops = stream(instruction); + auto opcode = static_cast(instruction.op); + +#define HLSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op) +#define HLSL_BOP_CAST(op, type) \ + emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, hlsl_opcode_is_sign_invariant(opcode)) +#define HLSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op) +#define HLSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op) +#define HLSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op) +#define HLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define HLSL_BFOP_CAST(op, type) \ + emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, hlsl_opcode_is_sign_invariant(opcode)) +#define HLSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define HLSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], 
#op) + + switch (opcode) + { + case OpAccessChain: + case OpInBoundsAccessChain: + { + emit_access_chain(instruction); + break; + } + + case OpStore: + { + emit_store(instruction); + break; + } + + case OpLoad: + { + emit_load(instruction); + break; + } + + case OpMatrixTimesVector: + { + emit_binary_func_op(ops[0], ops[1], ops[3], ops[2], "mul"); + break; + } + + case OpVectorTimesMatrix: + { + emit_binary_func_op(ops[0], ops[1], ops[3], ops[2], "mul"); + break; + } + + case OpMatrixTimesMatrix: + { + emit_binary_func_op(ops[0], ops[1], ops[3], ops[2], "mul"); + break; + } + + case OpFMod: + { + if (!requires_op_fmod) + { + requires_op_fmod = true; + force_recompile = true; + } + CompilerGLSL::emit_instruction(instruction); + break; + } + + case OpFRem: + emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], "fmod"); + break; + + case OpImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto *combined = maybe_get(ops[2]); + + if (combined) + { + auto &e = emit_op(result_type, id, to_expression(combined->image), true, true); + auto *var = maybe_get_backing_variable(combined->image); + if (var) + e.loaded_from = var->self; + } + else + { + auto &e = emit_op(result_type, id, to_expression(ops[2]), true, true); + auto *var = maybe_get_backing_variable(ops[2]); + if (var) + e.loaded_from = var->self; + } + break; + } + + case OpDPdx: + HLSL_UFOP(ddx); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdy: + HLSL_UFOP(ddy); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxFine: + HLSL_UFOP(ddx_fine); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyFine: + HLSL_UFOP(ddy_fine); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdxCoarse: + HLSL_UFOP(ddx_coarse); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdyCoarse: + HLSL_UFOP(ddy_coarse); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidth: + case OpFwidthCoarse: + case OpFwidthFine: + HLSL_UFOP(fwidth); + register_control_dependent_expression(ops[1]); + break; + + case OpLogicalNot: + { + auto result_type = ops[0]; + auto id = ops[1]; + auto &type = get(result_type); + + if (type.vecsize > 1) + emit_unrolled_unary_op(result_type, id, ops[2], "!"); + else + HLSL_UOP(!); + break; + } + + case OpIEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "=="); + else + HLSL_BOP_CAST(==, SPIRType::Int); + break; + } + + case OpLogicalEqual: + case OpFOrdEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "=="); + else + HLSL_BOP(==); + break; + } + + case OpINotEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "!="); + else + HLSL_BOP_CAST(!=, SPIRType::Int); + break; + } + + case OpLogicalNotEqual: + case OpFOrdNotEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "!="); + else + HLSL_BOP(!=); + break; + } + + case OpUGreaterThan: + case OpSGreaterThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + auto type = opcode == OpUGreaterThan ? 
SPIRType::UInt : SPIRType::Int; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">"); + else + HLSL_BOP_CAST(>, type); + break; + } + + case OpFOrdGreaterThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">"); + else + HLSL_BOP(>); + break; + } + + case OpUGreaterThanEqual: + case OpSGreaterThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + auto type = opcode == OpUGreaterThanEqual ? SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">="); + else + HLSL_BOP_CAST(>=, type); + break; + } + + case OpFOrdGreaterThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], ">="); + else + HLSL_BOP(>=); + break; + } + + case OpULessThan: + case OpSLessThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + auto type = opcode == OpULessThan ? SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<"); + else + HLSL_BOP_CAST(<, type); + break; + } + + case OpFOrdLessThan: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<"); + else + HLSL_BOP(<); + break; + } + + case OpULessThanEqual: + case OpSLessThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + auto type = opcode == OpULessThanEqual ? SPIRType::UInt : SPIRType::Int; + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<="); + else + HLSL_BOP_CAST(<=, type); + break; + } + + case OpFOrdLessThanEqual: + { + auto result_type = ops[0]; + auto id = ops[1]; + + if (expression_type(ops[2]).vecsize > 1) + emit_unrolled_binary_op(result_type, id, ops[2], ops[3], "<="); + else + HLSL_BOP(<=); + break; + } + + case OpImageQueryLod: + emit_texture_op(instruction); + break; + + case OpImageQuerySizeLod: + { + auto result_type = ops[0]; + auto id = ops[1]; + + require_texture_query_variant(expression_type(ops[2])); + + auto dummy_samples_levels = join(get_fallback_name(id), "_dummy_parameter"); + statement("uint ", dummy_samples_levels, ";"); + + auto expr = join("SPIRV_Cross_textureSize(", to_expression(ops[2]), ", ", + bitcast_expression(SPIRType::UInt, ops[3]), ", ", dummy_samples_levels, ")"); + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::UInt, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpImageQuerySize: + { + auto result_type = ops[0]; + auto id = ops[1]; + + require_texture_query_variant(expression_type(ops[2])); + + auto dummy_samples_levels = join(get_fallback_name(id), "_dummy_parameter"); + statement("uint ", dummy_samples_levels, ";"); + + auto expr = join("SPIRV_Cross_textureSize(", to_expression(ops[2]), ", 0u, ", dummy_samples_levels, ")"); + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::UInt, expr); + emit_op(result_type, id, expr, true); + break; + } + + case OpImageQuerySamples: + case OpImageQueryLevels: + { + auto result_type = ops[0]; + auto id = ops[1]; + + require_texture_query_variant(expression_type(ops[2])); + + // Keep it simple and do not emit special variants to make this look nicer ... 
+ // This stuff is barely, if ever, used. + forced_temporaries.insert(id); + auto &type = get(result_type); + statement(variable_decl(type, to_name(id)), ";"); + statement("SPIRV_Cross_textureSize(", to_expression(ops[2]), ", 0u, ", to_name(id), ");"); + + auto &restype = get(ops[0]); + auto expr = bitcast_expression(restype, SPIRType::UInt, to_name(id)); + set(id, expr, result_type, true); + break; + } + + case OpImageRead: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto *var = maybe_get_backing_variable(ops[2]); + auto &type = expression_type(ops[2]); + bool subpass_data = type.image.dim == DimSubpassData; + bool pure = false; + + string imgexpr; + + if (subpass_data) + { + if (hlsl_options.shader_model < 40) + SPIRV_CROSS_THROW("Subpass loads are not supported in HLSL shader model 2/3."); + + // Similar to GLSL, implement subpass loads using texelFetch. + if (type.image.ms) + { + uint32_t operands = ops[4]; + if (operands != ImageOperandsSampleMask || instruction.length != 6) + SPIRV_CROSS_THROW("Multisampled image used in OpImageRead, but unexpected operand mask was used."); + uint32_t sample = ops[5]; + imgexpr = join(to_expression(ops[2]), ".Load(int2(gl_FragCoord.xy), ", to_expression(sample), ")"); + } + else + imgexpr = join(to_expression(ops[2]), ".Load(int3(int2(gl_FragCoord.xy), 0))"); + + pure = true; + } + else + { + imgexpr = join(to_expression(ops[2]), "[", to_expression(ops[3]), "]"); + // The underlying image type in HLSL depends on the image format, unlike GLSL, where all images are "vec4", + // except that the underlying type changes how the data is interpreted. + if (var && !subpass_data) + imgexpr = remap_swizzle(get(result_type), + image_format_to_components(get(var->basetype).image.format), imgexpr); + } + + if (var && var->forwardable) + { + bool forward = forced_temporaries.find(id) == end(forced_temporaries); + auto &e = emit_op(result_type, id, imgexpr, forward); + + if (!pure) + { + e.loaded_from = var->self; + if (forward) + var->dependees.push_back(id); + } + } + else + emit_op(result_type, id, imgexpr, false); + + inherit_expression_dependencies(id, ops[2]); + if (type.image.ms) + inherit_expression_dependencies(id, ops[5]); + break; + } + + case OpImageWrite: + { + auto *var = maybe_get_backing_variable(ops[0]); + + // The underlying image type in HLSL depends on the image format, unlike GLSL, where all images are "vec4", + // except that the underlying type changes how the data is interpreted. + auto value_expr = to_expression(ops[2]); + if (var) + { + auto &type = get(var->basetype); + auto narrowed_type = get(type.image.type); + narrowed_type.vecsize = image_format_to_components(type.image.format); + value_expr = remap_swizzle(narrowed_type, expression_type(ops[2]).vecsize, value_expr); + } + + statement(to_expression(ops[0]), "[", to_expression(ops[1]), "] = ", value_expr, ";"); + if (var && variable_storage_is_aliased(*var)) + flush_all_aliased_variables(); + break; + } + + case OpImageTexelPointer: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto &e = + set(id, join(to_expression(ops[2]), "[", to_expression(ops[3]), "]"), result_type, true); + + // When using the pointer, we need to know which variable it is actually loaded from. + auto *var = maybe_get_backing_variable(ops[2]); + e.loaded_from = var ? 
var->self : 0; + break; + } + + case OpAtomicCompareExchange: + case OpAtomicExchange: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + case OpAtomicIAdd: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + { + emit_atomic(ops, instruction.length, opcode); + break; + } + + case OpControlBarrier: + case OpMemoryBarrier: + { + uint32_t memory; + uint32_t semantics; + + if (opcode == OpMemoryBarrier) + { + memory = get(ops[0]).scalar(); + semantics = get(ops[1]).scalar(); + } + else + { + memory = get(ops[1]).scalar(); + semantics = get(ops[2]).scalar(); + } + + if (memory == ScopeSubgroup) + { + // No Wave-barriers in HLSL. + break; + } + + // We only care about these flags, acquire/release and friends are not relevant to GLSL. + semantics = mask_relevant_memory_semantics(semantics); + + if (opcode == OpMemoryBarrier) + { + // If we are a memory barrier, and the next instruction is a control barrier, check if that memory barrier + // does what we need, so we avoid redundant barriers. + const Instruction *next = get_next_instruction_in_block(instruction); + if (next && next->op == OpControlBarrier) + { + auto *next_ops = stream(*next); + uint32_t next_memory = get(next_ops[1]).scalar(); + uint32_t next_semantics = get(next_ops[2]).scalar(); + next_semantics = mask_relevant_memory_semantics(next_semantics); + + // There is no "just execution barrier" in HLSL. + // If there are no memory semantics for next instruction, we will imply group shared memory is synced. + if (next_semantics == 0) + next_semantics = MemorySemanticsWorkgroupMemoryMask; + + bool memory_scope_covered = false; + if (next_memory == memory) + memory_scope_covered = true; + else if (next_semantics == MemorySemanticsWorkgroupMemoryMask) + { + // If we only care about workgroup memory, either Device or Workgroup scope is fine, + // scope does not have to match. + if ((next_memory == ScopeDevice || next_memory == ScopeWorkgroup) && + (memory == ScopeDevice || memory == ScopeWorkgroup)) + { + memory_scope_covered = true; + } + } + else if (memory == ScopeWorkgroup && next_memory == ScopeDevice) + { + // The control barrier has device scope, but the memory barrier just has workgroup scope. + memory_scope_covered = true; + } + + // If we have the same memory scope, and all memory types are covered, we're good. + if (memory_scope_covered && (semantics & next_semantics) == semantics) + break; + } + } + + // We are synchronizing some memory or syncing execution, + // so we cannot forward any loads beyond the memory barrier. + if (semantics || opcode == OpControlBarrier) + { + assert(current_emitting_block); + flush_control_dependent_expressions(current_emitting_block->self); + flush_all_active_variables(); + } + + if (opcode == OpControlBarrier) + { + // We cannot emit just execution barrier, for no memory semantics pick the cheapest option. 
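// Illustrative mapping for the common compute-shader cases (assuming workgroup scope):
//
//   OpControlBarrier with WorkgroupMemory semantics  ->  GroupMemoryBarrierWithGroupSync();
//   OpControlBarrier with no memory semantics        ->  GroupMemoryBarrierWithGroupSync();
//       (the "cheapest option" mentioned above, since HLSL has no pure execution barrier)
//
//   An OpMemoryBarrier immediately followed by an OpControlBarrier that covers the same
//   semantics is skipped entirely by the look-ahead above, so the pair collapses into a
//   single ...WithGroupSync() call rather than two barriers.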
+ if (semantics == MemorySemanticsWorkgroupMemoryMask || semantics == 0) + statement("GroupMemoryBarrierWithGroupSync();"); + else if (semantics != 0 && (semantics & MemorySemanticsWorkgroupMemoryMask) == 0) + statement("DeviceMemoryBarrierWithGroupSync();"); + else + statement("AllMemoryBarrierWithGroupSync();"); + } + else + { + if (semantics == MemorySemanticsWorkgroupMemoryMask) + statement("GroupMemoryBarrier();"); + else if (semantics != 0 && (semantics & MemorySemanticsWorkgroupMemoryMask) == 0) + statement("DeviceMemoryBarrier();"); + else + statement("AllMemoryBarrier();"); + } + break; + } + + case OpBitFieldInsert: + { + if (!requires_bitfield_insert) + { + requires_bitfield_insert = true; + force_recompile = true; + } + + auto expr = join("SPIRV_Cross_bitfieldInsert(", to_expression(ops[2]), ", ", to_expression(ops[3]), ", ", + to_expression(ops[4]), ", ", to_expression(ops[5]), ")"); + + bool forward = + should_forward(ops[2]) && should_forward(ops[3]) && should_forward(ops[4]) && should_forward(ops[5]); + + auto &restype = get(ops[0]); + expr = bitcast_expression(restype, SPIRType::UInt, expr); + emit_op(ops[0], ops[1], expr, forward); + break; + } + + case OpBitFieldSExtract: + case OpBitFieldUExtract: + { + if (!requires_bitfield_extract) + { + requires_bitfield_extract = true; + force_recompile = true; + } + + if (opcode == OpBitFieldSExtract) + HLSL_TFOP(SPIRV_Cross_bitfieldSExtract); + else + HLSL_TFOP(SPIRV_Cross_bitfieldUExtract); + break; + } + + case OpBitCount: + HLSL_UFOP(countbits); + break; + + case OpBitReverse: + HLSL_UFOP(reversebits); + break; + + default: + CompilerGLSL::emit_instruction(instruction); + break; + } +} + +void CompilerHLSL::require_texture_query_variant(const SPIRType &type) +{ + uint32_t bit = 0; + switch (type.image.dim) + { + case Dim1D: + bit = type.image.arrayed ? Query1DArray : Query1D; + break; + + case Dim2D: + if (type.image.ms) + bit = type.image.arrayed ? Query2DMSArray : Query2DMS; + else + bit = type.image.arrayed ? Query2DArray : Query2D; + break; + + case Dim3D: + bit = Query3D; + break; + + case DimCube: + bit = type.image.arrayed ? QueryCubeArray : QueryCube; + break; + + case DimBuffer: + bit = QueryBuffer; + break; + + default: + SPIRV_CROSS_THROW("Unsupported query type."); + } + + switch (get(type.image.type).basetype) + { + case SPIRType::Float: + bit += QueryTypeFloat; + break; + + case SPIRType::Int: + bit += QueryTypeInt; + break; + + case SPIRType::UInt: + bit += QueryTypeUInt; + break; + + default: + SPIRV_CROSS_THROW("Unsupported query type."); + } + + uint64_t mask = 1ull << bit; + if ((required_textureSizeVariants & mask) == 0) + { + force_recompile = true; + required_textureSizeVariants |= mask; + } +} + +string CompilerHLSL::compile(std::vector vertex_attributes) +{ + remap_vertex_attributes = move(vertex_attributes); + return compile(); +} + +uint32_t CompilerHLSL::remap_num_workgroups_builtin() +{ + update_active_builtins(); + + if (!active_input_builtins.get(BuiltInNumWorkgroups)) + return 0; + + // Create a new, fake UBO. 
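// Illustrative usage from the API side; the descriptor set and binding values are placeholders
// chosen by the calling application, not defaults of this library:
//
//   CompilerHLSL hlsl(std::move(spirv_words));
//   uint32_t num_wg_var = hlsl.remap_num_workgroups_builtin();
//   if (num_wg_var != 0)
//   {
//       hlsl.set_decoration(num_wg_var, spv::DecorationDescriptorSet, 0);
//       hlsl.set_decoration(num_wg_var, spv::DecorationBinding, 0);
//   }
//   std::string hlsl_source = hlsl.compile();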
+ uint32_t offset = ir.increase_bound_by(4); + + uint32_t uint_type_id = offset; + uint32_t block_type_id = offset + 1; + uint32_t block_pointer_type_id = offset + 2; + uint32_t variable_id = offset + 3; + + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + uint_type.vecsize = 3; + uint_type.columns = 1; + set(uint_type_id, uint_type); + + SPIRType block_type; + block_type.basetype = SPIRType::Struct; + block_type.member_types.push_back(uint_type_id); + set(block_type_id, block_type); + set_decoration(block_type_id, DecorationBlock); + set_member_name(block_type_id, 0, "count"); + set_member_decoration(block_type_id, 0, DecorationOffset, 0); + + SPIRType block_pointer_type = block_type; + block_pointer_type.pointer = true; + block_pointer_type.storage = StorageClassUniform; + block_pointer_type.parent_type = block_type_id; + auto &ptr_type = set(block_pointer_type_id, block_pointer_type); + + // Preserve self. + ptr_type.self = block_type_id; + + set(variable_id, block_pointer_type_id, StorageClassUniform); + ir.meta[variable_id].decoration.alias = "SPIRV_Cross_NumWorkgroups"; + + num_workgroups_builtin = variable_id; + return variable_id; +} + +string CompilerHLSL::compile() +{ + // Do not deal with ES-isms like precision, older extensions and such. + options.es = false; + options.version = 450; + options.vulkan_semantics = true; + backend.float_literal_suffix = true; + backend.double_literal_suffix = false; + backend.half_literal_suffix = nullptr; + backend.long_long_literal_suffix = true; + backend.uint32_t_literal_suffix = true; + backend.int16_t_literal_suffix = nullptr; + backend.uint16_t_literal_suffix = "u"; + backend.basic_int_type = "int"; + backend.basic_uint_type = "uint"; + backend.swizzle_is_function = false; + backend.shared_is_implied = true; + backend.flexible_member_array_supported = false; + backend.explicit_struct_type = false; + backend.use_initializer_list = true; + backend.use_constructor_splatting = false; + backend.boolean_mix_support = false; + backend.can_swizzle_scalar = true; + backend.can_declare_struct_inline = false; + backend.can_declare_arrays_inline = false; + backend.can_return_array = false; + + build_function_control_flow_graphs_and_analyze(); + update_active_builtins(); + analyze_image_and_sampler_usage(); + + // Subpass input needs SV_Position. + if (need_subpass_input) + active_input_builtins.set(BuiltInFragCoord); + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!"); + + reset(); + + // Move constructor for this type is broken on GCC 4.9 ... + buffer = unique_ptr(new ostringstream()); + + emit_header(); + emit_resources(); + + emit_function(get(ir.default_entry_point), Bitset()); + emit_hlsl_entry_point(); + + pass_count++; + } while (force_recompile); + + // Entry point in HLSL is always main() for the time being. 
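// Illustrative end-to-end use of this backend; the SPIR-V loader and the chosen shader model
// are assumptions, not part of this file:
//
//   std::vector<uint32_t> spirv_words = load_spirv_somehow(); // hypothetical loader
//   CompilerHLSL hlsl(std::move(spirv_words));
//   CompilerHLSL::Options opts;
//   opts.shader_model = 50; // the default is 30, see spirv_hlsl.hpp below
//   hlsl.set_hlsl_options(opts);
//   std::string hlsl_source = hlsl.compile();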
+ get_entry_point().name = "main"; + + return buffer->str(); +} + +void CompilerHLSL::emit_block_hints(const SPIRBlock &block) +{ + switch (block.hint) + { + case SPIRBlock::HintFlatten: + statement("[flatten]"); + break; + case SPIRBlock::HintDontFlatten: + statement("[branch]"); + break; + case SPIRBlock::HintUnroll: + statement("[unroll]"); + break; + case SPIRBlock::HintDontUnroll: + statement("[loop]"); + break; + default: + break; + } +} diff --git a/3rdparty/spirv-cross/spirv_hlsl.hpp b/3rdparty/spirv-cross/spirv_hlsl.hpp new file mode 100644 index 000000000..672436376 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_hlsl.hpp @@ -0,0 +1,237 @@ +/* + * Copyright 2016-2019 Robert Konrad + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_HLSL_HPP +#define SPIRV_HLSL_HPP + +#include "spirv_glsl.hpp" +#include +#include + +namespace spirv_cross +{ +// Interface which remaps vertex inputs to a fixed semantic name to make linking easier. +struct HLSLVertexAttributeRemap +{ + uint32_t location; + std::string semantic; +}; +// Specifying a root constant (d3d12) or push constant range (vulkan). +// +// `start` and `end` denotes the range of the root constant in bytes. +// Both values need to be multiple of 4. +struct RootConstants +{ + uint32_t start; + uint32_t end; + + uint32_t binding; + uint32_t space; +}; + +class CompilerHLSL : public CompilerGLSL +{ +public: + struct Options + { + uint32_t shader_model = 30; // TODO: map ps_4_0_level_9_0,... somehow + + // Allows the PointSize builtin, and ignores it, as PointSize is not supported in HLSL. + bool point_size_compat = false; + + // Allows the PointCoord builtin, returns float2(0.5, 0.5), as PointCoord is not supported in HLSL. + bool point_coord_compat = false; + }; + + explicit CompilerHLSL(std::vector spirv_) + : CompilerGLSL(move(spirv_)) + { + } + + CompilerHLSL(const uint32_t *ir_, size_t size) + : CompilerGLSL(ir_, size) + { + } + + explicit CompilerHLSL(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + } + + explicit CompilerHLSL(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + } + + SPIRV_CROSS_DEPRECATED("CompilerHLSL::get_options() is obsolete, use get_hlsl_options() instead.") + const Options &get_options() const + { + return hlsl_options; + } + + const Options &get_hlsl_options() const + { + return hlsl_options; + } + + SPIRV_CROSS_DEPRECATED("CompilerHLSL::get_options() is obsolete, use set_hlsl_options() instead.") + void set_options(Options &opts) + { + hlsl_options = opts; + } + + void set_hlsl_options(const Options &opts) + { + hlsl_options = opts; + } + + // Optionally specify a custom root constant layout. + // + // Push constants ranges will be split up according to the + // layout specified. + void set_root_constant_layouts(std::vector layout) + { + root_constants_layout = std::move(layout); + } + + // Compiles and remaps vertex attributes at specific locations to a fixed semantic. + // The default is TEXCOORD# where # denotes location. 
+ // Matrices are unrolled to vectors with notation ${SEMANTIC}_#, where # denotes row. + // $SEMANTIC is either TEXCOORD# or a semantic name specified here. + std::string compile(std::vector vertex_attributes); + std::string compile() override; + + // This is a special HLSL workaround for the NumWorkGroups builtin. + // This does not exist in HLSL, so the calling application must create a dummy cbuffer in + // which the application will store this builtin. + // The cbuffer layout will be: + // cbuffer SPIRV_Cross_NumWorkgroups : register(b#, space#) { uint3 SPIRV_Cross_NumWorkgroups_count; }; + // This must be called before compile(). + // The function returns 0 if NumWorkGroups builtin is not statically used in the shader from the current entry point. + // If non-zero, this returns the variable ID of a cbuffer which corresponds to + // the cbuffer declared above. By default, no binding or descriptor set decoration is set, + // so the calling application should declare explicit bindings on this ID before calling compile(). + uint32_t remap_num_workgroups_builtin(); + +private: + std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string image_type_hlsl(const SPIRType &type, uint32_t id); + std::string image_type_hlsl_modern(const SPIRType &type, uint32_t id); + std::string image_type_hlsl_legacy(const SPIRType &type, uint32_t id); + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + void emit_hlsl_entry_point(); + void emit_header() override; + void emit_resources(); + void emit_interface_block_globally(const SPIRVariable &type); + void emit_interface_block_in_struct(const SPIRVariable &type, std::unordered_set &active_locations); + void emit_builtin_inputs_in_struct(); + void emit_builtin_outputs_in_struct(); + void emit_texture_op(const Instruction &i) override; + void emit_instruction(const Instruction &instruction) override; + void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count) override; + void emit_buffer_block(const SPIRVariable &type) override; + void emit_push_constant_block(const SPIRVariable &var) override; + void emit_uniform(const SPIRVariable &var) override; + void emit_modern_uniform(const SPIRVariable &var); + void emit_legacy_uniform(const SPIRVariable &var); + void emit_specialization_constants(); + void emit_composite_constants(); + void emit_fixup() override; + std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override; + std::string layout_for_member(const SPIRType &type, uint32_t index) override; + std::string to_interpolation_qualifiers(const Bitset &flags) override; + std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override; + std::string to_func_call_arg(uint32_t id) override; + std::string to_sampler_expression(uint32_t id); + std::string to_resource_binding(const SPIRVariable &var); + std::string to_resource_binding_sampler(const SPIRVariable &var); + std::string to_resource_register(char space, uint32_t binding, uint32_t set); + void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; + void emit_access_chain(const Instruction &instruction); + void emit_load(const Instruction &instruction); + std::string read_access_chain(const SPIRAccessChain &chain); + void write_access_chain(const SPIRAccessChain &chain, uint32_t value); + void emit_store(const Instruction &instruction); + void emit_atomic(const uint32_t *ops, 
uint32_t length, spv::Op op); + void emit_subgroup_op(const Instruction &i) override; + void emit_block_hints(const SPIRBlock &block) override; + + void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, const std::string &qualifier, + uint32_t base_offset = 0) override; + + const char *to_storage_qualifiers_glsl(const SPIRVariable &var) override; + void replace_illegal_names() override; + + Options hlsl_options; + bool requires_op_fmod = false; + bool requires_textureProj = false; + bool requires_fp16_packing = false; + bool requires_explicit_fp16_packing = false; + bool requires_unorm8_packing = false; + bool requires_snorm8_packing = false; + bool requires_unorm16_packing = false; + bool requires_snorm16_packing = false; + bool requires_bitfield_insert = false; + bool requires_bitfield_extract = false; + bool requires_inverse_2x2 = false; + bool requires_inverse_3x3 = false; + bool requires_inverse_4x4 = false; + uint64_t required_textureSizeVariants = 0; + void require_texture_query_variant(const SPIRType &type); + + enum TextureQueryVariantDim + { + Query1D = 0, + Query1DArray, + Query2D, + Query2DArray, + Query3D, + QueryBuffer, + QueryCube, + QueryCubeArray, + Query2DMS, + Query2DMSArray, + QueryDimCount + }; + + enum TextureQueryVariantType + { + QueryTypeFloat = 0, + QueryTypeInt = 16, + QueryTypeUInt = 32, + QueryTypeCount = 3 + }; + + void emit_builtin_variables(); + bool require_output = false; + bool require_input = false; + std::vector remap_vertex_attributes; + + uint32_t type_to_consumed_locations(const SPIRType &type) const; + + void emit_io_block(const SPIRVariable &var); + std::string to_semantic(uint32_t vertex_location); + + uint32_t num_workgroups_builtin = 0; + + // Custom root constant layout, which should be emitted + // when translating push constant ranges. + std::vector root_constants_layout; +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_msl.cpp b/3rdparty/spirv-cross/spirv_msl.cpp new file mode 100644 index 000000000..21647198b --- /dev/null +++ b/3rdparty/spirv-cross/spirv_msl.cpp @@ -0,0 +1,5703 @@ +/* + * Copyright 2016-2019 The Brenwill Workshop Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "spirv_msl.hpp" +#include "GLSL.std.450.h" + +#include +#include +#include + +using namespace spv; +using namespace spirv_cross; +using namespace std; + +static const uint32_t k_unknown_location = ~0u; +static const uint32_t k_unknown_component = ~0u; + +static const uint32_t k_aux_mbr_idx_swizzle_const = 0u; + +CompilerMSL::CompilerMSL(vector spirv_, vector *p_vtx_attrs, + vector *p_res_bindings) + : CompilerGLSL(move(spirv_)) +{ + if (p_vtx_attrs) + for (auto &va : *p_vtx_attrs) + vtx_attrs_by_location[va.location] = &va; + + if (p_res_bindings) + for (auto &rb : *p_res_bindings) + resource_bindings.push_back(&rb); +} + +CompilerMSL::CompilerMSL(const uint32_t *ir_, size_t word_count, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count, + MSLResourceBinding *p_res_bindings, size_t res_bindings_count) + : CompilerGLSL(ir_, word_count) +{ + if (p_vtx_attrs) + for (size_t i = 0; i < vtx_attrs_count; i++) + vtx_attrs_by_location[p_vtx_attrs[i].location] = &p_vtx_attrs[i]; + + if (p_res_bindings) + for (size_t i = 0; i < res_bindings_count; i++) + resource_bindings.push_back(&p_res_bindings[i]); +} + +CompilerMSL::CompilerMSL(const ParsedIR &ir_, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count, + MSLResourceBinding *p_res_bindings, size_t res_bindings_count) + : CompilerGLSL(ir_) +{ + if (p_vtx_attrs) + for (size_t i = 0; i < vtx_attrs_count; i++) + vtx_attrs_by_location[p_vtx_attrs[i].location] = &p_vtx_attrs[i]; + + if (p_res_bindings) + for (size_t i = 0; i < res_bindings_count; i++) + resource_bindings.push_back(&p_res_bindings[i]); +} + +CompilerMSL::CompilerMSL(ParsedIR &&ir_, MSLVertexAttr *p_vtx_attrs, size_t vtx_attrs_count, + MSLResourceBinding *p_res_bindings, size_t res_bindings_count) + : CompilerGLSL(std::move(ir_)) +{ + if (p_vtx_attrs) + for (size_t i = 0; i < vtx_attrs_count; i++) + vtx_attrs_by_location[p_vtx_attrs[i].location] = &p_vtx_attrs[i]; + + if (p_res_bindings) + for (size_t i = 0; i < res_bindings_count; i++) + resource_bindings.push_back(&p_res_bindings[i]); +} + +void CompilerMSL::build_implicit_builtins() +{ + bool need_sample_pos = active_input_builtins.get(BuiltInSamplePosition); + if (need_subpass_input || need_sample_pos) + { + bool has_frag_coord = false; + bool has_sample_id = false; + + for (auto &id : ir.ids) + { + if (id.get_type() != TypeVariable) + continue; + + auto &var = id.get(); + + if (need_subpass_input && var.storage == StorageClassInput && ir.meta[var.self].decoration.builtin && + ir.meta[var.self].decoration.builtin_type == BuiltInFragCoord) + { + builtin_frag_coord_id = var.self; + has_frag_coord = true; + break; + } + + if (need_sample_pos && var.storage == StorageClassInput && ir.meta[var.self].decoration.builtin && + ir.meta[var.self].decoration.builtin_type == BuiltInSampleId) + { + builtin_sample_id_id = var.self; + has_sample_id = true; + break; + } + } + + if (!has_frag_coord && need_subpass_input) + { + uint32_t offset = ir.increase_bound_by(3); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t var_id = offset + 2; + + // Create gl_FragCoord. 
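// Illustrative only: subpass inputs are lowered to plain texture reads in MSL, indexed by the
// fragment coordinate (roughly uInput.read(uint2(gl_FragCoord.xy)), name assumed), so a
// FragCoord input must exist even when the original shader never declared one.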
+ SPIRType vec4_type; + vec4_type.basetype = SPIRType::Float; + vec4_type.width = 32; + vec4_type.vecsize = 4; + set(type_id, vec4_type); + + SPIRType vec4_type_ptr; + vec4_type_ptr = vec4_type; + vec4_type_ptr.pointer = true; + vec4_type_ptr.parent_type = type_id; + vec4_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, vec4_type_ptr); + ptr_type.self = type_id; + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInFragCoord); + builtin_frag_coord_id = var_id; + } + + if (!has_sample_id && need_sample_pos) + { + uint32_t offset = ir.increase_bound_by(3); + uint32_t type_id = offset; + uint32_t type_ptr_id = offset + 1; + uint32_t var_id = offset + 2; + + // Create gl_SampleID. + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_ptr; + uint_type_ptr = uint_type; + uint_type_ptr.pointer = true; + uint_type_ptr.parent_type = type_id; + uint_type_ptr.storage = StorageClassInput; + auto &ptr_type = set(type_ptr_id, uint_type_ptr); + ptr_type.self = type_id; + + set(var_id, type_ptr_id, StorageClassInput); + set_decoration(var_id, DecorationBuiltIn, BuiltInSampleId); + builtin_sample_id_id = var_id; + } + } + + if (msl_options.swizzle_texture_samples && has_sampled_images) + { + uint32_t offset = ir.increase_bound_by(5); + uint32_t type_id = offset; + uint32_t type_arr_id = offset + 1; + uint32_t struct_id = offset + 2; + uint32_t struct_ptr_id = offset + 3; + uint32_t var_id = offset + 4; + + // Create a buffer to hold the swizzle constants. + SPIRType uint_type; + uint_type.basetype = SPIRType::UInt; + uint_type.width = 32; + set(type_id, uint_type); + + SPIRType uint_type_arr = uint_type; + uint_type_arr.array.push_back(0); + uint_type_arr.array_size_literal.push_back(true); + uint_type_arr.parent_type = type_id; + set(type_arr_id, uint_type_arr); + set_decoration(type_arr_id, DecorationArrayStride, 4); + + SPIRType struct_type; + struct_type.basetype = SPIRType::Struct; + struct_type.member_types.push_back(type_arr_id); + auto &type = set(struct_id, struct_type); + type.self = struct_id; + set_decoration(struct_id, DecorationBlock); + set_name(struct_id, "spvAux"); + set_member_name(struct_id, 0, "swizzleConst"); + set_member_decoration(struct_id, 0, DecorationOffset, 0); + + SPIRType struct_type_ptr = struct_type; + struct_type_ptr.pointer = true; + struct_type_ptr.parent_type = struct_id; + struct_type_ptr.storage = StorageClassUniform; + auto &ptr_type = set(struct_ptr_id, struct_type_ptr); + ptr_type.self = struct_id; + + set(var_id, struct_ptr_id, StorageClassUniform); + set_name(var_id, "spvAuxBuffer"); + // This should never match anything. 
+ set_decoration(var_id, DecorationDescriptorSet, 0xFFFFFFFE); + set_decoration(var_id, DecorationBinding, msl_options.aux_buffer_index); + aux_buffer_id = var_id; + } +} + +static string create_sampler_address(const char *prefix, MSLSamplerAddress addr) +{ + switch (addr) + { + case MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE: + return join(prefix, "address::clamp_to_edge"); + case MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO: + return join(prefix, "address::clamp_to_zero"); + case MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER: + return join(prefix, "address::clamp_to_border"); + case MSL_SAMPLER_ADDRESS_REPEAT: + return join(prefix, "address::repeat"); + case MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT: + return join(prefix, "address::mirrored_repeat"); + default: + SPIRV_CROSS_THROW("Invalid sampler addressing mode."); + } +} + +void CompilerMSL::emit_entry_point_declarations() +{ + // FIXME: Get test coverage here ... + + // Emit constexpr samplers here. + for (auto &samp : constexpr_samplers) + { + auto &var = get(samp.first); + auto &type = get(var.basetype); + if (type.basetype == SPIRType::Sampler) + add_resource_name(samp.first); + + vector args; + auto &s = samp.second; + + if (s.coord != MSL_SAMPLER_COORD_NORMALIZED) + args.push_back("coord::pixel"); + + if (s.min_filter == s.mag_filter) + { + if (s.min_filter != MSL_SAMPLER_FILTER_NEAREST) + args.push_back("filter::linear"); + } + else + { + if (s.min_filter != MSL_SAMPLER_FILTER_NEAREST) + args.push_back("min_filter::linear"); + if (s.mag_filter != MSL_SAMPLER_FILTER_NEAREST) + args.push_back("mag_filter::linear"); + } + + switch (s.mip_filter) + { + case MSL_SAMPLER_MIP_FILTER_NONE: + // Default + break; + case MSL_SAMPLER_MIP_FILTER_NEAREST: + args.push_back("mip_filter::nearest"); + break; + case MSL_SAMPLER_MIP_FILTER_LINEAR: + args.push_back("mip_filter::linear"); + break; + default: + SPIRV_CROSS_THROW("Invalid mip filter."); + } + + if (s.s_address == s.t_address && s.s_address == s.r_address) + { + if (s.s_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("", s.s_address)); + } + else + { + if (s.s_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("s_", s.s_address)); + if (s.t_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("t_", s.t_address)); + if (s.r_address != MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE) + args.push_back(create_sampler_address("r_", s.r_address)); + } + + if (s.compare_enable) + { + switch (s.compare_func) + { + case MSL_SAMPLER_COMPARE_FUNC_ALWAYS: + args.push_back("compare_func::always"); + break; + case MSL_SAMPLER_COMPARE_FUNC_NEVER: + args.push_back("compare_func::never"); + break; + case MSL_SAMPLER_COMPARE_FUNC_EQUAL: + args.push_back("compare_func::equal"); + break; + case MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL: + args.push_back("compare_func::not_equal"); + break; + case MSL_SAMPLER_COMPARE_FUNC_LESS: + args.push_back("compare_func::less"); + break; + case MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL: + args.push_back("compare_func::less_equal"); + break; + case MSL_SAMPLER_COMPARE_FUNC_GREATER: + args.push_back("compare_func::greater"); + break; + case MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL: + args.push_back("compare_func::greater_equal"); + break; + default: + SPIRV_CROSS_THROW("Invalid sampler compare function."); + } + } + + if (s.s_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER || s.t_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER || + s.r_address == MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER) + { + switch (s.border_color) + { + case 
MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK: + args.push_back("border_color::opaque_black"); + break; + case MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE: + args.push_back("border_color::opaque_white"); + break; + case MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK: + args.push_back("border_color::transparent_black"); + break; + default: + SPIRV_CROSS_THROW("Invalid sampler border color."); + } + } + + if (s.anisotropy_enable) + args.push_back(join("max_anisotropy(", s.max_anisotropy, ")")); + if (s.lod_clamp_enable) + { + args.push_back( + join("lod_clamp(", convert_to_string(s.lod_clamp_min), ", ", convert_to_string(s.lod_clamp_max), ")")); + } + + statement("constexpr sampler ", + type.basetype == SPIRType::SampledImage ? to_sampler_expression(samp.first) : to_name(samp.first), + "(", merge(args), ");"); + } + + // Emit buffer arrays here. + for (uint32_t array_id : buffer_arrays) + { + const auto &var = get(array_id); + const auto &type = get_variable_data_type(var); + string name = get_name(array_id); + statement(get_argument_address_space(var) + " " + type_to_glsl(type) + "* " + name + "[] ="); + begin_scope(); + for (uint32_t i = 0; i < type.array[0]; ++i) + statement(name + "_" + convert_to_string(i) + ","); + end_scope_decl(); + statement(""); + } + // For some reason, without this, we end up emitting the arrays twice. + buffer_arrays.clear(); +} + +string CompilerMSL::compile() +{ + // Force a classic "C" locale, reverts when function returns + ClassicLocale classic_locale; + + // Do not deal with GLES-isms like precision, older extensions and such. + options.vulkan_semantics = true; + options.es = false; + options.version = 450; + backend.null_pointer_literal = "nullptr"; + backend.float_literal_suffix = false; + backend.half_literal_suffix = "h"; + backend.uint32_t_literal_suffix = true; + backend.int16_t_literal_suffix = nullptr; + backend.uint16_t_literal_suffix = "u"; + backend.basic_int_type = "int"; + backend.basic_uint_type = "uint"; + backend.basic_int8_type = "char"; + backend.basic_uint8_type = "uchar"; + backend.basic_int16_type = "short"; + backend.basic_uint16_type = "ushort"; + backend.discard_literal = "discard_fragment()"; + backend.swizzle_is_function = false; + backend.shared_is_implied = false; + backend.use_initializer_list = true; + backend.use_typed_initializer_list = true; + backend.native_row_major_matrix = false; + backend.flexible_member_array_supported = false; + backend.can_declare_arrays_inline = false; + backend.can_return_array = false; + backend.boolean_mix_support = false; + backend.allow_truncated_access_chain = true; + backend.array_is_value_type = false; + + is_rasterization_disabled = msl_options.disable_rasterization; + + replace_illegal_names(); + + struct_member_padding.clear(); + + build_function_control_flow_graphs_and_analyze(); + update_active_builtins(); + analyze_image_and_sampler_usage(); + analyze_sampled_image_usage(); + build_implicit_builtins(); + + fixup_image_load_store_access(); + + set_enabled_interface_variables(get_active_interface_variables()); + if (aux_buffer_id) + active_interface_variables.insert(aux_buffer_id); + + // Preprocess OpCodes to extract the need to output additional header content + preprocess_op_codes(); + + // Create structs to hold input, output and uniform variables. + // Do output first to ensure out. is declared at top of entry function. 
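// Illustrative use of this backend from the API side; the loader is an assumption, and the two
// null arguments request no vertex-attribute or resource-binding remapping:
//
//   std::vector<uint32_t> spirv_words = load_spirv_somehow(); // hypothetical loader
//   CompilerMSL msl(std::move(spirv_words), nullptr, nullptr);
//   std::string msl_source = msl.compile();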
+ qual_pos_var_name = ""; + stage_out_var_id = add_interface_block(StorageClassOutput); + stage_in_var_id = add_interface_block(StorageClassInput); + + // Metal vertex functions that define no output must disable rasterization and return void. + if (!stage_out_var_id) + is_rasterization_disabled = true; + + // Convert the use of global variables to recursively-passed function parameters + localize_global_variables(); + extract_global_variables_from_functions(); + + // Mark any non-stage-in structs to be tightly packed. + mark_packable_structs(); + + uint32_t pass_count = 0; + do + { + if (pass_count >= 3) + SPIRV_CROSS_THROW("Over 3 compilation loops detected. Must be a bug!"); + + reset(); + + next_metal_resource_index = MSLResourceBinding(); // Start bindings at zero + + // Move constructor for this type is broken on GCC 4.9 ... + buffer = unique_ptr(new ostringstream()); + + emit_header(); + emit_specialization_constants(); + emit_resources(); + emit_custom_functions(); + emit_function(get(ir.default_entry_point), Bitset()); + + pass_count++; + } while (force_recompile); + + return buffer->str(); +} + +string CompilerMSL::compile(vector *p_vtx_attrs, vector *p_res_bindings) +{ + if (p_vtx_attrs) + { + vtx_attrs_by_location.clear(); + for (auto &va : *p_vtx_attrs) + vtx_attrs_by_location[va.location] = &va; + } + + if (p_res_bindings) + { + resource_bindings.clear(); + for (auto &rb : *p_res_bindings) + resource_bindings.push_back(&rb); + } + + return compile(); +} + +string CompilerMSL::compile(MSLConfiguration &msl_cfg, vector *p_vtx_attrs, + vector *p_res_bindings) +{ + msl_options = msl_cfg; + return compile(p_vtx_attrs, p_res_bindings); +} + +// Register the need to output any custom functions. +void CompilerMSL::preprocess_op_codes() +{ + OpCodePreprocessor preproc(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), preproc); + + if (preproc.suppress_missing_prototypes) + add_pragma_line("#pragma clang diagnostic ignored \"-Wmissing-prototypes\""); + + if (preproc.uses_atomics) + { + add_header_line("#include "); + add_pragma_line("#pragma clang diagnostic ignored \"-Wunused-variable\""); + } + + // Metal vertex functions that write to resources must disable rasterization and return void. + if (preproc.uses_resource_write) + is_rasterization_disabled = true; +} + +// Move the Private and Workgroup global variables to the entry function. +// Non-constant variables cannot have global scope in Metal. +void CompilerMSL::localize_global_variables() +{ + auto &entry_func = get(ir.default_entry_point); + auto iter = global_variables.begin(); + while (iter != global_variables.end()) + { + uint32_t v_id = *iter; + auto &var = get(v_id); + if (var.storage == StorageClassPrivate || var.storage == StorageClassWorkgroup) + { + entry_func.add_local_variable(v_id); + iter = global_variables.erase(iter); + } + else + iter++; + } +} + +// For any global variable accessed directly by a function, +// extract that variable and add it as an argument to that function. 
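// Illustrative only: because MSL has no module-scope shader resources, a helper function that
// samples a global combined image sampler ends up receiving the image and sampler explicitly,
// roughly (names, qualifiers and argument order assumed):
//
//   float4 sample_color(texture2d<float> uTex, sampler uTexSmplr, float2 uv)
//   {
//       return uTex.sample(uTexSmplr, uv);
//   }
//
// with the entry point forwarding its own [[texture(n)]] / [[sampler(n)]] arguments down the
// call chain.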
+void CompilerMSL::extract_global_variables_from_functions() +{ + // Uniforms + unordered_set global_var_ids; + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + if (var.storage == StorageClassInput || var.storage == StorageClassOutput || + var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant || + var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer) + { + global_var_ids.insert(var.self); + } + } + } + + // Local vars that are declared in the main function and accessed directly by a function + auto &entry_func = get(ir.default_entry_point); + for (auto &var : entry_func.local_variables) + if (get(var).storage != StorageClassFunction) + global_var_ids.insert(var); + + std::set added_arg_ids; + unordered_set processed_func_ids; + extract_global_variables_from_function(ir.default_entry_point, added_arg_ids, global_var_ids, processed_func_ids); +} + +// MSL does not support the use of global variables for shader input content. +// For any global variable accessed directly by the specified function, extract that variable, +// add it as an argument to that function, and the arg to the added_arg_ids collection. +void CompilerMSL::extract_global_variables_from_function(uint32_t func_id, std::set &added_arg_ids, + unordered_set &global_var_ids, + unordered_set &processed_func_ids) +{ + // Avoid processing a function more than once + if (processed_func_ids.find(func_id) != processed_func_ids.end()) + { + // Return function global variables + added_arg_ids = function_global_vars[func_id]; + return; + } + + processed_func_ids.insert(func_id); + + auto &func = get(func_id); + + // Recursively establish global args added to functions on which we depend. + for (auto block : func.blocks) + { + auto &b = get(block); + for (auto &i : b.ops) + { + auto ops = stream(i); + auto op = static_cast(i.op); + + switch (op) + { + case OpLoad: + case OpInBoundsAccessChain: + case OpAccessChain: + case OpPtrAccessChain: + { + uint32_t base_id = ops[2]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + + auto &type = get(ops[0]); + if (type.basetype == SPIRType::Image && type.image.dim == DimSubpassData) + { + // Implicitly reads gl_FragCoord. + assert(builtin_frag_coord_id != 0); + added_arg_ids.insert(builtin_frag_coord_id); + } + + if (msl_options.swizzle_texture_samples && has_sampled_images && is_sampled_image_type(type)) + { + // Implicitly reads spvAuxBuffer. 
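+					// (The aux buffer carries the per-texture swizzle constants consumed by the
+					// spvTextureSwizzle/spvGatherSwizzle helpers emitted in emit_custom_functions(),
+					// so any function sampling with swizzling enabled must receive it as well.)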
+ assert(aux_buffer_id != 0); + added_arg_ids.insert(aux_buffer_id); + } + + break; + } + + case OpFunctionCall: + { + // First see if any of the function call args are globals + for (uint32_t arg_idx = 3; arg_idx < i.length; arg_idx++) + { + uint32_t arg_id = ops[arg_idx]; + if (global_var_ids.find(arg_id) != global_var_ids.end()) + added_arg_ids.insert(arg_id); + } + + // Then recurse into the function itself to extract globals used internally in the function + uint32_t inner_func_id = ops[2]; + std::set inner_func_args; + extract_global_variables_from_function(inner_func_id, inner_func_args, global_var_ids, + processed_func_ids); + added_arg_ids.insert(inner_func_args.begin(), inner_func_args.end()); + break; + } + + case OpStore: + { + uint32_t base_id = ops[0]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + break; + } + + case OpSelect: + { + uint32_t base_id = ops[3]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + base_id = ops[4]; + if (global_var_ids.find(base_id) != global_var_ids.end()) + added_arg_ids.insert(base_id); + break; + } + + default: + break; + } + + // TODO: Add all other operations which can affect memory. + // We should consider a more unified system here to reduce boiler-plate. + // This kind of analysis is done in several places ... + } + } + + function_global_vars[func_id] = added_arg_ids; + + // Add the global variables as arguments to the function + if (func_id != ir.default_entry_point) + { + for (uint32_t arg_id : added_arg_ids) + { + auto &var = get(arg_id); + uint32_t type_id = var.basetype; + auto *p_type = &get(type_id); + + if (is_builtin_variable(var) && p_type->basetype == SPIRType::Struct) + { + // Get the pointee type + type_id = get_pointee_type_id(type_id); + p_type = &get(type_id); + + uint32_t mbr_idx = 0; + for (auto &mbr_type_id : p_type->member_types) + { + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(*p_type, mbr_idx, &builtin); + if (is_builtin && has_active_builtin(builtin, var.storage)) + { + // Add a arg variable with the same type and decorations as the member + uint32_t next_ids = ir.increase_bound_by(2); + uint32_t ptr_type_id = next_ids + 0; + uint32_t var_id = next_ids + 1; + + // Make sure we have an actual pointer type, + // so that we will get the appropriate address space when declaring these builtins. + auto &ptr = set(ptr_type_id, get(mbr_type_id)); + ptr.self = mbr_type_id; + ptr.storage = var.storage; + ptr.pointer = true; + ptr.parent_type = mbr_type_id; + + func.add_parameter(mbr_type_id, var_id, true); + set(var_id, ptr_type_id, StorageClassFunction); + ir.meta[var_id].decoration = ir.meta[type_id].members[mbr_idx]; + } + mbr_idx++; + } + } + else + { + uint32_t next_id = ir.increase_bound_by(1); + func.add_parameter(type_id, next_id, true); + set(next_id, type_id, StorageClassFunction, 0, arg_id); + + // Ensure the existing variable has a valid name and the new variable has all the same meta info + set_name(arg_id, ensure_valid_name(to_name(arg_id), "v")); + ir.meta[next_id] = ir.meta[arg_id]; + } + } + } +} + +// For all variables that are some form of non-input-output interface block, mark that all the structs +// that are recursively contained within the type referenced by that variable should be packed tightly. 
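+// Illustrative example of why this matters: with std140/std430 offsets, a vec3 at
+// offset 0 followed by a float at offset 12 only lines up in MSL if the vector is
+// emitted as packed_float3 (4-byte alignment, 12 bytes) rather than float3 (16-byte
+// alignment):
+//
+//     struct UBO
+//     {
+//         packed_float3 v; // bytes 0..11, matching the SPIR-V layout
+//         float s;         // byte offset 12
+//     };
+//
+// The DecorationCPacked flag applied below is what later selects the packed types.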
+void CompilerMSL::mark_packable_structs() +{ + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + if (var.storage != StorageClassFunction && !is_hidden_variable(var)) + { + auto &type = get(var.basetype); + if (type.pointer && + (type.storage == StorageClassUniform || type.storage == StorageClassUniformConstant || + type.storage == StorageClassPushConstant || type.storage == StorageClassStorageBuffer) && + (has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock))) + mark_as_packable(type); + } + } + } +} + +// If the specified type is a struct, it and any nested structs +// are marked as packable with the DecorationCPacked decoration, +void CompilerMSL::mark_as_packable(SPIRType &type) +{ + // If this is not the base type (eg. it's a pointer or array), tunnel down + if (type.parent_type) + { + mark_as_packable(get(type.parent_type)); + return; + } + + if (type.basetype == SPIRType::Struct) + { + set_decoration(type.self, DecorationCPacked); + + // Recurse + size_t mbr_cnt = type.member_types.size(); + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + uint32_t mbr_type_id = type.member_types[mbr_idx]; + auto &mbr_type = get(mbr_type_id); + mark_as_packable(mbr_type); + if (mbr_type.type_alias) + { + auto &mbr_type_alias = get(mbr_type.type_alias); + mark_as_packable(mbr_type_alias); + } + } + } +} + +// If a vertex attribute exists at the location, it is marked as being used by this shader +void CompilerMSL::mark_location_as_used_by_shader(uint32_t location, StorageClass storage) +{ + MSLVertexAttr *p_va; + if ((get_entry_point().model == ExecutionModelVertex) && (storage == StorageClassInput) && + (p_va = vtx_attrs_by_location[location])) + p_va->used_by_shader = true; +} + +void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var) +{ + bool is_builtin = is_builtin_variable(var); + BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn)); + bool is_flat = has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_decoration(var.self, DecorationCentroid); + bool is_sample = has_decoration(var.self, DecorationSample); + + // Add a reference to the variable type to the interface struct. + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + uint32_t type_id = ensure_correct_builtin_type(var.basetype, builtin); + var.basetype = type_id; + ib_type.member_types.push_back(get_pointee_type_id(type_id)); + + // Give the member a name + string mbr_name = ensure_valid_name(to_expression(var.self), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + // Update the original variable reference to include the structure reference + string qual_var_name = ib_var_ref + "." 
+ mbr_name; + ir.meta[var.self].decoration.qualified_alias = qual_var_name; + + // Copy the variable location from the original variable to the member + if (get_decoration_bitset(var.self).get(DecorationLocation)) + { + uint32_t locn = get_decoration(var.self, DecorationLocation); + if (storage == StorageClassInput && get_entry_point().model == ExecutionModelVertex) + { + type_id = ensure_correct_attribute_type(type_id, locn); + var.basetype = type_id; + ib_type.member_types[ib_mbr_idx] = get_pointee_type_id(type_id); + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + if (get_decoration_bitset(var.self).get(DecorationComponent)) + { + uint32_t comp = get_decoration(var.self, DecorationComponent); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp); + } + + if (get_decoration_bitset(var.self).get(DecorationIndex)) + { + uint32_t index = get_decoration(var.self, DecorationIndex); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationIndex, index); + } + + // Mark the member as builtin if needed + if (is_builtin) + { + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationBuiltIn, builtin); + if (builtin == BuiltInPosition) + qual_pos_var_name = qual_var_name; + } + + // Copy interpolation decorations if needed + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); +} + +void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var) +{ + auto &entry_func = get(ir.default_entry_point); + auto &var_type = get_variable_data_type(var); + uint32_t elem_cnt = 0; + + if (is_matrix(var_type)) + { + if (is_array(var_type)) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-matrices in input and output variables."); + + elem_cnt = var_type.columns; + } + else if (is_array(var_type)) + { + if (var_type.array.size() != 1) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-arrays in input and output variables."); + + elem_cnt = to_array_size_literal(var_type); + } + + bool is_flat = has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_decoration(var.self, DecorationCentroid); + bool is_sample = has_decoration(var.self, DecorationSample); + + auto *usable_type = &var_type; + if (usable_type->pointer) + usable_type = &get(usable_type->parent_type); + while (is_array(*usable_type) || is_matrix(*usable_type)) + usable_type = &get(usable_type->parent_type); + + entry_func.add_local_variable(var.self); + + // We need to declare the variable early and at entry-point scope. + vars_needing_early_declaration.push_back(var.self); + + for (uint32_t i = 0; i < elem_cnt; i++) + { + // Add a reference to the variable type to the interface struct. + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + ib_type.member_types.push_back(usable_type->self); + + // Give the member a name + string mbr_name = ensure_valid_name(join(to_expression(var.self), "_", i), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + // There is no qualified alias since we need to flatten the internal array on return. 
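+		// Illustration (hypothetical name): an output declared as float4 vColor[2]
+		// becomes two interface members, vColor_0 and vColor_1, and the fixup hooks
+		// registered below copy between the local array and the interface struct,
+		// e.g. for the output direction:
+		//
+		//     out.vColor_0 = vColor[0];
+		//     out.vColor_1 = vColor[1];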
+ if (get_decoration_bitset(var.self).get(DecorationLocation)) + { + uint32_t locn = get_decoration(var.self, DecorationLocation) + i; + if (storage == StorageClassInput && get_entry_point().model == ExecutionModelVertex) + { + var.basetype = ensure_correct_attribute_type(var.basetype, locn); + uint32_t mbr_type_id = ensure_correct_attribute_type(usable_type->self, locn); + ib_type.member_types[ib_mbr_idx] = mbr_type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + if (get_decoration_bitset(var.self).get(DecorationIndex)) + { + uint32_t index = get_decoration(var.self, DecorationIndex); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationIndex, index); + } + + // Copy interpolation decorations if needed + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); + + switch (storage) + { + case StorageClassInput: + entry_func.fixup_hooks_in.push_back( + [=]() { statement(to_name(var.self), "[", i, "] = ", ib_var_ref, ".", mbr_name, ";"); }); + break; + + case StorageClassOutput: + entry_func.fixup_hooks_out.push_back( + [=]() { statement(ib_var_ref, ".", mbr_name, " = ", to_name(var.self), "[", i, "];"); }); + break; + + default: + break; + } + } +} + +uint32_t CompilerMSL::get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx) +{ + auto &type = get(var.basetype); + uint32_t location = get_decoration(var.self, DecorationLocation); + + for (uint32_t i = 0; i < mbr_idx; i++) + { + auto &mbr_type = get(type.member_types[i]); + + // Start counting from any place we have a new location decoration. 
+ if (has_member_decoration(type.self, mbr_idx, DecorationLocation)) + location = get_member_decoration(type.self, mbr_idx, DecorationLocation); + + uint32_t location_count = 1; + + if (mbr_type.columns > 1) + location_count = mbr_type.columns; + + if (!mbr_type.array.empty()) + for (uint32_t j = 0; j < uint32_t(mbr_type.array.size()); j++) + location_count *= to_array_size_literal(mbr_type, j); + + location += location_count; + } + + return location; +} + +void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, + uint32_t mbr_idx) +{ + auto &entry_func = get(ir.default_entry_point); + auto &var_type = get_variable_data_type(var); + + bool is_flat = + has_member_decoration(var_type.self, mbr_idx, DecorationFlat) || has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_member_decoration(var_type.self, mbr_idx, DecorationNoPerspective) || + has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_member_decoration(var_type.self, mbr_idx, DecorationCentroid) || + has_decoration(var.self, DecorationCentroid); + bool is_sample = + has_member_decoration(var_type.self, mbr_idx, DecorationSample) || has_decoration(var.self, DecorationSample); + + uint32_t mbr_type_id = var_type.member_types[mbr_idx]; + auto &mbr_type = get(mbr_type_id); + uint32_t elem_cnt = 0; + + if (is_matrix(mbr_type)) + { + if (is_array(mbr_type)) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-matrices in input and output variables."); + + elem_cnt = mbr_type.columns; + } + else if (is_array(mbr_type)) + { + if (mbr_type.array.size() != 1) + SPIRV_CROSS_THROW("MSL cannot emit arrays-of-arrays in input and output variables."); + + elem_cnt = to_array_size_literal(mbr_type); + } + + auto *usable_type = &mbr_type; + if (usable_type->pointer) + usable_type = &get(usable_type->parent_type); + while (is_array(*usable_type) || is_matrix(*usable_type)) + usable_type = &get(usable_type->parent_type); + + for (uint32_t i = 0; i < elem_cnt; i++) + { + // Add a reference to the variable type to the interface struct. 
+ uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + ib_type.member_types.push_back(usable_type->self); + + // Give the member a name + string mbr_name = ensure_valid_name(join(to_qualified_member_name(var_type, mbr_idx), "_", i), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation)) + { + uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation) + i; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (has_decoration(var.self, DecorationLocation)) + { + uint32_t locn = get_accumulated_member_location(var, mbr_idx) + i; + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + if (has_member_decoration(var_type.self, mbr_idx, DecorationComponent)) + SPIRV_CROSS_THROW("DecorationComponent on matrices and arrays make little sense."); + + // Copy interpolation decorations if needed + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); + + // Unflatten or flatten from [[stage_in]] or [[stage_out]] as appropriate. + switch (storage) + { + case StorageClassInput: + entry_func.fixup_hooks_in.push_back([=]() { + statement(to_name(var.self), ".", to_member_name(var_type, mbr_idx), "[", i, "] = ", ib_var_ref, ".", + mbr_name, ";"); + }); + break; + + case StorageClassOutput: + entry_func.fixup_hooks_out.push_back([=]() { + statement(ib_var_ref, ".", mbr_name, " = ", to_name(var.self), ".", to_member_name(var_type, mbr_idx), + "[", i, "];"); + }); + break; + + default: + break; + } + } +} + +void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, uint32_t mbr_idx) +{ + auto &var_type = get_variable_data_type(var); + auto &entry_func = get(ir.default_entry_point); + + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin); + bool is_flat = + has_member_decoration(var_type.self, mbr_idx, DecorationFlat) || has_decoration(var.self, DecorationFlat); + bool is_noperspective = has_member_decoration(var_type.self, mbr_idx, DecorationNoPerspective) || + has_decoration(var.self, DecorationNoPerspective); + bool is_centroid = has_member_decoration(var_type.self, mbr_idx, DecorationCentroid) || + has_decoration(var.self, DecorationCentroid); + bool is_sample = + has_member_decoration(var_type.self, mbr_idx, DecorationSample) || has_decoration(var.self, DecorationSample); + + // Add a reference to the member to the interface struct. + uint32_t mbr_type_id = var_type.member_types[mbr_idx]; + uint32_t ib_mbr_idx = uint32_t(ib_type.member_types.size()); + mbr_type_id = ensure_correct_builtin_type(mbr_type_id, builtin); + var_type.member_types[mbr_idx] = mbr_type_id; + ib_type.member_types.push_back(mbr_type_id); + + // Give the member a name + string mbr_name = ensure_valid_name(to_qualified_member_name(var_type, mbr_idx), "m"); + set_member_name(ib_type.self, ib_mbr_idx, mbr_name); + + // Update the original variable reference to include the structure reference + string qual_var_name = ib_var_ref + "." 
+ mbr_name; + + if (is_builtin) + { + // For the builtin gl_PerVertex, we cannot treat it as a block anyways, + // so redirect to qualified name. + set_member_qualified_name(var_type.self, mbr_idx, qual_var_name); + } + else + { + // Unflatten or flatten from [[stage_in]] or [[stage_out]] as appropriate. + switch (storage) + { + case StorageClassInput: + entry_func.fixup_hooks_in.push_back([=]() { + statement(to_name(var.self), ".", to_member_name(var_type, mbr_idx), " = ", qual_var_name, ";"); + }); + break; + + case StorageClassOutput: + entry_func.fixup_hooks_out.push_back([=]() { + statement(qual_var_name, " = ", to_name(var.self), ".", to_member_name(var_type, mbr_idx), ";"); + }); + break; + + default: + break; + } + } + + // Copy the variable location from the original variable to the member + if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation)) + { + uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation); + if (storage == StorageClassInput && get_entry_point().model == ExecutionModelVertex) + { + mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn); + var_type.member_types[mbr_idx] = mbr_type_id; + ib_type.member_types[ib_mbr_idx] = mbr_type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + else if (has_decoration(var.self, DecorationLocation)) + { + // The block itself might have a location and in this case, all members of the block + // receive incrementing locations. + uint32_t locn = get_accumulated_member_location(var, mbr_idx); + if (storage == StorageClassInput && get_entry_point().model == ExecutionModelVertex) + { + mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn); + var_type.member_types[mbr_idx] = mbr_type_id; + ib_type.member_types[ib_mbr_idx] = mbr_type_id; + } + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); + mark_location_as_used_by_shader(locn, storage); + } + + // Copy the component location, if present. + if (has_member_decoration(var_type.self, mbr_idx, DecorationComponent)) + { + uint32_t comp = get_member_decoration(var_type.self, mbr_idx, DecorationComponent); + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationComponent, comp); + } + + // Mark the member as builtin if needed + if (is_builtin) + { + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationBuiltIn, builtin); + if (builtin == BuiltInPosition) + qual_pos_var_name = qual_var_name; + } + + // Copy interpolation decorations if needed + if (is_flat) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationFlat); + if (is_noperspective) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationNoPerspective); + if (is_centroid) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationCentroid); + if (is_sample) + set_member_decoration(ib_type.self, ib_mbr_idx, DecorationSample); +} + +void CompilerMSL::add_variable_to_interface_block(StorageClass storage, const string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var) +{ + auto &entry_func = get(ir.default_entry_point); + auto &var_type = get_variable_data_type(var); + + if (var_type.basetype == SPIRType::Struct) + { + if (!is_builtin_type(var_type)) + { + // For I/O blocks or structs, we will need to pass the block itself around + // to functions if they are used globally in leaf functions. 
+ // Rather than passing down member by member, + // we unflatten I/O blocks while running the shader, + // and pass the actual struct type down to leaf functions. + // We then unflatten inputs, and flatten outputs in the "fixup" stages. + entry_func.add_local_variable(var.self); + vars_needing_early_declaration.push_back(var.self); + } + + // Flatten the struct members into the interface struct + for (uint32_t mbr_idx = 0; mbr_idx < uint32_t(var_type.member_types.size()); mbr_idx++) + { + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(var_type, mbr_idx, &builtin); + auto &mbr_type = get(var_type.member_types[mbr_idx]); + + if (!is_builtin || has_active_builtin(builtin, storage)) + { + if (!is_builtin && (storage == StorageClassInput || storage == StorageClassOutput) && + (is_matrix(mbr_type) || is_array(mbr_type))) + { + add_composite_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx); + } + else + { + add_plain_member_variable_to_interface_block(storage, ib_var_ref, ib_type, var, mbr_idx); + } + } + } + } + else if (var_type.basetype == SPIRType::Boolean || var_type.basetype == SPIRType::Char || + type_is_integral(var_type) || type_is_floating_point(var_type) || var_type.basetype == SPIRType::Boolean) + { + bool is_builtin = is_builtin_variable(var); + BuiltIn builtin = BuiltIn(get_decoration(var.self, DecorationBuiltIn)); + + if (!is_builtin || has_active_builtin(builtin, storage)) + { + // MSL does not allow matrices or arrays in input or output variables, so need to handle it specially. + if (!is_builtin && (storage == StorageClassInput || storage == StorageClassOutput) && + (is_matrix(var_type) || is_array(var_type))) + { + add_composite_variable_to_interface_block(storage, ib_var_ref, ib_type, var); + } + else + { + add_plain_variable_to_interface_block(storage, ib_var_ref, ib_type, var); + } + } + } +} + +// Add an interface structure for the type of storage, which is either StorageClassInput or StorageClassOutput. +// Returns the ID of the newly added variable, or zero if no variable was added. +uint32_t CompilerMSL::add_interface_block(StorageClass storage) +{ + // Accumulate the variables that should appear in the interface struct + vector vars; + bool incl_builtins = (storage == StorageClassOutput); + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get(var.basetype); + if (var.storage == storage && interface_variable_exists_in_entry_point(var.self) && + !is_hidden_variable(var, incl_builtins) && type.pointer) + { + vars.push_back(&var); + } + } + } + + // If no variables qualify, leave + if (vars.empty()) + return 0; + + // Add a new typed variable for this interface structure. + // The initializer expression is allocated here, but populated when the function + // declaraion is emitted, because it is cleared after each compilation pass. + uint32_t next_id = ir.increase_bound_by(3); + uint32_t ib_type_id = next_id++; + auto &ib_type = set(ib_type_id); + ib_type.basetype = SPIRType::Struct; + ib_type.storage = storage; + set_decoration(ib_type_id, DecorationBlock); + + uint32_t ib_var_id = next_id++; + auto &var = set(ib_var_id, ib_type_id, storage, 0); + var.initializer = next_id++; + + string ib_var_ref; + switch (storage) + { + case StorageClassInput: + ib_var_ref = stage_in_var_name; + break; + + case StorageClassOutput: + { + ib_var_ref = stage_out_var_name; + + // Add the output interface struct as a local variable to the entry function. 
+ // If the entry point should return the output struct, set the entry function + // to return the output interface struct, otherwise to return nothing. + // Indicate the output var requires early initialization. + bool ep_should_return_output = !get_is_rasterization_disabled(); + uint32_t rtn_id = ep_should_return_output ? ib_var_id : 0; + auto &entry_func = get(ir.default_entry_point); + entry_func.add_local_variable(ib_var_id); + for (auto &blk_id : entry_func.blocks) + { + auto &blk = get(blk_id); + if (blk.terminator == SPIRBlock::Return) + blk.return_value = rtn_id; + } + vars_needing_early_declaration.push_back(ib_var_id); + break; + } + + default: + break; + } + + set_name(ib_type_id, to_name(ir.default_entry_point) + "_" + ib_var_ref); + set_name(ib_var_id, ib_var_ref); + + for (auto p_var : vars) + add_variable_to_interface_block(storage, ib_var_ref, ib_type, *p_var); + + // Sort the members of the structure by their locations. + MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Location); + member_sorter.sort(); + + return ib_var_id; +} + +// Ensure that the type is compatible with the builtin. +// If it is, simply return the given type ID. +// Otherwise, create a new type, and return it's ID. +uint32_t CompilerMSL::ensure_correct_builtin_type(uint32_t type_id, BuiltIn builtin) +{ + auto &type = get(type_id); + + if ((builtin == BuiltInSampleMask && is_array(type)) || + ((builtin == BuiltInLayer || builtin == BuiltInViewportIndex) && type.basetype != SPIRType::UInt)) + { + uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1); + uint32_t base_type_id = next_id++; + auto &base_type = set(base_type_id); + base_type.basetype = SPIRType::UInt; + base_type.width = 32; + + if (!type.pointer) + return base_type_id; + + uint32_t ptr_type_id = next_id++; + auto &ptr_type = set(ptr_type_id); + ptr_type = base_type; + ptr_type.pointer = true; + ptr_type.storage = type.storage; + ptr_type.parent_type = base_type_id; + return ptr_type_id; + } + + return type_id; +} + +// Ensure that the type is compatible with the vertex attribute. +// If it is, simply return the given type ID. +// Otherwise, create a new type, and return its ID. +uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t location) +{ + auto &type = get(type_id); + + MSLVertexAttr *p_va = vtx_attrs_by_location[location]; + if (!p_va) + return type_id; + + switch (p_va->format) + { + case MSL_VERTEX_FORMAT_UINT8: + { + switch (type.basetype) + { + case SPIRType::UByte: + case SPIRType::UShort: + case SPIRType::UInt: + return type_id; + case SPIRType::Short: + case SPIRType::Int: + break; + default: + SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader"); + } + uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1); + uint32_t base_type_id = next_id++; + auto &base_type = set(base_type_id); + base_type = type; + base_type.basetype = type.basetype == SPIRType::Short ? 
SPIRType::UShort : SPIRType::UInt; + + if (!type.pointer) + return base_type_id; + + uint32_t ptr_type_id = next_id++; + auto &ptr_type = set(ptr_type_id); + ptr_type = base_type; + ptr_type.pointer = true; + ptr_type.storage = type.storage; + ptr_type.parent_type = base_type_id; + return ptr_type_id; + } + case MSL_VERTEX_FORMAT_UINT16: + { + switch (type.basetype) + { + case SPIRType::UShort: + case SPIRType::UInt: + return type_id; + case SPIRType::Int: + break; + default: + SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader"); + } + uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1); + uint32_t base_type_id = next_id++; + auto &base_type = set(base_type_id); + base_type = type; + base_type.basetype = SPIRType::UInt; + + if (!type.pointer) + return base_type_id; + + uint32_t ptr_type_id = next_id++; + auto &ptr_type = set(ptr_type_id); + ptr_type = base_type; + ptr_type.pointer = true; + ptr_type.storage = type.storage; + ptr_type.parent_type = base_type_id; + return ptr_type_id; + } + + case MSL_VERTEX_FORMAT_OTHER: + break; + } + + return type_id; +} + +// Sort the members of the struct type by offset, and pack and then pad members where needed +// to align MSL members with SPIR-V offsets. The struct members are iterated twice. Packing +// occurs first, followed by padding, because packing a member reduces both its size and its +// natural alignment, possibly requiring a padding member to be added ahead of it. +void CompilerMSL::align_struct(SPIRType &ib_type) +{ + uint32_t &ib_type_id = ib_type.self; + + // Sort the members of the interface structure by their offset. + // They should already be sorted per SPIR-V spec anyway. + MemberSorter member_sorter(ib_type, ir.meta[ib_type_id], MemberSorter::Offset); + member_sorter.sort(); + + uint32_t mbr_cnt = uint32_t(ib_type.member_types.size()); + + // Test the alignment of each member, and if a member should be closer to the previous + // member than the default spacing expects, it is likely that the previous member is in + // a packed format. If so, and the previous member is packable, pack it. + // For example...this applies to any 3-element vector that is followed by a scalar. + uint32_t curr_offset = 0; + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + if (is_member_packable(ib_type, mbr_idx)) + set_member_decoration(ib_type_id, mbr_idx, DecorationCPacked); + + // Align current offset to the current member's default alignment. + size_t align_mask = get_declared_struct_member_alignment(ib_type, mbr_idx) - 1; + curr_offset = uint32_t((curr_offset + align_mask) & ~align_mask); + + // Fetch the member offset as declared in the SPIRV. + uint32_t mbr_offset = get_member_decoration(ib_type_id, mbr_idx, DecorationOffset); + if (mbr_offset > curr_offset) + { + // Since MSL and SPIR-V have slightly different struct member alignment and + // size rules, we'll pad to standard C-packing rules. If the member is farther + // away than C-packing, expects, add an inert padding member before the the member. + MSLStructMemberKey key = get_struct_member_key(ib_type_id, mbr_idx); + struct_member_padding[key] = mbr_offset - curr_offset; + } + + // Increment the current offset to be positioned immediately after the current member. + // Don't do this for the last member since it can be unsized, and it is not relevant for padding purposes here. 
+		if (mbr_idx + 1 < mbr_cnt)
+			curr_offset = mbr_offset + uint32_t(get_declared_struct_member_size(ib_type, mbr_idx));
+	}
+}
+
+// Returns whether the specified struct member supports a packable type
+// variation that is smaller than the unpacked variation of that type.
+bool CompilerMSL::is_member_packable(SPIRType &ib_type, uint32_t index)
+{
+	// We've already marked it as packable
+	if (has_member_decoration(ib_type.self, index, DecorationCPacked))
+		return true;
+
+	auto &mbr_type = get<SPIRType>(ib_type.member_types[index]);
+
+	// Only vectors or 3-row matrices need to be packed.
+	if (mbr_type.vecsize == 1 || (is_matrix(mbr_type) && mbr_type.vecsize != 3))
+		return false;
+
+	// Only row-major matrices need to be packed.
+	if (is_matrix(mbr_type) && !has_member_decoration(ib_type.self, index, DecorationRowMajor))
+		return false;
+
+	uint32_t component_size = mbr_type.width / 8;
+	uint32_t unpacked_mbr_size;
+	if (mbr_type.vecsize == 3)
+		unpacked_mbr_size = component_size * (mbr_type.vecsize + 1) * mbr_type.columns;
+	else
+		unpacked_mbr_size = component_size * mbr_type.vecsize * mbr_type.columns;
+	if (is_array(mbr_type))
+	{
+		// If member is an array, and the array stride is larger than the type needs, don't pack it.
+		// Take into consideration multi-dimensional arrays.
+		uint32_t md_elem_cnt = 1;
+		size_t last_elem_idx = mbr_type.array.size() - 1;
+		for (uint32_t i = 0; i < last_elem_idx; i++)
+			md_elem_cnt *= max(to_array_size_literal(mbr_type, i), 1u);
+
+		uint32_t unpacked_array_stride = unpacked_mbr_size * md_elem_cnt;
+		uint32_t array_stride = type_struct_member_array_stride(ib_type, index);
+		return unpacked_array_stride > array_stride;
+	}
+	else
+	{
+		uint32_t mbr_offset_curr = get_member_decoration(ib_type.self, index, DecorationOffset);
+		// For vectors, pack if the member's offset doesn't conform to the
+		// type's usual alignment. For example, a float3 at offset 4.
+		if (!is_matrix(mbr_type) && (mbr_offset_curr % unpacked_mbr_size))
+			return true;
+		// Pack if there is not enough space between this member and next.
+		// If last member, only pack if it's a row-major matrix.
+		if (index < ib_type.member_types.size() - 1)
+		{
+			uint32_t mbr_offset_next = get_member_decoration(ib_type.self, index + 1, DecorationOffset);
+			return unpacked_mbr_size > mbr_offset_next - mbr_offset_curr;
+		}
+		else
+			return is_matrix(mbr_type);
+	}
+}
+
+// Returns a combination of type ID and member index for use as hash key
+MSLStructMemberKey CompilerMSL::get_struct_member_key(uint32_t type_id, uint32_t index)
+{
+	MSLStructMemberKey k = type_id;
+	k <<= 32;
+	k += index;
+	return k;
+}
+
+// Converts the format of the current expression from packed to unpacked,
+// by wrapping the expression in a constructor of the appropriate type.
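+// For example (illustrative): an access to a packed member such as "ubo.v" whose
+// unpacked type is float3 is emitted as "float3(ubo.v)" so the expression can be
+// used wherever the full vector type is expected.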
+string CompilerMSL::unpack_expression_type(string expr_str, const SPIRType &type) +{ + return join(type_to_glsl(type), "(", expr_str, ")"); +} + +// Emits the file header info +void CompilerMSL::emit_header() +{ + for (auto &pragma : pragma_lines) + statement(pragma); + + if (!pragma_lines.empty()) + statement(""); + + statement("#include "); + statement("#include "); + + for (auto &header : header_lines) + statement(header); + + statement(""); + statement("using namespace metal;"); + statement(""); + + for (auto &td : typedef_lines) + statement(td); + + if (!typedef_lines.empty()) + statement(""); +} + +void CompilerMSL::add_pragma_line(const string &line) +{ + auto rslt = pragma_lines.insert(line); + if (rslt.second) + force_recompile = true; +} + +void CompilerMSL::add_typedef_line(const string &line) +{ + auto rslt = typedef_lines.insert(line); + if (rslt.second) + force_recompile = true; +} + +// Emits any needed custom function bodies. +void CompilerMSL::emit_custom_functions() +{ + for (uint32_t i = SPVFuncImplArrayCopyMultidimMax; i >= 2; i--) + if (spv_function_implementations.count(static_cast(SPVFuncImplArrayCopyMultidimBase + i))) + spv_function_implementations.insert(static_cast(SPVFuncImplArrayCopyMultidimBase + i - 1)); + + for (auto &spv_func : spv_function_implementations) + { + switch (spv_func) + { + case SPVFuncImplMod: + statement("// Implementation of the GLSL mod() function, which is slightly different than Metal fmod()"); + statement("template"); + statement("Tx mod(Tx x, Ty y)"); + begin_scope(); + statement("return x - y * floor(x / y);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplRadians: + statement("// Implementation of the GLSL radians() function"); + statement("template"); + statement("T radians(T d)"); + begin_scope(); + statement("return d * T(0.01745329251);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplDegrees: + statement("// Implementation of the GLSL degrees() function"); + statement("template"); + statement("T degrees(T r)"); + begin_scope(); + statement("return r * T(57.2957795131);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplFindILsb: + statement("// Implementation of the GLSL findLSB() function"); + statement("template"); + statement("T findLSB(T x)"); + begin_scope(); + statement("return select(ctz(x), T(-1), x == T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplFindUMsb: + statement("// Implementation of the unsigned GLSL findMSB() function"); + statement("template"); + statement("T findUMSB(T x)"); + begin_scope(); + statement("return select(clz(T(0)) - (clz(x) + T(1)), T(-1), x == T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplFindSMsb: + statement("// Implementation of the signed GLSL findMSB() function"); + statement("template"); + statement("T findSMSB(T x)"); + begin_scope(); + statement("T v = select(x, T(-1) - x, x < T(0));"); + statement("return select(clz(T(0)) - (clz(v) + T(1)), T(-1), v == T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplSSign: + statement("// Implementation of the GLSL sign() function for integer types"); + statement("template::value>::type>"); + statement("T sign(T x)"); + begin_scope(); + statement("return select(select(select(x, T(0), x == T(0)), T(1), x > T(0)), T(-1), x < T(0));"); + end_scope(); + statement(""); + break; + + case SPVFuncImplArrayCopy: + statement("// Implementation of an array copy function to cover GLSL's ability to copy an array via " + "assignment."); + 
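+			// (MSL cannot assign whole arrays by value, so SPIR-V array assignment is
+			// lowered to calls to these element-wise copy helpers; the FromStack and
+			// FromConstant variants differ only in the source address space.)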
statement("template"); + statement("void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N])"); + begin_scope(); + statement("for (uint i = 0; i < N; dst[i] = src[i], i++);"); + end_scope(); + statement(""); + + statement("template"); + statement("void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])"); + begin_scope(); + statement("for (uint i = 0; i < N; dst[i] = src[i], i++);"); + end_scope(); + statement(""); + break; + + case SPVFuncImplArrayOfArrayCopy2Dim: + case SPVFuncImplArrayOfArrayCopy3Dim: + case SPVFuncImplArrayOfArrayCopy4Dim: + case SPVFuncImplArrayOfArrayCopy5Dim: + case SPVFuncImplArrayOfArrayCopy6Dim: + { + static const char *function_name_tags[] = { + "FromStack", + "FromConstant", + }; + + static const char *src_address_space[] = { + "thread const", + "constant", + }; + + for (uint32_t variant = 0; variant < 2; variant++) + { + uint32_t dimensions = spv_func - SPVFuncImplArrayCopyMultidimBase; + string tmp = "template struct spvRemoveReference { typedef T type; };"); + statement("template struct spvRemoveReference { typedef T type; };"); + statement("template struct spvRemoveReference { typedef T type; };"); + statement("template inline constexpr thread T&& spvForward(thread typename " + "spvRemoveReference::type& x)"); + begin_scope(); + statement("return static_cast(x);"); + end_scope(); + statement("template inline constexpr thread T&& spvForward(thread typename " + "spvRemoveReference::type&& x)"); + begin_scope(); + statement("return static_cast(x);"); + end_scope(); + statement(""); + statement("template"); + statement("inline T spvGetSwizzle(vec x, T c, spvSwizzle s)"); + begin_scope(); + statement("switch (s)"); + begin_scope(); + statement("case spvSwizzle::none:"); + statement(" return c;"); + statement("case spvSwizzle::zero:"); + statement(" return 0;"); + statement("case spvSwizzle::one:"); + statement(" return 1;"); + statement("case spvSwizzle::red:"); + statement(" return x.r;"); + statement("case spvSwizzle::green:"); + statement(" return x.g;"); + statement("case spvSwizzle::blue:"); + statement(" return x.b;"); + statement("case spvSwizzle::alpha:"); + statement(" return x.a;"); + end_scope(); + end_scope(); + statement(""); + statement("// Wrapper function that swizzles texture samples and fetches."); + statement("template"); + statement("inline vec spvTextureSwizzle(vec x, uint s)"); + begin_scope(); + statement("if (!s)"); + statement(" return x;"); + statement("return vec(spvGetSwizzle(x, x.r, spvSwizzle((s >> 0) & 0xFF)), " + "spvGetSwizzle(x, x.g, spvSwizzle((s >> 8) & 0xFF)), spvGetSwizzle(x, x.b, spvSwizzle((s >> 16) " + "& 0xFF)), " + "spvGetSwizzle(x, x.a, spvSwizzle((s >> 24) & 0xFF)));"); + end_scope(); + statement(""); + statement("template"); + statement("inline T spvTextureSwizzle(T x, uint s)"); + begin_scope(); + statement("return spvTextureSwizzle(vec(x, 0, 0, 1), s).x;"); + end_scope(); + statement(""); + statement("// Wrapper function that swizzles texture gathers."); + statement("template"); + statement("inline vec spvGatherSwizzle(sampler s, thread Tex& t, Ts... 
params, component c, uint sw) " + "METAL_CONST_ARG(c)"); + begin_scope(); + statement("if (sw)"); + begin_scope(); + statement("switch (spvSwizzle((sw >> (uint(c) * 8)) & 0xFF))"); + begin_scope(); + statement("case spvSwizzle::none:"); + statement(" break;"); + statement("case spvSwizzle::zero:"); + statement(" return vec(0, 0, 0, 0);"); + statement("case spvSwizzle::one:"); + statement(" return vec(1, 1, 1, 1);"); + statement("case spvSwizzle::red:"); + statement(" return t.gather(s, spvForward(params)..., component::x);"); + statement("case spvSwizzle::green:"); + statement(" return t.gather(s, spvForward(params)..., component::y);"); + statement("case spvSwizzle::blue:"); + statement(" return t.gather(s, spvForward(params)..., component::z);"); + statement("case spvSwizzle::alpha:"); + statement(" return t.gather(s, spvForward(params)..., component::w);"); + end_scope(); + end_scope(); + // texture::gather insists on its component parameter being a constant + // expression, so we need this silly workaround just to compile the shader. + statement("switch (c)"); + begin_scope(); + statement("case component::x:"); + statement(" return t.gather(s, spvForward(params)..., component::x);"); + statement("case component::y:"); + statement(" return t.gather(s, spvForward(params)..., component::y);"); + statement("case component::z:"); + statement(" return t.gather(s, spvForward(params)..., component::z);"); + statement("case component::w:"); + statement(" return t.gather(s, spvForward(params)..., component::w);"); + end_scope(); + end_scope(); + statement(""); + statement("// Wrapper function that swizzles depth texture gathers."); + statement("template"); + statement("inline vec spvGatherCompareSwizzle(sampler s, thread Tex& t, Ts... params, uint sw) "); + begin_scope(); + statement("if (sw)"); + begin_scope(); + statement("switch (spvSwizzle(sw & 0xFF))"); + begin_scope(); + statement("case spvSwizzle::none:"); + statement("case spvSwizzle::red:"); + statement(" break;"); + statement("case spvSwizzle::zero:"); + statement("case spvSwizzle::green:"); + statement("case spvSwizzle::blue:"); + statement("case spvSwizzle::alpha:"); + statement(" return vec(0, 0, 0, 0);"); + statement("case spvSwizzle::one:"); + statement(" return vec(1, 1, 1, 1);"); + end_scope(); + end_scope(); + statement("return t.gather_compare(s, spvForward(params)...);"); + end_scope(); + statement(""); + + default: + break; + } + } +} + +// Undefined global memory is not allowed in MSL. +// Declare constant and init to zeros. Use {}, as global constructors can break Metal. +void CompilerMSL::declare_undefined_values() +{ + bool emitted = false; + for (auto &id : ir.ids) + { + if (id.get_type() == TypeUndef) + { + auto &undef = id.get(); + auto &type = get(undef.basetype); + statement("constant ", variable_decl(type, to_name(undef.self), undef.self), " = {};"); + emitted = true; + } + } + + if (emitted) + statement(""); +} + +void CompilerMSL::declare_constant_arrays() +{ + // MSL cannot declare arrays inline (except when declaring a variable), so we must move them out to + // global constants directly, so we are able to use constants as variable expressions. 
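+	// For example (illustrative name/ID): an OpConstantComposite float[2] constant is
+	// hoisted to file scope as
+	//
+	//     constant float _42[2] = { 1.0, 2.0 };
+	//
+	// and expressions then refer to it by name instead of an inline initializer.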
+ bool emitted = false; + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + if (c.specialization) + continue; + + auto &type = get(c.constant_type); + if (!type.array.empty()) + { + auto name = to_name(c.self); + statement("constant ", variable_decl(type, name), " = ", constant_expression(c), ";"); + emitted = true; + } + } + } + + if (emitted) + statement(""); +} + +void CompilerMSL::emit_resources() +{ + // Output non-builtin interface structs. These include local function structs + // and structs nested within uniform and read-write buffers. + unordered_set declared_structs; + for (auto &id : ir.ids) + { + if (id.get_type() == TypeType) + { + auto &type = id.get(); + uint32_t type_id = type.self; + + bool is_struct = (type.basetype == SPIRType::Struct) && type.array.empty(); + bool is_block = + has_decoration(type.self, DecorationBlock) || has_decoration(type.self, DecorationBufferBlock); + + bool is_builtin_block = is_block && is_builtin_type(type); + bool is_declarable_struct = is_struct && !is_builtin_block; + + // We'll declare this later. + if (stage_out_var_id && get(stage_out_var_id).basetype == type_id) + is_declarable_struct = false; + if (stage_in_var_id && get(stage_in_var_id).basetype == type_id) + is_declarable_struct = false; + + // Align and emit declarable structs...but avoid declaring each more than once. + if (is_declarable_struct && declared_structs.count(type_id) == 0) + { + declared_structs.insert(type_id); + + if (has_decoration(type_id, DecorationCPacked)) + align_struct(type); + + emit_struct(type); + } + } + } + + declare_constant_arrays(); + declare_undefined_values(); + + // Emit the special [[stage_in]] and [[stage_out]] interface blocks which we created. + emit_interface_block(stage_out_var_id); + emit_interface_block(stage_in_var_id); +} + +// Emit declarations for the specialization Metal function constants +void CompilerMSL::emit_specialization_constants() +{ + SpecializationConstant wg_x, wg_y, wg_z; + uint32_t workgroup_size_id = get_work_group_size_specialization_constants(wg_x, wg_y, wg_z); + bool emitted = false; + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeConstant) + { + auto &c = id.get(); + + if (c.self == workgroup_size_id) + { + // TODO: This can be expressed as a [[threads_per_threadgroup]] input semantic, but we need to know + // the work group size at compile time in SPIR-V, and [[threads_per_threadgroup]] would need to be passed around as a global. + // The work group size may be a specialization constant. + statement("constant uint3 ", builtin_to_glsl(BuiltInWorkgroupSize, StorageClassWorkgroup), " = ", + constant_expression(get(workgroup_size_id)), ";"); + emitted = true; + } + else if (c.specialization) + { + auto &type = get(c.constant_type); + string sc_type_name = type_to_glsl(type); + string sc_name = to_name(c.self); + string sc_tmp_name = sc_name + "_tmp"; + + // Function constants are only supported in MSL 1.2 and later. + // If we don't support it just declare the "default" directly. + // This "default" value can be overridden to the true specialization constant by the API user. + // Specialization constants which are used as array length expressions cannot be function constants in MSL, + // so just fall back to macros. 
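+				// Illustration of the two forms emitted below (hypothetical constant "foo"
+				// of type int with constant_id 3 and default value 1; the macro name is
+				// produced by constant_value_macro_name()):
+				//
+				//   MSL 1.2+ function constant:
+				//     constant int foo_tmp [[function_constant(3)]];
+				//     constant int foo = is_function_constant_defined(foo_tmp) ? foo_tmp : 1;
+				//
+				//   Macro fallback:
+				//     #ifndef SPIRV_CROSS_CONSTANT_ID_3
+				//     #define SPIRV_CROSS_CONSTANT_ID_3 1
+				//     #endif
+				//     constant int foo = SPIRV_CROSS_CONSTANT_ID_3;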
+ if (msl_options.supports_msl_version(1, 2) && has_decoration(c.self, DecorationSpecId) && + !c.is_used_as_array_length) + { + uint32_t constant_id = get_decoration(c.self, DecorationSpecId); + // Only scalar, non-composite values can be function constants. + statement("constant ", sc_type_name, " ", sc_tmp_name, " [[function_constant(", constant_id, + ")]];"); + statement("constant ", sc_type_name, " ", sc_name, " = is_function_constant_defined(", sc_tmp_name, + ") ? ", sc_tmp_name, " : ", constant_expression(c), ";"); + } + else if (has_decoration(c.self, DecorationSpecId)) + { + // Fallback to macro overrides. + c.specialization_constant_macro_name = + constant_value_macro_name(get_decoration(c.self, DecorationSpecId)); + + statement("#ifndef ", c.specialization_constant_macro_name); + statement("#define ", c.specialization_constant_macro_name, " ", constant_expression(c)); + statement("#endif"); + statement("constant ", sc_type_name, " ", sc_name, " = ", c.specialization_constant_macro_name, + ";"); + } + else + { + // Composite specialization constants must be built from other specialization constants. + statement("constant ", sc_type_name, " ", sc_name, " = ", constant_expression(c), ";"); + } + emitted = true; + } + } + else if (id.get_type() == TypeConstantOp) + { + auto &c = id.get(); + auto &type = get(c.basetype); + auto name = to_name(c.self); + statement("constant ", variable_decl(type, name), " = ", constant_op_expression(c), ";"); + emitted = true; + } + } + + if (emitted) + statement(""); +} + +void CompilerMSL::emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, + const char *op) +{ + bool forward = should_forward(op0) && should_forward(op1); + emit_op(result_type, result_id, + join("(isunordered(", to_enclosed_unpacked_expression(op0), ", ", to_enclosed_unpacked_expression(op1), + ") || ", to_enclosed_unpacked_expression(op0), " ", op, " ", to_enclosed_unpacked_expression(op1), + ")"), + forward); + + inherit_expression_dependencies(result_id, op0); + inherit_expression_dependencies(result_id, op1); +} + +// Override for MSL-specific syntax instructions +void CompilerMSL::emit_instruction(const Instruction &instruction) +{ + +#define MSL_BOP(op) emit_binary_op(ops[0], ops[1], ops[2], ops[3], #op) +#define MSL_BOP_CAST(op, type) \ + emit_binary_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode)) +#define MSL_UOP(op) emit_unary_op(ops[0], ops[1], ops[2], #op) +#define MSL_QFOP(op) emit_quaternary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], ops[5], #op) +#define MSL_TFOP(op) emit_trinary_func_op(ops[0], ops[1], ops[2], ops[3], ops[4], #op) +#define MSL_BFOP(op) emit_binary_func_op(ops[0], ops[1], ops[2], ops[3], #op) +#define MSL_BFOP_CAST(op, type) \ + emit_binary_func_op_cast(ops[0], ops[1], ops[2], ops[3], #op, type, opcode_is_sign_invariant(opcode)) +#define MSL_UFOP(op) emit_unary_func_op(ops[0], ops[1], ops[2], #op) +#define MSL_UNORD_BOP(op) emit_binary_unord_op(ops[0], ops[1], ops[2], ops[3], #op) + + auto ops = stream(instruction); + auto opcode = static_cast(instruction.op); + + switch (opcode) + { + + // Comparisons + case OpIEqual: + case OpLogicalEqual: + case OpFOrdEqual: + MSL_BOP(==); + break; + + case OpINotEqual: + case OpLogicalNotEqual: + case OpFOrdNotEqual: + MSL_BOP(!=); + break; + + case OpUGreaterThan: + case OpSGreaterThan: + case OpFOrdGreaterThan: + MSL_BOP(>); + break; + + case OpUGreaterThanEqual: + case OpSGreaterThanEqual: + case OpFOrdGreaterThanEqual: + MSL_BOP(>=); 
+ break; + + case OpULessThan: + case OpSLessThan: + case OpFOrdLessThan: + MSL_BOP(<); + break; + + case OpULessThanEqual: + case OpSLessThanEqual: + case OpFOrdLessThanEqual: + MSL_BOP(<=); + break; + + case OpFUnordEqual: + MSL_UNORD_BOP(==); + break; + + case OpFUnordNotEqual: + MSL_UNORD_BOP(!=); + break; + + case OpFUnordGreaterThan: + MSL_UNORD_BOP(>); + break; + + case OpFUnordGreaterThanEqual: + MSL_UNORD_BOP(>=); + break; + + case OpFUnordLessThan: + MSL_UNORD_BOP(<); + break; + + case OpFUnordLessThanEqual: + MSL_UNORD_BOP(<=); + break; + + // Derivatives + case OpDPdx: + case OpDPdxFine: + case OpDPdxCoarse: + MSL_UFOP(dfdx); + register_control_dependent_expression(ops[1]); + break; + + case OpDPdy: + case OpDPdyFine: + case OpDPdyCoarse: + MSL_UFOP(dfdy); + register_control_dependent_expression(ops[1]); + break; + + case OpFwidth: + case OpFwidthCoarse: + case OpFwidthFine: + MSL_UFOP(fwidth); + register_control_dependent_expression(ops[1]); + break; + + // Bitfield + case OpBitFieldInsert: + MSL_QFOP(insert_bits); + break; + + case OpBitFieldSExtract: + case OpBitFieldUExtract: + MSL_TFOP(extract_bits); + break; + + case OpBitReverse: + MSL_UFOP(reverse_bits); + break; + + case OpBitCount: + MSL_UFOP(popcount); + break; + + case OpFRem: + MSL_BFOP(fmod); + break; + + // Atomics + case OpAtomicExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t mem_sem = ops[4]; + uint32_t val = ops[5]; + emit_atomic_func_op(result_type, id, "atomic_exchange_explicit", mem_sem, mem_sem, false, ptr, val); + break; + } + + case OpAtomicCompareExchange: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t mem_sem_pass = ops[4]; + uint32_t mem_sem_fail = ops[5]; + uint32_t val = ops[6]; + uint32_t comp = ops[7]; + emit_atomic_func_op(result_type, id, "atomic_compare_exchange_weak_explicit", mem_sem_pass, mem_sem_fail, true, + ptr, comp, true, false, val); + break; + } + + case OpAtomicCompareExchangeWeak: + SPIRV_CROSS_THROW("OpAtomicCompareExchangeWeak is only supported in kernel profile."); + + case OpAtomicLoad: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t ptr = ops[2]; + uint32_t mem_sem = ops[4]; + emit_atomic_func_op(result_type, id, "atomic_load_explicit", mem_sem, mem_sem, false, ptr, 0); + break; + } + + case OpAtomicStore: + { + uint32_t result_type = expression_type(ops[0]).self; + uint32_t id = ops[0]; + uint32_t ptr = ops[0]; + uint32_t mem_sem = ops[2]; + uint32_t val = ops[3]; + emit_atomic_func_op(result_type, id, "atomic_store_explicit", mem_sem, mem_sem, false, ptr, val); + break; + } + +#define MSL_AFMO_IMPL(op, valsrc, valconst) \ + do \ + { \ + uint32_t result_type = ops[0]; \ + uint32_t id = ops[1]; \ + uint32_t ptr = ops[2]; \ + uint32_t mem_sem = ops[4]; \ + uint32_t val = valsrc; \ + emit_atomic_func_op(result_type, id, "atomic_fetch_" #op "_explicit", mem_sem, mem_sem, false, ptr, val, \ + false, valconst); \ + } while (false) + +#define MSL_AFMO(op) MSL_AFMO_IMPL(op, ops[5], false) +#define MSL_AFMIO(op) MSL_AFMO_IMPL(op, 1, true) + + case OpAtomicIIncrement: + MSL_AFMIO(add); + break; + + case OpAtomicIDecrement: + MSL_AFMIO(sub); + break; + + case OpAtomicIAdd: + MSL_AFMO(add); + break; + + case OpAtomicISub: + MSL_AFMO(sub); + break; + + case OpAtomicSMin: + case OpAtomicUMin: + MSL_AFMO(min); + break; + + case OpAtomicSMax: + case OpAtomicUMax: + MSL_AFMO(max); + break; + + case OpAtomicAnd: + MSL_AFMO(and); + break; + + case OpAtomicOr: + 
MSL_AFMO(or); + break; + + case OpAtomicXor: + MSL_AFMO(xor); + break; + + // Images + + // Reads == Fetches in Metal + case OpImageRead: + { + // Mark that this shader reads from this image + uint32_t img_id = ops[2]; + auto &type = expression_type(img_id); + if (type.image.dim != DimSubpassData) + { + auto *p_var = maybe_get_backing_variable(img_id); + if (p_var && has_decoration(p_var->self, DecorationNonReadable)) + { + unset_decoration(p_var->self, DecorationNonReadable); + force_recompile = true; + } + } + + emit_texture_op(instruction); + break; + } + + case OpImageWrite: + { + uint32_t img_id = ops[0]; + uint32_t coord_id = ops[1]; + uint32_t texel_id = ops[2]; + const uint32_t *opt = &ops[3]; + uint32_t length = instruction.length - 3; + + // Bypass pointers because we need the real image struct + auto &type = expression_type(img_id); + auto &img_type = get(type.self); + + // Ensure this image has been marked as being written to and force a + // recommpile so that the image type output will include write access + auto *p_var = maybe_get_backing_variable(img_id); + if (p_var && has_decoration(p_var->self, DecorationNonWritable)) + { + unset_decoration(p_var->self, DecorationNonWritable); + force_recompile = true; + } + + bool forward = false; + uint32_t bias = 0; + uint32_t lod = 0; + uint32_t flags = 0; + + if (length) + { + flags = *opt++; + length--; + } + + auto test = [&](uint32_t &v, uint32_t flag) { + if (length && (flags & flag)) + { + v = *opt++; + length--; + } + }; + + test(bias, ImageOperandsBiasMask); + test(lod, ImageOperandsLodMask); + + statement(join( + to_expression(img_id), ".write(", to_expression(texel_id), ", ", + to_function_args(img_id, img_type, true, false, false, coord_id, 0, 0, 0, 0, lod, 0, 0, 0, 0, 0, &forward), + ");")); + + if (p_var && variable_storage_is_aliased(*p_var)) + flush_all_aliased_variables(); + + break; + } + + case OpImageQuerySize: + case OpImageQuerySizeLod: + { + uint32_t rslt_type_id = ops[0]; + auto &rslt_type = get(rslt_type_id); + + uint32_t id = ops[1]; + + uint32_t img_id = ops[2]; + string img_exp = to_expression(img_id); + auto &img_type = expression_type(img_id); + Dim img_dim = img_type.image.dim; + bool img_is_array = img_type.image.arrayed; + + if (img_type.basetype != SPIRType::Image) + SPIRV_CROSS_THROW("Invalid type for OpImageQuerySize."); + + string lod; + if (opcode == OpImageQuerySizeLod) + { + // LOD index defaults to zero, so don't bother outputing level zero index + string decl_lod = to_expression(ops[3]); + if (decl_lod != "0") + lod = decl_lod; + } + + string expr = type_to_glsl(rslt_type) + "("; + expr += img_exp + ".get_width(" + lod + ")"; + + if (img_dim == Dim2D || img_dim == DimCube || img_dim == Dim3D) + expr += ", " + img_exp + ".get_height(" + lod + ")"; + + if (img_dim == Dim3D) + expr += ", " + img_exp + ".get_depth(" + lod + ")"; + + if (img_is_array) + expr += ", " + img_exp + ".get_array_size()"; + + expr += ")"; + + emit_op(rslt_type_id, id, expr, should_forward(img_id)); + + break; + } + + case OpImageQueryLod: + SPIRV_CROSS_THROW("MSL does not support textureQueryLod()."); + +#define MSL_ImgQry(qrytype) \ + do \ + { \ + uint32_t rslt_type_id = ops[0]; \ + auto &rslt_type = get(rslt_type_id); \ + uint32_t id = ops[1]; \ + uint32_t img_id = ops[2]; \ + string img_exp = to_expression(img_id); \ + string expr = type_to_glsl(rslt_type) + "(" + img_exp + ".get_num_" #qrytype "())"; \ + emit_op(rslt_type_id, id, expr, should_forward(img_id)); \ + } while (false) + + case OpImageQueryLevels: + 
MSL_ImgQry(mip_levels); + break; + + case OpImageQuerySamples: + MSL_ImgQry(samples); + break; + + case OpImage: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto *combined = maybe_get(ops[2]); + + if (combined) + { + auto &e = emit_op(result_type, id, to_expression(combined->image), true, true); + auto *var = maybe_get_backing_variable(combined->image); + if (var) + e.loaded_from = var->self; + } + else + { + auto &e = emit_op(result_type, id, to_expression(ops[2]), true, true); + auto *var = maybe_get_backing_variable(ops[2]); + if (var) + e.loaded_from = var->self; + } + break; + } + + // Casting + case OpQuantizeToF16: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + uint32_t arg = ops[2]; + + string exp; + auto &type = get(result_type); + + switch (type.vecsize) + { + case 1: + exp = join("float(half(", to_expression(arg), "))"); + break; + case 2: + exp = join("float2(half2(", to_expression(arg), "))"); + break; + case 3: + exp = join("float3(half3(", to_expression(arg), "))"); + break; + case 4: + exp = join("float4(half4(", to_expression(arg), "))"); + break; + default: + SPIRV_CROSS_THROW("Illegal argument to OpQuantizeToF16."); + } + + emit_op(result_type, id, exp, should_forward(arg)); + break; + } + + case OpStore: + if (maybe_emit_array_assignment(ops[0], ops[1])) + break; + + CompilerGLSL::emit_instruction(instruction); + break; + + // Compute barriers + case OpMemoryBarrier: + emit_barrier(0, ops[0], ops[1]); + break; + + case OpControlBarrier: + // In GLSL a memory barrier is often followed by a control barrier. + // But in MSL, memory barriers are also control barriers, so don't + // emit a simple control barrier if a memory barrier has just been emitted. + if (previous_instruction_opcode != OpMemoryBarrier) + emit_barrier(ops[0], ops[1], ops[2]); + break; + + case OpVectorTimesMatrix: + case OpMatrixTimesVector: + { + // If the matrix needs transpose and it is square or packed, just flip the multiply order. + uint32_t mtx_id = ops[opcode == OpMatrixTimesVector ? 2 : 3]; + auto *e = maybe_get(mtx_id); + auto &t = expression_type(mtx_id); + bool is_packed = has_decoration(mtx_id, DecorationCPacked); + if (e && e->need_transpose && (t.columns == t.vecsize || is_packed)) + { + e->need_transpose = false; + + // This is important for matrices. Packed matrices + // are generally transposed, so unpacking using a constructor argument + // will result in an error. + // The simplest solution for now is to just avoid unpacking the matrix in this operation. 
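+			// Illustrative: rather than emitting "transpose(m) * v", the operand order is
+			// swapped below so the generated MSL reads "v * m", which evaluates the same
+			// product without materializing the transpose or unpacking a packed matrix.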
+ unset_decoration(mtx_id, DecorationCPacked); + + emit_binary_op(ops[0], ops[1], ops[3], ops[2], "*"); + if (is_packed) + set_decoration(mtx_id, DecorationCPacked); + e->need_transpose = true; + } + else + MSL_BOP(*); + break; + } + + // OpOuterProduct + + case OpIAddCarry: + case OpISubBorrow: + { + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + forced_temporaries.insert(result_id); + auto &type = get(result_type); + statement(variable_decl(type, to_name(result_id)), ";"); + set(result_id, to_name(result_id), result_type, true); + + auto &res_type = get(type.member_types[1]); + if (opcode == OpIAddCarry) + { + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " + ", + to_enclosed_expression(op1), ";"); + statement(to_expression(result_id), ".", to_member_name(type, 1), " = select(", type_to_glsl(res_type), + "(1), ", type_to_glsl(res_type), "(0), ", to_expression(result_id), ".", to_member_name(type, 0), + " >= max(", to_expression(op0), ", ", to_expression(op1), "));"); + } + else + { + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " - ", + to_enclosed_expression(op1), ";"); + statement(to_expression(result_id), ".", to_member_name(type, 1), " = select(", type_to_glsl(res_type), + "(1), ", type_to_glsl(res_type), "(0), ", to_enclosed_expression(op0), + " >= ", to_enclosed_expression(op1), ");"); + } + break; + } + + case OpUMulExtended: + case OpSMulExtended: + { + uint32_t result_type = ops[0]; + uint32_t result_id = ops[1]; + uint32_t op0 = ops[2]; + uint32_t op1 = ops[3]; + forced_temporaries.insert(result_id); + auto &type = get(result_type); + statement(variable_decl(type, to_name(result_id)), ";"); + set(result_id, to_name(result_id), result_type, true); + + statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", to_enclosed_expression(op0), " * ", + to_enclosed_expression(op1), ";"); + statement(to_expression(result_id), ".", to_member_name(type, 1), " = mulhi(", to_expression(op0), ", ", + to_expression(op1), ");"); + break; + } + + default: + CompilerGLSL::emit_instruction(instruction); + break; + } + + previous_instruction_opcode = opcode; +} + +void CompilerMSL::emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem) +{ + if (get_entry_point().model != ExecutionModelGLCompute) + return; + + string bar_stmt = "threadgroup_barrier(mem_flags::"; + + uint32_t mem_sem = id_mem_sem ? get(id_mem_sem).scalar() : uint32_t(MemorySemanticsMaskNone); + + if (mem_sem & MemorySemanticsCrossWorkgroupMemoryMask) + bar_stmt += "mem_device"; + else if (mem_sem & (MemorySemanticsSubgroupMemoryMask | MemorySemanticsWorkgroupMemoryMask | + MemorySemanticsAtomicCounterMemoryMask)) + bar_stmt += "mem_threadgroup"; + else if (mem_sem & MemorySemanticsImageMemoryMask) + bar_stmt += "mem_texture"; + else + bar_stmt += "mem_none"; + + if (msl_options.is_ios() && (msl_options.supports_msl_version(2) && !msl_options.supports_msl_version(2, 1))) + { + bar_stmt += ", "; + + // Use the wider of the two scopes (smaller value) + uint32_t exe_scope = id_exe_scope ? get(id_exe_scope).scalar() : uint32_t(ScopeInvocation); + uint32_t mem_scope = id_mem_scope ? 
get(id_mem_scope).scalar() : uint32_t(ScopeInvocation); + uint32_t scope = min(exe_scope, mem_scope); + switch (scope) + { + case ScopeCrossDevice: + case ScopeDevice: + bar_stmt += "memory_scope_device"; + break; + + case ScopeSubgroup: + case ScopeInvocation: + bar_stmt += "memory_scope_simdgroup"; + break; + + case ScopeWorkgroup: + default: + bar_stmt += "memory_scope_threadgroup"; + break; + } + } + + bar_stmt += ");"; + + statement(bar_stmt); + + assert(current_emitting_block); + flush_control_dependent_expressions(current_emitting_block->self); + flush_all_active_variables(); +} + +void CompilerMSL::emit_array_copy(const string &lhs, uint32_t rhs_id) +{ + // Assignment from an array initializer is fine. + auto &type = expression_type(rhs_id); + auto *var = maybe_get_backing_variable(rhs_id); + + // Unfortunately, we cannot template on address space in MSL, + // so explicit address space redirection it is ... + bool is_constant = false; + if (ir.ids[rhs_id].get_type() == TypeConstant) + { + is_constant = true; + } + else if (var && var->remapped_variable && var->statically_assigned && + ir.ids[var->static_expression].get_type() == TypeConstant) + { + is_constant = true; + } + + const char *tag = is_constant ? "FromConstant" : "FromStack"; + statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ");"); +} + +// Since MSL does not allow arrays to be copied via simple variable assignment, +// if the LHS and RHS represent an assignment of an entire array, it must be +// implemented by calling an array copy function. +// Returns whether the struct assignment was emitted. +bool CompilerMSL::maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs) +{ + // We only care about assignments of an entire array + auto &type = expression_type(id_rhs); + if (type.array.size() == 0) + return false; + + auto *var = maybe_get(id_lhs); + + // Is this a remapped, static constant? Don't do anything. + if (var && var->remapped_variable && var->statically_assigned) + return true; + + if (ir.ids[id_rhs].get_type() == TypeConstant && var && var->deferred_declaration) + { + // Special case, if we end up declaring a variable when assigning the constant array, + // we can avoid the copy by directly assigning the constant expression. + // This is likely necessary to be able to use a variable as a true look-up table, as it is unlikely + // the compiler will be able to optimize the spvArrayCopy() into a constant LUT. + // After a variable has been declared, we can no longer assign constant arrays in MSL unfortunately. + statement(to_expression(id_lhs), " = ", constant_expression(get(id_rhs)), ";"); + return true; + } + + // Ensure the LHS variable has been declared + auto *p_v_lhs = maybe_get_backing_variable(id_lhs); + if (p_v_lhs) + flush_variable_declaration(p_v_lhs->self); + + emit_array_copy(to_expression(id_lhs), id_rhs); + register_write(id_lhs); + + return true; +} + +// Emits one of the atomic functions. 
In MSL, the atomic functions operate on pointers +void CompilerMSL::emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, uint32_t mem_order_1, + uint32_t mem_order_2, bool has_mem_order_2, uint32_t obj, uint32_t op1, + bool op1_is_pointer, bool op1_is_literal, uint32_t op2) +{ + forced_temporaries.insert(result_id); + + string exp = string(op) + "("; + + auto &type = get_pointee_type(expression_type(obj)); + exp += "(volatile "; + auto *var = maybe_get_backing_variable(obj); + if (!var) + SPIRV_CROSS_THROW("No backing variable for atomic operation."); + exp += get_argument_address_space(*var); + exp += " atomic_"; + exp += type_to_glsl(type); + exp += "*)"; + + exp += "&"; + exp += to_enclosed_expression(obj); + + bool is_atomic_compare_exchange_strong = op1_is_pointer && op1; + + if (is_atomic_compare_exchange_strong) + { + assert(strcmp(op, "atomic_compare_exchange_weak_explicit") == 0); + assert(op2); + assert(has_mem_order_2); + exp += ", &"; + exp += to_name(result_id); + exp += ", "; + exp += to_expression(op2); + exp += ", "; + exp += get_memory_order(mem_order_1); + exp += ", "; + exp += get_memory_order(mem_order_2); + exp += ")"; + + // MSL only supports the weak atomic compare exchange, + // so emit a CAS loop here. + statement(variable_decl(type, to_name(result_id)), ";"); + statement("do"); + begin_scope(); + statement(to_name(result_id), " = ", to_expression(op1), ";"); + end_scope_decl(join("while (!", exp, ")")); + set(result_id, to_name(result_id), result_type, true); + } + else + { + assert(strcmp(op, "atomic_compare_exchange_weak_explicit") != 0); + if (op1) + { + if (op1_is_literal) + exp += join(", ", op1); + else + exp += ", " + to_expression(op1); + } + if (op2) + exp += ", " + to_expression(op2); + + exp += string(", ") + get_memory_order(mem_order_1); + if (has_mem_order_2) + exp += string(", ") + get_memory_order(mem_order_2); + + exp += ")"; + emit_op(result_type, result_id, exp, false); + } + + flush_all_atomic_capable_variables(); +} + +// Metal only supports relaxed memory order for now +const char *CompilerMSL::get_memory_order(uint32_t) +{ + return "memory_order_relaxed"; +} + +// Override for MSL-specific extension syntax instructions +void CompilerMSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop, const uint32_t *args, uint32_t count) +{ + GLSLstd450 op = static_cast(eop); + + switch (op) + { + case GLSLstd450Atan2: + emit_binary_func_op(result_type, id, args[0], args[1], "atan2"); + break; + case GLSLstd450InverseSqrt: + emit_unary_func_op(result_type, id, args[0], "rsqrt"); + break; + case GLSLstd450RoundEven: + emit_unary_func_op(result_type, id, args[0], "rint"); + break; + + case GLSLstd450FindSMsb: + emit_unary_func_op(result_type, id, args[0], "findSMSB"); + break; + case GLSLstd450FindUMsb: + emit_unary_func_op(result_type, id, args[0], "findUMSB"); + break; + + case GLSLstd450PackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_snorm4x8"); + break; + case GLSLstd450PackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_unorm4x8"); + break; + case GLSLstd450PackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_snorm2x16"); + break; + case GLSLstd450PackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "pack_float_to_unorm2x16"); + break; + + case GLSLstd450PackHalf2x16: + { + auto expr = join("as_type(half2(", to_expression(args[0]), "))"); + emit_op(result_type, id, expr, should_forward(args[0])); + 
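+		// as_type performs a bit-level reinterpretation here: the two components are
+		// first converted to a half2, whose bits are then returned as a single 32-bit
+		// uint, mirroring GLSL packHalf2x16().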
inherit_expression_dependencies(id, args[0]); + break; + } + + case GLSLstd450UnpackSnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpack_snorm4x8_to_float"); + break; + case GLSLstd450UnpackUnorm4x8: + emit_unary_func_op(result_type, id, args[0], "unpack_unorm4x8_to_float"); + break; + case GLSLstd450UnpackSnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpack_snorm2x16_to_float"); + break; + case GLSLstd450UnpackUnorm2x16: + emit_unary_func_op(result_type, id, args[0], "unpack_unorm2x16_to_float"); + break; + + case GLSLstd450UnpackHalf2x16: + { + auto expr = join("float2(as_type(", to_expression(args[0]), "))"); + emit_op(result_type, id, expr, should_forward(args[0])); + inherit_expression_dependencies(id, args[0]); + break; + } + + case GLSLstd450PackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "unsupported_GLSLstd450PackDouble2x32"); // Currently unsupported + break; + case GLSLstd450UnpackDouble2x32: + emit_unary_func_op(result_type, id, args[0], "unsupported_GLSLstd450UnpackDouble2x32"); // Currently unsupported + break; + + case GLSLstd450MatrixInverse: + { + auto &mat_type = get(result_type); + switch (mat_type.columns) + { + case 2: + emit_unary_func_op(result_type, id, args[0], "spvInverse2x2"); + break; + case 3: + emit_unary_func_op(result_type, id, args[0], "spvInverse3x3"); + break; + case 4: + emit_unary_func_op(result_type, id, args[0], "spvInverse4x4"); + break; + default: + break; + } + break; + } + + case GLSLstd450FMin: + // If the result type isn't float, don't bother calling the specific + // precise::/fast:: version. Metal doesn't have those for half and + // double types. + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "min"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "fast::min"); + break; + + case GLSLstd450FMax: + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "max"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "fast::max"); + break; + + case GLSLstd450FClamp: + // TODO: If args[1] is 0 and args[2] is 1, emit a saturate() call. + if (get(result_type).basetype != SPIRType::Float) + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp"); + else + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fast::clamp"); + break; + + case GLSLstd450NMin: + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "min"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "precise::min"); + break; + + case GLSLstd450NMax: + if (get(result_type).basetype != SPIRType::Float) + emit_binary_func_op(result_type, id, args[0], args[1], "max"); + else + emit_binary_func_op(result_type, id, args[0], args[1], "precise::max"); + break; + + case GLSLstd450NClamp: + // TODO: If args[1] is 0 and args[2] is 1, emit a saturate() call. 
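+		// Unlike the F* variants above, the N* variants must honour NaN operands, so the
+		// precise:: functions are used instead of the fast:: ones; fast math in MSL is
+		// allowed to assume arguments are not NaN.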
+ if (get(result_type).basetype != SPIRType::Float) + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "clamp"); + else + emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "precise::clamp"); + break; + + // TODO: + // GLSLstd450InterpolateAtCentroid (centroid_no_perspective qualifier) + // GLSLstd450InterpolateAtSample (sample_no_perspective qualifier) + // GLSLstd450InterpolateAtOffset + + default: + CompilerGLSL::emit_glsl_op(result_type, id, eop, args, count); + break; + } +} + +// Emit a structure declaration for the specified interface variable. +void CompilerMSL::emit_interface_block(uint32_t ib_var_id) +{ + if (ib_var_id) + { + auto &ib_var = get(ib_var_id); + auto &ib_type = get(ib_var.basetype); + assert(ib_type.basetype == SPIRType::Struct && !ib_type.member_types.empty()); + emit_struct(ib_type); + } +} + +// Emits the declaration signature of the specified function. +// If this is the entry point function, Metal-specific return value and function arguments are added. +void CompilerMSL::emit_function_prototype(SPIRFunction &func, const Bitset &) +{ + if (func.self != ir.default_entry_point) + add_function_overload(func); + + local_variable_names = resource_names; + string decl; + + processing_entry_point = (func.self == ir.default_entry_point); + + auto &type = get(func.return_type); + + if (type.array.empty()) + { + decl += func_type_decl(type); + } + else + { + // We cannot return arrays in MSL, so "return" through an out variable. + decl = "void"; + } + + decl += " "; + decl += to_name(func.self); + decl += "("; + + if (!type.array.empty()) + { + // Fake arrays returns by writing to an out array instead. + decl += "thread "; + decl += type_to_glsl(type); + decl += " (&SPIRV_Cross_return_value)"; + decl += type_to_array_glsl(type); + if (!func.arguments.empty()) + decl += ", "; + } + + if (processing_entry_point) + { + decl += entry_point_args(!func.arguments.empty()); + + // If entry point function has variables that require early declaration, + // ensure they each have an empty initializer, creating one if needed. + // This is done at this late stage because the initialization expression + // is cleared after each compilation pass. + for (auto var_id : vars_needing_early_declaration) + { + auto &ed_var = get(var_id); + if (!ed_var.initializer) + ed_var.initializer = ir.increase_bound_by(1); + + set(ed_var.initializer, "{}", ed_var.basetype, true); + } + } + + for (auto &arg : func.arguments) + { + uint32_t name_id = arg.id; + + string address_space; + auto *var = maybe_get(arg.id); + if (var) + { + // If we need to modify the name of the variable, make sure we modify the original variable. + // Our alias is just a shadow variable. + if (arg.alias_global_variable && var->basevariable) + name_id = var->basevariable; + + var->parameter = &arg; // Hold a pointer to the parameter so we can invalidate the readonly field if needed. 
+ address_space = get_argument_address_space(*var); + } + + add_local_variable_name(name_id); + + if (!address_space.empty()) + decl += address_space + " "; + decl += argument_decl(arg); + + // Manufacture automatic sampler arg for SampledImage texture + auto &arg_type = get(arg.type); + if (arg_type.basetype == SPIRType::SampledImage && arg_type.image.dim != DimBuffer) + decl += join(", thread const ", sampler_type(arg_type), " ", to_sampler_expression(arg.id)); + + if (&arg != &func.arguments.back()) + decl += ", "; + } + + decl += ")"; + statement(decl); +} + +// Returns the texture sampling function string for the specified image and sampling characteristics. +string CompilerMSL::to_function_name(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool, bool, + bool has_offset, bool, bool has_dref, uint32_t) +{ + // Special-case gather. We have to alter the component being looked up + // in the swizzle case. + if (msl_options.swizzle_texture_samples && is_gather) + { + string fname = imgtype.image.depth ? "spvGatherCompareSwizzle" : "spvGatherSwizzle"; + fname += "<" + type_to_glsl(get(imgtype.image.type)) + ", metal::" + type_to_glsl(imgtype); + // Add the arg types ourselves. Yes, this sucks, but Clang can't + // deduce template pack parameters in the middle of an argument list. + switch (imgtype.image.dim) + { + case Dim2D: + fname += ", float2"; + if (imgtype.image.arrayed) + fname += ", uint"; + if (imgtype.image.depth) + fname += ", float"; + if (!imgtype.image.depth || has_offset) + fname += ", int2"; + break; + case DimCube: + fname += ", float3"; + if (imgtype.image.arrayed) + fname += ", uint"; + if (imgtype.image.depth) + fname += ", float"; + break; + default: + SPIRV_CROSS_THROW("Invalid texture dimension for gather op."); + } + fname += ">"; + return fname; + } + + auto *combined = maybe_get(img); + + // Texture reference + string fname = to_expression(combined ? combined->image : img) + "."; + if (msl_options.swizzle_texture_samples && !is_gather && is_sampled_image_type(imgtype)) + fname = "spvTextureSwizzle(" + fname; + + // Texture function and sampler + if (is_fetch) + fname += "read"; + else if (is_gather) + fname += "gather"; + else + fname += "sample"; + + if (has_dref) + fname += "_compare"; + + return fname; +} + +// Returns the function args for a texture sampling function for the specified image and sampling characteristics. +string CompilerMSL::to_function_args(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj, + uint32_t coord, uint32_t, uint32_t dref, uint32_t grad_x, uint32_t grad_y, + uint32_t lod, uint32_t coffset, uint32_t offset, uint32_t bias, uint32_t comp, + uint32_t sample, bool *p_forward) +{ + string farg_str; + if (!is_fetch) + farg_str += to_sampler_expression(img); + + if (msl_options.swizzle_texture_samples && is_gather) + { + if (!farg_str.empty()) + farg_str += ", "; + + auto *combined = maybe_get(img); + farg_str += to_expression(combined ? 
combined->image : img); + } + + // Texture coordinates + bool forward = should_forward(coord); + auto coord_expr = to_enclosed_expression(coord); + auto &coord_type = expression_type(coord); + bool coord_is_fp = type_is_floating_point(coord_type); + bool is_cube_fetch = false; + + string tex_coords = coord_expr; + const char *alt_coord = ""; + + switch (imgtype.image.dim) + { + + case Dim1D: + if (coord_type.vecsize > 1) + tex_coords += ".x"; + + if (is_fetch) + tex_coords = "uint(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + + alt_coord = ".y"; + + break; + + case DimBuffer: + if (coord_type.vecsize > 1) + tex_coords += ".x"; + + // Metal texel buffer textures are 2D, so convert 1D coord to 2D. + if (is_fetch) + tex_coords = "spvTexelBufferCoord(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + + alt_coord = ".y"; + + break; + + case DimSubpassData: + if (imgtype.image.ms) + tex_coords = "uint2(gl_FragCoord.xy)"; + else + tex_coords = join("uint2(gl_FragCoord.xy), 0"); + break; + + case Dim2D: + if (coord_type.vecsize > 2) + tex_coords += ".xy"; + + if (is_fetch) + tex_coords = "uint2(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + + alt_coord = ".z"; + + break; + + case Dim3D: + if (coord_type.vecsize > 3) + tex_coords += ".xyz"; + + if (is_fetch) + tex_coords = "uint3(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + + alt_coord = ".w"; + + break; + + case DimCube: + if (is_fetch) + { + is_cube_fetch = true; + tex_coords += ".xy"; + tex_coords = "uint2(" + round_fp_tex_coords(tex_coords, coord_is_fp) + ")"; + } + else + { + if (coord_type.vecsize > 3) + tex_coords += ".xyz"; + } + + alt_coord = ".w"; + + break; + + default: + break; + } + + if (is_fetch && offset) + { + // Fetch offsets must be applied directly to the coordinate. + forward = forward && should_forward(offset); + auto &type = expression_type(offset); + if (type.basetype != SPIRType::UInt) + tex_coords += " + " + bitcast_expression(SPIRType::UInt, offset); + else + tex_coords += " + " + to_enclosed_expression(offset); + } + else if (is_fetch && coffset) + { + // Fetch offsets must be applied directly to the coordinate. + forward = forward && should_forward(coffset); + auto &type = expression_type(coffset); + if (type.basetype != SPIRType::UInt) + tex_coords += " + " + bitcast_expression(SPIRType::UInt, coffset); + else + tex_coords += " + " + to_enclosed_expression(coffset); + } + + // If projection, use alt coord as divisor + if (is_proj) + tex_coords += " / " + coord_expr + alt_coord; + + if (!farg_str.empty()) + farg_str += ", "; + farg_str += tex_coords; + + // If fetch from cube, add face explicitly + if (is_cube_fetch) + { + // Special case for cube arrays, face and layer are packed in one dimension. + if (imgtype.image.arrayed) + farg_str += ", uint(" + join(coord_expr, ".z) % 6u"); + else + farg_str += ", uint(" + round_fp_tex_coords(coord_expr + ".z", coord_is_fp) + ")"; + } + + // If array, use alt coord + if (imgtype.image.arrayed) + { + // Special case for cube arrays, face and layer are packed in one dimension. 
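+			// Illustrative: the third coordinate packs face + 6 * layer, so the face
+			// index is uint(coord.z) % 6u (handled in the cube-fetch case above) and
+			// the array layer is uint(coord.z) / 6u (handled just below).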
+ if (imgtype.image.dim == DimCube && is_fetch) + farg_str += ", uint(" + join(coord_expr, ".z) / 6u"); + else + farg_str += ", uint(" + round_fp_tex_coords(coord_expr + alt_coord, coord_is_fp) + ")"; + } + + // Depth compare reference value + if (dref) + { + forward = forward && should_forward(dref); + farg_str += ", "; + farg_str += to_expression(dref); + + if (msl_options.is_macos() && (grad_x || grad_y)) + { + // For sample compare, MSL does not support gradient2d for all targets (only iOS apparently according to docs). + // However, the most common case here is to have a constant gradient of 0, as that is the only way to express + // LOD == 0 in GLSL with sampler2DArrayShadow (cascaded shadow mapping). + // We will detect a compile-time constant 0 value for gradient and promote that to level(0) on MSL. + bool constant_zero_x = !grad_x || expression_is_constant_null(grad_x); + bool constant_zero_y = !grad_y || expression_is_constant_null(grad_y); + if (constant_zero_x && constant_zero_y) + { + lod = 0; + grad_x = 0; + grad_y = 0; + farg_str += ", level(0)"; + } + else + { + SPIRV_CROSS_THROW("Using non-constant 0.0 gradient() qualifier for sample_compare. This is not " + "supported in MSL macOS."); + } + } + + if (msl_options.is_macos() && bias) + { + // Bias is not supported either on macOS with sample_compare. + // Verify it is compile-time zero, and drop the argument. + if (expression_is_constant_null(bias)) + { + bias = 0; + } + else + { + SPIRV_CROSS_THROW( + "Using non-constant 0.0 bias() qualifier for sample_compare. This is not supported in MSL macOS."); + } + } + } + + // LOD Options + // Metal does not support LOD for 1D textures. + if (bias && imgtype.image.dim != Dim1D) + { + forward = forward && should_forward(bias); + farg_str += ", bias(" + to_expression(bias) + ")"; + } + + // Metal does not support LOD for 1D textures. + if (lod && imgtype.image.dim != Dim1D) + { + forward = forward && should_forward(lod); + if (is_fetch) + { + farg_str += ", " + to_expression(lod); + } + else + { + farg_str += ", level(" + to_expression(lod) + ")"; + } + } + else if (is_fetch && !lod && imgtype.image.dim != Dim1D && imgtype.image.dim != DimBuffer && !imgtype.image.ms && + imgtype.image.sampled != 2) + { + // Lod argument is optional in OpImageFetch, but we require a LOD value, pick 0 as the default. + // Check for sampled type as well, because is_fetch is also used for OpImageRead in MSL. + farg_str += ", 0"; + } + + // Metal does not support LOD for 1D textures. 
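+	// Explicit gradients are forwarded as the MSL gradient2d()/gradient3d()/gradientcube()
+	// sample options, roughly: tex.sample(smp, uv, gradient2d(dPdx, dPdy)) (names hypothetical).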
+ if ((grad_x || grad_y) && imgtype.image.dim != Dim1D) + { + forward = forward && should_forward(grad_x); + forward = forward && should_forward(grad_y); + string grad_opt; + switch (imgtype.image.dim) + { + case Dim2D: + grad_opt = "2d"; + break; + case Dim3D: + grad_opt = "3d"; + break; + case DimCube: + grad_opt = "cube"; + break; + default: + grad_opt = "unsupported_gradient_dimension"; + break; + } + farg_str += ", gradient" + grad_opt + "(" + to_expression(grad_x) + ", " + to_expression(grad_y) + ")"; + } + + // Add offsets + string offset_expr; + if (coffset && !is_fetch) + { + forward = forward && should_forward(coffset); + offset_expr = to_expression(coffset); + } + else if (offset && !is_fetch) + { + forward = forward && should_forward(offset); + offset_expr = to_expression(offset); + } + + if (!offset_expr.empty()) + { + switch (imgtype.image.dim) + { + case Dim2D: + if (coord_type.vecsize > 2) + offset_expr = enclose_expression(offset_expr) + ".xy"; + + farg_str += ", " + offset_expr; + break; + + case Dim3D: + if (coord_type.vecsize > 3) + offset_expr = enclose_expression(offset_expr) + ".xyz"; + + farg_str += ", " + offset_expr; + break; + + default: + break; + } + } + + if (comp) + { + // If 2D has gather component, ensure it also has an offset arg + if (imgtype.image.dim == Dim2D && offset_expr.empty()) + farg_str += ", int2(0)"; + + forward = forward && should_forward(comp); + farg_str += ", " + to_component_argument(comp); + } + + if (sample) + { + farg_str += ", "; + farg_str += to_expression(sample); + } + + if (msl_options.swizzle_texture_samples && is_sampled_image_type(imgtype)) + { + // Add the swizzle constant from the swizzle buffer. + if (!is_gather) + farg_str += ")"; + // Get the original input variable for this image. + uint32_t img_var = img; + + auto *combined = maybe_get(img_var); + if (combined) + img_var = combined->image; + + if (auto *var = maybe_get_backing_variable(img_var)) + { + if (var->parameter && !var->parameter->alias_global_variable) + SPIRV_CROSS_THROW("Cannot yet map non-aliased parameter to Metal resource!"); + img_var = var->self; + } + auto &aux_type = expression_type(aux_buffer_id); + farg_str += ", " + to_name(aux_buffer_id) + "." + to_member_name(aux_type, k_aux_mbr_idx_swizzle_const) + "[" + + convert_to_string(get_metal_resource_index(get(img_var), SPIRType::Image)) + "]"; + used_aux_buffer = true; + } + + *p_forward = forward; + + return farg_str; +} + +// If the texture coordinates are floating point, invokes MSL round() function to round them. +string CompilerMSL::round_fp_tex_coords(string tex_coords, bool coord_is_fp) +{ + return coord_is_fp ? ("round(" + tex_coords + ")") : tex_coords; +} + +// Returns a string to use in an image sampling function argument. +// The ID must be a scalar constant. +string CompilerMSL::to_component_argument(uint32_t id) +{ + if (ir.ids[id].get_type() != TypeConstant) + { + SPIRV_CROSS_THROW("ID " + to_string(id) + " is not an OpConstant."); + return "component::x"; + } + + uint32_t component_index = get(id).scalar(); + switch (component_index) + { + case 0: + return "component::x"; + case 1: + return "component::y"; + case 2: + return "component::z"; + case 3: + return "component::w"; + + default: + SPIRV_CROSS_THROW("The value (" + to_string(component_index) + ") of OpConstant ID " + to_string(id) + + " is not a valid Component index, which must be one of 0, 1, 2, or 3."); + return "component::x"; + } +} + +// Establish sampled image as expression object and assign the sampler to it. 
+void CompilerMSL::emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) +{ + set(result_id, result_type, image_id, samp_id); +} + +// Returns a string representation of the ID, usable as a function arg. +// Manufacture automatic sampler arg for SampledImage texture. +string CompilerMSL::to_func_call_arg(uint32_t id) +{ + string arg_str = CompilerGLSL::to_func_call_arg(id); + + // Manufacture automatic sampler arg if the arg is a SampledImage texture. + auto &type = expression_type(id); + if (type.basetype == SPIRType::SampledImage && type.image.dim != DimBuffer) + arg_str += ", " + to_sampler_expression(id); + + return arg_str; +} + +// If the ID represents a sampled image that has been assigned a sampler already, +// generate an expression for the sampler, otherwise generate a fake sampler name +// by appending a suffix to the expression constructed from the ID. +string CompilerMSL::to_sampler_expression(uint32_t id) +{ + auto *combined = maybe_get(id); + auto expr = to_expression(combined ? combined->image : id); + auto index = expr.find_first_of('['); + + uint32_t samp_id = 0; + if (combined) + samp_id = combined->sampler; + + if (index == string::npos) + return samp_id ? to_expression(samp_id) : expr + sampler_name_suffix; + else + { + auto image_expr = expr.substr(0, index); + auto array_expr = expr.substr(index); + return samp_id ? to_expression(samp_id) : (image_expr + sampler_name_suffix + array_expr); + } +} + +// Checks whether the ID is a row_major matrix that requires conversion before use +bool CompilerMSL::is_non_native_row_major_matrix(uint32_t id) +{ + // Natively supported row-major matrices do not need to be converted. + if (backend.native_row_major_matrix) + return false; + + // Non-matrix or column-major matrix types do not need to be converted. + if (!ir.meta[id].decoration.decoration_flags.get(DecorationRowMajor)) + return false; + + // Generate a function that will swap matrix elements from row-major to column-major. + // Packed row-matrix should just use transpose() function. + if (!has_decoration(id, DecorationCPacked)) + { + const auto type = expression_type(id); + add_convert_row_major_matrix_function(type.columns, type.vecsize); + } + + return true; +} + +// Checks whether the member is a row_major matrix that requires conversion before use +bool CompilerMSL::member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) +{ + // Natively supported row-major matrices do not need to be converted. + if (backend.native_row_major_matrix) + return false; + + // Non-matrix or column-major matrix types do not need to be converted. + if (!combined_decoration_for_member(type, index).get(DecorationRowMajor)) + return false; + + // Generate a function that will swap matrix elements from row-major to column-major. + // Packed row-matrix should just use transpose() function. + if (!has_member_decoration(type.self, index, DecorationCPacked)) + { + const auto mbr_type = get(type.member_types[index]); + add_convert_row_major_matrix_function(mbr_type.columns, mbr_type.vecsize); + } + + return true; +} + +// Adds a function suitable for converting a non-square row-major matrix to a column-major matrix. 
+void CompilerMSL::add_convert_row_major_matrix_function(uint32_t cols, uint32_t rows) +{ + SPVFuncImpl spv_func; + if (cols == rows) // Square matrix...just use transpose() function + return; + else if (cols == 2 && rows == 3) + spv_func = SPVFuncImplRowMajor2x3; + else if (cols == 2 && rows == 4) + spv_func = SPVFuncImplRowMajor2x4; + else if (cols == 3 && rows == 2) + spv_func = SPVFuncImplRowMajor3x2; + else if (cols == 3 && rows == 4) + spv_func = SPVFuncImplRowMajor3x4; + else if (cols == 4 && rows == 2) + spv_func = SPVFuncImplRowMajor4x2; + else if (cols == 4 && rows == 3) + spv_func = SPVFuncImplRowMajor4x3; + else + SPIRV_CROSS_THROW("Could not convert row-major matrix."); + + auto rslt = spv_function_implementations.insert(spv_func); + if (rslt.second) + { + add_pragma_line("#pragma clang diagnostic ignored \"-Wmissing-prototypes\""); + force_recompile = true; + } +} + +// Wraps the expression string in a function call that converts the +// row_major matrix result of the expression to a column_major matrix. +string CompilerMSL::convert_row_major_matrix(string exp_str, const SPIRType &exp_type, bool is_packed) +{ + strip_enclosed_expression(exp_str); + + string func_name; + + // Square and packed matrices can just use transpose + if (exp_type.columns == exp_type.vecsize || is_packed) + func_name = "transpose"; + else + func_name = string("spvConvertFromRowMajor") + to_string(exp_type.columns) + "x" + to_string(exp_type.vecsize); + + return join(func_name, "(", exp_str, ")"); +} + +// Called automatically at the end of the entry point function +void CompilerMSL::emit_fixup() +{ + if ((get_entry_point().model == ExecutionModelVertex) && stage_out_var_id && !qual_pos_var_name.empty()) + { + if (options.vertex.fixup_clipspace) + statement(qual_pos_var_name, ".z = (", qual_pos_var_name, ".z + ", qual_pos_var_name, + ".w) * 0.5; // Adjust clip-space for Metal"); + + if (options.vertex.flip_vert_y) + statement(qual_pos_var_name, ".y = -(", qual_pos_var_name, ".y);", " // Invert Y-axis for Metal"); + } +} + +// Emit a structure member, padding and packing to maintain the correct memeber alignments. +void CompilerMSL::emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const string &qualifier, uint32_t) +{ + auto &membertype = get(member_type_id); + + // If this member requires padding to maintain alignment, emit a dummy padding member. + MSLStructMemberKey key = get_struct_member_key(type.self, index); + uint32_t pad_len = struct_member_padding[key]; + if (pad_len > 0) + statement("char pad", to_string(index), "[", to_string(pad_len), "];"); + + // If this member is packed, mark it as so. + string pack_pfx = ""; + if (member_is_packed_type(type, index)) + { + pack_pfx = "packed_"; + + // If we're packing a matrix, output an appropriate typedef + if (membertype.vecsize > 1 && membertype.columns > 1) + { + string base_type = membertype.width == 16 ? 
"half" : "float"; + string td_line = "typedef "; + td_line += base_type + to_string(membertype.vecsize) + "x" + to_string(membertype.columns); + td_line += " " + pack_pfx; + td_line += base_type + to_string(membertype.columns) + "x" + to_string(membertype.vecsize); + td_line += ";"; + add_typedef_line(td_line); + } + } + + statement(pack_pfx, type_to_glsl(membertype), " ", qualifier, to_member_name(type, index), + member_attribute_qualifier(type, index), type_to_array_glsl(membertype), ";"); +} + +// Return a MSL qualifier for the specified function attribute member +string CompilerMSL::member_attribute_qualifier(const SPIRType &type, uint32_t index) +{ + auto &execution = get_entry_point(); + + uint32_t mbr_type_id = type.member_types[index]; + auto &mbr_type = get(mbr_type_id); + + BuiltIn builtin = BuiltInMax; + bool is_builtin = is_member_builtin(type, index, &builtin); + + // Vertex function inputs + if (execution.model == ExecutionModelVertex && type.storage == StorageClassInput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInVertexId: + case BuiltInVertexIndex: + case BuiltInBaseVertex: + case BuiltInInstanceId: + case BuiltInInstanceIndex: + case BuiltInBaseInstance: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + default: + return ""; + } + } + uint32_t locn = get_ordered_member_location(type.self, index); + if (locn != k_unknown_location) + return string(" [[attribute(") + convert_to_string(locn) + ")]]"; + } + + // Vertex function outputs + if (execution.model == ExecutionModelVertex && type.storage == StorageClassOutput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInPointSize: + // Only mark the PointSize builtin if really rendering points. + // Some shaders may include a PointSize builtin even when used to render + // non-point topologies, and Metal will reject this builtin when compiling + // the shader into a render pipeline that uses a non-point topology. + return msl_options.enable_point_size_builtin ? (string(" [[") + builtin_qualifier(builtin) + "]]") : ""; + + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + /* fallthrough */ + case BuiltInPosition: + case BuiltInLayer: + case BuiltInClipDistance: + return string(" [[") + builtin_qualifier(builtin) + "]]" + (mbr_type.array.empty() ? 
"" : " "); + + default: + return ""; + } + } + uint32_t comp; + uint32_t locn = get_ordered_member_location(type.self, index, &comp); + if (locn != k_unknown_location) + { + if (comp != k_unknown_component) + return string(" [[user(locn") + convert_to_string(locn) + "_" + convert_to_string(comp) + ")]]"; + else + return string(" [[user(locn") + convert_to_string(locn) + ")]]"; + } + } + + // Fragment function inputs + if (execution.model == ExecutionModelFragment && type.storage == StorageClassInput) + { + string quals = ""; + if (is_builtin) + { + switch (builtin) + { + case BuiltInFrontFacing: + case BuiltInPointCoord: + case BuiltInFragCoord: + case BuiltInSampleId: + case BuiltInSampleMask: + case BuiltInLayer: + quals = builtin_qualifier(builtin); + + default: + break; + } + } + else + { + uint32_t comp; + uint32_t locn = get_ordered_member_location(type.self, index, &comp); + if (locn != k_unknown_location) + { + if (comp != k_unknown_component) + quals = string("user(locn") + convert_to_string(locn) + "_" + convert_to_string(comp) + ")"; + else + quals = string("user(locn") + convert_to_string(locn) + ")"; + } + } + // Don't bother decorating integers with the 'flat' attribute; it's + // the default (in fact, the only option). Also don't bother with the + // FragCoord builtin; it's always noperspective on Metal. + if (!type_is_integral(mbr_type) && (!is_builtin || builtin != BuiltInFragCoord)) + { + if (has_member_decoration(type.self, index, DecorationFlat)) + { + if (!quals.empty()) + quals += ", "; + quals += "flat"; + } + else if (has_member_decoration(type.self, index, DecorationCentroid)) + { + if (!quals.empty()) + quals += ", "; + if (has_member_decoration(type.self, index, DecorationNoPerspective)) + quals += "centroid_no_perspective"; + else + quals += "centroid_perspective"; + } + else if (has_member_decoration(type.self, index, DecorationSample)) + { + if (!quals.empty()) + quals += ", "; + if (has_member_decoration(type.self, index, DecorationNoPerspective)) + quals += "sample_no_perspective"; + else + quals += "sample_perspective"; + } + else if (has_member_decoration(type.self, index, DecorationNoPerspective)) + { + if (!quals.empty()) + quals += ", "; + quals += "center_no_perspective"; + } + } + if (!quals.empty()) + return " [[" + quals + "]]"; + } + + // Fragment function outputs + if (execution.model == ExecutionModelFragment && type.storage == StorageClassOutput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInSampleMask: + case BuiltInFragDepth: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + + default: + return ""; + } + } + uint32_t locn = get_ordered_member_location(type.self, index); + if (locn != k_unknown_location && has_member_decoration(type.self, index, DecorationIndex)) + return join(" [[color(", locn, "), index(", get_member_decoration(type.self, index, DecorationIndex), + ")]]"); + else if (locn != k_unknown_location) + return join(" [[color(", locn, ")]]"); + else if (has_member_decoration(type.self, index, DecorationIndex)) + return join(" [[index(", get_member_decoration(type.self, index, DecorationIndex), ")]]"); + else + return ""; + } + + // Compute function inputs + if (execution.model == ExecutionModelGLCompute && type.storage == StorageClassInput) + { + if (is_builtin) + { + switch (builtin) + { + case BuiltInGlobalInvocationId: + case BuiltInWorkgroupId: + case BuiltInNumWorkgroups: + case BuiltInLocalInvocationId: + case BuiltInLocalInvocationIndex: + return string(" [[") + builtin_qualifier(builtin) + "]]"; + 
+ default: + return ""; + } + } + } + + return ""; +} + +// Returns the location decoration of the member with the specified index in the specified type. +// If the location of the member has been explicitly set, that location is used. If not, this +// function assumes the members are ordered in their location order, and simply returns the +// index as the location. +uint32_t CompilerMSL::get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp) +{ + auto &m = ir.meta.at(type_id); + if (index < m.members.size()) + { + auto &dec = m.members[index]; + if (comp) + { + if (dec.decoration_flags.get(DecorationComponent)) + *comp = dec.component; + else + *comp = k_unknown_component; + } + if (dec.decoration_flags.get(DecorationLocation)) + return dec.location; + } + + return index; +} + +// Returns the type declaration for a function, including the +// entry type if the current function is the entry point function +string CompilerMSL::func_type_decl(SPIRType &type) +{ + // The regular function return type. If not processing the entry point function, that's all we need + string return_type = type_to_glsl(type) + type_to_array_glsl(type); + if (!processing_entry_point) + return return_type; + + // If an outgoing interface block has been defined, and it should be returned, override the entry point return type + bool ep_should_return_output = !get_is_rasterization_disabled(); + if (stage_out_var_id && ep_should_return_output) + { + auto &so_var = get(stage_out_var_id); + auto &so_type = get_variable_data_type(so_var); + return_type = type_to_glsl(so_type) + type_to_array_glsl(type); + } + + // Prepend a entry type, based on the execution model + string entry_type; + auto &execution = get_entry_point(); + switch (execution.model) + { + case ExecutionModelVertex: + entry_type = "vertex"; + break; + case ExecutionModelFragment: + entry_type = + execution.flags.get(ExecutionModeEarlyFragmentTests) ? "[[ early_fragment_tests ]] fragment" : "fragment"; + break; + case ExecutionModelGLCompute: + case ExecutionModelKernel: + entry_type = "kernel"; + break; + default: + entry_type = "unknown"; + break; + } + + return entry_type + " " + return_type; +} + +// In MSL, address space qualifiers are required for all pointer or reference variables +string CompilerMSL::get_argument_address_space(const SPIRVariable &argument) +{ + const auto &type = get(argument.basetype); + + switch (type.storage) + { + case StorageClassWorkgroup: + return "threadgroup"; + + case StorageClassStorageBuffer: + { + bool readonly = ir.get_buffer_block_flags(argument).get(DecorationNonWritable); + return readonly ? "const device" : "device"; + } + + case StorageClassUniform: + case StorageClassUniformConstant: + case StorageClassPushConstant: + if (type.basetype == SPIRType::Struct) + { + bool ssbo = has_decoration(type.self, DecorationBufferBlock); + if (ssbo) + { + bool readonly = ir.get_buffer_block_flags(argument).get(DecorationNonWritable); + return readonly ? "const device" : "device"; + } + else + return "constant"; + } + break; + + case StorageClassFunction: + case StorageClassGeneric: + // No address space for plain values. + return type.pointer ? 
"thread" : ""; + + default: + break; + } + + return "thread"; +} + +string CompilerMSL::get_type_address_space(const SPIRType &type) +{ + switch (type.storage) + { + case StorageClassWorkgroup: + return "threadgroup"; + + case StorageClassStorageBuffer: + // FIXME: Need to use 'const device' for pointers into non-writable SSBOs + return "device"; + + case StorageClassUniform: + case StorageClassUniformConstant: + case StorageClassPushConstant: + if (type.basetype == SPIRType::Struct) + { + bool ssbo = has_decoration(type.self, DecorationBufferBlock); + // FIXME: Need to use 'const device' for pointers into non-writable SSBOs + if (ssbo) + return "device"; + else + return "constant"; + } + break; + + case StorageClassFunction: + case StorageClassGeneric: + // No address space for plain values. + return type.pointer ? "thread" : ""; + + default: + break; + } + + return "thread"; +} + +// Returns a string containing a comma-delimited list of args for the entry point function +string CompilerMSL::entry_point_args(bool append_comma) +{ + string ep_args; + + // Stage-in structure + if (stage_in_var_id) + { + auto &var = get(stage_in_var_id); + auto &type = get_variable_data_type(var); + + if (!ep_args.empty()) + ep_args += ", "; + + ep_args += type_to_glsl(type) + " " + to_name(var.self) + " [[stage_in]]"; + } + + // Output resources, sorted by resource index & type + // We need to sort to work around a bug on macOS 10.13 with NVidia drivers where switching between shaders + // with different order of buffers can result in issues with buffer assignments inside the driver. + struct Resource + { + Variant *id; + string name; + SPIRType::BaseType basetype; + uint32_t index; + }; + + vector resources; + + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + auto &type = get_variable_data_type(var); + + uint32_t var_id = var.self; + + if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant || + var.storage == StorageClassPushConstant || var.storage == StorageClassStorageBuffer) && + !is_hidden_variable(var)) + { + if (type.basetype == SPIRType::SampledImage) + { + resources.push_back( + { &id, to_name(var_id), SPIRType::Image, get_metal_resource_index(var, SPIRType::Image) }); + + if (type.image.dim != DimBuffer && constexpr_samplers.count(var_id) == 0) + { + resources.push_back({ &id, to_sampler_expression(var_id), SPIRType::Sampler, + get_metal_resource_index(var, SPIRType::Sampler) }); + } + } + else if (constexpr_samplers.count(var_id) == 0) + { + // constexpr samplers are not declared as resources. + resources.push_back( + { &id, to_name(var_id), type.basetype, get_metal_resource_index(var, type.basetype) }); + } + } + } + } + + std::sort(resources.begin(), resources.end(), [](const Resource &lhs, const Resource &rhs) { + return tie(lhs.basetype, lhs.index) < tie(rhs.basetype, rhs.index); + }); + + for (auto &r : resources) + { + auto &var = r.id->get(); + auto &type = get_variable_data_type(var); + + uint32_t var_id = var.self; + + switch (r.basetype) + { + case SPIRType::Struct: + { + auto &m = ir.meta.at(type.self); + if (m.members.size() == 0) + break; + if (!type.array.empty()) + { + if (type.array.size() > 1) + SPIRV_CROSS_THROW("Arrays of arrays of buffers are not supported."); + + // Metal doesn't directly support this, so we must expand the + // array. We'll declare a local array to hold these elements + // later. 
+ uint32_t array_size = to_array_size_literal(type); + + if (array_size == 0) + SPIRV_CROSS_THROW("Unsized arrays of buffers are not supported in MSL."); + + buffer_arrays.push_back(var_id); + for (uint32_t i = 0; i < array_size; ++i) + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "* " + r.name + "_" + + convert_to_string(i); + ep_args += " [[buffer(" + convert_to_string(r.index + i) + ")]]"; + } + } + else + { + if (!ep_args.empty()) + ep_args += ", "; + ep_args += get_argument_address_space(var) + " " + type_to_glsl(type) + "& " + r.name; + ep_args += " [[buffer(" + convert_to_string(r.index) + ")]]"; + } + break; + } + case SPIRType::Sampler: + if (!ep_args.empty()) + ep_args += ", "; + ep_args += sampler_type(type) + " " + r.name; + ep_args += " [[sampler(" + convert_to_string(r.index) + ")]]"; + break; + case SPIRType::Image: + if (!ep_args.empty()) + ep_args += ", "; + ep_args += image_type_glsl(type, var_id) + " " + r.name; + ep_args += " [[texture(" + convert_to_string(r.index) + ")]]"; + break; + default: + SPIRV_CROSS_THROW("Unexpected resource type"); + break; + } + } + + // Builtin variables + for (auto &id : ir.ids) + { + if (id.get_type() == TypeVariable) + { + auto &var = id.get(); + + uint32_t var_id = var.self; + BuiltIn bi_type = ir.meta[var_id].decoration.builtin_type; + + // Don't emit SamplePosition as a separate parameter. In the entry + // point, we get that by calling get_sample_position() on the sample ID. + if (var.storage == StorageClassInput && is_builtin_variable(var)) + { + if (bi_type == BuiltInSamplePosition) + { + auto &entry_func = get(ir.default_entry_point); + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), " = get_sample_position(", + to_expression(builtin_sample_id_id), ");"); + }); + } + else if (bi_type == BuiltInHelperInvocation) + { + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("simd_is_helper_thread() is only supported on macOS."); + else if (msl_options.is_macos() && !msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("simd_is_helper_thread() requires version 2.1 on macOS."); + + auto &entry_func = get(ir.default_entry_point); + entry_func.fixup_hooks_in.push_back([=]() { + statement(builtin_type_decl(bi_type), " ", to_expression(var_id), + " = simd_is_helper_thread();"); + }); + } + else + { + if (!ep_args.empty()) + ep_args += ", "; + + ep_args += builtin_type_decl(bi_type) + " " + to_expression(var_id); + ep_args += " [[" + builtin_qualifier(bi_type) + "]]"; + } + } + } + } + + // Vertex and instance index built-ins + if (needs_vertex_idx_arg) + ep_args += built_in_func_arg(BuiltInVertexIndex, !ep_args.empty()); + + if (needs_instance_idx_arg) + ep_args += built_in_func_arg(BuiltInInstanceIndex, !ep_args.empty()); + + if (!ep_args.empty() && append_comma) + ep_args += ", "; + + return ep_args; +} + +// Returns the Metal index of the resource of the specified type as used by the specified variable. +uint32_t CompilerMSL::get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype) +{ + auto &execution = get_entry_point(); + auto &var_dec = ir.meta[var.self].decoration; + uint32_t var_desc_set = (var.storage == StorageClassPushConstant) ? kPushConstDescSet : var_dec.set; + uint32_t var_binding = (var.storage == StorageClassPushConstant) ? 
kPushConstBinding : var_dec.binding; + + // If a matching binding has been specified, find and use it + for (auto p_res_bind : resource_bindings) + { + if (p_res_bind->stage == execution.model && p_res_bind->desc_set == var_desc_set && + p_res_bind->binding == var_binding) + { + + p_res_bind->used_by_shader = true; + switch (basetype) + { + case SPIRType::Struct: + return p_res_bind->msl_buffer; + case SPIRType::Image: + return p_res_bind->msl_texture; + case SPIRType::Sampler: + return p_res_bind->msl_sampler; + default: + return 0; + } + } + } + + // If there is no explicit mapping of bindings to MSL, use the declared binding. + if (has_decoration(var.self, DecorationBinding)) + return get_decoration(var.self, DecorationBinding); + + uint32_t binding_stride = 1; + auto &type = get(var.basetype); + for (uint32_t i = 0; i < uint32_t(type.array.size()); i++) + binding_stride *= type.array_size_literal[i] ? type.array[i] : get(type.array[i]).scalar(); + + // If a binding has not been specified, revert to incrementing resource indices + uint32_t resource_index; + switch (basetype) + { + case SPIRType::Struct: + resource_index = next_metal_resource_index.msl_buffer; + next_metal_resource_index.msl_buffer += binding_stride; + break; + case SPIRType::Image: + resource_index = next_metal_resource_index.msl_texture; + next_metal_resource_index.msl_texture += binding_stride; + break; + case SPIRType::Sampler: + resource_index = next_metal_resource_index.msl_sampler; + next_metal_resource_index.msl_sampler += binding_stride; + break; + default: + resource_index = 0; + break; + } + return resource_index; +} + +string CompilerMSL::argument_decl(const SPIRFunction::Parameter &arg) +{ + auto &var = get(arg.id); + auto &type = get_variable_data_type(var); + auto &var_type = get(arg.type); + StorageClass storage = var_type.storage; + bool is_pointer = var_type.pointer; + + // If we need to modify the name of the variable, make sure we use the original variable. + // Our alias is just a shadow variable. + uint32_t name_id = var.self; + if (arg.alias_global_variable && var.basevariable) + name_id = var.basevariable; + + bool constref = !arg.alias_global_variable && is_pointer && arg.write_count == 0; + + bool type_is_image = type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage || + type.basetype == SPIRType::Sampler; + + // Arrays of images/samplers in MSL are always const. + if (!type.array.empty() && type_is_image) + constref = true; + + string decl; + if (constref) + decl += "const "; + + bool builtin = is_builtin_variable(var); + if (builtin) + decl += builtin_type_decl(static_cast(get_decoration(arg.id, DecorationBuiltIn))); + else if ((storage == StorageClassUniform || storage == StorageClassStorageBuffer) && is_array(type)) + decl += join(type_to_glsl(type, arg.id), "*"); + else + decl += type_to_glsl(type, arg.id); + + bool opaque_handle = storage == StorageClassUniformConstant; + + if (!builtin && !opaque_handle && !is_pointer && + (storage == StorageClassFunction || storage == StorageClassGeneric)) + { + // If the argument is a pure value and not an opaque type, we will pass by value. + decl += " "; + decl += to_expression(name_id); + } + else if (is_array(type) && !type_is_image) + { + // Arrays of images and samplers are special cased. 
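+		// Illustrative (names hypothetical): such an argument is passed as a reference
+		// to a fixed-size array, e.g. "thread float4 (&colors)[4]", since arrays are not
+		// passed by value here.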
+ decl += " (&"; + decl += to_expression(name_id); + decl += ")"; + decl += type_to_array_glsl(type); + } + else if (!opaque_handle) + { + decl += "&"; + decl += " "; + decl += to_expression(name_id); + } + else + { + decl += " "; + decl += to_expression(name_id); + } + + return decl; +} + +// If we're currently in the entry point function, and the object +// has a qualified name, use it, otherwise use the standard name. +string CompilerMSL::to_name(uint32_t id, bool allow_alias) const +{ + if (current_function && (current_function->self == ir.default_entry_point)) + { + string qual_name = ir.meta.at(id).decoration.qualified_alias; + if (!qual_name.empty()) + return qual_name; + } + return Compiler::to_name(id, allow_alias); +} + +// Returns a name that combines the name of the struct with the name of the member, except for Builtins +string CompilerMSL::to_qualified_member_name(const SPIRType &type, uint32_t index) +{ + // Don't qualify Builtin names because they are unique and are treated as such when building expressions + BuiltIn builtin = BuiltInMax; + if (is_member_builtin(type, index, &builtin)) + return builtin_to_glsl(builtin, type.storage); + + // Strip any underscore prefix from member name + string mbr_name = to_member_name(type, index); + size_t startPos = mbr_name.find_first_not_of("_"); + mbr_name = (startPos != string::npos) ? mbr_name.substr(startPos) : ""; + return join(to_name(type.self), "_", mbr_name); +} + +// Ensures that the specified name is permanently usable by prepending a prefix +// if the first chars are _ and a digit, which indicate a transient name. +string CompilerMSL::ensure_valid_name(string name, string pfx) +{ + return (name.size() >= 2 && name[0] == '_' && isdigit(name[1])) ? (pfx + name) : name; +} + +// Replace all names that match MSL keywords or Metal Standard Library functions. +void CompilerMSL::replace_illegal_names() +{ + // FIXME: MSL and GLSL are doing two different things here. + // Agree on convention and remove this override. + static const unordered_set keywords = { + "kernel", "vertex", "fragment", "compute", "bias", + }; + + static const unordered_set illegal_func_names = { + "main", + "saturate", + }; + + for (auto &id : ir.ids) + { + switch (id.get_type()) + { + case TypeVariable: + { + auto &dec = ir.meta[id.get_id()].decoration; + if (keywords.find(dec.alias) != end(keywords)) + dec.alias += "0"; + + break; + } + + case TypeFunction: + { + auto &dec = ir.meta[id.get_id()].decoration; + if (illegal_func_names.find(dec.alias) != end(illegal_func_names)) + dec.alias += "0"; + + break; + } + + case TypeType: + { + for (auto &mbr_dec : ir.meta[id.get_id()].members) + if (keywords.find(mbr_dec.alias) != end(keywords)) + mbr_dec.alias += "0"; + + break; + } + + default: + break; + } + } + + for (auto &entry : ir.entry_points) + { + // Change both the entry point name and the alias, to keep them synced. + string &ep_name = entry.second.name; + if (illegal_func_names.find(ep_name) != end(illegal_func_names)) + ep_name += "0"; + + // Always write this because entry point might have been renamed earlier. + ir.meta[entry.first].decoration.alias = ep_name; + } + + CompilerGLSL::replace_illegal_names(); +} + +string CompilerMSL::to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain) +{ + auto *var = maybe_get(base); + // If this is a buffer array, we have to dereference the buffer pointers. + // Otherwise, if this is a pointer expression, dereference it. 
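+	// Illustrative (names hypothetical): members of a dereferenced buffer pointer are
+	// accessed with "->" (e.g. "ubo->mvp"), while ordinary struct expressions keep ".".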
+ if ((var && ((var->storage == StorageClassUniform || var->storage == StorageClassStorageBuffer) && + is_array(get(var->basetype)))) || + (!ptr_chain && should_dereference(base))) + return join("->", to_member_name(type, index)); + else + return join(".", to_member_name(type, index)); +} + +string CompilerMSL::to_qualifiers_glsl(uint32_t id) +{ + string quals; + + auto &type = expression_type(id); + if (type.storage == StorageClassWorkgroup) + quals += "threadgroup "; + + return quals; +} + +// The optional id parameter indicates the object whose type we are trying +// to find the description for. It is optional. Most type descriptions do not +// depend on a specific object's use of that type. +string CompilerMSL::type_to_glsl(const SPIRType &type, uint32_t id) +{ + string type_name; + + // Pointer? + if (type.pointer) + { + type_name = join(get_type_address_space(type), " ", type_to_glsl(get(type.parent_type), id)); + switch (type.basetype) + { + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + // These are handles. + break; + default: + // Anything else can be a raw pointer. + type_name += "*"; + break; + } + return type_name; + } + + switch (type.basetype) + { + case SPIRType::Struct: + // Need OpName lookup here to get a "sensible" name for a struct. + return to_name(type.self); + + case SPIRType::Image: + case SPIRType::SampledImage: + return image_type_glsl(type, id); + + case SPIRType::Sampler: + return sampler_type(type); + + case SPIRType::Void: + return "void"; + + case SPIRType::AtomicCounter: + return "atomic_uint"; + + // Scalars + case SPIRType::Boolean: + type_name = "bool"; + break; + case SPIRType::Char: + case SPIRType::SByte: + type_name = "char"; + break; + case SPIRType::UByte: + type_name = "uchar"; + break; + case SPIRType::Short: + type_name = "short"; + break; + case SPIRType::UShort: + type_name = "ushort"; + break; + case SPIRType::Int: + type_name = "int"; + break; + case SPIRType::UInt: + type_name = "uint"; + break; + case SPIRType::Int64: + type_name = "long"; // Currently unsupported + break; + case SPIRType::UInt64: + type_name = "size_t"; + break; + case SPIRType::Half: + type_name = "half"; + break; + case SPIRType::Float: + type_name = "float"; + break; + case SPIRType::Double: + type_name = "double"; // Currently unsupported + break; + + default: + return "unknown_type"; + } + + // Matrix? + if (type.columns > 1) + type_name += to_string(type.columns) + "x"; + + // Vector or Matrix? + if (type.vecsize > 1) + type_name += to_string(type.vecsize); + + return type_name; +} + +std::string CompilerMSL::sampler_type(const SPIRType &type) +{ + if (!type.array.empty()) + { + if (!msl_options.supports_msl_version(2)) + SPIRV_CROSS_THROW("MSL 2.0 or greater is required for arrays of samplers."); + + if (type.array.size() > 1) + SPIRV_CROSS_THROW("Arrays of arrays of samplers are not supported in MSL."); + + // Arrays of samplers in MSL must be declared with a special array syntax ala C++11 std::array. 
+ uint32_t array_size = to_array_size_literal(type); + if (array_size == 0) + SPIRV_CROSS_THROW("Unsized array of samplers is not supported in MSL."); + + auto &parent = get(get_pointee_type(type).parent_type); + return join("array<", sampler_type(parent), ", ", array_size, ">"); + } + else + return "sampler"; +} + +// Returns an MSL string describing the SPIR-V image type +string CompilerMSL::image_type_glsl(const SPIRType &type, uint32_t id) +{ + auto *var = maybe_get(id); + if (var && var->basevariable) + { + // For comparison images, check against the base variable, + // and not the fake ID which might have been generated for this variable. + id = var->basevariable; + } + + if (!type.array.empty()) + { + uint32_t major = 2, minor = 0; + if (msl_options.is_ios()) + { + major = 1; + minor = 2; + } + if (!msl_options.supports_msl_version(major, minor)) + { + if (msl_options.is_ios()) + SPIRV_CROSS_THROW("MSL 1.2 or greater is required for arrays of textures."); + else + SPIRV_CROSS_THROW("MSL 2.0 or greater is required for arrays of textures."); + } + + if (type.array.size() > 1) + SPIRV_CROSS_THROW("Arrays of arrays of textures are not supported in MSL."); + + // Arrays of images in MSL must be declared with a special array syntax ala C++11 std::array. + uint32_t array_size = to_array_size_literal(type); + if (array_size == 0) + SPIRV_CROSS_THROW("Unsized array of images is not supported in MSL."); + + auto &parent = get(get_pointee_type(type).parent_type); + return join("array<", image_type_glsl(parent, id), ", ", array_size, ">"); + } + + string img_type_name; + + // Bypass pointers because we need the real image struct + auto &img_type = get(type.self).image; + if (image_is_comparison(type, id)) + { + switch (img_type.dim) + { + case Dim1D: + img_type_name += "depth1d_unsupported_by_metal"; + break; + case Dim2D: + if (img_type.ms && img_type.arrayed) + { + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Multisampled array textures are supported from 2.1."); + img_type_name += "depth2d_ms_array"; + } + else if (img_type.ms) + img_type_name += "depth2d_ms"; + else if (img_type.arrayed) + img_type_name += "depth2d_array"; + else + img_type_name += "depth2d"; + break; + case Dim3D: + img_type_name += "depth3d_unsupported_by_metal"; + break; + case DimCube: + img_type_name += (img_type.arrayed ? "depthcube_array" : "depthcube"); + break; + default: + img_type_name += "unknown_depth_texture_type"; + break; + } + } + else + { + switch (img_type.dim) + { + case Dim1D: + img_type_name += (img_type.arrayed ? "texture1d_array" : "texture1d"); + break; + case DimBuffer: + case Dim2D: + case DimSubpassData: + if (img_type.ms && img_type.arrayed) + { + if (!msl_options.supports_msl_version(2, 1)) + SPIRV_CROSS_THROW("Multisampled array textures are supported from 2.1."); + img_type_name += "texture2d_ms_array"; + } + else if (img_type.ms) + img_type_name += "texture2d_ms"; + else if (img_type.arrayed) + img_type_name += "texture2d_array"; + else + img_type_name += "texture2d"; + break; + case Dim3D: + img_type_name += "texture3d"; + break; + case DimCube: + img_type_name += (img_type.arrayed ? "texturecube_array" : "texturecube"); + break; + default: + img_type_name += "unknown_texture_type"; + break; + } + } + + // Append the pixel type + img_type_name += "<"; + img_type_name += type_to_glsl(get(img_type.type)); + + // For unsampled images, append the sample/read/write access qualifier. + // For kernel images, the access qualifier my be supplied directly by SPIR-V. 
+ // Otherwise it may be set based on whether the image is read from or written to within the shader. + if (type.basetype == SPIRType::Image && type.image.sampled == 2 && type.image.dim != DimSubpassData) + { + switch (img_type.access) + { + case AccessQualifierReadOnly: + img_type_name += ", access::read"; + break; + + case AccessQualifierWriteOnly: + img_type_name += ", access::write"; + break; + + case AccessQualifierReadWrite: + img_type_name += ", access::read_write"; + break; + + default: + { + auto *p_var = maybe_get_backing_variable(id); + if (p_var && p_var->basevariable) + p_var = maybe_get(p_var->basevariable); + if (p_var && !has_decoration(p_var->self, DecorationNonWritable)) + { + img_type_name += ", access::"; + + if (!has_decoration(p_var->self, DecorationNonReadable)) + img_type_name += "read_"; + + img_type_name += "write"; + } + break; + } + } + } + + img_type_name += ">"; + + return img_type_name; +} + +string CompilerMSL::bitcast_glsl_op(const SPIRType &out_type, const SPIRType &in_type) +{ + if ((out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Short) || + (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::UShort) || + (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Int) || + (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::UInt) || + (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Int64) || + (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::UInt64)) + return type_to_glsl(out_type); + + if ((out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Float) || + (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Float) || + (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::UInt) || + (out_type.basetype == SPIRType::Float && in_type.basetype == SPIRType::Int) || + (out_type.basetype == SPIRType::Int64 && in_type.basetype == SPIRType::Double) || + (out_type.basetype == SPIRType::UInt64 && in_type.basetype == SPIRType::Double) || + (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::Int64) || + (out_type.basetype == SPIRType::Double && in_type.basetype == SPIRType::UInt64) || + (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UInt) || + (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::Half) || + (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::Int) || + (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Half) || + (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::UShort) || + (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::Half) || + (out_type.basetype == SPIRType::Half && in_type.basetype == SPIRType::Short) || + (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Half) || + (out_type.basetype == SPIRType::UInt && in_type.basetype == SPIRType::UShort && in_type.vecsize == 2) || + (out_type.basetype == SPIRType::UShort && in_type.basetype == SPIRType::UInt && in_type.vecsize == 1) || + (out_type.basetype == SPIRType::Int && in_type.basetype == SPIRType::Short && in_type.vecsize == 2) || + (out_type.basetype == SPIRType::Short && in_type.basetype == SPIRType::Int && in_type.vecsize == 1)) + return "as_type<" + type_to_glsl(out_type) + ">"; + + return ""; +} + +// Returns an MSL string identifying the name of a SPIR-V builtin. +// Output builtins are qualified with the name of the stage out structure. 
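+// For example (illustrative): inside the entry point, BuiltInPosition in the
+// Output storage class is emitted as "out.gl_Position", using stage_out_var_name.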
+string CompilerMSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage) +{ + switch (builtin) + { + + // Override GLSL compiler strictness + case BuiltInVertexId: + return "gl_VertexID"; + case BuiltInInstanceId: + return "gl_InstanceID"; + case BuiltInVertexIndex: + return "gl_VertexIndex"; + case BuiltInInstanceIndex: + return "gl_InstanceIndex"; + case BuiltInBaseVertex: + return "gl_BaseVertex"; + case BuiltInBaseInstance: + return "gl_BaseInstance"; + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + // When used in the entry function, output builtins are qualified with output struct name. + // Test storage class as NOT Input, as output builtins might be part of generic type. + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + /* fallthrough */ + case BuiltInPosition: + case BuiltInPointSize: + case BuiltInClipDistance: + case BuiltInCullDistance: + case BuiltInLayer: + case BuiltInFragDepth: + case BuiltInSampleMask: + if (storage != StorageClassInput && current_function && (current_function->self == ir.default_entry_point)) + return stage_out_var_name + "." + CompilerGLSL::builtin_to_glsl(builtin, storage); + + break; + + default: + break; + } + + return CompilerGLSL::builtin_to_glsl(builtin, storage); +} + +// Returns an MSL string attribute qualifer for a SPIR-V builtin +string CompilerMSL::builtin_qualifier(BuiltIn builtin) +{ + auto &execution = get_entry_point(); + + switch (builtin) + { + // Vertex function in + case BuiltInVertexId: + return "vertex_id"; + case BuiltInVertexIndex: + return "vertex_id"; + case BuiltInBaseVertex: + return "base_vertex"; + case BuiltInInstanceId: + return "instance_id"; + case BuiltInInstanceIndex: + return "instance_id"; + case BuiltInBaseInstance: + return "base_instance"; + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + // Vertex function out + case BuiltInClipDistance: + return "clip_distance"; + case BuiltInPointSize: + return "point_size"; + case BuiltInPosition: + return "position"; + case BuiltInLayer: + return "render_target_array_index"; + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + return "viewport_array_index"; + + // Fragment function in + case BuiltInFrontFacing: + return "front_facing"; + case BuiltInPointCoord: + return "point_coord"; + case BuiltInFragCoord: + return "position"; + case BuiltInSampleId: + return "sample_id"; + case BuiltInSampleMask: + return "sample_mask"; + case BuiltInSamplePosition: + // Shouldn't be reached. 
+ SPIRV_CROSS_THROW("Sample position is retrieved by a function in MSL."); + + // Fragment function out + case BuiltInFragDepth: + if (execution.flags.get(ExecutionModeDepthGreater)) + return "depth(greater)"; + else if (execution.flags.get(ExecutionModeDepthLess)) + return "depth(less)"; + else + return "depth(any)"; + + // Compute function in + case BuiltInGlobalInvocationId: + return "thread_position_in_grid"; + + case BuiltInWorkgroupId: + return "threadgroup_position_in_grid"; + + case BuiltInNumWorkgroups: + return "threadgroups_per_grid"; + + case BuiltInLocalInvocationId: + return "thread_position_in_threadgroup"; + + case BuiltInLocalInvocationIndex: + return "thread_index_in_threadgroup"; + + default: + return "unsupported-built-in"; + } +} + +// Returns an MSL string type declaration for a SPIR-V builtin +string CompilerMSL::builtin_type_decl(BuiltIn builtin) +{ + switch (builtin) + { + // Vertex function in + case BuiltInVertexId: + return "uint"; + case BuiltInVertexIndex: + return "uint"; + case BuiltInBaseVertex: + return "uint"; + case BuiltInInstanceId: + return "uint"; + case BuiltInInstanceIndex: + return "uint"; + case BuiltInBaseInstance: + return "uint"; + case BuiltInDrawIndex: + SPIRV_CROSS_THROW("DrawIndex is not supported in MSL."); + + // Vertex function out + case BuiltInClipDistance: + return "float"; + case BuiltInPointSize: + return "float"; + case BuiltInPosition: + return "float4"; + case BuiltInLayer: + return "uint"; + case BuiltInViewportIndex: + if (!msl_options.supports_msl_version(2, 0)) + SPIRV_CROSS_THROW("ViewportIndex requires Metal 2.0."); + return "uint"; + + // Fragment function in + case BuiltInFrontFacing: + return "bool"; + case BuiltInPointCoord: + return "float2"; + case BuiltInFragCoord: + return "float4"; + case BuiltInSampleId: + return "uint"; + case BuiltInSampleMask: + return "uint"; + case BuiltInSamplePosition: + return "float2"; + + // Fragment function out + case BuiltInFragDepth: + return "float"; + + // Compute function in + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInNumWorkgroups: + case BuiltInWorkgroupId: + return "uint3"; + case BuiltInLocalInvocationIndex: + return "uint"; + + case BuiltInHelperInvocation: + return "bool"; + + default: + return "unsupported-built-in-type"; + } +} + +// Returns the declaration of a built-in argument to a function +string CompilerMSL::built_in_func_arg(BuiltIn builtin, bool prefix_comma) +{ + string bi_arg; + if (prefix_comma) + bi_arg += ", "; + + bi_arg += builtin_type_decl(builtin); + bi_arg += " " + builtin_to_glsl(builtin, StorageClassInput); + bi_arg += " [[" + builtin_qualifier(builtin) + "]]"; + + return bi_arg; +} + +// Returns the byte size of a struct member. +size_t CompilerMSL::get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const +{ + auto &type = get(struct_type.member_types[index]); + + switch (type.basetype) + { + case SPIRType::Unknown: + case SPIRType::Void: + case SPIRType::AtomicCounter: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + SPIRV_CROSS_THROW("Querying size of opaque object."); + + default: + { + // For arrays, we can use ArrayStride to get an easy check. + // Runtime arrays will have zero size so force to min of one. 
+ if (!type.array.empty()) + { + uint32_t array_size = to_array_size_literal(type); + return type_struct_member_array_stride(struct_type, index) * max(array_size, 1u); + } + + if (type.basetype == SPIRType::Struct) + return get_declared_struct_size(type); + + uint32_t component_size = type.width / 8; + uint32_t vecsize = type.vecsize; + uint32_t columns = type.columns; + + // An unpacked 3-element vector or matrix column is the same memory size as a 4-element. + if (vecsize == 3 && !has_member_decoration(struct_type.self, index, DecorationCPacked)) + vecsize = 4; + + return component_size * vecsize * columns; + } + } +} + +// Returns the byte alignment of a struct member. +size_t CompilerMSL::get_declared_struct_member_alignment(const SPIRType &struct_type, uint32_t index) const +{ + auto &type = get(struct_type.member_types[index]); + + switch (type.basetype) + { + case SPIRType::Unknown: + case SPIRType::Void: + case SPIRType::AtomicCounter: + case SPIRType::Image: + case SPIRType::SampledImage: + case SPIRType::Sampler: + SPIRV_CROSS_THROW("Querying alignment of opaque object."); + + case SPIRType::Struct: + return 16; // Per Vulkan spec section 14.5.4 + + default: + { + // Alignment of packed type is the same as the underlying component or column size. + // Alignment of unpacked type is the same as the vector size. + // Alignment of 3-elements vector is the same as 4-elements (including packed using column). + if (member_is_packed_type(struct_type, index)) + return (type.width / 8) * (type.columns == 3 ? 4 : type.columns); + else + return (type.width / 8) * (type.vecsize == 3 ? 4 : type.vecsize); + } + } +} + +bool CompilerMSL::skip_argument(uint32_t) const +{ + return false; +} + +void CompilerMSL::analyze_sampled_image_usage() +{ + if (msl_options.swizzle_texture_samples) + { + SampledImageScanner scanner(*this); + traverse_all_reachable_opcodes(get(ir.default_entry_point), scanner); + } +} + +bool CompilerMSL::SampledImageScanner::handle(spv::Op opcode, const uint32_t *args, uint32_t length) +{ + switch (opcode) + { + case OpLoad: + case OpImage: + case OpSampledImage: + { + if (length < 3) + return false; + + uint32_t result_type = args[0]; + auto &type = compiler.get(result_type); + if ((type.basetype != SPIRType::Image && type.basetype != SPIRType::SampledImage) || type.image.sampled != 1) + return true; + + uint32_t id = args[1]; + compiler.set(id, "", result_type, true); + break; + } + case OpImageSampleExplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageFetch: + case OpImageGather: + case OpImageDrefGather: + compiler.has_sampled_images = + compiler.has_sampled_images || compiler.is_sampled_image_type(compiler.expression_type(args[2])); + break; + default: + break; + } + return true; +} + +bool CompilerMSL::OpCodePreprocessor::handle(Op opcode, const uint32_t *args, uint32_t length) +{ + // Since MSL exists in a single execution scope, function prototype declarations are not + // needed, and clutter the output. If secondary functions are output (either as a SPIR-V + // function implementation or as indicated by the presence of OpFunctionCall), then set + // suppress_missing_prototypes to suppress compiler warnings of missing function prototypes. 
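+ // For example (illustrative): encountering OpFMod below maps to SPVFuncImplMod,
+ // so a helper function is emitted and prototype suppression is enabled.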
+ + // Mark if the input requires the implementation of an SPIR-V function that does not exist in Metal. + SPVFuncImpl spv_func = get_spv_func_impl(opcode, args); + if (spv_func != SPVFuncImplNone) + { + compiler.spv_function_implementations.insert(spv_func); + suppress_missing_prototypes = true; + } + + switch (opcode) + { + + case OpFunctionCall: + suppress_missing_prototypes = true; + break; + + case OpImageWrite: + uses_resource_write = true; + break; + + case OpStore: + check_resource_write(args[0]); + break; + + case OpAtomicExchange: + case OpAtomicCompareExchange: + case OpAtomicCompareExchangeWeak: + case OpAtomicIIncrement: + case OpAtomicIDecrement: + case OpAtomicIAdd: + case OpAtomicISub: + case OpAtomicSMin: + case OpAtomicUMin: + case OpAtomicSMax: + case OpAtomicUMax: + case OpAtomicAnd: + case OpAtomicOr: + case OpAtomicXor: + uses_atomics = true; + check_resource_write(args[2]); + break; + + case OpAtomicLoad: + uses_atomics = true; + break; + + default: + break; + } + + // If it has one, keep track of the instruction's result type, mapped by ID + uint32_t result_type, result_id; + if (compiler.instruction_to_result_type(result_type, result_id, opcode, args, length)) + result_types[result_id] = result_type; + + return true; +} + +// If the variable is a Uniform or StorageBuffer, mark that a resource has been written to. +void CompilerMSL::OpCodePreprocessor::check_resource_write(uint32_t var_id) +{ + auto *p_var = compiler.maybe_get_backing_variable(var_id); + StorageClass sc = p_var ? p_var->storage : StorageClassMax; + if (sc == StorageClassUniform || sc == StorageClassStorageBuffer) + uses_resource_write = true; +} + +// Returns an enumeration of a SPIR-V function that needs to be output for certain Op codes. +CompilerMSL::SPVFuncImpl CompilerMSL::OpCodePreprocessor::get_spv_func_impl(Op opcode, const uint32_t *args) +{ + switch (opcode) + { + case OpFMod: + return SPVFuncImplMod; + + case OpFunctionCall: + { + auto &return_type = compiler.get(args[0]); + if (return_type.array.size() > 1) + { + if (return_type.array.size() > SPVFuncImplArrayCopyMultidimMax) + SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays."); + return static_cast(SPVFuncImplArrayCopyMultidimBase + return_type.array.size()); + } + else if (return_type.array.size() > 0) + return SPVFuncImplArrayCopy; + + break; + } + + case OpStore: + { + // Get the result type of the RHS. Since this is run as a pre-processing stage, + // we must extract the result type directly from the Instruction, rather than the ID. + uint32_t id_lhs = args[0]; + uint32_t id_rhs = args[1]; + + const SPIRType *type = nullptr; + if (compiler.ir.ids[id_rhs].get_type() != TypeNone) + { + // Could be a constant, or similar. + type = &compiler.expression_type(id_rhs); + } + else + { + // Or ... an expression. + uint32_t tid = result_types[id_rhs]; + if (tid) + type = &compiler.get(tid); + } + + auto *var = compiler.maybe_get(id_lhs); + + // Are we simply assigning to a statically assigned variable which takes a constant? + // Don't bother emitting this function. 
+ bool static_expression_lhs = + var && var->storage == StorageClassFunction && var->statically_assigned && var->remapped_variable; + if (type && compiler.is_array(*type) && !static_expression_lhs) + { + if (type->array.size() > 1) + { + if (type->array.size() > SPVFuncImplArrayCopyMultidimMax) + SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays."); + return static_cast(SPVFuncImplArrayCopyMultidimBase + type->array.size()); + } + else + return SPVFuncImplArrayCopy; + } + + break; + } + + case OpImageFetch: + case OpImageRead: + case OpImageWrite: + { + // Retrieve the image type, and if it's a Buffer, emit a texel coordinate function + uint32_t tid = result_types[args[opcode == OpImageWrite ? 0 : 2]]; + if (tid && compiler.get(tid).image.dim == DimBuffer) + return SPVFuncImplTexelBufferCoords; + + if (opcode == OpImageFetch && compiler.msl_options.swizzle_texture_samples) + return SPVFuncImplTextureSwizzle; + + break; + } + + case OpImageSampleExplicitLod: + case OpImageSampleProjExplicitLod: + case OpImageSampleDrefExplicitLod: + case OpImageSampleProjDrefExplicitLod: + case OpImageSampleImplicitLod: + case OpImageSampleProjImplicitLod: + case OpImageSampleDrefImplicitLod: + case OpImageSampleProjDrefImplicitLod: + case OpImageGather: + case OpImageDrefGather: + if (compiler.msl_options.swizzle_texture_samples) + return SPVFuncImplTextureSwizzle; + break; + + case OpCompositeConstruct: + { + auto &type = compiler.get(args[0]); + if (type.array.size() > 1) // We need to use copies to build the composite. + return static_cast(SPVFuncImplArrayCopyMultidimBase + type.array.size() - 1); + break; + } + + case OpExtInst: + { + uint32_t extension_set = args[2]; + if (compiler.get(extension_set).ext == SPIRExtension::GLSL) + { + GLSLstd450 op_450 = static_cast(args[3]); + switch (op_450) + { + case GLSLstd450Radians: + return SPVFuncImplRadians; + case GLSLstd450Degrees: + return SPVFuncImplDegrees; + case GLSLstd450FindILsb: + return SPVFuncImplFindILsb; + case GLSLstd450FindSMsb: + return SPVFuncImplFindSMsb; + case GLSLstd450FindUMsb: + return SPVFuncImplFindUMsb; + case GLSLstd450SSign: + return SPVFuncImplSSign; + case GLSLstd450MatrixInverse: + { + auto &mat_type = compiler.get(args[0]); + switch (mat_type.columns) + { + case 2: + return SPVFuncImplInverse2x2; + case 3: + return SPVFuncImplInverse3x3; + case 4: + return SPVFuncImplInverse4x4; + default: + break; + } + break; + } + default: + break; + } + } + break; + } + + default: + break; + } + return SPVFuncImplNone; +} + +// Sort both type and meta member content based on builtin status (put builtins at end), +// then by the required sorting aspect. +void CompilerMSL::MemberSorter::sort() +{ + // Create a temporary array of consecutive member indices and sort it based on how + // the members should be reordered, based on builtin and sorting aspect meta info. + size_t mbr_cnt = type.member_types.size(); + vector mbr_idxs(mbr_cnt); + iota(mbr_idxs.begin(), mbr_idxs.end(), 0); // Fill with consecutive indices + std::sort(mbr_idxs.begin(), mbr_idxs.end(), *this); // Sort member indices based on sorting aspect + + // Move type and meta member info to the order defined by the sorted member indices. + // This is done by creating temporary copies of both member types and meta, and then + // copying back to the original content at the sorted indices. 
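+ // Illustrative example: with SortAspect::Offset, members at offsets {16, 0, 8}
+ // are reordered to {0, 8, 16}, and any built-in members sort to the end
+ // regardless of their offsets.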
+ auto mbr_types_cpy = type.member_types; + auto mbr_meta_cpy = meta.members; + for (uint32_t mbr_idx = 0; mbr_idx < mbr_cnt; mbr_idx++) + { + type.member_types[mbr_idx] = mbr_types_cpy[mbr_idxs[mbr_idx]]; + meta.members[mbr_idx] = mbr_meta_cpy[mbr_idxs[mbr_idx]]; + } +} + +// Sort first by builtin status (put builtins at end), then by the sorting aspect. +bool CompilerMSL::MemberSorter::operator()(uint32_t mbr_idx1, uint32_t mbr_idx2) +{ + auto &mbr_meta1 = meta.members[mbr_idx1]; + auto &mbr_meta2 = meta.members[mbr_idx2]; + if (mbr_meta1.builtin != mbr_meta2.builtin) + return mbr_meta2.builtin; + else + switch (sort_aspect) + { + case Location: + return mbr_meta1.location < mbr_meta2.location; + case LocationReverse: + return mbr_meta1.location > mbr_meta2.location; + case Offset: + return mbr_meta1.offset < mbr_meta2.offset; + case OffsetThenLocationReverse: + return (mbr_meta1.offset < mbr_meta2.offset) || + ((mbr_meta1.offset == mbr_meta2.offset) && (mbr_meta1.location > mbr_meta2.location)); + case Alphabetical: + return mbr_meta1.alias < mbr_meta2.alias; + default: + return false; + } +} + +CompilerMSL::MemberSorter::MemberSorter(SPIRType &t, Meta &m, SortAspect sa) + : type(t) + , meta(m) + , sort_aspect(sa) +{ + // Ensure enough meta info is available + meta.members.resize(max(type.member_types.size(), meta.members.size())); +} + +void CompilerMSL::remap_constexpr_sampler(uint32_t id, const MSLConstexprSampler &sampler) +{ + auto &type = get(get(id).basetype); + if (type.basetype != SPIRType::SampledImage && type.basetype != SPIRType::Sampler) + SPIRV_CROSS_THROW("Can only remap SampledImage and Sampler type."); + if (!type.array.empty()) + SPIRV_CROSS_THROW("Can not remap array of samplers."); + constexpr_samplers[id] = sampler; +} + +void CompilerMSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) +{ + auto *var = maybe_get_backing_variable(source_id); + if (var) + source_id = var->self; + + // Only interested in standalone builtin variables. + if (!has_decoration(source_id, DecorationBuiltIn)) + return; + + auto builtin = static_cast(get_decoration(source_id, DecorationBuiltIn)); + auto expected_type = expr_type.basetype; + switch (builtin) + { + case BuiltInGlobalInvocationId: + case BuiltInLocalInvocationId: + case BuiltInWorkgroupId: + case BuiltInLocalInvocationIndex: + case BuiltInWorkgroupSize: + case BuiltInNumWorkgroups: + case BuiltInLayer: + case BuiltInViewportIndex: + expected_type = SPIRType::UInt; + break; + + default: + break; + } + + if (expected_type != expr_type.basetype) + expr = bitcast_expression(expr_type, expected_type, expr); +} + +void CompilerMSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) +{ + // Only interested in standalone builtin variables. + if (!has_decoration(target_id, DecorationBuiltIn)) + return; + + auto builtin = static_cast(get_decoration(target_id, DecorationBuiltIn)); + auto expected_type = expr_type.basetype; + switch (builtin) + { + case BuiltInLayer: + case BuiltInViewportIndex: + expected_type = SPIRType::UInt; + break; + + default: + break; + } + + if (expected_type != expr_type.basetype) + { + auto type = expr_type; + type.basetype = expected_type; + expr = bitcast_expression(type, expr_type.basetype, expr); + } +} + +std::string CompilerMSL::to_initializer_expression(const SPIRVariable &var) +{ + // We risk getting an array initializer here with MSL. If we have an array. + // FIXME: We cannot handle non-constant arrays being initialized. 
+ // We will need to inject spvArrayCopy here somehow ... + auto &type = get(var.basetype); + if (ir.ids[var.initializer].get_type() == TypeConstant && + (!type.array.empty() || type.basetype == SPIRType::Struct)) + return constant_expression(get(var.initializer)); + else + return CompilerGLSL::to_initializer_expression(var); +} diff --git a/3rdparty/spirv-cross/spirv_msl.hpp b/3rdparty/spirv-cross/spirv_msl.hpp new file mode 100644 index 000000000..a0058e33e --- /dev/null +++ b/3rdparty/spirv-cross/spirv_msl.hpp @@ -0,0 +1,509 @@ +/* + * Copyright 2016-2019 The Brenwill Workshop Ltd. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_MSL_HPP +#define SPIRV_CROSS_MSL_HPP + +#include "spirv_glsl.hpp" +#include +#include +#include +#include +#include + +namespace spirv_cross +{ + +// Indicates the format of the vertex attribute. Currently limited to specifying +// if the attribute is an 8-bit unsigned integer, 16-bit unsigned integer, or +// some other format. +enum MSLVertexFormat +{ + MSL_VERTEX_FORMAT_OTHER, + MSL_VERTEX_FORMAT_UINT8, + MSL_VERTEX_FORMAT_UINT16 +}; + +// Defines MSL characteristics of a vertex attribute at a particular location. +// The used_by_shader flag is set to true during compilation of SPIR-V to MSL +// if the shader makes use of this vertex attribute. +struct MSLVertexAttr +{ + uint32_t location = 0; + uint32_t msl_buffer = 0; + uint32_t msl_offset = 0; + uint32_t msl_stride = 0; + bool per_instance = false; + MSLVertexFormat format = MSL_VERTEX_FORMAT_OTHER; + bool used_by_shader = false; +}; + +// Matches the binding index of a MSL resource for a binding within a descriptor set. +// Taken together, the stage, desc_set and binding combine to form a reference to a resource +// descriptor used in a particular shading stage. Generally, only one of the buffer, texture, +// or sampler elements will be populated. The used_by_shader flag is set to true during +// compilation of SPIR-V to MSL if the shader makes use of this vertex attribute. 
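+// Illustrative example (values assumed, not from the upstream source):
+// { stage = ExecutionModelFragment, desc_set = 0, binding = 1, msl_texture = 2,
+// msl_sampler = 2 } maps a combined image/sampler at set 0, binding 1 to
+// texture and sampler index 2 in the fragment stage.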
+struct MSLResourceBinding +{ + spv::ExecutionModel stage; + uint32_t desc_set = 0; + uint32_t binding = 0; + + uint32_t msl_buffer = 0; + uint32_t msl_texture = 0; + uint32_t msl_sampler = 0; + + bool used_by_shader = false; +}; + +enum MSLSamplerCoord +{ + MSL_SAMPLER_COORD_NORMALIZED, + MSL_SAMPLER_COORD_PIXEL +}; + +enum MSLSamplerFilter +{ + MSL_SAMPLER_FILTER_NEAREST, + MSL_SAMPLER_FILTER_LINEAR +}; + +enum MSLSamplerMipFilter +{ + MSL_SAMPLER_MIP_FILTER_NONE, + MSL_SAMPLER_MIP_FILTER_NEAREST, + MSL_SAMPLER_MIP_FILTER_LINEAR, +}; + +enum MSLSamplerAddress +{ + MSL_SAMPLER_ADDRESS_CLAMP_TO_ZERO, + MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE, + MSL_SAMPLER_ADDRESS_CLAMP_TO_BORDER, + MSL_SAMPLER_ADDRESS_REPEAT, + MSL_SAMPLER_ADDRESS_MIRRORED_REPEAT +}; + +enum MSLSamplerCompareFunc +{ + MSL_SAMPLER_COMPARE_FUNC_NEVER, + MSL_SAMPLER_COMPARE_FUNC_LESS, + MSL_SAMPLER_COMPARE_FUNC_LESS_EQUAL, + MSL_SAMPLER_COMPARE_FUNC_GREATER, + MSL_SAMPLER_COMPARE_FUNC_GREATER_EQUAL, + MSL_SAMPLER_COMPARE_FUNC_EQUAL, + MSL_SAMPLER_COMPARE_FUNC_NOT_EQUAL, + MSL_SAMPLER_COMPARE_FUNC_ALWAYS +}; + +enum MSLSamplerBorderColor +{ + MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK, + MSL_SAMPLER_BORDER_COLOR_OPAQUE_BLACK, + MSL_SAMPLER_BORDER_COLOR_OPAQUE_WHITE +}; + +struct MSLConstexprSampler +{ + MSLSamplerCoord coord = MSL_SAMPLER_COORD_NORMALIZED; + MSLSamplerFilter min_filter = MSL_SAMPLER_FILTER_NEAREST; + MSLSamplerFilter mag_filter = MSL_SAMPLER_FILTER_NEAREST; + MSLSamplerMipFilter mip_filter = MSL_SAMPLER_MIP_FILTER_NONE; + MSLSamplerAddress s_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE; + MSLSamplerAddress t_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE; + MSLSamplerAddress r_address = MSL_SAMPLER_ADDRESS_CLAMP_TO_EDGE; + MSLSamplerCompareFunc compare_func = MSL_SAMPLER_COMPARE_FUNC_NEVER; + MSLSamplerBorderColor border_color = MSL_SAMPLER_BORDER_COLOR_TRANSPARENT_BLACK; + float lod_clamp_min = 0.0f; + float lod_clamp_max = 1000.0f; + int max_anisotropy = 1; + + bool compare_enable = false; + bool lod_clamp_enable = false; + bool anisotropy_enable = false; +}; + +// Tracks the type ID and member index of a struct member +using MSLStructMemberKey = uint64_t; + +// Special constant used in a MSLResourceBinding desc_set +// element to indicate the bindings for the push constants. +static const uint32_t kPushConstDescSet = ~(0u); + +// Special constant used in a MSLResourceBinding binding +// element to indicate the bindings for the push constants. 
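+// (Illustrative) A push constant block is therefore matched by a
+// MSLResourceBinding whose desc_set equals kPushConstDescSet and whose binding
+// equals kPushConstBinding; its msl_buffer field supplies the Metal buffer
+// index to use for it.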
+static const uint32_t kPushConstBinding = 0; + +// Decompiles SPIR-V to Metal Shading Language +class CompilerMSL : public CompilerGLSL +{ +public: + // Options for compiling to Metal Shading Language + struct Options + { + typedef enum + { + iOS, + macOS, + } Platform; + + Platform platform = macOS; + uint32_t msl_version = make_msl_version(1, 2); + uint32_t texel_buffer_texture_width = 4096; // Width of 2D Metal textures used as 1D texel buffers + uint32_t aux_buffer_index = 0; + bool enable_point_size_builtin = true; + bool disable_rasterization = false; + bool swizzle_texture_samples = false; + + bool is_ios() + { + return platform == iOS; + } + + bool is_macos() + { + return platform == macOS; + } + + void set_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) + { + msl_version = make_msl_version(major, minor, patch); + } + + bool supports_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) + { + return msl_version >= make_msl_version(major, minor, patch); + } + + static uint32_t make_msl_version(uint32_t major, uint32_t minor = 0, uint32_t patch = 0) + { + return (major * 10000) + (minor * 100) + patch; + } + }; + + SPIRV_CROSS_DEPRECATED("CompilerMSL::get_options() is obsolete, use get_msl_options() instead.") + const Options &get_options() const + { + return msl_options; + } + + const Options &get_msl_options() const + { + return msl_options; + } + + SPIRV_CROSS_DEPRECATED("CompilerMSL::set_options() is obsolete, use set_msl_options() instead.") + void set_options(Options &opts) + { + msl_options = opts; + } + + void set_msl_options(const Options &opts) + { + msl_options = opts; + } + + // Provide feedback to calling API to allow runtime to disable pipeline + // rasterization if vertex shader requires rasterization to be disabled. + bool get_is_rasterization_disabled() const + { + return is_rasterization_disabled && (get_entry_point().model == spv::ExecutionModelVertex); + } + + // Provide feedback to calling API to allow it to pass an auxiliary + // buffer if the shader needs it. + bool needs_aux_buffer() const + { + return used_aux_buffer; + } + + // An enum of SPIR-V functions that are implemented in additional + // source code that is added to the shader if necessary. + enum SPVFuncImpl + { + SPVFuncImplNone, + SPVFuncImplMod, + SPVFuncImplRadians, + SPVFuncImplDegrees, + SPVFuncImplFindILsb, + SPVFuncImplFindSMsb, + SPVFuncImplFindUMsb, + SPVFuncImplSSign, + SPVFuncImplArrayCopyMultidimBase, + // Unfortunately, we cannot use recursive templates in the MSL compiler properly, + // so stamp out variants up to some arbitrary maximum. 
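+ // (Illustrative) Copying a two-dimensional array such as "float a[3][4]"
+ // selects SPVFuncImplArrayCopyMultidimBase + 2, i.e. SPVFuncImplArrayOfArrayCopy2Dim.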
+ SPVFuncImplArrayCopy = SPVFuncImplArrayCopyMultidimBase + 1, + SPVFuncImplArrayOfArrayCopy2Dim = SPVFuncImplArrayCopyMultidimBase + 2, + SPVFuncImplArrayOfArrayCopy3Dim = SPVFuncImplArrayCopyMultidimBase + 3, + SPVFuncImplArrayOfArrayCopy4Dim = SPVFuncImplArrayCopyMultidimBase + 4, + SPVFuncImplArrayOfArrayCopy5Dim = SPVFuncImplArrayCopyMultidimBase + 5, + SPVFuncImplArrayOfArrayCopy6Dim = SPVFuncImplArrayCopyMultidimBase + 6, + SPVFuncImplTexelBufferCoords, + SPVFuncImplInverse4x4, + SPVFuncImplInverse3x3, + SPVFuncImplInverse2x2, + SPVFuncImplRowMajor2x3, + SPVFuncImplRowMajor2x4, + SPVFuncImplRowMajor3x2, + SPVFuncImplRowMajor3x4, + SPVFuncImplRowMajor4x2, + SPVFuncImplRowMajor4x3, + SPVFuncImplTextureSwizzle, + SPVFuncImplArrayCopyMultidimMax = 6 + }; + + // Constructs an instance to compile the SPIR-V code into Metal Shading Language, + // using the configuration parameters, if provided: + // - p_vtx_attrs is an optional list of vertex attribute bindings used to match + // vertex content locations to MSL attributes. If vertex attributes are provided, + // the compiler will set the used_by_shader flag to true in any vertex attribute + // actually used by the MSL code. + // - p_res_bindings is a list of resource bindings to indicate the MSL buffer, + // texture or sampler index to use for a particular SPIR-V description set + // and binding. If resource bindings are provided, the compiler will set the + // used_by_shader flag to true in any resource binding actually used by the MSL code. + CompilerMSL(std::vector spirv, std::vector *p_vtx_attrs = nullptr, + std::vector *p_res_bindings = nullptr); + + // Alternate constructor avoiding use of std::vectors. + CompilerMSL(const uint32_t *ir, size_t word_count, MSLVertexAttr *p_vtx_attrs = nullptr, size_t vtx_attrs_count = 0, + MSLResourceBinding *p_res_bindings = nullptr, size_t res_bindings_count = 0); + + // Alternate constructors taking pre-parsed IR directly. + CompilerMSL(const ParsedIR &ir, MSLVertexAttr *p_vtx_attrs = nullptr, size_t vtx_attrs_count = 0, + MSLResourceBinding *p_res_bindings = nullptr, size_t res_bindings_count = 0); + + CompilerMSL(ParsedIR &&ir, MSLVertexAttr *p_vtx_attrs = nullptr, size_t vtx_attrs_count = 0, + MSLResourceBinding *p_res_bindings = nullptr, size_t res_bindings_count = 0); + + // Compiles the SPIR-V code into Metal Shading Language. + std::string compile() override; + + // Compiles the SPIR-V code into Metal Shading Language, overriding configuration parameters. + // Any of the parameters here may be null to indicate that the configuration provided in the + // constructor should be used. They are not declared as optional to avoid a conflict with the + // inherited and overridden zero-parameter compile() function. + std::string compile(std::vector *p_vtx_attrs, std::vector *p_res_bindings); + + // This legacy method is deprecated. + typedef Options MSLConfiguration; + SPIRV_CROSS_DEPRECATED("Please use get_msl_options() and set_msl_options() instead.") + std::string compile(MSLConfiguration &msl_cfg, std::vector *p_vtx_attrs = nullptr, + std::vector *p_res_bindings = nullptr); + + // Remap a sampler with ID to a constexpr sampler. + // Older iOS targets must use constexpr samplers in certain cases (PCF), + // so a static sampler must be used. + // The sampler will not consume a binding, but be declared in the entry point as a constexpr sampler. + // This can be used on both combined image/samplers (sampler2D) or standalone samplers. + // The remapped sampler must not be an array of samplers. 
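+ // Illustrative usage sketch (variable names assumed, not from the upstream source):
+ //   MSLConstexprSampler pcf;
+ //   pcf.compare_enable = true;
+ //   pcf.compare_func = MSL_SAMPLER_COMPARE_FUNC_LESS;
+ //   compiler.remap_constexpr_sampler(shadow_sampler_id, pcf);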
+ void remap_constexpr_sampler(uint32_t id, const MSLConstexprSampler &sampler); + +protected: + void emit_binary_unord_op(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op); + void emit_instruction(const Instruction &instr) override; + void emit_glsl_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args, + uint32_t count) override; + void emit_header() override; + void emit_function_prototype(SPIRFunction &func, const Bitset &return_flags) override; + void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id) override; + void emit_fixup() override; + void emit_struct_member(const SPIRType &type, uint32_t member_type_id, uint32_t index, + const std::string &qualifier = "", uint32_t base_offset = 0) override; + std::string type_to_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string image_type_glsl(const SPIRType &type, uint32_t id = 0) override; + std::string sampler_type(const SPIRType &type); + std::string builtin_to_glsl(spv::BuiltIn builtin, spv::StorageClass storage) override; + size_t get_declared_struct_member_size(const SPIRType &struct_type, uint32_t index) const override; + std::string to_func_call_arg(uint32_t id) override; + std::string to_name(uint32_t id, bool allow_alias = true) const override; + std::string to_function_name(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj, + bool has_array_offsets, bool has_offset, bool has_grad, bool has_dref, + uint32_t lod) override; + std::string to_function_args(uint32_t img, const SPIRType &imgtype, bool is_fetch, bool is_gather, bool is_proj, + uint32_t coord, uint32_t coord_components, uint32_t dref, uint32_t grad_x, + uint32_t grad_y, uint32_t lod, uint32_t coffset, uint32_t offset, uint32_t bias, + uint32_t comp, uint32_t sample, bool *p_forward) override; + std::string to_initializer_expression(const SPIRVariable &var) override; + std::string unpack_expression_type(std::string expr_str, const SPIRType &type) override; + std::string bitcast_glsl_op(const SPIRType &result_type, const SPIRType &argument_type) override; + bool skip_argument(uint32_t id) const override; + std::string to_member_reference(uint32_t base, const SPIRType &type, uint32_t index, bool ptr_chain) override; + std::string to_qualifiers_glsl(uint32_t id) override; + void replace_illegal_names() override; + void declare_undefined_values() override; + void declare_constant_arrays(); + bool is_non_native_row_major_matrix(uint32_t id) override; + bool member_is_non_native_row_major_matrix(const SPIRType &type, uint32_t index) override; + std::string convert_row_major_matrix(std::string exp_str, const SPIRType &exp_type, bool is_packed) override; + + void preprocess_op_codes(); + void localize_global_variables(); + void extract_global_variables_from_functions(); + void mark_packable_structs(); + void mark_as_packable(SPIRType &type); + + std::unordered_map> function_global_vars; + void extract_global_variables_from_function(uint32_t func_id, std::set &added_arg_ids, + std::unordered_set &global_var_ids, + std::unordered_set &processed_func_ids); + uint32_t add_interface_block(spv::StorageClass storage); + + void add_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, SPIRType &ib_type, + SPIRVariable &var); + void add_composite_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var); + void 
add_plain_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var); + void add_plain_member_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, uint32_t index); + void add_composite_member_variable_to_interface_block(spv::StorageClass storage, const std::string &ib_var_ref, + SPIRType &ib_type, SPIRVariable &var, uint32_t index); + uint32_t get_accumulated_member_location(const SPIRVariable &var, uint32_t mbr_idx); + + void mark_location_as_used_by_shader(uint32_t location, spv::StorageClass storage); + uint32_t ensure_correct_builtin_type(uint32_t type_id, spv::BuiltIn builtin); + uint32_t ensure_correct_attribute_type(uint32_t type_id, uint32_t location); + + void emit_custom_functions(); + void emit_resources(); + void emit_specialization_constants(); + void emit_interface_block(uint32_t ib_var_id); + bool maybe_emit_array_assignment(uint32_t id_lhs, uint32_t id_rhs); + void add_convert_row_major_matrix_function(uint32_t cols, uint32_t rows); + + std::string func_type_decl(SPIRType &type); + std::string entry_point_args(bool append_comma); + std::string to_qualified_member_name(const SPIRType &type, uint32_t index); + std::string ensure_valid_name(std::string name, std::string pfx); + std::string to_sampler_expression(uint32_t id); + std::string builtin_qualifier(spv::BuiltIn builtin); + std::string builtin_type_decl(spv::BuiltIn builtin); + std::string built_in_func_arg(spv::BuiltIn builtin, bool prefix_comma); + std::string member_attribute_qualifier(const SPIRType &type, uint32_t index); + std::string argument_decl(const SPIRFunction::Parameter &arg); + std::string round_fp_tex_coords(std::string tex_coords, bool coord_is_fp); + uint32_t get_metal_resource_index(SPIRVariable &var, SPIRType::BaseType basetype); + uint32_t get_ordered_member_location(uint32_t type_id, uint32_t index, uint32_t *comp = nullptr); + size_t get_declared_struct_member_alignment(const SPIRType &struct_type, uint32_t index) const; + std::string to_component_argument(uint32_t id); + void align_struct(SPIRType &ib_type); + bool is_member_packable(SPIRType &ib_type, uint32_t index); + MSLStructMemberKey get_struct_member_key(uint32_t type_id, uint32_t index); + std::string get_argument_address_space(const SPIRVariable &argument); + std::string get_type_address_space(const SPIRType &type); + void emit_atomic_func_op(uint32_t result_type, uint32_t result_id, const char *op, uint32_t mem_order_1, + uint32_t mem_order_2, bool has_mem_order_2, uint32_t op0, uint32_t op1 = 0, + bool op1_is_pointer = false, bool op1_is_literal = false, uint32_t op2 = 0); + const char *get_memory_order(uint32_t spv_mem_sem); + void add_pragma_line(const std::string &line); + void add_typedef_line(const std::string &line); + void emit_barrier(uint32_t id_exe_scope, uint32_t id_mem_scope, uint32_t id_mem_sem); + void emit_array_copy(const std::string &lhs, uint32_t rhs_id) override; + void build_implicit_builtins(); + void emit_entry_point_declarations() override; + uint32_t builtin_frag_coord_id = 0; + uint32_t builtin_sample_id_id = 0; + uint32_t aux_buffer_id = 0; + + void bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type) override; + void bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type) override; + + void analyze_sampled_image_usage(); + + Options msl_options; + std::set spv_function_implementations; + 
std::unordered_map vtx_attrs_by_location; + std::unordered_map struct_member_padding; + std::set pragma_lines; + std::set typedef_lines; + std::vector vars_needing_early_declaration; + std::vector resource_bindings; + MSLResourceBinding next_metal_resource_index; + uint32_t stage_in_var_id = 0; + uint32_t stage_out_var_id = 0; + bool has_sampled_images = false; + bool needs_vertex_idx_arg = false; + bool needs_instance_idx_arg = false; + bool is_rasterization_disabled = false; + bool used_aux_buffer = false; + std::string qual_pos_var_name; + std::string stage_in_var_name = "in"; + std::string stage_out_var_name = "out"; + std::string sampler_name_suffix = "Smplr"; + spv::Op previous_instruction_opcode = spv::OpNop; + + std::unordered_map constexpr_samplers; + std::vector buffer_arrays; + + // OpcodeHandler that handles several MSL preprocessing operations. + struct OpCodePreprocessor : OpcodeHandler + { + OpCodePreprocessor(CompilerMSL &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override; + CompilerMSL::SPVFuncImpl get_spv_func_impl(spv::Op opcode, const uint32_t *args); + void check_resource_write(uint32_t var_id); + + CompilerMSL &compiler; + std::unordered_map result_types; + bool suppress_missing_prototypes = false; + bool uses_atomics = false; + bool uses_resource_write = false; + }; + + // OpcodeHandler that scans for uses of sampled images + struct SampledImageScanner : OpcodeHandler + { + SampledImageScanner(CompilerMSL &compiler_) + : compiler(compiler_) + { + } + + bool handle(spv::Op opcode, const uint32_t *args, uint32_t) override; + + CompilerMSL &compiler; + }; + + // Sorts the members of a SPIRType and associated Meta info based on a settable sorting + // aspect, which defines which aspect of the struct members will be used to sort them. + // Regardless of the sorting aspect, built-in members always appear at the end of the struct. + struct MemberSorter + { + enum SortAspect + { + Location, + LocationReverse, + Offset, + OffsetThenLocationReverse, + Alphabetical + }; + + void sort(); + bool operator()(uint32_t mbr_idx1, uint32_t mbr_idx2); + MemberSorter(SPIRType &t, Meta &m, SortAspect sa); + + SPIRType &type; + Meta &meta; + SortAspect sort_aspect; + }; +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_parser.cpp b/3rdparty/spirv-cross/spirv_parser.cpp new file mode 100644 index 000000000..205305ddb --- /dev/null +++ b/3rdparty/spirv-cross/spirv_parser.cpp @@ -0,0 +1,1121 @@ +/* + * Copyright 2018-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "spirv_parser.hpp" +#include + +using namespace std; +using namespace spv; + +namespace spirv_cross +{ +Parser::Parser(std::vector spirv) +{ + ir.spirv = move(spirv); +} + +Parser::Parser(const uint32_t *spirv_data, size_t word_count) +{ + ir.spirv = vector(spirv_data, spirv_data + word_count); +} + +static bool decoration_is_string(Decoration decoration) +{ + switch (decoration) + { + case DecorationHlslSemanticGOOGLE: + return true; + + default: + return false; + } +} + +static inline uint32_t swap_endian(uint32_t v) +{ + return ((v >> 24) & 0x000000ffu) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | ((v << 24) & 0xff000000u); +} + +static bool is_valid_spirv_version(uint32_t version) +{ + switch (version) + { + // Allow v99 since it tends to just work. + case 99: + case 0x10000: // SPIR-V 1.0 + case 0x10100: // SPIR-V 1.1 + case 0x10200: // SPIR-V 1.2 + case 0x10300: // SPIR-V 1.3 + return true; + + default: + return false; + } +} + +void Parser::parse() +{ + auto &spirv = ir.spirv; + + auto len = spirv.size(); + if (len < 5) + SPIRV_CROSS_THROW("SPIRV file too small."); + + auto s = spirv.data(); + + // Endian-swap if we need to. + if (s[0] == swap_endian(MagicNumber)) + transform(begin(spirv), end(spirv), begin(spirv), [](uint32_t c) { return swap_endian(c); }); + + if (s[0] != MagicNumber || !is_valid_spirv_version(s[1])) + SPIRV_CROSS_THROW("Invalid SPIRV format."); + + uint32_t bound = s[3]; + ir.set_id_bounds(bound); + + uint32_t offset = 5; + + vector instructions; + while (offset < len) + { + Instruction instr = {}; + instr.op = spirv[offset] & 0xffff; + instr.count = (spirv[offset] >> 16) & 0xffff; + + if (instr.count == 0) + SPIRV_CROSS_THROW("SPIR-V instructions cannot consume 0 words. Invalid SPIR-V file."); + + instr.offset = offset + 1; + instr.length = instr.count - 1; + + offset += instr.count; + + if (offset > spirv.size()) + SPIRV_CROSS_THROW("SPIR-V instruction goes out of bounds."); + + instructions.push_back(instr); + } + + for (auto &i : instructions) + parse(i); + + if (current_function) + SPIRV_CROSS_THROW("Function was not terminated."); + if (current_block) + SPIRV_CROSS_THROW("Block was not terminated."); +} + +const uint32_t *Parser::stream(const Instruction &instr) const +{ + // If we're not going to use any arguments, just return nullptr. + // We want to avoid case where we return an out of range pointer + // that trips debug assertions on some platforms. 
+ if (!instr.length) + return nullptr; + + if (instr.offset + instr.length > ir.spirv.size()) + SPIRV_CROSS_THROW("Compiler::stream() out of range."); + return &ir.spirv[instr.offset]; +} + +static string extract_string(const vector &spirv, uint32_t offset) +{ + string ret; + for (uint32_t i = offset; i < spirv.size(); i++) + { + uint32_t w = spirv[i]; + + for (uint32_t j = 0; j < 4; j++, w >>= 8) + { + char c = w & 0xff; + if (c == '\0') + return ret; + ret += c; + } + } + + SPIRV_CROSS_THROW("String was not terminated before EOF"); +} + +void Parser::parse(const Instruction &instruction) +{ + auto *ops = stream(instruction); + auto op = static_cast(instruction.op); + uint32_t length = instruction.length; + + switch (op) + { + case OpMemoryModel: + case OpSourceContinued: + case OpSourceExtension: + case OpNop: + case OpLine: + case OpNoLine: + case OpString: + case OpModuleProcessed: + break; + + case OpSource: + { + auto lang = static_cast(ops[0]); + switch (lang) + { + case SourceLanguageESSL: + ir.source.es = true; + ir.source.version = ops[1]; + ir.source.known = true; + ir.source.hlsl = false; + break; + + case SourceLanguageGLSL: + ir.source.es = false; + ir.source.version = ops[1]; + ir.source.known = true; + ir.source.hlsl = false; + break; + + case SourceLanguageHLSL: + // For purposes of cross-compiling, this is GLSL 450. + ir.source.es = false; + ir.source.version = 450; + ir.source.known = true; + ir.source.hlsl = true; + break; + + default: + ir.source.known = false; + break; + } + break; + } + + case OpUndef: + { + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + set(id, result_type); + break; + } + + case OpCapability: + { + uint32_t cap = ops[0]; + if (cap == CapabilityKernel) + SPIRV_CROSS_THROW("Kernel capability not supported."); + + ir.declared_capabilities.push_back(static_cast(ops[0])); + break; + } + + case OpExtension: + { + auto ext = extract_string(ir.spirv, instruction.offset); + ir.declared_extensions.push_back(move(ext)); + break; + } + + case OpExtInstImport: + { + uint32_t id = ops[0]; + auto ext = extract_string(ir.spirv, instruction.offset + 1); + if (ext == "GLSL.std.450") + set(id, SPIRExtension::GLSL); + else if (ext == "SPV_AMD_shader_ballot") + set(id, SPIRExtension::SPV_AMD_shader_ballot); + else if (ext == "SPV_AMD_shader_explicit_vertex_parameter") + set(id, SPIRExtension::SPV_AMD_shader_explicit_vertex_parameter); + else if (ext == "SPV_AMD_shader_trinary_minmax") + set(id, SPIRExtension::SPV_AMD_shader_trinary_minmax); + else if (ext == "SPV_AMD_gcn_shader") + set(id, SPIRExtension::SPV_AMD_gcn_shader); + else + set(id, SPIRExtension::Unsupported); + + // Other SPIR-V extensions which have ExtInstrs are currently not supported. + + break; + } + + case OpEntryPoint: + { + auto itr = + ir.entry_points.insert(make_pair(ops[1], SPIREntryPoint(ops[1], static_cast(ops[0]), + extract_string(ir.spirv, instruction.offset + 2)))); + auto &e = itr.first->second; + + // Strings need nul-terminator and consume the whole word. + uint32_t strlen_words = uint32_t((e.name.size() + 1 + 3) >> 2); + e.interface_variables.insert(end(e.interface_variables), ops + strlen_words + 2, ops + instruction.length); + + // Set the name of the entry point in case OpName is not provided later. + ir.set_name(ops[1], e.name); + + // If we don't have an entry, make the first one our "default". 
+ if (!ir.default_entry_point) + ir.default_entry_point = ops[1]; + break; + } + + case OpExecutionMode: + { + auto &execution = ir.entry_points[ops[0]]; + auto mode = static_cast(ops[1]); + execution.flags.set(mode); + + switch (mode) + { + case ExecutionModeInvocations: + execution.invocations = ops[2]; + break; + + case ExecutionModeLocalSize: + execution.workgroup_size.x = ops[2]; + execution.workgroup_size.y = ops[3]; + execution.workgroup_size.z = ops[4]; + break; + + case ExecutionModeOutputVertices: + execution.output_vertices = ops[2]; + break; + + default: + break; + } + break; + } + + case OpName: + { + uint32_t id = ops[0]; + ir.set_name(id, extract_string(ir.spirv, instruction.offset + 1)); + break; + } + + case OpMemberName: + { + uint32_t id = ops[0]; + uint32_t member = ops[1]; + ir.set_member_name(id, member, extract_string(ir.spirv, instruction.offset + 2)); + break; + } + + case OpDecorationGroup: + { + // Noop, this simply means an ID should be a collector of decorations. + // The meta array is already a flat array of decorations which will contain the relevant decorations. + break; + } + + case OpGroupDecorate: + { + uint32_t group_id = ops[0]; + auto &decorations = ir.meta[group_id].decoration; + auto &flags = decorations.decoration_flags; + + // Copies decorations from one ID to another. Only copy decorations which are set in the group, + // i.e., we cannot just copy the meta structure directly. + for (uint32_t i = 1; i < length; i++) + { + uint32_t target = ops[i]; + flags.for_each_bit([&](uint32_t bit) { + auto decoration = static_cast(bit); + + if (decoration_is_string(decoration)) + { + ir.set_decoration_string(target, decoration, ir.get_decoration_string(group_id, decoration)); + } + else + { + ir.meta[target].decoration_word_offset[decoration] = + ir.meta[group_id].decoration_word_offset[decoration]; + ir.set_decoration(target, decoration, ir.get_decoration(group_id, decoration)); + } + }); + } + break; + } + + case OpGroupMemberDecorate: + { + uint32_t group_id = ops[0]; + auto &flags = ir.meta[group_id].decoration.decoration_flags; + + // Copies decorations from one ID to another. Only copy decorations which are set in the group, + // i.e., we cannot just copy the meta structure directly. + for (uint32_t i = 1; i + 1 < length; i += 2) + { + uint32_t target = ops[i + 0]; + uint32_t index = ops[i + 1]; + flags.for_each_bit([&](uint32_t bit) { + auto decoration = static_cast(bit); + + if (decoration_is_string(decoration)) + ir.set_member_decoration_string(target, index, decoration, + ir.get_decoration_string(group_id, decoration)); + else + ir.set_member_decoration(target, index, decoration, ir.get_decoration(group_id, decoration)); + }); + } + break; + } + + case OpDecorate: + case OpDecorateId: + { + // OpDecorateId technically supports an array of arguments, but our only supported decorations are single uint, + // so merge decorate and decorate-id here. 
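+ // Illustrative example: "OpDecorate %buf Binding 3" arrives here with
+ // ops = { <id of %buf>, DecorationBinding, 3 } and length == 3.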
+ uint32_t id = ops[0]; + + auto decoration = static_cast(ops[1]); + if (length >= 3) + { + ir.meta[id].decoration_word_offset[decoration] = uint32_t(&ops[2] - ir.spirv.data()); + ir.set_decoration(id, decoration, ops[2]); + } + else + ir.set_decoration(id, decoration); + + break; + } + + case OpDecorateStringGOOGLE: + { + uint32_t id = ops[0]; + auto decoration = static_cast(ops[1]); + ir.set_decoration_string(id, decoration, extract_string(ir.spirv, instruction.offset + 2)); + break; + } + + case OpMemberDecorate: + { + uint32_t id = ops[0]; + uint32_t member = ops[1]; + auto decoration = static_cast(ops[2]); + if (length >= 4) + ir.set_member_decoration(id, member, decoration, ops[3]); + else + ir.set_member_decoration(id, member, decoration); + break; + } + + case OpMemberDecorateStringGOOGLE: + { + uint32_t id = ops[0]; + uint32_t member = ops[1]; + auto decoration = static_cast(ops[2]); + ir.set_member_decoration_string(id, member, decoration, extract_string(ir.spirv, instruction.offset + 3)); + break; + } + + // Build up basic types. + case OpTypeVoid: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Void; + break; + } + + case OpTypeBool: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Boolean; + type.width = 1; + break; + } + + case OpTypeFloat: + { + uint32_t id = ops[0]; + uint32_t width = ops[1]; + auto &type = set(id); + if (width == 64) + type.basetype = SPIRType::Double; + else if (width == 32) + type.basetype = SPIRType::Float; + else if (width == 16) + type.basetype = SPIRType::Half; + else + SPIRV_CROSS_THROW("Unrecognized bit-width of floating point type."); + type.width = width; + break; + } + + case OpTypeInt: + { + uint32_t id = ops[0]; + uint32_t width = ops[1]; + bool signedness = ops[2] != 0; + auto &type = set(id); + switch (width) + { + case 64: + type.basetype = signedness ? SPIRType::Int64 : SPIRType::UInt64; + break; + case 32: + type.basetype = signedness ? SPIRType::Int : SPIRType::UInt; + break; + case 16: + type.basetype = signedness ? SPIRType::Short : SPIRType::UShort; + break; + case 8: + type.basetype = signedness ? SPIRType::SByte : SPIRType::UByte; + break; + default: + SPIRV_CROSS_THROW("Unrecognized bit-width of integral type."); + } + type.width = width; + break; + } + + // Build composite types by "inheriting". + // NOTE: The self member is also copied! For pointers and array modifiers this is a good thing + // since we can refer to decorations on pointee classes which is needed for UBO/SSBO, I/O blocks in geometry/tess etc. + case OpTypeVector: + { + uint32_t id = ops[0]; + uint32_t vecsize = ops[2]; + + auto &base = get(ops[1]); + auto &vecbase = set(id); + + vecbase = base; + vecbase.vecsize = vecsize; + vecbase.self = id; + vecbase.parent_type = ops[1]; + break; + } + + case OpTypeMatrix: + { + uint32_t id = ops[0]; + uint32_t colcount = ops[2]; + + auto &base = get(ops[1]); + auto &matrixbase = set(id); + + matrixbase = base; + matrixbase.columns = colcount; + matrixbase.self = id; + matrixbase.parent_type = ops[1]; + break; + } + + case OpTypeArray: + { + uint32_t id = ops[0]; + auto &arraybase = set(id); + + uint32_t tid = ops[1]; + auto &base = get(tid); + + arraybase = base; + arraybase.parent_type = tid; + + uint32_t cid = ops[2]; + ir.mark_used_as_array_length(cid); + auto *c = maybe_get(cid); + bool literal = c && !c->specialization; + + arraybase.array_size_literal.push_back(literal); + arraybase.array.push_back(literal ? 
c->scalar() : cid); + // Do NOT set arraybase.self! + break; + } + + case OpTypeRuntimeArray: + { + uint32_t id = ops[0]; + + auto &base = get(ops[1]); + auto &arraybase = set(id); + + arraybase = base; + arraybase.array.push_back(0); + arraybase.array_size_literal.push_back(true); + arraybase.parent_type = ops[1]; + // Do NOT set arraybase.self! + break; + } + + case OpTypeImage: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Image; + type.image.type = ops[1]; + type.image.dim = static_cast(ops[2]); + type.image.depth = ops[3] == 1; + type.image.arrayed = ops[4] != 0; + type.image.ms = ops[5] != 0; + type.image.sampled = ops[6]; + type.image.format = static_cast(ops[7]); + type.image.access = (length >= 9) ? static_cast(ops[8]) : AccessQualifierMax; + + if (type.image.sampled == 0) + SPIRV_CROSS_THROW("OpTypeImage Sampled parameter must not be zero."); + + break; + } + + case OpTypeSampledImage: + { + uint32_t id = ops[0]; + uint32_t imagetype = ops[1]; + auto &type = set(id); + type = get(imagetype); + type.basetype = SPIRType::SampledImage; + type.self = id; + break; + } + + case OpTypeSampler: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Sampler; + break; + } + + case OpTypePointer: + { + uint32_t id = ops[0]; + + auto &base = get(ops[2]); + auto &ptrbase = set(id); + + ptrbase = base; + ptrbase.pointer = true; + ptrbase.pointer_depth++; + ptrbase.storage = static_cast(ops[1]); + + if (ptrbase.storage == StorageClassAtomicCounter) + ptrbase.basetype = SPIRType::AtomicCounter; + + ptrbase.parent_type = ops[2]; + + // Do NOT set ptrbase.self! + break; + } + + case OpTypeStruct: + { + uint32_t id = ops[0]; + auto &type = set(id); + type.basetype = SPIRType::Struct; + for (uint32_t i = 1; i < length; i++) + type.member_types.push_back(ops[i]); + + // Check if we have seen this struct type before, with just different + // decorations. + // + // Add workaround for issue #17 as well by looking at OpName for the struct + // types, which we shouldn't normally do. + // We should not normally have to consider type aliases like this to begin with + // however ... glslang issues #304, #307 cover this. + + // For stripped names, never consider struct type aliasing. + // We risk declaring the same struct multiple times, but type-punning is not allowed + // so this is safe. + bool consider_aliasing = !ir.get_name(type.self).empty(); + if (consider_aliasing) + { + for (auto &other : global_struct_cache) + { + if (ir.get_name(type.self) == ir.get_name(other) && + types_are_logically_equivalent(type, get(other))) + { + type.type_alias = other; + break; + } + } + + if (type.type_alias == 0) + global_struct_cache.push_back(id); + } + break; + } + + case OpTypeFunction: + { + uint32_t id = ops[0]; + uint32_t ret = ops[1]; + + auto &func = set(id, ret); + for (uint32_t i = 2; i < length; i++) + func.parameter_types.push_back(ops[i]); + break; + } + + // Variable declaration + // All variables are essentially pointers with a storage qualifier. + case OpVariable: + { + uint32_t type = ops[0]; + uint32_t id = ops[1]; + auto storage = static_cast(ops[2]); + uint32_t initializer = length == 4 ? ops[3] : 0; + + if (storage == StorageClassFunction) + { + if (!current_function) + SPIRV_CROSS_THROW("No function currently in scope"); + current_function->add_local_variable(id); + } + + set(id, type, storage, initializer); + + // hlsl based shaders don't have those decorations. 
force them and then reset when reading/writing images + auto &ttype = get(type); + if (ttype.basetype == SPIRType::BaseType::Image) + { + ir.set_decoration(id, DecorationNonWritable); + ir.set_decoration(id, DecorationNonReadable); + } + + break; + } + + // OpPhi + // OpPhi is a fairly magical opcode. + // It selects temporary variables based on which parent block we *came from*. + // In high-level languages we can "de-SSA" by creating a function local, and flush out temporaries to this function-local + // variable to emulate SSA Phi. + case OpPhi: + { + if (!current_function) + SPIRV_CROSS_THROW("No function currently in scope"); + if (!current_block) + SPIRV_CROSS_THROW("No block currently in scope"); + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + + // Instead of a temporary, create a new function-wide temporary with this ID instead. + auto &var = set(id, result_type, spv::StorageClassFunction); + var.phi_variable = true; + + current_function->add_local_variable(id); + + for (uint32_t i = 2; i + 2 <= length; i += 2) + current_block->phi_variables.push_back({ ops[i], ops[i + 1], id }); + break; + } + + // Constants + case OpSpecConstant: + case OpConstant: + { + uint32_t id = ops[1]; + auto &type = get(ops[0]); + + if (type.width > 32) + set(id, ops[0], ops[2] | (uint64_t(ops[3]) << 32), op == OpSpecConstant); + else + set(id, ops[0], ops[2], op == OpSpecConstant); + break; + } + + case OpSpecConstantFalse: + case OpConstantFalse: + { + uint32_t id = ops[1]; + set(id, ops[0], uint32_t(0), op == OpSpecConstantFalse); + break; + } + + case OpSpecConstantTrue: + case OpConstantTrue: + { + uint32_t id = ops[1]; + set(id, ops[0], uint32_t(1), op == OpSpecConstantTrue); + break; + } + + case OpConstantNull: + { + uint32_t id = ops[1]; + uint32_t type = ops[0]; + make_constant_null(id, type); + break; + } + + case OpSpecConstantComposite: + case OpConstantComposite: + { + uint32_t id = ops[1]; + uint32_t type = ops[0]; + + auto &ctype = get(type); + + // We can have constants which are structs and arrays. + // In this case, our SPIRConstant will be a list of other SPIRConstant ids which we + // can refer to. + if (ctype.basetype == SPIRType::Struct || !ctype.array.empty()) + { + set(id, type, ops + 2, length - 2, op == OpSpecConstantComposite); + } + else + { + uint32_t elements = length - 2; + if (elements > 4) + SPIRV_CROSS_THROW("OpConstantComposite only supports 1, 2, 3 and 4 elements."); + + SPIRConstant remapped_constant_ops[4]; + const SPIRConstant *c[4]; + for (uint32_t i = 0; i < elements; i++) + { + // Specialization constants operations can also be part of this. + // We do not know their value, so any attempt to query SPIRConstant later + // will fail. We can only propagate the ID of the expression and use to_expression on it. 
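+ // Wrap each OpSpecConstantOp element in a placeholder SPIRConstant flagged as a specialization constant,
+ // so the element can still be referenced by ID even though its value is not known at compile time.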
+ auto *constant_op = maybe_get(ops[2 + i]); + if (constant_op) + { + if (op == OpConstantComposite) + SPIRV_CROSS_THROW("Specialization constant operation used in OpConstantComposite."); + + remapped_constant_ops[i].make_null(get(constant_op->basetype)); + remapped_constant_ops[i].self = constant_op->self; + remapped_constant_ops[i].constant_type = constant_op->basetype; + remapped_constant_ops[i].specialization = true; + c[i] = &remapped_constant_ops[i]; + } + else + c[i] = &get(ops[2 + i]); + } + set(id, type, c, elements, op == OpSpecConstantComposite); + } + break; + } + + // Functions + case OpFunction: + { + uint32_t res = ops[0]; + uint32_t id = ops[1]; + // Control + uint32_t type = ops[3]; + + if (current_function) + SPIRV_CROSS_THROW("Must end a function before starting a new one!"); + + current_function = &set(id, res, type); + break; + } + + case OpFunctionParameter: + { + uint32_t type = ops[0]; + uint32_t id = ops[1]; + + if (!current_function) + SPIRV_CROSS_THROW("Must be in a function!"); + + current_function->add_parameter(type, id); + set(id, type, StorageClassFunction); + break; + } + + case OpFunctionEnd: + { + if (current_block) + { + // Very specific error message, but seems to come up quite often. + SPIRV_CROSS_THROW( + "Cannot end a function before ending the current block.\n" + "Likely cause: If this SPIR-V was created from glslang HLSL, make sure the entry point is valid."); + } + current_function = nullptr; + break; + } + + // Blocks + case OpLabel: + { + // OpLabel always starts a block. + if (!current_function) + SPIRV_CROSS_THROW("Blocks cannot exist outside functions!"); + + uint32_t id = ops[0]; + + current_function->blocks.push_back(id); + if (!current_function->entry_block) + current_function->entry_block = id; + + if (current_block) + SPIRV_CROSS_THROW("Cannot start a block before ending the current block."); + + current_block = &set(id); + break; + } + + // Branch instructions end blocks. + case OpBranch: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + + uint32_t target = ops[0]; + current_block->terminator = SPIRBlock::Direct; + current_block->next_block = target; + current_block = nullptr; + break; + } + + case OpBranchConditional: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + + current_block->condition = ops[0]; + current_block->true_block = ops[1]; + current_block->false_block = ops[2]; + + current_block->terminator = SPIRBlock::Select; + current_block = nullptr; + break; + } + + case OpSwitch: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + + if (current_block->merge == SPIRBlock::MergeNone) + SPIRV_CROSS_THROW("Switch statement is not structured"); + + current_block->terminator = SPIRBlock::MultiSelect; + + current_block->condition = ops[0]; + current_block->default_block = ops[1]; + + for (uint32_t i = 2; i + 2 <= length; i += 2) + current_block->cases.push_back({ ops[i], ops[i + 1] }); + + // If we jump to next block, make it break instead since we're inside a switch case block at that point. 
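+ // next_block was recorded by the preceding OpSelectionMerge, so it is the switch's merge target.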
+ ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_MULTISELECT_MERGE_BIT; + + current_block = nullptr; + break; + } + + case OpKill: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Kill; + current_block = nullptr; + break; + } + + case OpReturn: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Return; + current_block = nullptr; + break; + } + + case OpReturnValue: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Return; + current_block->return_value = ops[0]; + current_block = nullptr; + break; + } + + case OpUnreachable: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to end a non-existing block."); + current_block->terminator = SPIRBlock::Unreachable; + current_block = nullptr; + break; + } + + case OpSelectionMerge: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to modify a non-existing block."); + + current_block->next_block = ops[0]; + current_block->merge = SPIRBlock::MergeSelection; + ir.block_meta[current_block->next_block] |= ParsedIR::BLOCK_META_SELECTION_MERGE_BIT; + + if (length >= 2) + { + if (ops[1] & SelectionControlFlattenMask) + current_block->hint = SPIRBlock::HintFlatten; + else if (ops[1] & SelectionControlDontFlattenMask) + current_block->hint = SPIRBlock::HintDontFlatten; + } + break; + } + + case OpLoopMerge: + { + if (!current_block) + SPIRV_CROSS_THROW("Trying to modify a non-existing block."); + + current_block->merge_block = ops[0]; + current_block->continue_block = ops[1]; + current_block->merge = SPIRBlock::MergeLoop; + + ir.block_meta[current_block->self] |= ParsedIR::BLOCK_META_LOOP_HEADER_BIT; + ir.block_meta[current_block->merge_block] |= ParsedIR::BLOCK_META_LOOP_MERGE_BIT; + + ir.continue_block_to_loop_header[current_block->continue_block] = current_block->self; + + // Don't add loop headers to continue blocks, + // which would make it impossible branch into the loop header since + // they are treated as continues. + if (current_block->continue_block != current_block->self) + ir.block_meta[current_block->continue_block] |= ParsedIR::BLOCK_META_CONTINUE_BIT; + + if (length >= 3) + { + if (ops[2] & LoopControlUnrollMask) + current_block->hint = SPIRBlock::HintUnroll; + else if (ops[2] & LoopControlDontUnrollMask) + current_block->hint = SPIRBlock::HintDontUnroll; + } + break; + } + + case OpSpecConstantOp: + { + if (length < 3) + SPIRV_CROSS_THROW("OpSpecConstantOp not enough arguments."); + + uint32_t result_type = ops[0]; + uint32_t id = ops[1]; + auto spec_op = static_cast(ops[2]); + + set(id, result_type, spec_op, ops + 3, length - 3); + break; + } + + // Actual opcodes. 
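+ // Any opcode not handled explicitly above is an ordinary executable instruction;
+ // append it to the current block and let the cross-compiler backends translate it.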
+ default: + { + if (!current_block) + SPIRV_CROSS_THROW("Currently no block to insert opcode."); + + current_block->ops.push_back(instruction); + break; + } + } +} + +bool Parser::types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const +{ + if (a.basetype != b.basetype) + return false; + if (a.width != b.width) + return false; + if (a.vecsize != b.vecsize) + return false; + if (a.columns != b.columns) + return false; + if (a.array.size() != b.array.size()) + return false; + + size_t array_count = a.array.size(); + if (array_count && memcmp(a.array.data(), b.array.data(), array_count * sizeof(uint32_t)) != 0) + return false; + + if (a.basetype == SPIRType::Image || a.basetype == SPIRType::SampledImage) + { + if (memcmp(&a.image, &b.image, sizeof(SPIRType::Image)) != 0) + return false; + } + + if (a.member_types.size() != b.member_types.size()) + return false; + + size_t member_types = a.member_types.size(); + for (size_t i = 0; i < member_types; i++) + { + if (!types_are_logically_equivalent(get(a.member_types[i]), get(b.member_types[i]))) + return false; + } + + return true; +} + +bool Parser::variable_storage_is_aliased(const SPIRVariable &v) const +{ + auto &type = get(v.basetype); + bool ssbo = v.storage == StorageClassStorageBuffer || + ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock); + bool image = type.basetype == SPIRType::Image; + bool counter = type.basetype == SPIRType::AtomicCounter; + + bool is_restrict; + if (ssbo) + is_restrict = ir.get_buffer_block_flags(v).get(DecorationRestrict); + else + is_restrict = ir.has_decoration(v.self, DecorationRestrict); + + return !is_restrict && (ssbo || image || counter); +} + +void Parser::make_constant_null(uint32_t id, uint32_t type) +{ + auto &constant_type = get(type); + + if (constant_type.pointer) + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } + else if (!constant_type.array.empty()) + { + assert(constant_type.parent_type); + uint32_t parent_id = ir.increase_bound_by(1); + make_constant_null(parent_id, constant_type.parent_type); + + if (!constant_type.array_size_literal.back()) + SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal."); + + vector elements(constant_type.array.back()); + for (uint32_t i = 0; i < constant_type.array.back(); i++) + elements[i] = parent_id; + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else if (!constant_type.member_types.empty()) + { + uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size())); + vector elements(constant_type.member_types.size()); + for (uint32_t i = 0; i < constant_type.member_types.size(); i++) + { + make_constant_null(member_ids + i, constant_type.member_types[i]); + elements[i] = member_ids + i; + } + set(id, type, elements.data(), uint32_t(elements.size()), false); + } + else + { + auto &constant = set(id, type); + constant.make_null(constant_type); + } +} + +} // namespace spirv_cross diff --git a/3rdparty/spirv-cross/spirv_parser.hpp b/3rdparty/spirv-cross/spirv_parser.hpp new file mode 100644 index 000000000..81c54987b --- /dev/null +++ b/3rdparty/spirv-cross/spirv_parser.hpp @@ -0,0 +1,94 @@ +/* + * Copyright 2018-2019 Arm Limited + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef SPIRV_CROSS_PARSER_HPP +#define SPIRV_CROSS_PARSER_HPP + +#include "spirv_cross_parsed_ir.hpp" +#include +#include + +namespace spirv_cross +{ +class Parser +{ +public: + Parser(const uint32_t *spirv_data, size_t word_count); + Parser(std::vector spirv); + + void parse(); + + ParsedIR &get_parsed_ir() + { + return ir; + } + +private: + ParsedIR ir; + SPIRFunction *current_function = nullptr; + SPIRBlock *current_block = nullptr; + + void parse(const Instruction &instr); + const uint32_t *stream(const Instruction &instr) const; + + template + T &set(uint32_t id, P &&... args) + { + auto &var = variant_set(ir.ids.at(id), std::forward
<P>
(args)...); + var.self = id; + return var; + } + + template + T &get(uint32_t id) + { + return variant_get(ir.ids.at(id)); + } + + template + T *maybe_get(uint32_t id) + { + if (ir.ids.at(id).get_type() == T::type) + return &get(id); + else + return nullptr; + } + + template + const T &get(uint32_t id) const + { + return variant_get(ir.ids.at(id)); + } + + template + const T *maybe_get(uint32_t id) const + { + if (ir.ids.at(id).get_type() == T::type) + return &get(id); + else + return nullptr; + } + + // This must be an ordered data structure so we always pick the same type aliases. + std::vector global_struct_cache; + + bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const; + bool variable_storage_is_aliased(const SPIRVariable &v) const; + void make_constant_null(uint32_t id, uint32_t type); +}; +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/spirv_reflect.cpp b/3rdparty/spirv-cross/spirv_reflect.cpp new file mode 100644 index 000000000..003073150 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_reflect.cpp @@ -0,0 +1,573 @@ +/* + * Copyright 2018-2019 Bradley Austin Davis + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "spirv_reflect.hpp" +#include "spirv_glsl.hpp" +#include + +using namespace spv; +using namespace spirv_cross; +using namespace std; + +namespace simple_json +{ +enum class Type +{ + Object, + Array, +}; + +using State = std::pair; +using Stack = std::stack; + +class Stream +{ + Stack stack; + std::ostringstream buffer; + uint32_t indent{ 0 }; + +public: + void begin_json_object(); + void end_json_object(); + void emit_json_key(const std::string &key); + void emit_json_key_value(const std::string &key, const std::string &value); + void emit_json_key_value(const std::string &key, bool value); + void emit_json_key_value(const std::string &key, uint32_t value); + void emit_json_key_value(const std::string &key, int32_t value); + void emit_json_key_value(const std::string &key, float value); + void emit_json_key_object(const std::string &key); + void emit_json_key_array(const std::string &key); + + void begin_json_array(); + void end_json_array(); + void emit_json_array_value(const std::string &value); + void emit_json_array_value(uint32_t value); + + std::string str() const + { + return buffer.str(); + } + +private: + inline void statement_indent() + { + for (uint32_t i = 0; i < indent; i++) + buffer << " "; + } + + template + inline void statement_inner(T &&t) + { + buffer << std::forward(t); + } + + template + inline void statement_inner(T &&t, Ts &&... ts) + { + buffer << std::forward(t); + statement_inner(std::forward(ts)...); + } + + template + inline void statement(Ts &&... ts) + { + statement_indent(); + statement_inner(std::forward(ts)...); + buffer << '\n'; + } + + template + void statement_no_return(Ts &&... 
ts) + { + statement_indent(); + statement_inner(std::forward(ts)...); + } +}; +} // namespace simple_json + +using namespace simple_json; + +// Hackery to emit JSON without using nlohmann/json C++ library (which requires a +// higher level of compiler compliance than is required by SPIRV-Cross +void Stream::begin_json_array() +{ + if (!stack.empty() && stack.top().second) + { + statement_inner(",\n"); + } + statement("["); + ++indent; + stack.emplace(Type::Array, false); +} + +void Stream::end_json_array() +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + { + statement_inner("\n"); + } + --indent; + statement_no_return("]"); + stack.pop(); + if (!stack.empty()) + { + stack.top().second = true; + } +} + +void Stream::emit_json_array_value(const std::string &value) +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + + if (stack.top().second) + statement_inner(",\n"); + + statement_no_return("\"", value, "\""); + stack.top().second = true; +} + +void Stream::emit_json_array_value(uint32_t value) +{ + if (stack.empty() || stack.top().first != Type::Array) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + statement_inner(",\n"); + statement_no_return(std::to_string(value)); + stack.top().second = true; +} + +void Stream::begin_json_object() +{ + if (!stack.empty() && stack.top().second) + { + statement_inner(",\n"); + } + statement("{"); + ++indent; + stack.emplace(Type::Object, false); +} + +void Stream::end_json_object() +{ + if (stack.empty() || stack.top().first != Type::Object) + SPIRV_CROSS_THROW("Invalid JSON state"); + if (stack.top().second) + { + statement_inner("\n"); + } + --indent; + statement_no_return("}"); + stack.pop(); + if (!stack.empty()) + { + stack.top().second = true; + } +} + +void Stream::emit_json_key(const std::string &key) +{ + if (stack.empty() || stack.top().first != Type::Object) + SPIRV_CROSS_THROW("Invalid JSON state"); + + if (stack.top().second) + statement_inner(",\n"); + statement_no_return("\"", key, "\" : "); + stack.top().second = true; +} + +void Stream::emit_json_key_value(const std::string &key, const std::string &value) +{ + emit_json_key(key); + statement_inner("\"", value, "\""); +} + +void Stream::emit_json_key_value(const std::string &key, uint32_t value) +{ + emit_json_key(key); + statement_inner(value); +} + +void Stream::emit_json_key_value(const std::string &key, int32_t value) +{ + emit_json_key(key); + statement_inner(value); +} + +void Stream::emit_json_key_value(const std::string &key, float value) +{ + emit_json_key(key); + statement_inner(value); +} + +void Stream::emit_json_key_value(const std::string &key, bool value) +{ + emit_json_key(key); + statement_inner(value ? "true" : "false"); +} + +void Stream::emit_json_key_object(const std::string &key) +{ + emit_json_key(key); + statement_inner("{\n"); + ++indent; + stack.emplace(Type::Object, false); +} + +void Stream::emit_json_key_array(const std::string &key) +{ + emit_json_key(key); + statement_inner("[\n"); + ++indent; + stack.emplace(Type::Array, false); +} + +void CompilerReflection::set_format(const std::string &format) +{ + if (format != "json") + { + SPIRV_CROSS_THROW("Unsupported format"); + } +} + +string CompilerReflection::compile() +{ + // Force a classic "C" locale, reverts when function returns + ClassicLocale classic_locale; + + // Move constructor for this type is broken on GCC 4.9 ... 
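+ // Hence the Stream is held behind a shared_ptr instead of by value.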
+ json_stream = std::make_shared(); + json_stream->begin_json_object(); + emit_entry_points(); + emit_types(); + emit_resources(); + emit_specialization_constants(); + json_stream->end_json_object(); + return json_stream->str(); +} + +void CompilerReflection::emit_types() +{ + bool emitted_open_tag = false; + for (auto &id : ir.ids) + { + auto idType = id.get_type(); + if (idType == TypeType) + { + auto &type = id.get(); + if (type.basetype == SPIRType::Struct && !type.pointer && type.array.empty()) + { + emit_type(type, emitted_open_tag); + } + } + } + + if (emitted_open_tag) + { + json_stream->end_json_object(); + } +} + +void CompilerReflection::emit_type(const SPIRType &type, bool &emitted_open_tag) +{ + auto name = type_to_glsl(type); + + if (type.type_alias != 0) + return; + + if (!emitted_open_tag) + { + json_stream->emit_json_key_object("types"); + emitted_open_tag = true; + } + json_stream->emit_json_key_object("_" + std::to_string(type.self)); + json_stream->emit_json_key_value("name", name); + json_stream->emit_json_key_array("members"); + // FIXME ideally we'd like to emit the size of a structure as a + // convenience to people parsing the reflected JSON. The problem + // is that there's no implicit size for a type. It's final size + // will be determined by the top level declaration in which it's + // included. So there might be one size for the struct if it's + // included in a std140 uniform block and another if it's included + // in a std430 uniform block. + // The solution is to include *all* potential sizes as a map of + // layout type name to integer, but that will probably require + // some additional logic being written in this class, or in the + // parent CompilerGLSL class. + auto size = type.member_types.size(); + for (uint32_t i = 0; i < size; ++i) + { + emit_type_member(type, i); + } + json_stream->end_json_array(); + json_stream->end_json_object(); +} + +void CompilerReflection::emit_type_member(const SPIRType &type, uint32_t index) +{ + auto &membertype = get(type.member_types[index]); + json_stream->begin_json_object(); + auto name = to_member_name(type, index); + // FIXME we'd like to emit the offset of each member, but such offsets are + // context dependent. See the comment above regarding structure sizes + json_stream->emit_json_key_value("name", name); + if (membertype.basetype == SPIRType::Struct) + { + json_stream->emit_json_key_value("type", "_" + std::to_string(membertype.self)); + } + else + { + json_stream->emit_json_key_value("type", type_to_glsl(membertype)); + } + emit_type_member_qualifiers(type, index); + json_stream->end_json_object(); +} + +void CompilerReflection::emit_type_array(const SPIRType &type) +{ + if (!type.array.empty()) + { + json_stream->emit_json_key_array("array"); + // Note that we emit the zeros here as a means of identifying + // unbounded arrays. 
This is necessary as otherwise there would + // be no way of differentiating between float[4] and float[4][] + for (const auto &value : type.array) + json_stream->emit_json_array_value(value); + json_stream->end_json_array(); + } +} + +void CompilerReflection::emit_type_member_qualifiers(const SPIRType &type, uint32_t index) +{ + auto flags = combined_decoration_for_member(type, index); + if (flags.get(DecorationRowMajor)) + json_stream->emit_json_key_value("row_major", true); + + auto &membertype = get(type.member_types[index]); + emit_type_array(membertype); + auto &memb = ir.meta[type.self].members; + if (index < memb.size()) + { + auto &dec = memb[index]; + if (dec.decoration_flags.get(DecorationLocation)) + json_stream->emit_json_key_value("location", dec.location); + if (dec.decoration_flags.get(DecorationOffset)) + json_stream->emit_json_key_value("offset", dec.offset); + } +} + +string CompilerReflection::execution_model_to_str(spv::ExecutionModel model) +{ + switch (model) + { + case spv::ExecutionModelVertex: + return "vert"; + case spv::ExecutionModelTessellationControl: + return "tesc"; + case ExecutionModelTessellationEvaluation: + return "tese"; + case ExecutionModelGeometry: + return "geom"; + case ExecutionModelFragment: + return "frag"; + case ExecutionModelGLCompute: + return "comp"; + default: + return "???"; + } +} + +// FIXME include things like the local_size dimensions, geometry output vertex count, etc +void CompilerReflection::emit_entry_points() +{ + auto entries = get_entry_points_and_stages(); + if (!entries.empty()) + { + json_stream->emit_json_key_array("entryPoints"); + for (auto &e : entries) + { + json_stream->begin_json_object(); + json_stream->emit_json_key_value("name", e.name); + json_stream->emit_json_key_value("mode", execution_model_to_str(e.execution_model)); + json_stream->end_json_object(); + } + json_stream->end_json_array(); + } +} + +void CompilerReflection::emit_resources() +{ + auto res = get_shader_resources(); + emit_resources("subpass_inputs", res.subpass_inputs); + emit_resources("inputs", res.stage_inputs); + emit_resources("outputs", res.stage_outputs); + emit_resources("textures", res.sampled_images); + emit_resources("separate_images", res.separate_images); + emit_resources("separate_samplers", res.separate_samplers); + emit_resources("images", res.storage_images); + emit_resources("ssbos", res.storage_buffers); + emit_resources("ubos", res.uniform_buffers); + emit_resources("push_constants", res.push_constant_buffers); + emit_resources("counters", res.atomic_counters); +} + +void CompilerReflection::emit_resources(const char *tag, const vector &resources) +{ + if (resources.empty()) + { + return; + } + + json_stream->emit_json_key_array(tag); + for (auto &res : resources) + { + auto &type = get_type(res.type_id); + auto typeflags = ir.meta[type.self].decoration.decoration_flags; + auto &mask = get_decoration_bitset(res.id); + + // If we don't have a name, use the fallback for the type instead of the variable + // for SSBOs and UBOs since those are the only meaningful names to use externally. + // Push constant blocks are still accessed by name and not block name, even though they are technically Blocks. + bool is_push_constant = get_storage_class(res.id) == StorageClassPushConstant; + bool is_block = get_decoration_bitset(type.self).get(DecorationBlock) || + get_decoration_bitset(type.self).get(DecorationBufferBlock); + + uint32_t fallback_id = !is_push_constant && is_block ? 
res.base_type_id : res.id; + + json_stream->begin_json_object(); + + if (type.basetype == SPIRType::Struct) + { + json_stream->emit_json_key_value("type", "_" + std::to_string(res.base_type_id)); + } + else + { + json_stream->emit_json_key_value("type", type_to_glsl(type)); + } + + json_stream->emit_json_key_value("name", !res.name.empty() ? res.name : get_fallback_name(fallback_id)); + { + bool ssbo_block = type.storage == StorageClassStorageBuffer || + (type.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock)); + if (ssbo_block) + { + auto buffer_flags = get_buffer_block_flags(res.id); + if (buffer_flags.get(DecorationNonReadable)) + json_stream->emit_json_key_value("writeonly", true); + if (buffer_flags.get(DecorationNonWritable)) + json_stream->emit_json_key_value("readonly", true); + if (buffer_flags.get(DecorationRestrict)) + json_stream->emit_json_key_value("restrict", true); + if (buffer_flags.get(DecorationCoherent)) + json_stream->emit_json_key_value("coherent", true); + } + } + + emit_type_array(type); + + { + bool is_sized_block = is_block && (get_storage_class(res.id) == StorageClassUniform || + get_storage_class(res.id) == StorageClassUniformConstant); + if (is_sized_block) + { + uint32_t block_size = uint32_t(get_declared_struct_size(get_type(res.base_type_id))); + json_stream->emit_json_key_value("block_size", block_size); + } + } + + if (type.storage == StorageClassPushConstant) + json_stream->emit_json_key_value("push_constant", true); + if (mask.get(DecorationLocation)) + json_stream->emit_json_key_value("location", get_decoration(res.id, DecorationLocation)); + if (mask.get(DecorationRowMajor)) + json_stream->emit_json_key_value("row_major", true); + if (mask.get(DecorationColMajor)) + json_stream->emit_json_key_value("column_major", true); + if (mask.get(DecorationIndex)) + json_stream->emit_json_key_value("index", get_decoration(res.id, DecorationIndex)); + if (type.storage != StorageClassPushConstant && mask.get(DecorationDescriptorSet)) + json_stream->emit_json_key_value("set", get_decoration(res.id, DecorationDescriptorSet)); + if (mask.get(DecorationBinding)) + json_stream->emit_json_key_value("binding", get_decoration(res.id, DecorationBinding)); + if (mask.get(DecorationInputAttachmentIndex)) + json_stream->emit_json_key_value("input_attachment_index", + get_decoration(res.id, DecorationInputAttachmentIndex)); + if (mask.get(DecorationOffset)) + json_stream->emit_json_key_value("offset", get_decoration(res.id, DecorationOffset)); + + // For images, the type itself adds a layout qualifer. + // Only emit the format for storage images. 
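+ // In OpTypeImage, Sampled == 2 means the image is used without a sampler (a storage image),
+ // whereas 1 denotes a sampled image.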
+ if (type.basetype == SPIRType::Image && type.image.sampled == 2) + { + const char *fmt = format_to_glsl(type.image.format); + if (fmt != nullptr) + json_stream->emit_json_key_value("format", std::string(fmt)); + } + json_stream->end_json_object(); + } + json_stream->end_json_array(); +} + +void CompilerReflection::emit_specialization_constants() +{ + auto specialization_constants = get_specialization_constants(); + if (specialization_constants.empty()) + return; + + json_stream->emit_json_key_array("specialization_constants"); + for (const auto spec_const : specialization_constants) + { + auto &c = get(spec_const.id); + auto type = get(c.constant_type); + json_stream->begin_json_object(); + json_stream->emit_json_key_value("id", spec_const.constant_id); + json_stream->emit_json_key_value("type", type_to_glsl(type)); + switch (type.basetype) + { + case SPIRType::UInt: + json_stream->emit_json_key_value("default_value", c.scalar()); + break; + + case SPIRType::Int: + json_stream->emit_json_key_value("default_value", c.scalar_i32()); + break; + + case SPIRType::Float: + json_stream->emit_json_key_value("default_value", c.scalar_f32()); + break; + + case SPIRType::Boolean: + json_stream->emit_json_key_value("default_value", c.scalar() != 0); + break; + + default: + break; + } + json_stream->end_json_object(); + } + json_stream->end_json_array(); +} + +string CompilerReflection::to_member_name(const SPIRType &type, uint32_t index) const +{ + auto &memb = ir.meta[type.self].members; + if (index < memb.size() && !memb[index].alias.empty()) + return memb[index].alias; + else + return join("_m", index); +} diff --git a/3rdparty/spirv-cross/spirv_reflect.hpp b/3rdparty/spirv-cross/spirv_reflect.hpp new file mode 100644 index 000000000..13b5b4312 --- /dev/null +++ b/3rdparty/spirv-cross/spirv_reflect.hpp @@ -0,0 +1,84 @@ +/* + * Copyright 2018-2019 Bradley Austin Davis + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef SPIRV_CROSS_REFLECT_HPP +#define SPIRV_CROSS_REFLECT_HPP + +#include "spirv_glsl.hpp" +#include +#include + +namespace simple_json +{ +class Stream; +} + +namespace spirv_cross +{ +class CompilerReflection : public CompilerGLSL +{ + using Parent = CompilerGLSL; + +public: + explicit CompilerReflection(std::vector spirv_) + : Parent(move(spirv_)) + { + options.vulkan_semantics = true; + } + + CompilerReflection(const uint32_t *ir_, size_t word_count) + : Parent(ir_, word_count) + { + options.vulkan_semantics = true; + } + + explicit CompilerReflection(const ParsedIR &ir_) + : CompilerGLSL(ir_) + { + options.vulkan_semantics = true; + } + + explicit CompilerReflection(ParsedIR &&ir_) + : CompilerGLSL(std::move(ir_)) + { + options.vulkan_semantics = true; + } + + void set_format(const std::string &format); + std::string compile() override; + +private: + static std::string execution_model_to_str(spv::ExecutionModel model); + + void emit_entry_points(); + void emit_types(); + void emit_resources(); + void emit_specialization_constants(); + + void emit_type(const SPIRType &type, bool &emitted_open_tag); + void emit_type_member(const SPIRType &type, uint32_t index); + void emit_type_member_qualifiers(const SPIRType &type, uint32_t index); + void emit_type_array(const SPIRType &type); + void emit_resources(const char *tag, const std::vector &resources); + + std::string to_member_name(const SPIRType &type, uint32_t index) const; + + std::shared_ptr json_stream; +}; + +} // namespace spirv_cross + +#endif diff --git a/3rdparty/spirv-cross/test_shaders.py b/3rdparty/spirv-cross/test_shaders.py new file mode 100755 index 000000000..045c2551b --- /dev/null +++ b/3rdparty/spirv-cross/test_shaders.py @@ -0,0 +1,639 @@ +#!/usr/bin/env python3 + +import sys +import os +import os.path +import subprocess +import tempfile +import re +import itertools +import hashlib +import shutil +import argparse +import codecs +import json +import multiprocessing +import errno +from functools import partial + +def remove_file(path): + #print('Removing file:', path) + os.remove(path) + +def create_temporary(suff = ''): + f, path = tempfile.mkstemp(suffix = suff) + os.close(f) + #print('Creating temporary:', path) + return path + +def parse_stats(stats): + m = re.search('([0-9]+) work registers', stats) + registers = int(m.group(1)) if m else 0 + + m = re.search('([0-9]+) uniform registers', stats) + uniform_regs = int(m.group(1)) if m else 0 + + m_list = re.findall('(-?[0-9]+)\s+(-?[0-9]+)\s+(-?[0-9]+)', stats) + alu_short = float(m_list[1][0]) if m_list else 0 + ls_short = float(m_list[1][1]) if m_list else 0 + tex_short = float(m_list[1][2]) if m_list else 0 + alu_long = float(m_list[2][0]) if m_list else 0 + ls_long = float(m_list[2][1]) if m_list else 0 + tex_long = float(m_list[2][2]) if m_list else 0 + + return (registers, uniform_regs, alu_short, ls_short, tex_short, alu_long, ls_long, tex_long) + +def get_shader_type(shader): + _, ext = os.path.splitext(shader) + if ext == '.vert': + return '--vertex' + elif ext == '.frag': + return '--fragment' + elif ext == '.comp': + return '--compute' + elif ext == '.tesc': + return '--tessellation_control' + elif ext == '.tese': + return '--tessellation_evaluation' + elif ext == '.geom': + return '--geometry' + else: + return '' + +def get_shader_stats(shader): + path = create_temporary() + + p = subprocess.Popen(['malisc', get_shader_type(shader), '--core', 'Mali-T760', '-V', shader], stdout = subprocess.PIPE, stderr = subprocess.PIPE) + stdout, stderr = 
p.communicate() + remove_file(path) + + if p.returncode != 0: + print(stderr.decode('utf-8')) + raise OSError('malisc failed') + p.wait() + + returned = stdout.decode('utf-8') + return parse_stats(returned) + +def print_msl_compiler_version(): + try: + subprocess.check_call(['xcrun', '--sdk', 'iphoneos', 'metal', '--version']) + print('...are the Metal compiler characteristics.\n') # display after so xcrun FNF is silent + except OSError as e: + if (e.errno != errno.ENOENT): # Ignore xcrun not found error + raise + +def path_to_msl_standard(shader): + if '.ios.' in shader: + if '.msl2.' in shader: + return '-std=ios-metal2.0' + elif '.msl21.' in shader: + return '-std=ios-metal2.1' + elif '.msl11.' in shader: + return '-std=ios-metal1.1' + elif '.msl10.' in shader: + return '-std=ios-metal1.0' + else: + return '-std=ios-metal1.2' + else: + if '.msl2.' in shader: + return '-std=macos-metal2.0' + elif '.msl21.' in shader: + return '-std=macos-metal2.1' + elif '.msl11.' in shader: + return '-std=macos-metal1.1' + else: + return '-std=macos-metal1.2' + +def path_to_msl_standard_cli(shader): + if '.msl2.' in shader: + return '20000' + elif '.msl21.' in shader: + return '20100' + elif '.msl11.' in shader: + return '10100' + else: + return '10200' + +def validate_shader_msl(shader, opt): + msl_path = reference_path(shader[0], shader[1], opt) + try: + if '.ios.' in msl_path: + msl_os = 'iphoneos' + else: + msl_os = 'macosx' + subprocess.check_call(['xcrun', '--sdk', msl_os, 'metal', '-x', 'metal', path_to_msl_standard(msl_path), '-Werror', '-Wno-unused-variable', msl_path]) + print('Compiled Metal shader: ' + msl_path) # display after so xcrun FNF is silent + except OSError as oe: + if (oe.errno != errno.ENOENT): # Ignore xcrun not found error + raise + except subprocess.CalledProcessError: + print('Error compiling Metal shader: ' + msl_path) + sys.exit(1) + +def cross_compile_msl(shader, spirv, opt): + spirv_path = create_temporary() + msl_path = create_temporary(os.path.basename(shader)) + + if spirv: + subprocess.check_call(['spirv-as', '-o', spirv_path, shader]) + else: + subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader]) + + if opt: + subprocess.check_call(['spirv-opt', '--skip-validation', '-O', '-o', spirv_path, spirv_path]) + + spirv_cross_path = './spirv-cross' + + msl_args = [spirv_cross_path, '--entry', 'main', '--output', msl_path, spirv_path, '--msl'] + msl_args.append('--msl-version') + msl_args.append(path_to_msl_standard_cli(shader)) + if '.swizzle.' in shader: + msl_args.append('--msl-swizzle-texture-samples') + if '.ios.' in shader: + msl_args.append('--msl-ios') + + subprocess.check_call(msl_args) + + if not shader_is_invalid_spirv(msl_path): + subprocess.check_call(['spirv-val', '--target-env', 'vulkan1.1', spirv_path]) + + return (spirv_path, msl_path) + +def shader_model_hlsl(shader): + if '.vert' in shader: + if '.sm30.' in shader: + return '-Tvs_3_0' + else: + return '-Tvs_5_1' + elif '.frag' in shader: + if '.sm30.' in shader: + return '-Tps_3_0' + else: + return '-Tps_5_1' + elif '.comp' in shader: + return '-Tcs_5_1' + else: + return None + +def shader_to_win_path(shader): + # It's (very) convenient to be able to run HLSL testing in wine on Unix-likes, so support that. 
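+ # winepath translates the Unix path into the Windows-style path fxc expects under wine;
+ # if winepath is unavailable, the original path is returned unchanged.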
+ try: + with subprocess.Popen(['winepath', '-w', shader], stdout = subprocess.PIPE, stderr = subprocess.PIPE) as f: + stdout_data, stderr_data = f.communicate() + return stdout_data.decode('utf-8') + except OSError as oe: + if (oe.errno != errno.ENOENT): # Ignore not found errors + return shader + except subprocess.CalledProcessError: + raise + + return shader + +ignore_fxc = False +def validate_shader_hlsl(shader, force_no_external_validation): + subprocess.check_call(['glslangValidator', '-e', 'main', '-D', '--target-env', 'vulkan1.1', '-V', shader]) + is_no_fxc = '.nofxc.' in shader + global ignore_fxc + if (not ignore_fxc) and (not force_no_external_validation) and (not is_no_fxc): + try: + win_path = shader_to_win_path(shader) + subprocess.check_call(['fxc', '-nologo', shader_model_hlsl(shader), win_path]) + except OSError as oe: + if (oe.errno != errno.ENOENT): # Ignore not found errors + raise + else: + ignore_fxc = True + except subprocess.CalledProcessError: + print('Failed compiling HLSL shader:', shader, 'with FXC.') + sys.exit(1) + +def shader_to_sm(shader): + if '.sm60.' in shader: + return '60' + elif '.sm51.' in shader: + return '51' + elif '.sm30.' in shader: + return '30' + else: + return '50' + +def cross_compile_hlsl(shader, spirv, opt, force_no_external_validation): + spirv_path = create_temporary() + hlsl_path = create_temporary(os.path.basename(shader)) + + if spirv: + subprocess.check_call(['spirv-as', '-o', spirv_path, shader]) + else: + subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader]) + + if opt: + subprocess.check_call(['spirv-opt', '--skip-validation', '-O', '-o', spirv_path, spirv_path]) + + spirv_cross_path = './spirv-cross' + + sm = shader_to_sm(shader) + subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', hlsl_path, spirv_path, '--hlsl-enable-compat', '--hlsl', '--shader-model', sm]) + + if not shader_is_invalid_spirv(hlsl_path): + subprocess.check_call(['spirv-val', '--target-env', 'vulkan1.1', spirv_path]) + + validate_shader_hlsl(hlsl_path, force_no_external_validation) + + return (spirv_path, hlsl_path) + +def cross_compile_reflect(shader, spirv, opt): + spirv_path = create_temporary() + reflect_path = create_temporary(os.path.basename(shader)) + + if spirv: + subprocess.check_call(['spirv-as', '-o', spirv_path, shader]) + else: + subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader]) + + if opt: + subprocess.check_call(['spirv-opt', '--skip-validation', '-O', '-o', spirv_path, spirv_path]) + + spirv_cross_path = './spirv-cross' + + sm = shader_to_sm(shader) + subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', reflect_path, spirv_path, '--reflect']) + return (spirv_path, reflect_path) + +def validate_shader(shader, vulkan): + if vulkan: + subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', shader]) + else: + subprocess.check_call(['glslangValidator', shader]) + +def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt): + spirv_path = create_temporary() + glsl_path = create_temporary(os.path.basename(shader)) + + if vulkan or spirv: + vulkan_glsl_path = create_temporary('vk' + os.path.basename(shader)) + + if spirv: + subprocess.check_call(['spirv-as', '-o', spirv_path, shader]) + else: + subprocess.check_call(['glslangValidator', '--target-env', 'vulkan1.1', '-V', '-o', spirv_path, shader]) + + if opt and (not invalid_spirv): + 
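+ # Optimize the temporary SPIR-V in place with spirv-opt before handing it to spirv-cross.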
subprocess.check_call(['spirv-opt', '--skip-validation', '-O', '-o', spirv_path, spirv_path]) + + if not invalid_spirv: + subprocess.check_call(['spirv-val', '--target-env', 'vulkan1.1', spirv_path]) + + extra_args = [] + if eliminate: + extra_args += ['--remove-unused-variables'] + if is_legacy: + extra_args += ['--version', '100', '--es'] + if flatten_ubo: + extra_args += ['--flatten-ubo'] + if sso: + extra_args += ['--separate-shader-objects'] + if flatten_dim: + extra_args += ['--flatten-multidimensional-arrays'] + + spirv_cross_path = './spirv-cross' + + # A shader might not be possible to make valid GLSL from, skip validation for this case. + if not ('nocompat' in glsl_path): + subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', glsl_path, spirv_path] + extra_args) + validate_shader(glsl_path, False) + else: + remove_file(glsl_path) + glsl_path = None + + if vulkan or spirv: + subprocess.check_call([spirv_cross_path, '--entry', 'main', '--vulkan-semantics', '--output', vulkan_glsl_path, spirv_path] + extra_args) + validate_shader(vulkan_glsl_path, True) + # SPIR-V shaders might just want to validate Vulkan GLSL output, we don't always care about the output. + if not vulkan: + remove_file(vulkan_glsl_path) + + return (spirv_path, glsl_path, vulkan_glsl_path if vulkan else None) + +def make_unix_newline(buf): + decoded = codecs.decode(buf, 'utf-8') + decoded = decoded.replace('\r', '') + return codecs.encode(decoded, 'utf-8') + +def md5_for_file(path): + md5 = hashlib.md5() + with open(path, 'rb') as f: + for chunk in iter(lambda: make_unix_newline(f.read(8192)), b''): + md5.update(chunk) + return md5.digest() + +def make_reference_dir(path): + base = os.path.dirname(path) + if not os.path.exists(base): + os.makedirs(base) + +def reference_path(directory, relpath, opt): + split_paths = os.path.split(directory) + reference_dir = os.path.join(split_paths[0], 'reference/' + ('opt/' if opt else '')) + reference_dir = os.path.join(reference_dir, split_paths[1]) + return os.path.join(reference_dir, relpath) + +def json_ordered(obj): + if isinstance(obj, dict): + return sorted((k, json_ordered(v)) for k, v in obj.items()) + if isinstance(obj, list): + return sorted(json_ordered(x) for x in obj) + else: + return obj + +def json_compare(json_a, json_b): + return json_ordered(json_a) == json_ordered(json_b) + +def regression_check_reflect(shader, json_file, update, keep, opt): + reference = reference_path(shader[0], shader[1], opt) + '.json' + joined_path = os.path.join(shader[0], shader[1]) + print('Reference shader reflection path:', reference) + if os.path.exists(reference): + actual = '' + expected = '' + with open(json_file) as f: + actual_json = f.read(); + actual = json.loads(actual_json) + with open(reference) as f: + expected = json.load(f) + if (json_compare(actual, expected) != True): + if update: + print('Generated reflection json has changed for {}!'.format(reference)) + # If we expect changes, update the reference file. + if os.path.exists(reference): + remove_file(reference) + make_reference_dir(reference) + shutil.move(json_file, reference) + else: + print('Generated reflection json in {} does not match reference {}!'.format(json_file, reference)) + with open(json_file, 'r') as f: + print('') + print('Generated:') + print('======================') + print(f.read()) + print('======================') + print('') + + # Otherwise, fail the test. Keep the shader file around so we can inspect. 
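+ # With --keep the generated JSON stays on disk for inspection; otherwise it is deleted before exiting with a failure code.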
+ if not keep: + remove_file(json_file) + sys.exit(1) + else: + remove_file(json_file) + else: + print('Found new shader {}. Placing generated source code in {}'.format(joined_path, reference)) + make_reference_dir(reference) + shutil.move(json_file, reference) + +def regression_check(shader, glsl, update, keep, opt): + reference = reference_path(shader[0], shader[1], opt) + joined_path = os.path.join(shader[0], shader[1]) + print('Reference shader path:', reference) + + if os.path.exists(reference): + if md5_for_file(glsl) != md5_for_file(reference): + if update: + print('Generated source code has changed for {}!'.format(reference)) + # If we expect changes, update the reference file. + if os.path.exists(reference): + remove_file(reference) + make_reference_dir(reference) + shutil.move(glsl, reference) + else: + print('Generated source code in {} does not match reference {}!'.format(glsl, reference)) + with open(glsl, 'r') as f: + print('') + print('Generated:') + print('======================') + print(f.read()) + print('======================') + print('') + + # Otherwise, fail the test. Keep the shader file around so we can inspect. + if not keep: + remove_file(glsl) + sys.exit(1) + else: + remove_file(glsl) + else: + print('Found new shader {}. Placing generated source code in {}'.format(joined_path, reference)) + make_reference_dir(reference) + shutil.move(glsl, reference) + +def shader_is_vulkan(shader): + return '.vk.' in shader + +def shader_is_desktop(shader): + return '.desktop.' in shader + +def shader_is_eliminate_dead_variables(shader): + return '.noeliminate.' not in shader + +def shader_is_spirv(shader): + return '.asm.' in shader + +def shader_is_invalid_spirv(shader): + return '.invalid.' in shader + +def shader_is_legacy(shader): + return '.legacy.' in shader + +def shader_is_flatten_ubo(shader): + return '.flatten.' in shader + +def shader_is_sso(shader): + return '.sso.' in shader + +def shader_is_flatten_dimensions(shader): + return '.flatten_dim.' in shader + +def shader_is_noopt(shader): + return '.noopt.' in shader + +def test_shader(stats, shader, update, keep, opt): + joined_path = os.path.join(shader[0], shader[1]) + vulkan = shader_is_vulkan(shader[1]) + desktop = shader_is_desktop(shader[1]) + eliminate = shader_is_eliminate_dead_variables(shader[1]) + is_spirv = shader_is_spirv(shader[1]) + invalid_spirv = shader_is_invalid_spirv(shader[1]) + is_legacy = shader_is_legacy(shader[1]) + flatten_ubo = shader_is_flatten_ubo(shader[1]) + sso = shader_is_sso(shader[1]) + flatten_dim = shader_is_flatten_dimensions(shader[1]) + noopt = shader_is_noopt(shader[1]) + + print('Testing shader:', joined_path) + spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt and (not noopt)) + + # Only test GLSL stats if we have a shader following GL semantics. 
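+ # The malisc stats path only applies to plain GL-semantics GLSL, so Vulkan shaders,
+ # raw SPIR-V assembly inputs and desktop-only shaders are excluded.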
+ if stats and (not vulkan) and (not is_spirv) and (not desktop): + cross_stats = get_shader_stats(glsl) + + if glsl: + regression_check(shader, glsl, update, keep, opt) + if vulkan_glsl: + regression_check((shader[0], shader[1] + '.vk'), vulkan_glsl, update, keep, opt) + + remove_file(spirv) + + if stats and (not vulkan) and (not is_spirv) and (not desktop): + pristine_stats = get_shader_stats(joined_path) + + a = [] + a.append(shader[1]) + for i in pristine_stats: + a.append(str(i)) + for i in cross_stats: + a.append(str(i)) + print(','.join(a), file = stats) + +def test_shader_msl(stats, shader, update, keep, opt, force_no_external_validation): + joined_path = os.path.join(shader[0], shader[1]) + print('\nTesting MSL shader:', joined_path) + is_spirv = shader_is_spirv(shader[1]) + noopt = shader_is_noopt(shader[1]) + spirv, msl = cross_compile_msl(joined_path, is_spirv, opt and (not noopt)) + regression_check(shader, msl, update, keep, opt) + + # Uncomment the following line to print the temp SPIR-V file path. + # This temp SPIR-V file is not deleted until after the Metal validation step below. + # If Metal validation fails, the temp SPIR-V file can be copied out and + # used as input to an invocation of spirv-cross to debug from Xcode directly. + # To do so, build spriv-cross using `make DEBUG=1`, then run the spriv-cross + # executable from Xcode using args: `--msl --entry main --output msl_path spirv_path`. +# print('SPRIV shader: ' + spirv) + + if not force_no_external_validation: + validate_shader_msl(shader, opt) + + remove_file(spirv) + +def test_shader_hlsl(stats, shader, update, keep, opt, force_no_external_validation): + joined_path = os.path.join(shader[0], shader[1]) + print('Testing HLSL shader:', joined_path) + is_spirv = shader_is_spirv(shader[1]) + noopt = shader_is_noopt(shader[1]) + spirv, hlsl = cross_compile_hlsl(joined_path, is_spirv, opt and (not noopt), force_no_external_validation) + regression_check(shader, hlsl, update, keep, opt) + remove_file(spirv) + +def test_shader_reflect(stats, shader, update, keep, opt): + joined_path = os.path.join(shader[0], shader[1]) + print('Testing shader reflection:', joined_path) + is_spirv = shader_is_spirv(shader[1]) + noopt = shader_is_noopt(shader[1]) + spirv, reflect = cross_compile_reflect(joined_path, is_spirv, opt and (not noopt)) + regression_check_reflect(shader, reflect, update, keep, opt) + remove_file(spirv) + +def test_shader_file(relpath, stats, shader_dir, update, keep, opt, force_no_external_validation, backend): + if backend == 'msl': + test_shader_msl(stats, (shader_dir, relpath), update, keep, opt, force_no_external_validation) + elif backend == 'hlsl': + test_shader_hlsl(stats, (shader_dir, relpath), update, keep, opt, force_no_external_validation) + elif backend == 'reflect': + test_shader_reflect(stats, (shader_dir, relpath), update, keep, opt) + else: + test_shader(stats, (shader_dir, relpath), update, keep, opt) + +def test_shaders_helper(stats, backend, args): + all_files = [] + for root, dirs, files in os.walk(os.path.join(args.folder)): + files = [ f for f in files if not f.startswith(".") ] #ignore system files (esp OSX) + for i in files: + path = os.path.join(root, i) + relpath = os.path.relpath(path, args.folder) + all_files.append(relpath) + + # The child processes in parallel execution mode don't have the proper state for the global args variable, so + # at this point we need to switch to explicit arguments + if args.parallel: + pool = multiprocessing.Pool(multiprocessing.cpu_count()) + 
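+ # functools.partial binds every argument except the shader path, since Pool.map forwards exactly one item from the iterable per call.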
+		pool.map(partial(test_shader_file,
+			stats = stats,
+			shader_dir = args.folder,
+			update = args.update,
+			keep = args.keep,
+			opt = args.opt,
+			force_no_external_validation = args.force_no_external_validation,
+			backend = backend), all_files)
+	else:
+		for i in all_files:
+			test_shader_file(i, stats, args.folder, args.update, args.keep, args.opt, args.force_no_external_validation, backend)
+
+def test_shaders(backend, args):
+	if args.malisc:
+		with open('stats.csv', 'w') as stats:
+			print('Shader,OrigRegs,OrigUniRegs,OrigALUShort,OrigLSShort,OrigTEXShort,OrigALULong,OrigLSLong,OrigTEXLong,CrossRegs,CrossUniRegs,CrossALUShort,CrossLSShort,CrossTEXShort,CrossALULong,CrossLSLong,CrossTEXLong', file = stats)
+			test_shaders_helper(stats, backend, args)
+	else:
+		test_shaders_helper(None, backend, args)
+
+def main():
+	parser = argparse.ArgumentParser(description = 'Script for regression testing.')
+	parser.add_argument('folder',
+			help = 'Folder containing shader files to test.')
+	parser.add_argument('--update',
+			action = 'store_true',
+			help = 'Updates reference files if there is a mismatch. Use when legitimate changes in output are found.')
+	parser.add_argument('--keep',
+			action = 'store_true',
+			help = 'Leave generated GLSL shaders on disk if they fail regression. Useful for debugging.')
+	parser.add_argument('--malisc',
+			action = 'store_true',
+			help = 'Use malisc offline compiler to determine static cycle counts before and after spirv-cross.')
+	parser.add_argument('--msl',
+			action = 'store_true',
+			help = 'Test Metal backend.')
+	parser.add_argument('--metal',
+			action = 'store_true',
+			help = 'Deprecated Metal option. Use --msl instead.')
+	parser.add_argument('--hlsl',
+			action = 'store_true',
+			help = 'Test HLSL backend.')
+	parser.add_argument('--force-no-external-validation',
+			action = 'store_true',
+			help = 'Disable all external validation.')
+	parser.add_argument('--opt',
+			action = 'store_true',
+			help = 'Run SPIRV-Tools optimization passes as well.')
+	parser.add_argument('--reflect',
+			action = 'store_true',
+			help = 'Test reflection backend.')
+	parser.add_argument('--parallel',
+			action = 'store_true',
+			help = 'Execute tests in parallel. Useful for doing regression quickly, but bad for debugging and stat output.')
+
+	args = parser.parse_args()
+	if not args.folder:
+		sys.stderr.write('Need shader folder.\n')
+		sys.exit(1)
+
+	if (args.parallel and (args.malisc or args.force_no_external_validation or args.update)):
+		sys.stderr.write('Parallel execution is disabled when using the flags --update, --malisc or --force-no-external-validation\n')
+		args.parallel = False
+
+	if args.msl:
+		print_msl_compiler_version()
+
+	backend = 'glsl'
+	if (args.msl or args.metal):
+		backend = 'msl'
+	elif args.hlsl:
+		backend = 'hlsl'
+	elif args.reflect:
+		backend = 'reflect'
+
+	test_shaders(backend, args)
+	if args.malisc:
+		print('Stats in stats.csv!')
+	print('Tests completed!')
+
+if __name__ == '__main__':
+	main()
diff --git a/3rdparty/spirv-cross/test_shaders.sh b/3rdparty/spirv-cross/test_shaders.sh new file mode 100755 index 000000000..d49ceb2b4 --- /dev/null +++ b/3rdparty/spirv-cross/test_shaders.sh @@ -0,0 +1,20 @@
+#!/bin/bash
+
+echo "Building spirv-cross"
+make -j$(nproc)
+
+export PATH="./external/glslang-build/output/bin:./external/spirv-tools-build/output/bin:.:$PATH"
+echo "Using glslangValidator in: $(which glslangValidator)."
+echo "Using spirv-opt in: $(which spirv-opt)."
+
+./test_shaders.py shaders || exit 1
+./test_shaders.py shaders --opt || exit 1
+./test_shaders.py shaders-no-opt || exit 1
+./test_shaders.py shaders-msl --msl || exit 1
+./test_shaders.py shaders-msl --msl --opt || exit 1
+./test_shaders.py shaders-msl-no-opt --msl || exit 1
+./test_shaders.py shaders-hlsl --hlsl || exit 1
+./test_shaders.py shaders-hlsl --hlsl --opt || exit 1
+./test_shaders.py shaders-hlsl-no-opt --hlsl || exit 1
+./test_shaders.py shaders-reflection --reflect || exit 1
+
diff --git a/3rdparty/spirv-cross/tests-other/hlsl_wave_mask.cpp b/3rdparty/spirv-cross/tests-other/hlsl_wave_mask.cpp new file mode 100644 index 000000000..de11dd9fe --- /dev/null +++ b/3rdparty/spirv-cross/tests-other/hlsl_wave_mask.cpp @@ -0,0 +1,73 @@
+// Ad-hoc test that the wave op masks work as expected.
+#include <glm/glm.hpp>
+#include <assert.h>
+
+using namespace glm;
+
+static uvec4 gl_SubgroupEqMask;
+static uvec4 gl_SubgroupGeMask;
+static uvec4 gl_SubgroupGtMask;
+static uvec4 gl_SubgroupLeMask;
+static uvec4 gl_SubgroupLtMask;
+using uint4 = uvec4;
+
+static void test_main(unsigned wave_index)
+{
+	const auto WaveGetLaneIndex = [&]() { return wave_index; };
+
+	gl_SubgroupEqMask = 1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96));
+	if (WaveGetLaneIndex() >= 32) gl_SubgroupEqMask.x = 0;
+	if (WaveGetLaneIndex() >= 64 || WaveGetLaneIndex() < 32) gl_SubgroupEqMask.y = 0;
+	if (WaveGetLaneIndex() >= 96 || WaveGetLaneIndex() < 64) gl_SubgroupEqMask.z = 0;
+	if (WaveGetLaneIndex() < 96) gl_SubgroupEqMask.w = 0;
+	gl_SubgroupGeMask = ~((1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u);
+	if (WaveGetLaneIndex() >= 32) gl_SubgroupGeMask.x = 0u;
+	if (WaveGetLaneIndex() >= 64) gl_SubgroupGeMask.y = 0u;
+	if (WaveGetLaneIndex() >= 96) gl_SubgroupGeMask.z = 0u;
+	if (WaveGetLaneIndex() < 32) gl_SubgroupGeMask.y = ~0u;
+	if (WaveGetLaneIndex() < 64) gl_SubgroupGeMask.z = ~0u;
+	if (WaveGetLaneIndex() < 96) gl_SubgroupGeMask.w = ~0u;
+	uint gt_lane_index = WaveGetLaneIndex() + 1;
+	gl_SubgroupGtMask = ~((1u << (gt_lane_index - uint4(0, 32, 64, 96))) - 1u);
+	if (gt_lane_index >= 32) gl_SubgroupGtMask.x = 0u;
+	if (gt_lane_index >= 64) gl_SubgroupGtMask.y = 0u;
+	if (gt_lane_index >= 96) gl_SubgroupGtMask.z = 0u;
+	if (gt_lane_index >= 128) gl_SubgroupGtMask.w = 0u;
+	if (gt_lane_index < 32) gl_SubgroupGtMask.y = ~0u;
+	if (gt_lane_index < 64) gl_SubgroupGtMask.z = ~0u;
+	if (gt_lane_index < 96) gl_SubgroupGtMask.w = ~0u;
+	uint le_lane_index = WaveGetLaneIndex() + 1;
+	gl_SubgroupLeMask = (1u << (le_lane_index - uint4(0, 32, 64, 96))) - 1u;
+	if (le_lane_index >= 32) gl_SubgroupLeMask.x = ~0u;
+	if (le_lane_index >= 64) gl_SubgroupLeMask.y = ~0u;
+	if (le_lane_index >= 96) gl_SubgroupLeMask.z = ~0u;
+	if (le_lane_index >= 128) gl_SubgroupLeMask.w = ~0u;
+	if (le_lane_index < 32) gl_SubgroupLeMask.y = 0u;
+	if (le_lane_index < 64) gl_SubgroupLeMask.z = 0u;
+	if (le_lane_index < 96) gl_SubgroupLeMask.w = 0u;
+	gl_SubgroupLtMask = (1u << (WaveGetLaneIndex() - uint4(0, 32, 64, 96))) - 1u;
+	if (WaveGetLaneIndex() >= 32) gl_SubgroupLtMask.x = ~0u;
+	if (WaveGetLaneIndex() >= 64) gl_SubgroupLtMask.y = ~0u;
+	if (WaveGetLaneIndex() >= 96) gl_SubgroupLtMask.z = ~0u;
+	if (WaveGetLaneIndex() < 32) gl_SubgroupLtMask.y = 0u;
+	if (WaveGetLaneIndex() < 64) gl_SubgroupLtMask.z = 0u;
+	if (WaveGetLaneIndex() < 96) gl_SubgroupLtMask.w = 0u;
+}
+
+int main()
+{
+	for (unsigned subgroup_id = 0; subgroup_id < 128; subgroup_id++)
+	{
+		test_main(subgroup_id);
+
+		for (unsigned bit = 0; bit < 128; bit++)
+		{
+			assert(bool(gl_SubgroupEqMask[bit / 32] & (1u << (bit & 31))) == (bit == subgroup_id));
+			assert(bool(gl_SubgroupGtMask[bit / 32] & (1u << (bit & 31))) == (bit > subgroup_id));
+			assert(bool(gl_SubgroupGeMask[bit / 32] & (1u << (bit & 31))) == (bit >= subgroup_id));
+			assert(bool(gl_SubgroupLtMask[bit / 32] & (1u << (bit & 31))) == (bit < subgroup_id));
+			assert(bool(gl_SubgroupLeMask[bit / 32] & (1u << (bit & 31))) == (bit <= subgroup_id));
+		}
+	}
+}
+
diff --git a/3rdparty/spirv-cross/update_test_shaders.sh b/3rdparty/spirv-cross/update_test_shaders.sh new file mode 100755 index 000000000..2fb5ffa4a --- /dev/null +++ b/3rdparty/spirv-cross/update_test_shaders.sh @@ -0,0 +1,21 @@
+#!/bin/bash
+
+echo "Building spirv-cross"
+make -j$(nproc)
+
+export PATH="./external/glslang-build/output/bin:./external/spirv-tools-build/output/bin:.:$PATH"
+echo "Using glslangValidator in: $(which glslangValidator)."
+echo "Using spirv-opt in: $(which spirv-opt)."
+
+./test_shaders.py shaders --update || exit 1
+./test_shaders.py shaders --update --opt || exit 1
+./test_shaders.py shaders-no-opt --update || exit 1
+./test_shaders.py shaders-msl --update --msl || exit 1
+./test_shaders.py shaders-msl --update --msl --opt || exit 1
+./test_shaders.py shaders-msl-no-opt --update --msl || exit 1
+./test_shaders.py shaders-hlsl --update --hlsl || exit 1
+./test_shaders.py shaders-hlsl --update --hlsl --opt || exit 1
+./test_shaders.py shaders-hlsl-no-opt --update --hlsl || exit 1
+./test_shaders.py shaders-reflection --reflect --update || exit 1
+
+
diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/CMakeLists.txt b/3rdparty/spirv-headers/CMakeLists.txt similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/CMakeLists.txt rename to 3rdparty/spirv-headers/CMakeLists.txt
diff --git a/3rdparty/spirv-headers/CODE_OF_CONDUCT.md b/3rdparty/spirv-headers/CODE_OF_CONDUCT.md new file mode 100644 index 000000000..a11610bd3 --- /dev/null +++ b/3rdparty/spirv-headers/CODE_OF_CONDUCT.md @@ -0,0 +1 @@
+A reminder that this issue tracker is managed by the Khronos Group. Interactions here should follow the Khronos Code of Conduct (https://www.khronos.org/developers/code-of-conduct), which prohibits aggressive or derogatory language. Please keep the discussion friendly and civil.
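Note on the hlsl_wave_mask.cpp test added above: each gl_Subgroup*Mask is a 128-bit lane mask stored in a uvec4, with lanes 0-31 in .x, 32-63 in .y, 64-95 in .z and 96-127 in .w, which is what the `[bit / 32]` indexing in the assertions relies on. The following minimal Python sketch (illustrative only, not part of this patch; the helper name subgroup_masks is made up) computes the reference words those assertions compare against by building each mask as a 128-bit integer and splitting it into four 32-bit components.

def subgroup_masks(lane):
    # Reference semantics of the gl_Subgroup*Mask builtins for a 128-lane subgroup.
    full = (1 << 128) - 1
    eq = 1 << lane                # only the caller's lane bit
    lt = (1 << lane) - 1          # every lane below the caller
    le = lt | eq
    gt = full & ~le
    ge = full & ~lt
    # Split a 128-bit mask into four 32-bit words (.x, .y, .z, .w).
    words = lambda m: tuple((m >> (32 * i)) & 0xFFFFFFFF for i in range(4))
    return {'Eq': words(eq), 'Lt': words(lt), 'Le': words(le), 'Gt': words(gt), 'Ge': words(ge)}

# Example: lane 70 sits in component .z (70 // 32 == 2) at bit 6 (70 % 32),
# so Eq == (0, 0, 0x40, 0) and Ge == (0, 0, 0xffffffc0, 0xffffffff).
for name, mask_words in subgroup_masks(70).items():
    print(name, ['0x%08x' % w for w in mask_words])

The conditional fix-ups in test_main exist to reproduce exactly these per-component values, since the plain shift expression alone would leave garbage in the components that do not contain the current lane.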
diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/LICENSE b/3rdparty/spirv-headers/LICENSE similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/LICENSE rename to 3rdparty/spirv-headers/LICENSE diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/README.md b/3rdparty/spirv-headers/README.md similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/README.md rename to 3rdparty/spirv-headers/README.md diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/example/CMakeLists.txt b/3rdparty/spirv-headers/example/CMakeLists.txt similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/example/CMakeLists.txt rename to 3rdparty/spirv-headers/example/CMakeLists.txt diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/example/example-1.1.cpp b/3rdparty/spirv-headers/example/example-1.1.cpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/example/example-1.1.cpp rename to 3rdparty/spirv-headers/example/example-1.1.cpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/example/example.cpp b/3rdparty/spirv-headers/example/example.cpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/example/example.cpp rename to 3rdparty/spirv-headers/example/example.cpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/GLSL.std.450.h b/3rdparty/spirv-headers/include/spirv/1.0/GLSL.std.450.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/GLSL.std.450.h rename to 3rdparty/spirv-headers/include/spirv/1.0/GLSL.std.450.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/OpenCL.std.h b/3rdparty/spirv-headers/include/spirv/1.0/OpenCL.std.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/OpenCL.std.h rename to 3rdparty/spirv-headers/include/spirv/1.0/OpenCL.std.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/extinst.glsl.std.450.grammar.json b/3rdparty/spirv-headers/include/spirv/1.0/extinst.glsl.std.450.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/extinst.glsl.std.450.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.0/extinst.glsl.std.450.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/extinst.opencl.std.100.grammar.json b/3rdparty/spirv-headers/include/spirv/1.0/extinst.opencl.std.100.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/extinst.opencl.std.100.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.0/extinst.opencl.std.100.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.core.grammar.json b/3rdparty/spirv-headers/include/spirv/1.0/spirv.core.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.core.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.core.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.cs b/3rdparty/spirv-headers/include/spirv/1.0/spirv.cs similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.cs rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.cs diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.h b/3rdparty/spirv-headers/include/spirv/1.0/spirv.h 
similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.h rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.hpp b/3rdparty/spirv-headers/include/spirv/1.0/spirv.hpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.hpp rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.hpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.hpp11 b/3rdparty/spirv-headers/include/spirv/1.0/spirv.hpp11 similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.hpp11 rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.hpp11 diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.json b/3rdparty/spirv-headers/include/spirv/1.0/spirv.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.json rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.lua b/3rdparty/spirv-headers/include/spirv/1.0/spirv.lua similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.lua rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.lua diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.py b/3rdparty/spirv-headers/include/spirv/1.0/spirv.py similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.0/spirv.py rename to 3rdparty/spirv-headers/include/spirv/1.0/spirv.py diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/GLSL.std.450.h b/3rdparty/spirv-headers/include/spirv/1.1/GLSL.std.450.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/GLSL.std.450.h rename to 3rdparty/spirv-headers/include/spirv/1.1/GLSL.std.450.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/OpenCL.std.h b/3rdparty/spirv-headers/include/spirv/1.1/OpenCL.std.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/OpenCL.std.h rename to 3rdparty/spirv-headers/include/spirv/1.1/OpenCL.std.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/extinst.glsl.std.450.grammar.json b/3rdparty/spirv-headers/include/spirv/1.1/extinst.glsl.std.450.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/extinst.glsl.std.450.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.1/extinst.glsl.std.450.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/extinst.opencl.std.100.grammar.json b/3rdparty/spirv-headers/include/spirv/1.1/extinst.opencl.std.100.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/extinst.opencl.std.100.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.1/extinst.opencl.std.100.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.core.grammar.json b/3rdparty/spirv-headers/include/spirv/1.1/spirv.core.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.core.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.core.grammar.json diff --git 
a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.cs b/3rdparty/spirv-headers/include/spirv/1.1/spirv.cs similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.cs rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.cs diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.h b/3rdparty/spirv-headers/include/spirv/1.1/spirv.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.h rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.hpp b/3rdparty/spirv-headers/include/spirv/1.1/spirv.hpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.hpp rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.hpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.hpp11 b/3rdparty/spirv-headers/include/spirv/1.1/spirv.hpp11 similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.hpp11 rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.hpp11 diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.json b/3rdparty/spirv-headers/include/spirv/1.1/spirv.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.json rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.lua b/3rdparty/spirv-headers/include/spirv/1.1/spirv.lua similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.lua rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.lua diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.py b/3rdparty/spirv-headers/include/spirv/1.1/spirv.py similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.1/spirv.py rename to 3rdparty/spirv-headers/include/spirv/1.1/spirv.py diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/GLSL.std.450.h b/3rdparty/spirv-headers/include/spirv/1.2/GLSL.std.450.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/GLSL.std.450.h rename to 3rdparty/spirv-headers/include/spirv/1.2/GLSL.std.450.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/OpenCL.std.h b/3rdparty/spirv-headers/include/spirv/1.2/OpenCL.std.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/OpenCL.std.h rename to 3rdparty/spirv-headers/include/spirv/1.2/OpenCL.std.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/extinst.glsl.std.450.grammar.json b/3rdparty/spirv-headers/include/spirv/1.2/extinst.glsl.std.450.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/extinst.glsl.std.450.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.2/extinst.glsl.std.450.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/extinst.opencl.std.100.grammar.json b/3rdparty/spirv-headers/include/spirv/1.2/extinst.opencl.std.100.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/extinst.opencl.std.100.grammar.json rename to 
3rdparty/spirv-headers/include/spirv/1.2/extinst.opencl.std.100.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.core.grammar.json b/3rdparty/spirv-headers/include/spirv/1.2/spirv.core.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.core.grammar.json rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.core.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.cs b/3rdparty/spirv-headers/include/spirv/1.2/spirv.cs similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.cs rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.cs diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.h b/3rdparty/spirv-headers/include/spirv/1.2/spirv.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.h rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.hpp b/3rdparty/spirv-headers/include/spirv/1.2/spirv.hpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.hpp rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.hpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.hpp11 b/3rdparty/spirv-headers/include/spirv/1.2/spirv.hpp11 similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.hpp11 rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.hpp11 diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.json b/3rdparty/spirv-headers/include/spirv/1.2/spirv.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.json rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.lua b/3rdparty/spirv-headers/include/spirv/1.2/spirv.lua similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.lua rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.lua diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.py b/3rdparty/spirv-headers/include/spirv/1.2/spirv.py similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/1.2/spirv.py rename to 3rdparty/spirv-headers/include/spirv/1.2/spirv.py diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/spir-v.xml b/3rdparty/spirv-headers/include/spirv/spir-v.xml similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/spir-v.xml rename to 3rdparty/spirv-headers/include/spirv/spir-v.xml diff --git a/3rdparty/spirv-headers/include/spirv/unified1/GLSL.std.450.h b/3rdparty/spirv-headers/include/spirv/unified1/GLSL.std.450.h new file mode 100644 index 000000000..54cc00e9a --- /dev/null +++ b/3rdparty/spirv-headers/include/spirv/unified1/GLSL.std.450.h @@ -0,0 +1,131 @@ +/* +** Copyright (c) 2014-2016 The Khronos Group Inc. 
+** +** Permission is hereby granted, free of charge, to any person obtaining a copy +** of this software and/or associated documentation files (the "Materials"), +** to deal in the Materials without restriction, including without limitation +** the rights to use, copy, modify, merge, publish, distribute, sublicense, +** and/or sell copies of the Materials, and to permit persons to whom the +** Materials are furnished to do so, subject to the following conditions: +** +** The above copyright notice and this permission notice shall be included in +** all copies or substantial portions of the Materials. +** +** MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS +** STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND +** HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ +** +** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +** OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +** THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +** FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS +** IN THE MATERIALS. +*/ + +#ifndef GLSLstd450_H +#define GLSLstd450_H + +static const int GLSLstd450Version = 100; +static const int GLSLstd450Revision = 3; + +enum GLSLstd450 { + GLSLstd450Bad = 0, // Don't use + + GLSLstd450Round = 1, + GLSLstd450RoundEven = 2, + GLSLstd450Trunc = 3, + GLSLstd450FAbs = 4, + GLSLstd450SAbs = 5, + GLSLstd450FSign = 6, + GLSLstd450SSign = 7, + GLSLstd450Floor = 8, + GLSLstd450Ceil = 9, + GLSLstd450Fract = 10, + + GLSLstd450Radians = 11, + GLSLstd450Degrees = 12, + GLSLstd450Sin = 13, + GLSLstd450Cos = 14, + GLSLstd450Tan = 15, + GLSLstd450Asin = 16, + GLSLstd450Acos = 17, + GLSLstd450Atan = 18, + GLSLstd450Sinh = 19, + GLSLstd450Cosh = 20, + GLSLstd450Tanh = 21, + GLSLstd450Asinh = 22, + GLSLstd450Acosh = 23, + GLSLstd450Atanh = 24, + GLSLstd450Atan2 = 25, + + GLSLstd450Pow = 26, + GLSLstd450Exp = 27, + GLSLstd450Log = 28, + GLSLstd450Exp2 = 29, + GLSLstd450Log2 = 30, + GLSLstd450Sqrt = 31, + GLSLstd450InverseSqrt = 32, + + GLSLstd450Determinant = 33, + GLSLstd450MatrixInverse = 34, + + GLSLstd450Modf = 35, // second operand needs an OpVariable to write to + GLSLstd450ModfStruct = 36, // no OpVariable operand + GLSLstd450FMin = 37, + GLSLstd450UMin = 38, + GLSLstd450SMin = 39, + GLSLstd450FMax = 40, + GLSLstd450UMax = 41, + GLSLstd450SMax = 42, + GLSLstd450FClamp = 43, + GLSLstd450UClamp = 44, + GLSLstd450SClamp = 45, + GLSLstd450FMix = 46, + GLSLstd450IMix = 47, // Reserved + GLSLstd450Step = 48, + GLSLstd450SmoothStep = 49, + + GLSLstd450Fma = 50, + GLSLstd450Frexp = 51, // second operand needs an OpVariable to write to + GLSLstd450FrexpStruct = 52, // no OpVariable operand + GLSLstd450Ldexp = 53, + + GLSLstd450PackSnorm4x8 = 54, + GLSLstd450PackUnorm4x8 = 55, + GLSLstd450PackSnorm2x16 = 56, + GLSLstd450PackUnorm2x16 = 57, + GLSLstd450PackHalf2x16 = 58, + GLSLstd450PackDouble2x32 = 59, + GLSLstd450UnpackSnorm2x16 = 60, + GLSLstd450UnpackUnorm2x16 = 61, + GLSLstd450UnpackHalf2x16 = 62, + GLSLstd450UnpackSnorm4x8 = 63, + GLSLstd450UnpackUnorm4x8 = 64, + GLSLstd450UnpackDouble2x32 = 65, + + GLSLstd450Length = 66, + GLSLstd450Distance = 67, + GLSLstd450Cross = 68, + GLSLstd450Normalize = 69, + GLSLstd450FaceForward = 70, + GLSLstd450Reflect = 71, + 
GLSLstd450Refract = 72, + + GLSLstd450FindILsb = 73, + GLSLstd450FindSMsb = 74, + GLSLstd450FindUMsb = 75, + + GLSLstd450InterpolateAtCentroid = 76, + GLSLstd450InterpolateAtSample = 77, + GLSLstd450InterpolateAtOffset = 78, + + GLSLstd450NMin = 79, + GLSLstd450NMax = 80, + GLSLstd450NClamp = 81, + + GLSLstd450Count +}; + +#endif // #ifndef GLSLstd450_H diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/OpenCL.std.h b/3rdparty/spirv-headers/include/spirv/unified1/OpenCL.std.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/OpenCL.std.h rename to 3rdparty/spirv-headers/include/spirv/unified1/OpenCL.std.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/extinst.glsl.std.450.grammar.json b/3rdparty/spirv-headers/include/spirv/unified1/extinst.glsl.std.450.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/extinst.glsl.std.450.grammar.json rename to 3rdparty/spirv-headers/include/spirv/unified1/extinst.glsl.std.450.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/extinst.opencl.std.100.grammar.json b/3rdparty/spirv-headers/include/spirv/unified1/extinst.opencl.std.100.grammar.json similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/extinst.opencl.std.100.grammar.json rename to 3rdparty/spirv-headers/include/spirv/unified1/extinst.opencl.std.100.grammar.json diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.core.grammar.json b/3rdparty/spirv-headers/include/spirv/unified1/spirv.core.grammar.json similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.core.grammar.json rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.core.grammar.json index cd1785965..4739bfe94 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.core.grammar.json +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.core.grammar.json @@ -339,7 +339,10 @@ { "kind" : "IdRef", "name" : "'Pointer Type'" }, { "kind" : "StorageClass" } ], - "capabilities" : [ "Addresses" ] + "capabilities" : [ + "Addresses", + "PhysicalStorageBufferAddressesEXT" + ] }, { "opname" : "OpConstantTrue", @@ -563,7 +566,8 @@ "capabilities" : [ "Addresses", "VariablePointers", - "VariablePointersStorageBuffer" + "VariablePointersStorageBuffer", + "PhysicalStorageBufferAddressesEXT" ] }, { @@ -1048,7 +1052,10 @@ { "kind" : "IdResult" }, { "kind" : "IdRef", "name" : "'Pointer'" } ], - "capabilities" : [ "Addresses" ] + "capabilities" : [ + "Addresses", + "PhysicalStorageBufferAddressesEXT" + ] }, { "opname" : "OpSatConvertSToU", @@ -1078,7 +1085,10 @@ { "kind" : "IdResult" }, { "kind" : "IdRef", "name" : "'Integer Value'" } ], - "capabilities" : [ "Addresses" ] + "capabilities" : [ + "Addresses", + "PhysicalStorageBufferAddressesEXT" + ] }, { "opname" : "OpPtrCastToGeneric", @@ -4502,6 +4512,12 @@ "enumerant" : "Physical64", "value" : 2, "capabilities" : [ "Addresses" ] + }, + { + "enumerant" : "PhysicalStorageBuffer64EXT", + "value" : 5348, + "extensions" : [ "SPV_EXT_physical_storage_buffer" ], + "capabilities" : [ "PhysicalStorageBufferAddressesEXT" ] } ] }, @@ -4973,6 +4989,12 @@ "value" : 5343, "extensions" : [ "SPV_NV_ray_tracing" ], "capabilities" : [ "RayTracingNV" ] + }, + { + "enumerant" : "PhysicalStorageBufferEXT", + "value" : 5349, + "extensions" : 
[ "SPV_EXT_physical_storage_buffer" ], + "capabilities" : [ "PhysicalStorageBufferAddressesEXT" ] } ] }, @@ -5964,6 +5986,20 @@ ], "extensions" : [ "SPV_GOOGLE_hlsl_functionality1" ], "version" : "None" + }, + { + "enumerant" : "RestrictPointerEXT", + "value" : 5355, + "capabilities" : [ "PhysicalStorageBufferAddressesEXT" ], + "extensions" : [ "SPV_EXT_physical_storage_buffer" ], + "version" : "None" + }, + { + "enumerant" : "AliasedPointerEXT", + "value" : 5356, + "capabilities" : [ "PhysicalStorageBufferAddressesEXT" ], + "extensions" : [ "SPV_EXT_physical_storage_buffer" ], + "version" : "None" } ] }, @@ -7395,6 +7431,13 @@ "capabilities" : [ "Shader" ], "extensions" : [ "SPV_NV_shading_rate", "SPV_EXT_fragment_invocation_density" ], "version" : "None" + }, + { + "enumerant" : "PhysicalStorageBufferAddressesEXT", + "value" : 5347, + "capabilities" : [ "Shader" ], + "extensions" : [ "SPV_EXT_physical_storage_buffer" ], + "version" : "None" } ] }, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.cs b/3rdparty/spirv-headers/include/spirv/unified1/spirv.cs similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.cs rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.cs index bde9c1fc5..f031d720c 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.cs +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.cs @@ -87,6 +87,7 @@ namespace Spv Logical = 0, Physical32 = 1, Physical64 = 2, + PhysicalStorageBuffer64EXT = 5348, } public enum MemoryModel @@ -172,6 +173,7 @@ namespace Spv HitAttributeNV = 5339, IncomingRayPayloadNV = 5342, ShaderRecordBufferNV = 5343, + PhysicalStorageBufferEXT = 5349, } public enum Dim @@ -434,6 +436,8 @@ namespace Spv PerTaskNV = 5273, PerVertexNV = 5285, NonUniformEXT = 5300, + RestrictPointerEXT = 5355, + AliasedPointerEXT = 5356, HlslCounterBufferGOOGLE = 5634, HlslSemanticGOOGLE = 5635, } @@ -809,6 +813,7 @@ namespace Spv RayTracingNV = 5340, VulkanMemoryModelKHR = 5345, VulkanMemoryModelDeviceScopeKHR = 5346, + PhysicalStorageBufferAddressesEXT = 5347, ComputeDerivativeGroupLinearNV = 5350, SubgroupShuffleINTEL = 5568, SubgroupBufferBlockIOINTEL = 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.h b/3rdparty/spirv-headers/include/spirv/unified1/spirv.h similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.h rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.h index d996f09fc..081e12b4c 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.h +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.h @@ -95,6 +95,7 @@ typedef enum SpvAddressingModel_ { SpvAddressingModelLogical = 0, SpvAddressingModelPhysical32 = 1, SpvAddressingModelPhysical64 = 2, + SpvAddressingModelPhysicalStorageBuffer64EXT = 5348, SpvAddressingModelMax = 0x7fffffff, } SpvAddressingModel; @@ -180,6 +181,7 @@ typedef enum SpvStorageClass_ { SpvStorageClassHitAttributeNV = 5339, SpvStorageClassIncomingRayPayloadNV = 5342, SpvStorageClassShaderRecordBufferNV = 5343, + SpvStorageClassPhysicalStorageBufferEXT = 5349, SpvStorageClassMax = 0x7fffffff, } SpvStorageClass; @@ -440,6 +442,8 @@ typedef enum SpvDecoration_ { SpvDecorationPerTaskNV = 5273, SpvDecorationPerVertexNV = 5285, SpvDecorationNonUniformEXT = 5300, + SpvDecorationRestrictPointerEXT = 5355, + SpvDecorationAliasedPointerEXT = 5356, 
SpvDecorationHlslCounterBufferGOOGLE = 5634, SpvDecorationHlslSemanticGOOGLE = 5635, SpvDecorationMax = 0x7fffffff, @@ -809,6 +813,7 @@ typedef enum SpvCapability_ { SpvCapabilityRayTracingNV = 5340, SpvCapabilityVulkanMemoryModelKHR = 5345, SpvCapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + SpvCapabilityPhysicalStorageBufferAddressesEXT = 5347, SpvCapabilityComputeDerivativeGroupLinearNV = 5350, SpvCapabilitySubgroupShuffleINTEL = 5568, SpvCapabilitySubgroupBufferBlockIOINTEL = 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.hpp b/3rdparty/spirv-headers/include/spirv/unified1/spirv.hpp similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.hpp rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.hpp index 9100356ab..44d06168b 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.hpp +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.hpp @@ -91,6 +91,7 @@ enum AddressingModel { AddressingModelLogical = 0, AddressingModelPhysical32 = 1, AddressingModelPhysical64 = 2, + AddressingModelPhysicalStorageBuffer64EXT = 5348, AddressingModelMax = 0x7fffffff, }; @@ -176,6 +177,7 @@ enum StorageClass { StorageClassHitAttributeNV = 5339, StorageClassIncomingRayPayloadNV = 5342, StorageClassShaderRecordBufferNV = 5343, + StorageClassPhysicalStorageBufferEXT = 5349, StorageClassMax = 0x7fffffff, }; @@ -436,6 +438,8 @@ enum Decoration { DecorationPerTaskNV = 5273, DecorationPerVertexNV = 5285, DecorationNonUniformEXT = 5300, + DecorationRestrictPointerEXT = 5355, + DecorationAliasedPointerEXT = 5356, DecorationHlslCounterBufferGOOGLE = 5634, DecorationHlslSemanticGOOGLE = 5635, DecorationMax = 0x7fffffff, @@ -805,6 +809,7 @@ enum Capability { CapabilityRayTracingNV = 5340, CapabilityVulkanMemoryModelKHR = 5345, CapabilityVulkanMemoryModelDeviceScopeKHR = 5346, + CapabilityPhysicalStorageBufferAddressesEXT = 5347, CapabilityComputeDerivativeGroupLinearNV = 5350, CapabilitySubgroupShuffleINTEL = 5568, CapabilitySubgroupBufferBlockIOINTEL = 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.hpp11 b/3rdparty/spirv-headers/include/spirv/unified1/spirv.hpp11 similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.hpp11 rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.hpp11 index bc7ecaabf..9a3bc07b0 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.hpp11 +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.hpp11 @@ -91,6 +91,7 @@ enum class AddressingModel : unsigned { Logical = 0, Physical32 = 1, Physical64 = 2, + PhysicalStorageBuffer64EXT = 5348, Max = 0x7fffffff, }; @@ -176,6 +177,7 @@ enum class StorageClass : unsigned { HitAttributeNV = 5339, IncomingRayPayloadNV = 5342, ShaderRecordBufferNV = 5343, + PhysicalStorageBufferEXT = 5349, Max = 0x7fffffff, }; @@ -436,6 +438,8 @@ enum class Decoration : unsigned { PerTaskNV = 5273, PerVertexNV = 5285, NonUniformEXT = 5300, + RestrictPointerEXT = 5355, + AliasedPointerEXT = 5356, HlslCounterBufferGOOGLE = 5634, HlslSemanticGOOGLE = 5635, Max = 0x7fffffff, @@ -805,6 +809,7 @@ enum class Capability : unsigned { RayTracingNV = 5340, VulkanMemoryModelKHR = 5345, VulkanMemoryModelDeviceScopeKHR = 5346, + PhysicalStorageBufferAddressesEXT = 5347, ComputeDerivativeGroupLinearNV = 5350, SubgroupShuffleINTEL = 5568, SubgroupBufferBlockIOINTEL = 5569, diff --git 
a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.json b/3rdparty/spirv-headers/include/spirv/unified1/spirv.json similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.json rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.json index 11fd7a4bc..7e7a69355 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.json +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.json @@ -103,7 +103,8 @@ { "Logical": 0, "Physical32": 1, - "Physical64": 2 + "Physical64": 2, + "PhysicalStorageBuffer64EXT": 5348 } }, { @@ -197,7 +198,8 @@ "RayPayloadNV": 5338, "HitAttributeNV": 5339, "IncomingRayPayloadNV": 5342, - "ShaderRecordBufferNV": 5343 + "ShaderRecordBufferNV": 5343, + "PhysicalStorageBufferEXT": 5349 } }, { @@ -472,6 +474,8 @@ "PerTaskNV": 5273, "PerVertexNV": 5285, "NonUniformEXT": 5300, + "RestrictPointerEXT": 5355, + "AliasedPointerEXT": 5356, "HlslCounterBufferGOOGLE": 5634, "HlslSemanticGOOGLE": 5635 } @@ -820,6 +824,7 @@ "RayTracingNV": 5340, "VulkanMemoryModelKHR": 5345, "VulkanMemoryModelDeviceScopeKHR": 5346, + "PhysicalStorageBufferAddressesEXT": 5347, "ComputeDerivativeGroupLinearNV": 5350, "SubgroupShuffleINTEL": 5568, "SubgroupBufferBlockIOINTEL": 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.lua b/3rdparty/spirv-headers/include/spirv/unified1/spirv.lua similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.lua rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.lua index 29644450c..9efff398d 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.lua +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.lua @@ -80,6 +80,7 @@ spv = { Logical = 0, Physical32 = 1, Physical64 = 2, + PhysicalStorageBuffer64EXT = 5348, }, MemoryModel = { @@ -162,6 +163,7 @@ spv = { HitAttributeNV = 5339, IncomingRayPayloadNV = 5342, ShaderRecordBufferNV = 5343, + PhysicalStorageBufferEXT = 5349, }, Dim = { @@ -409,6 +411,8 @@ spv = { PerTaskNV = 5273, PerVertexNV = 5285, NonUniformEXT = 5300, + RestrictPointerEXT = 5355, + AliasedPointerEXT = 5356, HlslCounterBufferGOOGLE = 5634, HlslSemanticGOOGLE = 5635, }, @@ -767,6 +771,7 @@ spv = { RayTracingNV = 5340, VulkanMemoryModelKHR = 5345, VulkanMemoryModelDeviceScopeKHR = 5346, + PhysicalStorageBufferAddressesEXT = 5347, ComputeDerivativeGroupLinearNV = 5350, SubgroupShuffleINTEL = 5568, SubgroupBufferBlockIOINTEL = 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.py b/3rdparty/spirv-headers/include/spirv/unified1/spirv.py similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.py rename to 3rdparty/spirv-headers/include/spirv/unified1/spirv.py index 89855e11c..0da966078 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spirv.py +++ b/3rdparty/spirv-headers/include/spirv/unified1/spirv.py @@ -80,6 +80,7 @@ spv = { 'Logical' : 0, 'Physical32' : 1, 'Physical64' : 2, + 'PhysicalStorageBuffer64EXT' : 5348, }, 'MemoryModel' : { @@ -162,6 +163,7 @@ spv = { 'HitAttributeNV' : 5339, 'IncomingRayPayloadNV' : 5342, 'ShaderRecordBufferNV' : 5343, + 'PhysicalStorageBufferEXT' : 5349, }, 'Dim' : { @@ -409,6 +411,8 @@ spv = { 'PerTaskNV' : 5273, 'PerVertexNV' : 5285, 'NonUniformEXT' : 5300, + 'RestrictPointerEXT' : 5355, + 'AliasedPointerEXT' : 5356, 'HlslCounterBufferGOOGLE' : 5634, 
'HlslSemanticGOOGLE' : 5635, }, @@ -767,6 +771,7 @@ spv = { 'RayTracingNV' : 5340, 'VulkanMemoryModelKHR' : 5345, 'VulkanMemoryModelDeviceScopeKHR' : 5346, + 'PhysicalStorageBufferAddressesEXT' : 5347, 'ComputeDerivativeGroupLinearNV' : 5350, 'SubgroupShuffleINTEL' : 5568, 'SubgroupBufferBlockIOINTEL' : 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spv.d b/3rdparty/spirv-headers/include/spirv/unified1/spv.d similarity index 99% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spv.d rename to 3rdparty/spirv-headers/include/spirv/unified1/spv.d index 4791e7be5..ea9eaeddd 100644 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/include/spirv/unified1/spv.d +++ b/3rdparty/spirv-headers/include/spirv/unified1/spv.d @@ -90,6 +90,7 @@ enum AddressingModel : uint Logical = 0, Physical32 = 1, Physical64 = 2, + PhysicalStorageBuffer64EXT = 5348, } enum MemoryModel : uint @@ -175,6 +176,7 @@ enum StorageClass : uint HitAttributeNV = 5339, IncomingRayPayloadNV = 5342, ShaderRecordBufferNV = 5343, + PhysicalStorageBufferEXT = 5349, } enum Dim : uint @@ -437,6 +439,8 @@ enum Decoration : uint PerTaskNV = 5273, PerVertexNV = 5285, NonUniformEXT = 5300, + RestrictPointerEXT = 5355, + AliasedPointerEXT = 5356, HlslCounterBufferGOOGLE = 5634, HlslSemanticGOOGLE = 5635, } @@ -812,6 +816,7 @@ enum Capability : uint RayTracingNV = 5340, VulkanMemoryModelKHR = 5345, VulkanMemoryModelDeviceScopeKHR = 5346, + PhysicalStorageBufferAddressesEXT = 5347, ComputeDerivativeGroupLinearNV = 5350, SubgroupShuffleINTEL = 5568, SubgroupBufferBlockIOINTEL = 5569, diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/CMakeLists.txt b/3rdparty/spirv-headers/tools/buildHeaders/CMakeLists.txt similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/CMakeLists.txt rename to 3rdparty/spirv-headers/tools/buildHeaders/CMakeLists.txt diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/bin/makeHeaders b/3rdparty/spirv-headers/tools/buildHeaders/bin/makeHeaders similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/bin/makeHeaders rename to 3rdparty/spirv-headers/tools/buildHeaders/bin/makeHeaders diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/header.cpp b/3rdparty/spirv-headers/tools/buildHeaders/header.cpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/header.cpp rename to 3rdparty/spirv-headers/tools/buildHeaders/header.cpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/header.h b/3rdparty/spirv-headers/tools/buildHeaders/header.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/header.h rename to 3rdparty/spirv-headers/tools/buildHeaders/header.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsonToSpirv.cpp b/3rdparty/spirv-headers/tools/buildHeaders/jsonToSpirv.cpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsonToSpirv.cpp rename to 3rdparty/spirv-headers/tools/buildHeaders/jsonToSpirv.cpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsonToSpirv.h b/3rdparty/spirv-headers/tools/buildHeaders/jsonToSpirv.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsonToSpirv.h rename to 
3rdparty/spirv-headers/tools/buildHeaders/jsonToSpirv.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsoncpp/dist/json/json-forwards.h b/3rdparty/spirv-headers/tools/buildHeaders/jsoncpp/dist/json/json-forwards.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsoncpp/dist/json/json-forwards.h rename to 3rdparty/spirv-headers/tools/buildHeaders/jsoncpp/dist/json/json-forwards.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsoncpp/dist/json/json.h b/3rdparty/spirv-headers/tools/buildHeaders/jsoncpp/dist/json/json.h similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsoncpp/dist/json/json.h rename to 3rdparty/spirv-headers/tools/buildHeaders/jsoncpp/dist/json/json.h diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsoncpp/dist/jsoncpp.cpp b/3rdparty/spirv-headers/tools/buildHeaders/jsoncpp/dist/jsoncpp.cpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/jsoncpp/dist/jsoncpp.cpp rename to 3rdparty/spirv-headers/tools/buildHeaders/jsoncpp/dist/jsoncpp.cpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/main.cpp b/3rdparty/spirv-headers/tools/buildHeaders/main.cpp similarity index 100% rename from 3rdparty/spirv-tools/external/SPIRV-Headers/tools/buildHeaders/main.cpp rename to 3rdparty/spirv-headers/tools/buildHeaders/main.cpp diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/.gitattributes b/3rdparty/spirv-tools/external/SPIRV-Headers/.gitattributes deleted file mode 100644 index 3fa5aaf16..000000000 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/.gitattributes +++ /dev/null @@ -1,7 +0,0 @@ -*.json text -*.h text -*.hpp text -*.hpp11 text -*.lua text -*.py text -*.xml diff --git a/3rdparty/spirv-tools/external/SPIRV-Headers/.gitignore b/3rdparty/spirv-tools/external/SPIRV-Headers/.gitignore deleted file mode 100644 index 9bcdd5aba..000000000 --- a/3rdparty/spirv-tools/external/SPIRV-Headers/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -build -out diff --git a/scripts/shaderc.lua b/scripts/shaderc.lua index 7e34656bc..e378205cf 100644 --- a/scripts/shaderc.lua +++ b/scripts/shaderc.lua @@ -6,9 +6,11 @@ group "tools/shaderc" local GLSL_OPTIMIZER = path.join(BGFX_DIR, "3rdparty/glsl-optimizer") -local FCPP_DIR = path.join(BGFX_DIR, "3rdparty/fcpp") -local GLSLANG = path.join(BGFX_DIR, "3rdparty/glslang") -local SPIRV_TOOLS = path.join(BGFX_DIR, "3rdparty/spirv-tools") +local FCPP_DIR = path.join(BGFX_DIR, "3rdparty/fcpp") +local GLSLANG = path.join(BGFX_DIR, "3rdparty/glslang") +local SPIRV_CROSS = path.join(BGFX_DIR, "3rdparty/spirv-cross") +local SPIRV_HEADERS = path.join(BGFX_DIR, "3rdparty/spirv-headers") +local SPIRV_TOOLS = path.join(BGFX_DIR, "3rdparty/spirv-tools") project "spirv-opt" kind "StaticLib" @@ -18,7 +20,7 @@ project "spirv-opt" path.join(SPIRV_TOOLS, "include/generated"), path.join(SPIRV_TOOLS, "source"), path.join(SPIRV_TOOLS), - path.join(SPIRV_TOOLS, "external/SPIRV-Headers/include"), + path.join(SPIRV_HEADERS, "include"), } files { @@ -137,6 +139,63 @@ project "spirv-opt" "/wd4706", -- warning C4706: assignment within conditional expression } + configuration { "mingw* or linux or osx" } + buildoptions { + "-Wno-switch", + } + + configuration { "mingw* or linux-gcc-*" } + buildoptions { + "-Wno-misleading-indentation", + } + + configuration {} + +project "spirv-cross" + kind "StaticLib" + + defines { + 
"SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS", + } + + includedirs { + path.join(SPIRV_CROSS, "include"), + } + + files { + path.join(SPIRV_CROSS, "spirv.hpp"), + path.join(SPIRV_CROSS, "spirv_cfg.cpp"), + path.join(SPIRV_CROSS, "spirv_cfg.hpp"), + path.join(SPIRV_CROSS, "spirv_common.hpp"), + path.join(SPIRV_CROSS, "spirv_cpp.cpp"), + path.join(SPIRV_CROSS, "spirv_cpp.hpp"), + path.join(SPIRV_CROSS, "spirv_cross.cpp"), + path.join(SPIRV_CROSS, "spirv_cross.hpp"), + path.join(SPIRV_CROSS, "spirv_cross_parsed_ir.cpp"), + path.join(SPIRV_CROSS, "spirv_cross_parsed_ir.hpp"), + path.join(SPIRV_CROSS, "spirv_cross_util.cpp"), + path.join(SPIRV_CROSS, "spirv_cross_util.hpp"), + path.join(SPIRV_CROSS, "spirv_glsl.cpp"), + path.join(SPIRV_CROSS, "spirv_glsl.hpp"), + path.join(SPIRV_CROSS, "spirv_hlsl.cpp"), + path.join(SPIRV_CROSS, "spirv_hlsl.hpp"), + path.join(SPIRV_CROSS, "spirv_msl.cpp"), + path.join(SPIRV_CROSS, "spirv_msl.hpp"), + path.join(SPIRV_CROSS, "spirv_parser.cpp"), + path.join(SPIRV_CROSS, "spirv_parser.hpp"), + path.join(SPIRV_CROSS, "spirv_reflect.cpp"), + path.join(SPIRV_CROSS, "spirv_reflect.hpp"), + } + + configuration { "vs*" } + buildoptions { + "/wd4018", -- warning C4018: '<': signed/unsigned mismatch + "/wd4245", -- warning C4245: 'return': conversion from 'int' to 'unsigned int', signed/unsigned mismatch + "/wd4706", -- warning C4706: assignment within conditional expression + } + + configuration {} + project "glslang" kind "StaticLib" @@ -552,6 +611,7 @@ project "shaderc" "glslang", "glsl-optimizer", "spirv-opt", + "spirv-cross", } files {