Mirror of https://github.com/bkaradzic/bgfx.git (synced 2026-02-17 20:52:36 +01:00)
Updated spirv-cross.
3rdparty/spirv-cross/CMakeLists.txt (vendored, 14 lines changed)
@@ -37,6 +37,7 @@ option(SPIRV_CROSS_SANITIZE_THREADS "Sanitize threads" OFF)
option(SPIRV_CROSS_SANITIZE_UNDEFINED "Sanitize undefined" OFF)

option(SPIRV_CROSS_NAMESPACE_OVERRIDE "" "Override the namespace used in the C++ API.")
option(SPIRV_CROSS_FORCE_STL_TYPES "Force use of STL types instead of STL replacements in certain places. Might reduce performance." OFF)

if(${CMAKE_GENERATOR} MATCHES "Makefile")
if(${CMAKE_CURRENT_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_BINARY_DIR})
@@ -52,6 +53,10 @@ if(SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS)
set(spirv-compiler-defines ${spirv-compiler-defines} SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS)
endif()

if(SPIRV_CROSS_FORCE_STL_TYPES)
set(spirv-compiler-defines ${spirv-compiler-defines} SPIRV_CROSS_FORCE_STL_TYPES)
endif()

if (CMAKE_COMPILER_IS_GNUCXX OR (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang"))
set(spirv-compiler-options ${spirv-compiler-options} -Wall -Wextra -Werror -Wshadow)

@@ -132,6 +137,8 @@ endmacro()
set(spirv-cross-core-sources
${CMAKE_CURRENT_SOURCE_DIR}/GLSL.std.450.h
${CMAKE_CURRENT_SOURCE_DIR}/spirv_common.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_containers.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross_error_handling.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.hpp
${CMAKE_CURRENT_SOURCE_DIR}/spirv_cross.cpp
@@ -415,11 +422,18 @@ if (SPIRV_CROSS_CLI)
add_executable(spirv-cross-c-api-test tests-other/c_api_test.c)
target_link_libraries(spirv-cross-c-api-test spirv-cross-c)
set_target_properties(spirv-cross-c-api-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}")

add_executable(spirv-cross-small-vector-test tests-other/small_vector.cpp)
target_link_libraries(spirv-cross-small-vector-test spirv-cross-core)
set_target_properties(spirv-cross-small-vector-test PROPERTIES LINK_FLAGS "${spirv-cross-link-flags}")

if (CMAKE_COMPILER_IS_GNUCXX OR (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang"))
target_compile_options(spirv-cross-c-api-test PRIVATE -std=c89 -Wall -Wextra)
endif()
add_test(NAME spirv-cross-c-api-test
COMMAND $<TARGET_FILE:spirv-cross-c-api-test> ${CMAKE_CURRENT_SOURCE_DIR}/tests-other/c_api_test.spv)
add_test(NAME spirv-cross-small-vector-test
COMMAND $<TARGET_FILE:spirv-cross-small-vector-test>)
add_test(NAME spirv-cross-test
COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test_shaders.py --parallel
${spirv-cross-externals}
3rdparty/spirv-cross/README.md (vendored, 33 lines changed)
@@ -414,45 +414,40 @@ The reference files are stored inside the repository in order to be able to trac

All pull requests should ensure that test output does not change unexpectedly. This can be tested with:

```
./test_shaders.py shaders || exit 1
./test_shaders.py shaders --opt || exit 1
./test_shaders.py shaders-no-opt || exit 1
./test_shaders.py shaders-msl --msl || exit 1
./test_shaders.py shaders-msl --msl --opt || exit 1
./test_shaders.py shaders-msl-no-opt --msl || exit 1
./test_shaders.py shaders-hlsl --hlsl || exit 1
./test_shaders.py shaders-hlsl --hlsl --opt || exit 1
./test_shaders.py shaders-hlsl-no-opt --hlsl || exit 1
./test_shaders.py shaders-reflection --reflect || exit 1
```

although there are a couple of convenience script for doing this:

```
./checkout_glslang_spirv_tools.sh # Checks out glslang and SPIRV-Tools at a fixed revision which matches the reference output.
# NOTE: Some users have reported problems cloning from git:// paths. To use https:// instead pass in
# $ PROTOCOL=https ./checkout_glslang_spirv_tools.sh
# instead.
./build_glslang_spirv_tools.sh # Builds glslang and SPIRV-Tools.
./test_shaders.sh # Runs over all changes and makes sure that there are no deltas compared to reference files.
```

`./test_shaders.sh` currently requires a Makefile setup with GCC/Clang to be set up.
However, on Windows, this can be rather inconvenient if a MinGW environment is not set up.
To use a spirv-cross binary you built with CMake (or otherwise), you can pass in an environment variable as such:

```
SPIRV_CROSS_PATH=path/to/custom/spirv-cross ./test_shaders.sh
```

However, when improving SPIRV-Cross there are of course legitimate cases where reference output should change.
In these cases, run:

```
./update_test_shaders.sh
./update_test_shaders.sh # SPIRV_CROSS_PATH also works here.
```

to update the reference files and include these changes as part of the pull request.
Always make sure you are running the correct version of glslangValidator as well as SPIRV-Tools when updating reference files.
See `checkout_glslang_spirv_tools.sh`.
See `checkout_glslang_spirv_tools.sh` which revisions are currently expected. The revisions change regularly.

In short, the master branch should always be able to run `./test_shaders.py shaders` and friends without failure.
SPIRV-Cross uses Travis CI to test all pull requests, so it is not strictly needed to perform testing yourself if you have problems running it locally.
A pull request which does not pass testing on Travis will not be accepted however.

When adding support for new features to SPIRV-Cross, a new shader and reference file should be added which covers usage of the new shader features in question.

Travis CI runs the test suite with the CMake, by running `ctest`. This method is compatible with MSVC.
Travis CI runs the test suite with the CMake, by running `ctest`. This is a more straight-forward alternative to `./test_shaders.sh`.

### Licensing
@@ -1,6 +1,7 @@
#!/bin/bash

PROFILE=Release

if [ ! -z $1 ]; then
PROFILE=$1
fi
@@ -4,6 +4,12 @@ GLSLANG_REV=ef807f4bc543e061f25dbbee6cb64dd5053b2adc
SPIRV_TOOLS_REV=12e4a7b649e6fe28683de9fc352200c82948a1f0
SPIRV_HEADERS_REV=111a25e4ae45e2b4d7c18415e1d6884712b958c4

if [ -z $PROTOCOL ]; then
PROTOCOL=git
fi

echo "Using protocol \"$PROTOCOL\" for checking out repositories. If this is problematic, try PROTOCOL=https $0."

if [ -d external/glslang ]; then
echo "Updating glslang to revision $GLSLANG_REV."
cd external/glslang
@@ -13,7 +19,7 @@ else
echo "Cloning glslang revision $GLSLANG_REV."
mkdir -p external
cd external
git clone git://github.com/KhronosGroup/glslang.git
git clone $PROTOCOL://github.com/KhronosGroup/glslang.git
cd glslang
git checkout $GLSLANG_REV
fi
@@ -28,7 +34,7 @@ else
echo "Cloning SPIRV-Tools revision $SPIRV_TOOLS_REV."
mkdir -p external
cd external
git clone git://github.com/KhronosGroup/SPIRV-Tools.git spirv-tools
git clone $PROTOCOL://github.com/KhronosGroup/SPIRV-Tools.git spirv-tools
cd spirv-tools
git checkout $SPIRV_TOOLS_REV
fi
@@ -39,7 +45,7 @@ if [ -d external/spirv-headers ]; then
git checkout $SPIRV_HEADERS_REV
cd ../..
else
git clone git://github.com/KhronosGroup/SPIRV-Headers.git external/spirv-headers
git clone $PROTOCOL://github.com/KhronosGroup/SPIRV-Headers.git external/spirv-headers
cd external/spirv-headers
git checkout $SPIRV_HEADERS_REV
cd ../..
3rdparty/spirv-cross/main.cpp (vendored, 624 lines changed)
@@ -221,7 +221,7 @@ static bool write_string_to_file(const char *path, const char *string)
return true;
}

static void print_resources(const Compiler &compiler, const char *tag, const vector<Resource> &resources)
static void print_resources(const Compiler &compiler, const char *tag, const SmallVector<Resource> &resources)
{
fprintf(stderr, "%s\n", tag);
fprintf(stderr, "=============\n\n");
@@ -411,7 +411,7 @@ static void print_resources(const Compiler &compiler, const ShaderResources &res
print_resources(compiler, "acceleration structures", res.acceleration_structures);
}

static void print_push_constant_resources(const Compiler &compiler, const vector<Resource> &res)
static void print_push_constant_resources(const Compiler &compiler, const SmallVector<Resource> &res)
{
for (auto &block : res)
{
@@ -510,14 +510,14 @@ struct CLIArguments
bool msl_domain_lower_left = false;
bool msl_argument_buffers = false;
bool glsl_emit_push_constant_as_ubo = false;
vector<uint32_t> msl_discrete_descriptor_sets;
vector<PLSArg> pls_in;
vector<PLSArg> pls_out;
vector<Remap> remaps;
vector<string> extensions;
vector<VariableTypeRemap> variable_type_remaps;
vector<InterfaceVariableRename> interface_variable_renames;
vector<HLSLVertexAttributeRemap> hlsl_attr_remap;
SmallVector<uint32_t> msl_discrete_descriptor_sets;
SmallVector<PLSArg> pls_in;
SmallVector<PLSArg> pls_out;
SmallVector<Remap> remaps;
SmallVector<string> extensions;
SmallVector<VariableTypeRemap> variable_type_remaps;
SmallVector<InterfaceVariableRename> interface_variable_renames;
SmallVector<HLSLVertexAttributeRemap> hlsl_attr_remap;
string entry;
string entry_stage;

@@ -527,7 +527,7 @@ struct CLIArguments
string new_name;
ExecutionModel execution_model;
};
vector<Rename> entry_point_rename;
SmallVector<Rename> entry_point_rename;

uint32_t iterations = 1;
bool cpp = false;
@@ -595,7 +595,7 @@ static void print_help()
"\n");
}

static bool remap_generic(Compiler &compiler, const vector<Resource> &resources, const Remap &remap)
static bool remap_generic(Compiler &compiler, const SmallVector<Resource> &resources, const Remap &remap)
{
auto itr =
find_if(begin(resources), end(resources), [&remap](const Resource &res) { return res.name == remap.src_name; });
@@ -611,8 +611,8 @@ static bool remap_generic(Compiler &compiler, const vector<Resource> &resources,
return false;
}

static vector<PlsRemap> remap_pls(const vector<PLSArg> &pls_variables, const vector<Resource> &resources,
const vector<Resource> *secondary_resources)
static vector<PlsRemap> remap_pls(const SmallVector<PLSArg> &pls_variables, const SmallVector<Resource> &resources,
const SmallVector<Resource> *secondary_resources)
{
vector<PlsRemap> ret;

@@ -697,6 +697,298 @@ static ExecutionModel stage_to_execution_model(const std::string &stage)
SPIRV_CROSS_THROW("Invalid stage.");
}

static string compile_iteration(const CLIArguments &args, std::vector<uint32_t> spirv_file)
{
Parser spirv_parser(move(spirv_file));
spirv_parser.parse();

unique_ptr<CompilerGLSL> compiler;
bool combined_image_samplers = false;
bool build_dummy_sampler = false;

if (args.cpp)
{
compiler.reset(new CompilerCPP(move(spirv_parser.get_parsed_ir())));
if (args.cpp_interface_name)
static_cast<CompilerCPP *>(compiler.get())->set_interface_name(args.cpp_interface_name);
}
else if (args.msl)
{
compiler.reset(new CompilerMSL(move(spirv_parser.get_parsed_ir())));

auto *msl_comp = static_cast<CompilerMSL *>(compiler.get());
auto msl_opts = msl_comp->get_msl_options();
if (args.set_msl_version)
msl_opts.msl_version = args.msl_version;
msl_opts.capture_output_to_buffer = args.msl_capture_output_to_buffer;
msl_opts.swizzle_texture_samples = args.msl_swizzle_texture_samples;
if (args.msl_ios)
msl_opts.platform = CompilerMSL::Options::iOS;
msl_opts.pad_fragment_output_components = args.msl_pad_fragment_output;
msl_opts.tess_domain_origin_lower_left = args.msl_domain_lower_left;
msl_opts.argument_buffers = args.msl_argument_buffers;
msl_comp->set_msl_options(msl_opts);
for (auto &v : args.msl_discrete_descriptor_sets)
msl_comp->add_discrete_descriptor_set(v);
}
else if (args.hlsl)
compiler.reset(new CompilerHLSL(move(spirv_parser.get_parsed_ir())));
else
{
combined_image_samplers = !args.vulkan_semantics;
if (!args.vulkan_semantics)
build_dummy_sampler = true;
compiler.reset(new CompilerGLSL(move(spirv_parser.get_parsed_ir())));
}

if (!args.variable_type_remaps.empty())
{
auto remap_cb = [&](const SPIRType &, const string &name, string &out) -> void {
for (const VariableTypeRemap &remap : args.variable_type_remaps)
if (name == remap.variable_name)
out = remap.new_variable_type;
};

compiler->set_variable_type_remap_callback(move(remap_cb));
}

for (auto &rename : args.entry_point_rename)
compiler->rename_entry_point(rename.old_name, rename.new_name, rename.execution_model);

auto entry_points = compiler->get_entry_points_and_stages();
auto entry_point = args.entry;
ExecutionModel model = ExecutionModelMax;

if (!args.entry_stage.empty())
{
model = stage_to_execution_model(args.entry_stage);
if (entry_point.empty())
{
// Just use the first entry point with this stage.
for (auto &e : entry_points)
{
if (e.execution_model == model)
{
entry_point = e.name;
break;
}
}

if (entry_point.empty())
{
fprintf(stderr, "Could not find an entry point with stage: %s\n", args.entry_stage.c_str());
exit(EXIT_FAILURE);
}
}
else
{
// Make sure both stage and name exists.
bool exists = false;
for (auto &e : entry_points)
{
if (e.execution_model == model && e.name == entry_point)
{
exists = true;
break;
}
}

if (!exists)
{
fprintf(stderr, "Could not find an entry point %s with stage: %s\n", entry_point.c_str(),
args.entry_stage.c_str());
exit(EXIT_FAILURE);
}
}
}
else if (!entry_point.empty())
{
// Make sure there is just one entry point with this name, or the stage
// is ambiguous.
uint32_t stage_count = 0;
for (auto &e : entry_points)
{
if (e.name == entry_point)
{
stage_count++;
model = e.execution_model;
}
}

if (stage_count == 0)
{
fprintf(stderr, "There is no entry point with name: %s\n", entry_point.c_str());
exit(EXIT_FAILURE);
}
else if (stage_count > 1)
{
fprintf(stderr, "There is more than one entry point with name: %s. Use --stage.\n", entry_point.c_str());
exit(EXIT_FAILURE);
}
}

if (!entry_point.empty())
compiler->set_entry_point(entry_point, model);

if (!args.set_version && !compiler->get_common_options().version)
{
fprintf(stderr, "Didn't specify GLSL version and SPIR-V did not specify language.\n");
print_help();
exit(EXIT_FAILURE);
}

CompilerGLSL::Options opts = compiler->get_common_options();
if (args.set_version)
opts.version = args.version;
if (args.set_es)
opts.es = args.es;
opts.force_temporary = args.force_temporary;
opts.separate_shader_objects = args.sso;
opts.flatten_multidimensional_arrays = args.flatten_multidimensional_arrays;
opts.enable_420pack_extension = args.use_420pack_extension;
opts.vulkan_semantics = args.vulkan_semantics;
opts.vertex.fixup_clipspace = args.fixup;
opts.vertex.flip_vert_y = args.yflip;
opts.vertex.support_nonzero_base_instance = args.support_nonzero_baseinstance;
opts.emit_push_constant_as_uniform_buffer = args.glsl_emit_push_constant_as_ubo;
compiler->set_common_options(opts);

// Set HLSL specific options.
if (args.hlsl)
{
auto *hlsl = static_cast<CompilerHLSL *>(compiler.get());
auto hlsl_opts = hlsl->get_hlsl_options();
if (args.set_shader_model)
{
if (args.shader_model < 30)
{
fprintf(stderr, "Shader model earlier than 30 (3.0) not supported.\n");
exit(EXIT_FAILURE);
}

hlsl_opts.shader_model = args.shader_model;
}

if (args.hlsl_compat)
{
// Enable all compat options.
hlsl_opts.point_size_compat = true;
hlsl_opts.point_coord_compat = true;
}

if (hlsl_opts.shader_model <= 30)
{
combined_image_samplers = true;
build_dummy_sampler = true;
}

hlsl_opts.support_nonzero_base_vertex_base_instance = args.hlsl_support_nonzero_base;
hlsl->set_hlsl_options(hlsl_opts);
}

if (build_dummy_sampler)
{
uint32_t sampler = compiler->build_dummy_sampler_for_combined_images();
if (sampler != 0)
{
// Set some defaults to make validation happy.
compiler->set_decoration(sampler, DecorationDescriptorSet, 0);
compiler->set_decoration(sampler, DecorationBinding, 0);
}
}

ShaderResources res;
if (args.remove_unused)
{
auto active = compiler->get_active_interface_variables();
res = compiler->get_shader_resources(active);
compiler->set_enabled_interface_variables(move(active));
}
else
res = compiler->get_shader_resources();

if (args.flatten_ubo)
{
for (auto &ubo : res.uniform_buffers)
compiler->flatten_buffer_block(ubo.id);
for (auto &ubo : res.push_constant_buffers)
compiler->flatten_buffer_block(ubo.id);
}

auto pls_inputs = remap_pls(args.pls_in, res.stage_inputs, &res.subpass_inputs);
auto pls_outputs = remap_pls(args.pls_out, res.stage_outputs, nullptr);
compiler->remap_pixel_local_storage(move(pls_inputs), move(pls_outputs));

for (auto &ext : args.extensions)
compiler->require_extension(ext);

for (auto &remap : args.remaps)
{
if (remap_generic(*compiler, res.stage_inputs, remap))
continue;
if (remap_generic(*compiler, res.stage_outputs, remap))
continue;
if (remap_generic(*compiler, res.subpass_inputs, remap))
continue;
}

for (auto &rename : args.interface_variable_renames)
{
if (rename.storageClass == StorageClassInput)
spirv_cross_util::rename_interface_variable(*compiler, res.stage_inputs, rename.location,
rename.variable_name);
else if (rename.storageClass == StorageClassOutput)
spirv_cross_util::rename_interface_variable(*compiler, res.stage_outputs, rename.location,
rename.variable_name);
else
{
fprintf(stderr, "error at --rename-interface-variable <in|out> ...\n");
exit(EXIT_FAILURE);
}
}

if (args.dump_resources)
{
print_resources(*compiler, res);
print_push_constant_resources(*compiler, res.push_constant_buffers);
print_spec_constants(*compiler);
print_capabilities_and_extensions(*compiler);
}

if (combined_image_samplers)
{
compiler->build_combined_image_samplers();
if (args.combined_samplers_inherit_bindings)
spirv_cross_util::inherit_combined_sampler_bindings(*compiler);

// Give the remapped combined samplers new names.
for (auto &remap : compiler->get_combined_image_samplers())
{
compiler->set_name(remap.combined_id, join("SPIRV_Cross_Combined", compiler->get_name(remap.image_id),
compiler->get_name(remap.sampler_id)));
}
}

if (args.hlsl)
{
auto *hlsl_compiler = static_cast<CompilerHLSL *>(compiler.get());
uint32_t new_builtin = hlsl_compiler->remap_num_workgroups_builtin();
if (new_builtin)
{
hlsl_compiler->set_decoration(new_builtin, DecorationDescriptorSet, 0);
hlsl_compiler->set_decoration(new_builtin, DecorationBinding, 0);
}
}

if (args.hlsl)
{
for (auto &remap : args.hlsl_attr_remap)
static_cast<CompilerHLSL *>(compiler.get())->add_vertex_attribute_remap(remap);
}

return compiler->compile();
}

static int main_inner(int argc, char *argv[])
{
CLIArguments args;
@@ -819,13 +1111,9 @@ static int main_inner(int argc, char *argv[])

CLIParser parser{ move(cbs), argc - 1, argv + 1 };
if (!parser.parse())
{
return EXIT_FAILURE;
}
else if (parser.ended_state)
{
return EXIT_SUCCESS;
}

if (!args.input)
{
@@ -837,13 +1125,13 @@ static int main_inner(int argc, char *argv[])
auto spirv_file = read_spirv_file(args.input);
if (spirv_file.empty())
return EXIT_FAILURE;
Parser spirv_parser(move(spirv_file));

spirv_parser.parse();

// Special case reflection because it has little to do with the path followed by code-outputting compilers
if (!args.reflect.empty())
{
Parser spirv_parser(move(spirv_file));
spirv_parser.parse();

CompilerReflection compiler(move(spirv_parser.get_parsed_ir()));
compiler.set_format(args.reflect);
auto json = compiler.compile();
@@ -854,300 +1142,20 @@ static int main_inner(int argc, char *argv[])
return EXIT_SUCCESS;
}

unique_ptr<CompilerGLSL> compiler;
bool combined_image_samplers = false;
bool build_dummy_sampler = false;
string compiled_output;

if (args.cpp)
{
compiler.reset(new CompilerCPP(move(spirv_parser.get_parsed_ir())));
if (args.cpp_interface_name)
static_cast<CompilerCPP *>(compiler.get())->set_interface_name(args.cpp_interface_name);
}
else if (args.msl)
{
compiler.reset(new CompilerMSL(move(spirv_parser.get_parsed_ir())));

auto *msl_comp = static_cast<CompilerMSL *>(compiler.get());
auto msl_opts = msl_comp->get_msl_options();
if (args.set_msl_version)
msl_opts.msl_version = args.msl_version;
msl_opts.capture_output_to_buffer = args.msl_capture_output_to_buffer;
msl_opts.swizzle_texture_samples = args.msl_swizzle_texture_samples;
if (args.msl_ios)
msl_opts.platform = CompilerMSL::Options::iOS;
msl_opts.pad_fragment_output_components = args.msl_pad_fragment_output;
msl_opts.tess_domain_origin_lower_left = args.msl_domain_lower_left;
msl_opts.argument_buffers = args.msl_argument_buffers;
msl_comp->set_msl_options(msl_opts);
for (auto &v : args.msl_discrete_descriptor_sets)
msl_comp->add_discrete_descriptor_set(v);
}
else if (args.hlsl)
compiler.reset(new CompilerHLSL(move(spirv_parser.get_parsed_ir())));
if (args.iterations == 1)
compiled_output = compile_iteration(args, move(spirv_file));
else
{
combined_image_samplers = !args.vulkan_semantics;
if (!args.vulkan_semantics)
build_dummy_sampler = true;
compiler.reset(new CompilerGLSL(move(spirv_parser.get_parsed_ir())));
}

if (!args.variable_type_remaps.empty())
{
auto remap_cb = [&](const SPIRType &, const string &name, string &out) -> void {
for (const VariableTypeRemap &remap : args.variable_type_remaps)
if (name == remap.variable_name)
out = remap.new_variable_type;
};

compiler->set_variable_type_remap_callback(move(remap_cb));
}

for (auto &rename : args.entry_point_rename)
compiler->rename_entry_point(rename.old_name, rename.new_name, rename.execution_model);

auto entry_points = compiler->get_entry_points_and_stages();
auto entry_point = args.entry;
ExecutionModel model = ExecutionModelMax;

if (!args.entry_stage.empty())
{
model = stage_to_execution_model(args.entry_stage);
if (entry_point.empty())
{
// Just use the first entry point with this stage.
for (auto &e : entry_points)
{
if (e.execution_model == model)
{
entry_point = e.name;
break;
}
}

if (entry_point.empty())
{
fprintf(stderr, "Could not find an entry point with stage: %s\n", args.entry_stage.c_str());
return EXIT_FAILURE;
}
}
else
{
// Make sure both stage and name exists.
bool exists = false;
for (auto &e : entry_points)
{
if (e.execution_model == model && e.name == entry_point)
{
exists = true;
break;
}
}

if (!exists)
{
fprintf(stderr, "Could not find an entry point %s with stage: %s\n", entry_point.c_str(),
args.entry_stage.c_str());
return EXIT_FAILURE;
}
}
}
else if (!entry_point.empty())
{
// Make sure there is just one entry point with this name, or the stage
// is ambiguous.
uint32_t stage_count = 0;
for (auto &e : entry_points)
{
if (e.name == entry_point)
{
stage_count++;
model = e.execution_model;
}
}

if (stage_count == 0)
{
fprintf(stderr, "There is no entry point with name: %s\n", entry_point.c_str());
return EXIT_FAILURE;
}
else if (stage_count > 1)
{
fprintf(stderr, "There is more than one entry point with name: %s. Use --stage.\n", entry_point.c_str());
return EXIT_FAILURE;
}
}

if (!entry_point.empty())
compiler->set_entry_point(entry_point, model);

if (!args.set_version && !compiler->get_common_options().version)
{
fprintf(stderr, "Didn't specify GLSL version and SPIR-V did not specify language.\n");
print_help();
return EXIT_FAILURE;
}

CompilerGLSL::Options opts = compiler->get_common_options();
if (args.set_version)
opts.version = args.version;
if (args.set_es)
opts.es = args.es;
opts.force_temporary = args.force_temporary;
opts.separate_shader_objects = args.sso;
opts.flatten_multidimensional_arrays = args.flatten_multidimensional_arrays;
opts.enable_420pack_extension = args.use_420pack_extension;
opts.vulkan_semantics = args.vulkan_semantics;
opts.vertex.fixup_clipspace = args.fixup;
opts.vertex.flip_vert_y = args.yflip;
opts.vertex.support_nonzero_base_instance = args.support_nonzero_baseinstance;
opts.emit_push_constant_as_uniform_buffer = args.glsl_emit_push_constant_as_ubo;
compiler->set_common_options(opts);

// Set HLSL specific options.
if (args.hlsl)
{
auto *hlsl = static_cast<CompilerHLSL *>(compiler.get());
auto hlsl_opts = hlsl->get_hlsl_options();
if (args.set_shader_model)
{
if (args.shader_model < 30)
{
fprintf(stderr, "Shader model earlier than 30 (3.0) not supported.\n");
return EXIT_FAILURE;
}

hlsl_opts.shader_model = args.shader_model;
}

if (args.hlsl_compat)
{
// Enable all compat options.
hlsl_opts.point_size_compat = true;
hlsl_opts.point_coord_compat = true;
}

if (hlsl_opts.shader_model <= 30)
{
combined_image_samplers = true;
build_dummy_sampler = true;
}

hlsl_opts.support_nonzero_base_vertex_base_instance = args.hlsl_support_nonzero_base;
hlsl->set_hlsl_options(hlsl_opts);
}

if (build_dummy_sampler)
{
uint32_t sampler = compiler->build_dummy_sampler_for_combined_images();
if (sampler != 0)
{
// Set some defaults to make validation happy.
compiler->set_decoration(sampler, DecorationDescriptorSet, 0);
compiler->set_decoration(sampler, DecorationBinding, 0);
}
}

ShaderResources res;
if (args.remove_unused)
{
auto active = compiler->get_active_interface_variables();
res = compiler->get_shader_resources(active);
compiler->set_enabled_interface_variables(move(active));
}
else
res = compiler->get_shader_resources();

if (args.flatten_ubo)
{
for (auto &ubo : res.uniform_buffers)
compiler->flatten_buffer_block(ubo.id);
for (auto &ubo : res.push_constant_buffers)
compiler->flatten_buffer_block(ubo.id);
}

auto pls_inputs = remap_pls(args.pls_in, res.stage_inputs, &res.subpass_inputs);
auto pls_outputs = remap_pls(args.pls_out, res.stage_outputs, nullptr);
compiler->remap_pixel_local_storage(move(pls_inputs), move(pls_outputs));

for (auto &ext : args.extensions)
compiler->require_extension(ext);

for (auto &remap : args.remaps)
{
if (remap_generic(*compiler, res.stage_inputs, remap))
continue;
if (remap_generic(*compiler, res.stage_outputs, remap))
continue;
if (remap_generic(*compiler, res.subpass_inputs, remap))
continue;
}

for (auto &rename : args.interface_variable_renames)
{
if (rename.storageClass == StorageClassInput)
spirv_cross_util::rename_interface_variable(*compiler, res.stage_inputs, rename.location,
rename.variable_name);
else if (rename.storageClass == StorageClassOutput)
spirv_cross_util::rename_interface_variable(*compiler, res.stage_outputs, rename.location,
rename.variable_name);
else
{
fprintf(stderr, "error at --rename-interface-variable <in|out> ...\n");
return EXIT_FAILURE;
}
}

if (args.dump_resources)
{
print_resources(*compiler, res);
print_push_constant_resources(*compiler, res.push_constant_buffers);
print_spec_constants(*compiler);
print_capabilities_and_extensions(*compiler);
}

if (combined_image_samplers)
{
compiler->build_combined_image_samplers();
if (args.combined_samplers_inherit_bindings)
spirv_cross_util::inherit_combined_sampler_bindings(*compiler);

// Give the remapped combined samplers new names.
for (auto &remap : compiler->get_combined_image_samplers())
{
compiler->set_name(remap.combined_id, join("SPIRV_Cross_Combined", compiler->get_name(remap.image_id),
compiler->get_name(remap.sampler_id)));
}
}

if (args.hlsl)
{
auto *hlsl_compiler = static_cast<CompilerHLSL *>(compiler.get());
uint32_t new_builtin = hlsl_compiler->remap_num_workgroups_builtin();
if (new_builtin)
{
hlsl_compiler->set_decoration(new_builtin, DecorationDescriptorSet, 0);
hlsl_compiler->set_decoration(new_builtin, DecorationBinding, 0);
}
}

string glsl;
for (uint32_t i = 0; i < args.iterations; i++)
{
if (args.hlsl)
{
for (auto &remap : args.hlsl_attr_remap)
static_cast<CompilerHLSL *>(compiler.get())->add_vertex_attribute_remap(remap);
}

glsl = compiler->compile();
for (unsigned i = 0; i < args.iterations; i++)
compiled_output = compile_iteration(args, spirv_file);
}

if (args.output)
write_string_to_file(args.output, glsl.c_str());
write_string_to_file(args.output, compiled_output.c_str());
else
printf("%s", glsl.c_str());
printf("%s", compiled_output.c_str());

return EXIT_SUCCESS;
}
3rdparty/spirv-cross/reference/opt/shaders/legacy/fragment/fma.legacy.frag (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
#version 100
precision mediump float;
precision highp int;

varying highp vec4 vA;
varying highp vec4 vB;
varying highp vec4 vC;

void main()
{
gl_FragData[0] = vA * vB + vC;
}
3rdparty/spirv-cross/reference/shaders-msl-no-opt/asm/comp/arithmetic-conversion-signs.asm.comp (vendored, new file, 42 lines)
@@ -0,0 +1,42 @@
#include <metal_stdlib>
#include <simd/simd.h>

using namespace metal;

struct SSBO
{
int s32;
uint u32;
short s16;
ushort u16;
float f32;
};

kernel void main0(device SSBO& _4 [[buffer(0)]])
{
int _29 = _4.s32;
uint _30 = _4.u32;
short _31 = _4.s16;
ushort _32 = _4.u16;
float _33 = _4.f32;
_4.s32 = int(_31);
_4.u32 = uint(_31);
_4.s32 = int(short(_32));
_4.u32 = uint(short(_32));
_4.u32 = uint(ushort(_31));
_4.u32 = uint(_32);
_4.s16 = short(_29);
_4.u16 = ushort(_29);
_4.s16 = short(_30);
_4.u16 = ushort(_30);
_4.u16 = ushort(_29);
_4.u16 = ushort(_30);
_4.f32 = float(_31);
_4.f32 = float(short(_32));
_4.f32 = float(ushort(_31));
_4.f32 = float(_32);
_4.s16 = short(_33);
_4.u16 = ushort(short(_33));
_4.u16 = ushort(_33);
}
@@ -46,10 +46,11 @@ kernel void main0(device foo& buf [[buffer(0)]], constant bar& cb [[buffer(3)]],
threadgroup int* cur = stgsm;
device int* _73;
_73 = &buf.a[0u];
threadgroup int* _76;
int _77;
for (;;)
{
threadgroup int* _76 = cur;
_76 = cur;
_77 = *_73;
if (_77 != 0)
{
@@ -0,0 +1,52 @@
#pragma clang diagnostic ignored "-Wmissing-prototypes"

#include <metal_stdlib>
#include <simd/simd.h>

using namespace metal;

constant float _21 = {};

struct main0_out
{
float4 gl_Position [[position]];
};

// Implementation of an array copy function to cover GLSL's ability to copy an array via assignment.
template<typename T, uint N>
void spvArrayCopyFromStack1(thread T (&dst)[N], thread const T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}

template<typename T, uint N>
void spvArrayCopyFromConstant1(thread T (&dst)[N], constant T (&src)[N])
{
for (uint i = 0; i < N; dst[i] = src[i], i++);
}

vertex main0_out main0()
{
main0_out out = {};
float _23[2];
for (int _25 = 0; _25 < 2; )
{
_23[_25] = 0.0;
_25++;
continue;
}
float _31[2];
spvArrayCopyFromStack1(_31, _23);
float _37;
if (as_type<uint>(3.0) != 0u)
{
_37 = _31[0];
}
else
{
_37 = _21;
}
out.gl_Position = float4(0.0, 0.0, 0.0, _37);
return out;
}
@@ -0,0 +1,30 @@
#version 450
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(binding = 0, std430) buffer SSBO
{
int values[];
} _4;

void main()
{
int _17 = 0;
for (;;)
{
if (_17 < 100)
{
int _24 = _4.values[_17];
_4.values[_24] = _17;
int _26 = _24 + 1;
int _18 = _4.values[_26];
_4.values[_17] = _18;
_17 = _18;
continue;
}
else
{
break;
}
}
}
@@ -0,0 +1,27 @@
#version 450
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(binding = 0, std430) buffer SSBO
{
int values[];
} _4;

void main()
{
int _17 = 0;
for (;;)
{
if (_17 < 100)
{
int _24 = _4.values[_17];
_4.values[_24] = _17;
_17 = _4.values[_24 + 1];
continue;
}
else
{
break;
}
}
}
@@ -0,0 +1,42 @@
#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require
#extension GL_EXT_shader_16bit_storage : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

layout(set = 0, binding = 0, std430) buffer SSBO
{
int s32;
uint u32;
int16_t s16;
uint16_t u16;
float f32;
} _4;

void main()
{
int _29 = _4.s32;
uint _30 = _4.u32;
int16_t _31 = _4.s16;
uint16_t _32 = _4.u16;
float _33 = _4.f32;
_4.s32 = int(_31);
_4.u32 = uint(_31);
_4.s32 = int(int16_t(_32));
_4.u32 = uint(int16_t(_32));
_4.u32 = uint(uint16_t(_31));
_4.u32 = uint(_32);
_4.s16 = int16_t(_29);
_4.u16 = uint16_t(_29);
_4.s16 = int16_t(_30);
_4.u16 = uint16_t(_30);
_4.u16 = uint16_t(_29);
_4.u16 = uint16_t(_30);
_4.f32 = float(_31);
_4.f32 = float(int16_t(_32));
_4.f32 = float(uint16_t(_31));
_4.f32 = float(_32);
_4.s16 = int16_t(_33);
_4.u16 = uint16_t(int16_t(_33));
_4.u16 = uint16_t(_33);
}
3rdparty/spirv-cross/reference/shaders-no-opt/asm/comp/spec-constant-op-convert-sign.asm.comp (vendored, new file, 33 lines)
@@ -0,0 +1,33 @@
#version 450
#extension GL_ARB_gpu_shader_int64 : require
layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;

#ifndef SPIRV_CROSS_CONSTANT_ID_0
#define SPIRV_CROSS_CONSTANT_ID_0 1
#endif
const int ConstantInt = SPIRV_CROSS_CONSTANT_ID_0;
#ifndef SPIRV_CROSS_CONSTANT_ID_1
#define SPIRV_CROSS_CONSTANT_ID_1 2u
#endif
const uint ConstantUint = SPIRV_CROSS_CONSTANT_ID_1;
const int64_t ConstantInt64_1 = int64_t(ConstantInt);
const int64_t ConstantInt64_2 = int64_t(int(ConstantUint));
const uint64_t ConstantUint64_1 = uint64_t(ConstantInt);
const uint64_t ConstantUint64_2 = uint64_t(int(ConstantUint));
const int64_t _20 = (ConstantInt64_1 + ConstantInt64_2);
const uint64_t _21 = (ConstantUint64_1 + ConstantUint64_2);
const int _22 = int(_20);
const uint _23 = uint(_21);

layout(binding = 0, std430) buffer SSBO
{
int s64;
uint u64;
} _4;

void main()
{
_4.s64 = _22;
_4.u64 = _23;
}
3rdparty/spirv-cross/reference/shaders/legacy/fragment/fma.legacy.frag (vendored, new file, 13 lines)
@@ -0,0 +1,13 @@
#version 100
precision mediump float;
precision highp int;

varying highp vec4 vA;
varying highp vec4 vB;
varying highp vec4 vC;

void main()
{
gl_FragData[0] = vA * vB + vC;
}
3rdparty/spirv-cross/shaders-msl-no-opt/asm/comp/arithmetic-conversion-signs.asm.comp (vendored, new file, 131 lines)
@@ -0,0 +1,131 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 76
; Schema: 0
OpCapability Shader
OpCapability Int16
OpCapability StorageBuffer16BitAccess
OpExtension "SPV_KHR_16bit_storage"
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpSourceExtension "GL_EXT_shader_explicit_arithmetic_types_int16"
OpName %main "main"
OpName %SSBO "SSBO"
OpMemberName %SSBO 0 "s32"
OpMemberName %SSBO 1 "u32"
OpMemberName %SSBO 2 "s16"
OpMemberName %SSBO 3 "u16"
OpMemberName %SSBO 4 "f32"
OpName %_ ""
OpMemberDecorate %SSBO 0 Offset 0
OpMemberDecorate %SSBO 1 Offset 4
OpMemberDecorate %SSBO 2 Offset 8
OpMemberDecorate %SSBO 3 Offset 10
OpMemberDecorate %SSBO 4 Offset 12
OpDecorate %SSBO BufferBlock
OpDecorate %_ DescriptorSet 0
OpDecorate %_ Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%int = OpTypeInt 32 1
%uint = OpTypeInt 32 0
%short = OpTypeInt 16 1
%ushort = OpTypeInt 16 0
%float = OpTypeFloat 32
%SSBO = OpTypeStruct %int %uint %short %ushort %float
%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO
%_ = OpVariable %_ptr_Uniform_SSBO Uniform
%int_2 = OpConstant %int 2
%int_0 = OpConstant %int 0
%_ptr_Uniform_int = OpTypePointer Uniform %int
%_ptr_Uniform_short = OpTypePointer Uniform %short
%int_1 = OpConstant %int 1
%_ptr_Uniform_uint = OpTypePointer Uniform %uint
%int_3 = OpConstant %int 3
%_ptr_Uniform_ushort = OpTypePointer Uniform %ushort
%int_4 = OpConstant %int 4
%_ptr_Uniform_float = OpTypePointer Uniform %float
%main = OpFunction %void None %3
%5 = OpLabel
%ptr_s32 = OpAccessChain %_ptr_Uniform_int %_ %int_0
%ptr_u32 = OpAccessChain %_ptr_Uniform_uint %_ %int_1
%ptr_s16 = OpAccessChain %_ptr_Uniform_short %_ %int_2
%ptr_u16 = OpAccessChain %_ptr_Uniform_ushort %_ %int_3
%ptr_f32 = OpAccessChain %_ptr_Uniform_float %_ %int_4
%s32 = OpLoad %int %ptr_s32
%u32 = OpLoad %uint %ptr_u32
%s16 = OpLoad %short %ptr_s16
%u16 = OpLoad %ushort %ptr_u16
%f32 = OpLoad %float %ptr_f32

; Sign-extend
%s16_to_s32_signed = OpSConvert %int %s16
OpStore %ptr_s32 %s16_to_s32_signed
%s16_to_u32_signed = OpSConvert %uint %s16
OpStore %ptr_u32 %s16_to_u32_signed

%u16_to_s32_signed = OpSConvert %int %u16
OpStore %ptr_s32 %u16_to_s32_signed
%u16_to_u32_signed = OpSConvert %uint %u16
OpStore %ptr_u32 %u16_to_u32_signed

; Zero-extend
; Result must be unsigned for OpUConvert.
;%s16_to_s32_unsigned = OpUConvert %int %s16
;OpStore %ptr_s32 %s16_to_s32_unsigned
%s16_to_u32_unsigned = OpUConvert %uint %s16
OpStore %ptr_u32 %s16_to_u32_unsigned

;%u16_to_s32_unsigned = OpUConvert %int %u16
;OpStore %ptr_s32 %u16_to_s32_unsigned
%u16_to_u32_unsigned = OpUConvert %uint %u16
OpStore %ptr_u32 %u16_to_u32_unsigned

; Truncate (SConvert == UConvert)
%s32_to_s16_signed = OpSConvert %short %s32
OpStore %ptr_s16 %s32_to_s16_signed
%s32_to_u16_signed = OpSConvert %ushort %s32
OpStore %ptr_u16 %s32_to_u16_signed

%u32_to_s16_signed = OpSConvert %short %u32
OpStore %ptr_s16 %u32_to_s16_signed
%u32_to_u16_signed = OpSConvert %ushort %u32
OpStore %ptr_u16 %u32_to_u16_signed

;%s32_to_s16_unsigned = OpUConvert %short %s32
;OpStore %ptr_s16 %s32_to_s16_unsigned
%s32_to_u16_unsigned = OpUConvert %ushort %s32
OpStore %ptr_u16 %s32_to_u16_unsigned

;%u32_to_s16_unsigned = OpUConvert %short %u32
;OpStore %ptr_s16 %u32_to_s16_unsigned
%u32_to_u16_unsigned = OpUConvert %ushort %u32
OpStore %ptr_u16 %u32_to_u16_unsigned

; SToF
%s16_to_f32_signed = OpConvertSToF %float %s16
OpStore %ptr_f32 %s16_to_f32_signed
%u16_to_f32_signed = OpConvertSToF %float %u16
OpStore %ptr_f32 %u16_to_f32_signed
%s16_to_f32_unsigned = OpConvertUToF %float %s16
OpStore %ptr_f32 %s16_to_f32_unsigned
%u16_to_f32_unsigned = OpConvertUToF %float %u16
OpStore %ptr_f32 %u16_to_f32_unsigned

; FToS
%f32_to_s16_signed = OpConvertFToS %short %f32
OpStore %ptr_s16 %f32_to_s16_signed
%f32_to_u16_signed = OpConvertFToS %ushort %f32
OpStore %ptr_u16 %f32_to_u16_signed

; FToU
%f32_to_u16_unsigned = OpConvertFToU %ushort %f32
OpStore %ptr_u16 %f32_to_u16_unsigned
; Result must be unsigned for FToU, so don't bother testing that.

OpReturn
OpFunctionEnd
3rdparty/spirv-cross/shaders-msl-no-opt/asm/vert/op-load-forced-temporary-array.asm.frag (vendored, new file, 60 lines)
@@ -0,0 +1,60 @@
; SPIR-V
; Version: 1.0
; Generator: Google spiregg; 0
; Bound: 39
; Schema: 0
OpCapability Shader
OpMemoryModel Logical GLSL450
OpEntryPoint Vertex %vs_main "main" %gl_Position
OpSource HLSL 600
OpName %vs_main "vs_main"
OpDecorate %gl_Position BuiltIn Position
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%int_2 = OpConstant %int 2
%float = OpTypeFloat 32
%float_0 = OpConstant %float 0
%int_1 = OpConstant %int 1
%float_3 = OpConstant %float 3
%uint = OpTypeInt 32 0
%uint_0 = OpConstant %uint 0
%v4float = OpTypeVector %float 4
%_ptr_Output_v4float = OpTypePointer Output %v4float
%void = OpTypeVoid
%15 = OpTypeFunction %void
%uint_2 = OpConstant %uint 2
%_arr_float_uint_2 = OpTypeArray %float %uint_2
%_ptr_Function__arr_float_uint_2 = OpTypePointer Function %_arr_float_uint_2
%_ptr_Function_float = OpTypePointer Function %float
%bool = OpTypeBool
%gl_Position = OpVariable %_ptr_Output_v4float Output
%21 = OpUndef %float
%vs_main = OpFunction %void None %15
%22 = OpLabel
%23 = OpVariable %_ptr_Function__arr_float_uint_2 Function
OpBranch %24
%24 = OpLabel
%25 = OpPhi %int %int_0 %22 %26 %27
%28 = OpSLessThan %bool %25 %int_2
OpLoopMerge %29 %27 None
OpBranchConditional %28 %27 %29
%27 = OpLabel
%30 = OpAccessChain %_ptr_Function_float %23 %25
OpStore %30 %float_0
%26 = OpIAdd %int %25 %int_1
OpBranch %24
%29 = OpLabel
%31 = OpLoad %_arr_float_uint_2 %23
%32 = OpBitcast %uint %float_3
%33 = OpINotEqual %bool %32 %uint_0
OpSelectionMerge %34 None
OpBranchConditional %33 %35 %34
%35 = OpLabel
%36 = OpCompositeExtract %float %31 0
OpBranch %34
%34 = OpLabel
%37 = OpPhi %float %21 %29 %36 %35
%38 = OpCompositeConstruct %v4float %float_0 %float_0 %float_0 %37
OpStore %gl_Position %38
OpReturn
OpFunctionEnd
3rdparty/spirv-cross/shaders-no-opt/asm/comp/access-chain-dominator-in-loop-body-2.asm.comp (vendored, new file, 55 lines)
@@ -0,0 +1,55 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 52
; Schema: 0
OpCapability Shader
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpName %main "main"
OpName %SSBO "SSBO"
OpMemberName %SSBO 0 "values"
OpName %_ ""
OpDecorate %_runtimearr_int ArrayStride 4
OpMemberDecorate %SSBO 0 Offset 0
OpDecorate %SSBO BufferBlock
OpDecorate %_ DescriptorSet 0
OpDecorate %_ Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%int_100 = OpConstant %int 100
%bool = OpTypeBool
%_runtimearr_int = OpTypeRuntimeArray %int
%SSBO = OpTypeStruct %_runtimearr_int
%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO
%_ = OpVariable %_ptr_Uniform_SSBO Uniform
%_ptr_Uniform_int = OpTypePointer Uniform %int
%int_1 = OpConstant %int 1
%main = OpFunction %void None %3
%5 = OpLabel
OpBranch %32
%32 = OpLabel
%51 = OpPhi %int %int_0 %5 %49 %loop_continue
%38 = OpSLessThan %bool %51 %int_100
OpLoopMerge %loop_merge %loop_continue None
OpBranchConditional %38 %loop_body %loop_merge
%loop_body = OpLabel
%40 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %51
OpBranch %loop_continue
%loop_continue = OpLabel
%41 = OpLoad %int %40
%44 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %41
OpStore %44 %51
%47 = OpIAdd %int %41 %int_1
%48 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %47
%49 = OpLoad %int %48
OpStore %40 %49
OpBranch %32
%loop_merge = OpLabel
OpReturn
OpFunctionEnd
3rdparty/spirv-cross/shaders-no-opt/asm/comp/access-chain-dominator-in-loop-body.asm.comp (vendored, new file, 54 lines)
@@ -0,0 +1,54 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 52
; Schema: 0
OpCapability Shader
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpName %main "main"
OpName %SSBO "SSBO"
OpMemberName %SSBO 0 "values"
OpName %_ ""
OpDecorate %_runtimearr_int ArrayStride 4
OpMemberDecorate %SSBO 0 Offset 0
OpDecorate %SSBO BufferBlock
OpDecorate %_ DescriptorSet 0
OpDecorate %_ Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%int = OpTypeInt 32 1
%int_0 = OpConstant %int 0
%int_100 = OpConstant %int 100
%bool = OpTypeBool
%_runtimearr_int = OpTypeRuntimeArray %int
%SSBO = OpTypeStruct %_runtimearr_int
%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO
%_ = OpVariable %_ptr_Uniform_SSBO Uniform
%_ptr_Uniform_int = OpTypePointer Uniform %int
%int_1 = OpConstant %int 1
%main = OpFunction %void None %3
%5 = OpLabel
OpBranch %32
%32 = OpLabel
%51 = OpPhi %int %int_0 %5 %49 %loop_continue
%38 = OpSLessThan %bool %51 %int_100
OpLoopMerge %loop_merge %loop_continue None
OpBranchConditional %38 %loop_body %loop_merge
%loop_body = OpLabel
%40 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %51
OpBranch %loop_continue
%loop_continue = OpLabel
%41 = OpLoad %int %40
%44 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %41
OpStore %44 %51
%47 = OpIAdd %int %41 %int_1
%48 = OpAccessChain %_ptr_Uniform_int %_ %int_0 %47
%49 = OpLoad %int %48
OpBranch %32
%loop_merge = OpLabel
OpReturn
OpFunctionEnd
3rdparty/spirv-cross/shaders-no-opt/asm/comp/arithmetic-conversion-signs.asm.nocompat.vk.comp (vendored, new file, 131 lines)
@@ -0,0 +1,131 @@
|
||||
; SPIR-V
|
||||
; Version: 1.0
|
||||
; Generator: Khronos Glslang Reference Front End; 7
|
||||
; Bound: 76
|
||||
; Schema: 0
|
||||
OpCapability Shader
OpCapability Int16
OpCapability StorageBuffer16BitAccess
OpExtension "SPV_KHR_16bit_storage"
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpSourceExtension "GL_EXT_shader_explicit_arithmetic_types_int16"
OpName %main "main"
OpName %SSBO "SSBO"
OpMemberName %SSBO 0 "s32"
OpMemberName %SSBO 1 "u32"
OpMemberName %SSBO 2 "s16"
OpMemberName %SSBO 3 "u16"
OpMemberName %SSBO 4 "f32"
OpName %_ ""
OpMemberDecorate %SSBO 0 Offset 0
OpMemberDecorate %SSBO 1 Offset 4
OpMemberDecorate %SSBO 2 Offset 8
OpMemberDecorate %SSBO 3 Offset 10
OpMemberDecorate %SSBO 4 Offset 12
OpDecorate %SSBO BufferBlock
OpDecorate %_ DescriptorSet 0
OpDecorate %_ Binding 0
%void = OpTypeVoid
%3 = OpTypeFunction %void
%int = OpTypeInt 32 1
%uint = OpTypeInt 32 0
%short = OpTypeInt 16 1
%ushort = OpTypeInt 16 0
%float = OpTypeFloat 32
%SSBO = OpTypeStruct %int %uint %short %ushort %float
%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO
%_ = OpVariable %_ptr_Uniform_SSBO Uniform
%int_2 = OpConstant %int 2
%int_0 = OpConstant %int 0
%_ptr_Uniform_int = OpTypePointer Uniform %int
%_ptr_Uniform_short = OpTypePointer Uniform %short
%int_1 = OpConstant %int 1
%_ptr_Uniform_uint = OpTypePointer Uniform %uint
%int_3 = OpConstant %int 3
%_ptr_Uniform_ushort = OpTypePointer Uniform %ushort
%int_4 = OpConstant %int 4
%_ptr_Uniform_float = OpTypePointer Uniform %float
%main = OpFunction %void None %3
%5 = OpLabel
%ptr_s32 = OpAccessChain %_ptr_Uniform_int %_ %int_0
%ptr_u32 = OpAccessChain %_ptr_Uniform_uint %_ %int_1
%ptr_s16 = OpAccessChain %_ptr_Uniform_short %_ %int_2
%ptr_u16 = OpAccessChain %_ptr_Uniform_ushort %_ %int_3
%ptr_f32 = OpAccessChain %_ptr_Uniform_float %_ %int_4
%s32 = OpLoad %int %ptr_s32
%u32 = OpLoad %uint %ptr_u32
%s16 = OpLoad %short %ptr_s16
%u16 = OpLoad %ushort %ptr_u16
%f32 = OpLoad %float %ptr_f32

; Sign-extend
%s16_to_s32_signed = OpSConvert %int %s16
OpStore %ptr_s32 %s16_to_s32_signed
%s16_to_u32_signed = OpSConvert %uint %s16
OpStore %ptr_u32 %s16_to_u32_signed

%u16_to_s32_signed = OpSConvert %int %u16
OpStore %ptr_s32 %u16_to_s32_signed
%u16_to_u32_signed = OpSConvert %uint %u16
OpStore %ptr_u32 %u16_to_u32_signed

; Zero-extend
; Result must be unsigned for OpUConvert.
;%s16_to_s32_unsigned = OpUConvert %int %s16
;OpStore %ptr_s32 %s16_to_s32_unsigned
%s16_to_u32_unsigned = OpUConvert %uint %s16
OpStore %ptr_u32 %s16_to_u32_unsigned

;%u16_to_s32_unsigned = OpUConvert %int %u16
;OpStore %ptr_s32 %u16_to_s32_unsigned
%u16_to_u32_unsigned = OpUConvert %uint %u16
OpStore %ptr_u32 %u16_to_u32_unsigned

; Truncate (SConvert == UConvert)
%s32_to_s16_signed = OpSConvert %short %s32
OpStore %ptr_s16 %s32_to_s16_signed
%s32_to_u16_signed = OpSConvert %ushort %s32
OpStore %ptr_u16 %s32_to_u16_signed

%u32_to_s16_signed = OpSConvert %short %u32
OpStore %ptr_s16 %u32_to_s16_signed
%u32_to_u16_signed = OpSConvert %ushort %u32
OpStore %ptr_u16 %u32_to_u16_signed

;%s32_to_s16_unsigned = OpUConvert %short %s32
;OpStore %ptr_s16 %s32_to_s16_unsigned
%s32_to_u16_unsigned = OpUConvert %ushort %s32
OpStore %ptr_u16 %s32_to_u16_unsigned

;%u32_to_s16_unsigned = OpUConvert %short %u32
;OpStore %ptr_s16 %u32_to_s16_unsigned
%u32_to_u16_unsigned = OpUConvert %ushort %u32
OpStore %ptr_u16 %u32_to_u16_unsigned

; SToF
%s16_to_f32_signed = OpConvertSToF %float %s16
OpStore %ptr_f32 %s16_to_f32_signed
%u16_to_f32_signed = OpConvertSToF %float %u16
OpStore %ptr_f32 %u16_to_f32_signed
%s16_to_f32_unsigned = OpConvertUToF %float %s16
OpStore %ptr_f32 %s16_to_f32_unsigned
%u16_to_f32_unsigned = OpConvertUToF %float %u16
OpStore %ptr_f32 %u16_to_f32_unsigned

; FToS
%f32_to_s16_signed = OpConvertFToS %short %f32
OpStore %ptr_s16 %f32_to_s16_signed
%f32_to_u16_signed = OpConvertFToS %ushort %f32
OpStore %ptr_u16 %f32_to_u16_signed

; FToU
%f32_to_u16_unsigned = OpConvertFToU %ushort %f32
OpStore %ptr_u16 %f32_to_u16_unsigned
; Result must be unsigned for FToU, so don't bother testing that.

OpReturn
OpFunctionEnd
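The test above exercises the 16-bit/32-bit scalar conversions SPIRV-Cross must map onto GLSL casts: OpSConvert sign-extends from the source width regardless of the result's signedness, OpUConvert zero-extends (and requires an unsigned result type, hence the commented-out cases), and narrowing conversions simply drop the high bits. A minimal C++ sketch of those semantics, illustrative only and not part of the commit (two's-complement narrowing assumed):

```
#include <cstdint>

// OpSConvert: sign-extend from 16 to 32 bits even when the result is unsigned,
// so a u16 holding 0xFFFF widens to 0xFFFFFFFF.
uint32_t sconvert_u16_to_u32(uint16_t u16)
{
    return static_cast<uint32_t>(static_cast<int32_t>(static_cast<int16_t>(u16)));
}

// OpUConvert: zero-extend; the result type must be unsigned, which is why the
// OpUConvert-to-int cases in the test are commented out.
uint32_t uconvert_s16_to_u32(int16_t s16)
{
    return static_cast<uint32_t>(static_cast<uint16_t>(s16));
}

// Narrowing SConvert/UConvert both just truncate to the lower 16 bits.
uint16_t truncate_u32_to_u16(uint32_t u32)
{
    return static_cast<uint16_t>(u32);
}
```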
63
3rdparty/spirv-cross/shaders-no-opt/asm/comp/spec-constant-op-convert-sign.asm.comp
vendored
Normal file
@@ -0,0 +1,63 @@
; SPIR-V
; Version: 1.0
; Generator: Khronos Glslang Reference Front End; 7
; Bound: 30
; Schema: 0
OpCapability Shader
OpCapability Int64
%1 = OpExtInstImport "GLSL.std.450"
OpMemoryModel Logical GLSL450
OpEntryPoint GLCompute %main "main"
OpExecutionMode %main LocalSize 1 1 1
OpSource GLSL 450
OpSourceExtension "GL_ARB_gpu_shader_int64"
OpName %main "main"
OpName %SSBO "SSBO"
OpMemberName %SSBO 0 "s64"
OpMemberName %SSBO 1 "u64"
OpName %_ ""
OpName %ConstantInt "ConstantInt"
OpName %ConstantInt64_1 "ConstantInt64_1"
OpName %ConstantUint "ConstantUint"
OpName %ConstantInt64_2 "ConstantInt64_2"
OpName %ConstantUint64_1 "ConstantUint64_1"
OpName %ConstantUint64_2 "ConstantUint64_2"
OpMemberDecorate %SSBO 0 Offset 0
OpMemberDecorate %SSBO 1 Offset 4
OpDecorate %SSBO BufferBlock
OpDecorate %_ DescriptorSet 0
OpDecorate %_ Binding 0
OpDecorate %ConstantInt SpecId 0
OpDecorate %ConstantUint SpecId 1
%void = OpTypeVoid
%3 = OpTypeFunction %void
%int = OpTypeInt 32 1
%uint = OpTypeInt 32 0
%long = OpTypeInt 64 1
%ulong = OpTypeInt 64 0
%SSBO = OpTypeStruct %int %uint
%_ptr_Uniform_SSBO = OpTypePointer Uniform %SSBO
%_ = OpVariable %_ptr_Uniform_SSBO Uniform
%int_0 = OpConstant %int 0
%ulong_0 = OpConstant %ulong 0
%ConstantInt = OpSpecConstant %int 1
%ConstantUint = OpSpecConstant %uint 2
%ConstantInt64_1 = OpSpecConstantOp %long SConvert %ConstantInt
%ConstantInt64_2 = OpSpecConstantOp %long SConvert %ConstantUint
%ConstantUint64_1 = OpSpecConstantOp %ulong SConvert %ConstantInt
%ConstantUint64_2 = OpSpecConstantOp %ulong SConvert %ConstantUint
%added_long = OpSpecConstantOp %long IAdd %ConstantInt64_1 %ConstantInt64_2
%added_ulong = OpSpecConstantOp %ulong IAdd %ConstantUint64_1 %ConstantUint64_2
%trunc_long = OpSpecConstantOp %int SConvert %added_long
%trunc_ulong = OpSpecConstantOp %uint SConvert %added_ulong
%_ptr_Uniform_int = OpTypePointer Uniform %int
%int_1 = OpConstant %int 1
%_ptr_Uniform_uint = OpTypePointer Uniform %uint
%main = OpFunction %void None %3
%5 = OpLabel
%22 = OpAccessChain %_ptr_Uniform_int %_ %int_0
OpStore %22 %trunc_long
%29 = OpAccessChain %_ptr_Uniform_uint %_ %int_1
OpStore %29 %trunc_ulong
OpReturn
OpFunctionEnd
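For the spec-constant test above, the interesting part is that OpSpecConstantOp SConvert sign-extends the 32-bit spec constants to 64 bits before the IAdd, and the final SConvert back to 32 bits truncates. With the default values (ConstantInt = 1, ConstantUint = 2) the folded result is 3 in both SSBO members; a small C++ check of that arithmetic, illustrative only and not generated by the commit:

```
#include <cassert>
#include <cstdint>

int main()
{
    const int32_t ConstantInt = 1;   // SpecId 0 default
    const uint32_t ConstantUint = 2; // SpecId 1 default

    // SConvert sign-extends both 32-bit operands to 64 bits before the IAdd.
    const int64_t added_long = int64_t(ConstantInt) + int64_t(int32_t(ConstantUint));
    const uint64_t added_ulong = uint64_t(added_long);

    // The trailing SConvert back to 32 bits truncates.
    assert(int32_t(added_long) == 3);
    assert(uint32_t(added_ulong) == 3u);
    return 0;
}
```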
11
3rdparty/spirv-cross/shaders/legacy/fragment/fma.legacy.frag
vendored
Normal file
@@ -0,0 +1,11 @@
#version 450

layout(location = 0) in vec4 vA;
layout(location = 1) in vec4 vB;
layout(location = 2) in vec4 vC;
layout(location = 0) out vec4 FragColor;

void main()
{
	FragColor = fma(vA, vB, vC);
}
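fma() only exists in GLSL 4.00 and up, which is presumably why this shader carries the .legacy. tag; for older targets the usual substitute is the unfused multiply-add (an assumption about intent, not the reference output of this commit). In C++ terms:

```
#include <cmath>

// std::fma computes a * b + c with a single rounding step.
float fused(float a, float b, float c)
{
    return std::fma(a, b, c);
}

// The legacy-friendly fallback rounds twice, which is usually acceptable for shaders.
float unfused(float a, float b, float c)
{
    return a * b + c;
}
```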
4
3rdparty/spirv-cross/spirv_cfg.cpp
vendored
@@ -143,7 +143,7 @@ void CFG::build_post_order_visit_order()

void CFG::add_branch(uint32_t from, uint32_t to)
{
const auto add_unique = [](vector<uint32_t> &l, uint32_t value) {
const auto add_unique = [](SmallVector<uint32_t> &l, uint32_t value) {
auto itr = find(begin(l), end(l), value);
if (itr == end(l))
l.push_back(value);
@@ -223,4 +223,4 @@ void DominatorBuilder::lift_continue_block_dominator()
if (back_edge_dominator)
dominator = cfg.get_function().entry_block;
}
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

14
3rdparty/spirv-cross/spirv_cfg.hpp
vendored
@@ -63,7 +63,7 @@ public:

uint32_t find_common_dominator(uint32_t a, uint32_t b) const;

const std::vector<uint32_t> &get_preceding_edges(uint32_t block) const
const SmallVector<uint32_t> &get_preceding_edges(uint32_t block) const
{
auto itr = preceding_edges.find(block);
if (itr != std::end(preceding_edges))
@@ -72,7 +72,7 @@ public:
return empty_vector;
}

const std::vector<uint32_t> &get_succeeding_edges(uint32_t block) const
const SmallVector<uint32_t> &get_succeeding_edges(uint32_t block) const
{
auto itr = succeeding_edges.find(block);
if (itr != std::end(succeeding_edges))
@@ -111,12 +111,12 @@ private:

Compiler &compiler;
const SPIRFunction &func;
std::unordered_map<uint32_t, std::vector<uint32_t>> preceding_edges;
std::unordered_map<uint32_t, std::vector<uint32_t>> succeeding_edges;
std::unordered_map<uint32_t, SmallVector<uint32_t>> preceding_edges;
std::unordered_map<uint32_t, SmallVector<uint32_t>> succeeding_edges;
std::unordered_map<uint32_t, uint32_t> immediate_dominators;
std::unordered_map<uint32_t, VisitOrder> visit_order;
std::vector<uint32_t> post_order;
std::vector<uint32_t> empty_vector;
SmallVector<uint32_t> post_order;
SmallVector<uint32_t> empty_vector;

void add_branch(uint32_t from, uint32_t to);
void build_post_order_visit_order();
@@ -144,6 +144,6 @@ private:
const CFG &cfg;
uint32_t dominator = 0;
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif

243
3rdparty/spirv-cross/spirv_common.hpp
vendored
@@ -18,22 +18,8 @@
#define SPIRV_CROSS_COMMON_HPP

#include "spirv.hpp"

#include <algorithm>
#include <cstdio>
#include <cstring>
#include <functional>
#include <memory>
#include <sstream>
#include <stack>
#include <stdexcept>
#include <stdint.h>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>
#include "spirv_cross_containers.hpp"
#include "spirv_cross_error_handling.hpp"

// A bit crude, but allows projects which embed SPIRV-Cross statically to
// effectively hide all the symbols from other projects.
@@ -53,68 +39,16 @@

namespace SPIRV_CROSS_NAMESPACE
{

#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
#ifndef _MSC_VER
[[noreturn]]
#endif
inline void
report_and_abort(const std::string &msg)
{
#ifdef NDEBUG
(void)msg;
#else
fprintf(stderr, "There was a compiler error: %s\n", msg.c_str());
#endif
fflush(stderr);
abort();
}

#define SPIRV_CROSS_THROW(x) report_and_abort(x)
#else
class CompilerError : public std::runtime_error
{
public:
explicit CompilerError(const std::string &str)
: std::runtime_error(str)
{
}
};

#define SPIRV_CROSS_THROW(x) throw CompilerError(x)
#endif

//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE

// MSVC 2013 does not have noexcept. We need this for Variant to get move constructor to work correctly
// instead of copy constructor.
// MSVC 2013 ignores that move constructors cannot throw in std::vector, so just don't define it.
#if defined(_MSC_VER) && _MSC_VER < 1900
#define SPIRV_CROSS_NOEXCEPT
#else
#define SPIRV_CROSS_NOEXCEPT noexcept
#endif

#if __cplusplus >= 201402l
#define SPIRV_CROSS_DEPRECATED(reason) [[deprecated(reason)]]
#elif defined(__GNUC__)
#define SPIRV_CROSS_DEPRECATED(reason) __attribute__((deprecated))
#elif defined(_MSC_VER)
#define SPIRV_CROSS_DEPRECATED(reason) __declspec(deprecated(reason))
#else
#define SPIRV_CROSS_DEPRECATED(reason)
#endif

namespace inner
{
template <typename T>
void join_helper(std::ostringstream &stream, T &&t)
void join_helper(StringStream<> &stream, T &&t)
{
stream << std::forward<T>(t);
}

template <typename T, typename... Ts>
void join_helper(std::ostringstream &stream, T &&t, Ts &&... ts)
void join_helper(StringStream<> &stream, T &&t, Ts &&... ts)
{
stream << std::forward<T>(t);
join_helper(stream, std::forward<Ts>(ts)...);
@@ -217,7 +151,7 @@ public:

// Need to enforce an order here for reproducible results,
// but hitting this path should happen extremely rarely, so having this slow path is fine.
std::vector<uint32_t> bits;
SmallVector<uint32_t> bits;
bits.reserve(higher.size());
for (auto &v : higher)
bits.push_back(v);
@@ -244,21 +178,21 @@ private:
template <typename... Ts>
std::string join(Ts &&... ts)
{
std::ostringstream stream;
StringStream<> stream;
inner::join_helper(stream, std::forward<Ts>(ts)...);
return stream.str();
}

inline std::string merge(const std::vector<std::string> &list)
inline std::string merge(const SmallVector<std::string> &list)
{
std::string s;
StringStream<> stream;
for (auto &elem : list)
{
s += elem;
stream << elem;
if (&elem != &list.back())
s += ", ";
stream << ", ";
}
return s;
return stream.str();
}

// Make sure we don't accidentally call this with float or doubles with SFINAE.
@@ -340,15 +274,14 @@ struct Instruction
struct IVariant
{
virtual ~IVariant() = default;
virtual std::unique_ptr<IVariant> clone() = 0;

virtual IVariant *clone(ObjectPoolBase *pool) = 0;
uint32_t self = 0;
};

#define SPIRV_CROSS_DECLARE_CLONE(T) \
std::unique_ptr<IVariant> clone() override \
{ \
return std::unique_ptr<IVariant>(new T(*this)); \
#define SPIRV_CROSS_DECLARE_CLONE(T) \
IVariant *clone(ObjectPoolBase *pool) override \
{ \
return static_cast<ObjectPool<T> *>(pool)->allocate(*this); \
}

enum Types
@@ -421,7 +354,7 @@ struct SPIRConstantOp : IVariant
}

spv::Op opcode;
std::vector<uint32_t> arguments;
SmallVector<uint32_t> arguments;
uint32_t basetype;

SPIRV_CROSS_DECLARE_CLONE(SPIRConstantOp)
@@ -469,14 +402,14 @@ struct SPIRType : IVariant
uint32_t columns = 1;

// Arrays, support array of arrays by having a vector of array sizes.
std::vector<uint32_t> array;
SmallVector<uint32_t> array;

// Array elements can be either specialization constants or specialization ops.
// This array determines how to interpret the array size.
// If an element is true, the element is a literal,
// otherwise, it's an expression, which must be resolved on demand.
// The actual size is not really known until runtime.
std::vector<bool> array_size_literal;
SmallVector<bool> array_size_literal;

// Pointers
// Keep track of how many pointer layers we have.
@@ -485,7 +418,7 @@ struct SPIRType : IVariant

spv::StorageClass storage = spv::StorageClassGeneric;

std::vector<uint32_t> member_types;
SmallVector<uint32_t> member_types;

struct ImageType
{
@@ -556,7 +489,7 @@ struct SPIREntryPoint
uint32_t self = 0;
std::string name;
std::string orig_name;
std::vector<uint32_t> interface_variables;
SmallVector<uint32_t> interface_variables;

Bitset flags;
struct
@@ -610,11 +543,11 @@ struct SPIRExpression : IVariant
bool access_chain = false;

// A list of expressions which this expression depends on.
std::vector<uint32_t> expression_dependencies;
SmallVector<uint32_t> expression_dependencies;

// By reading this expression, we implicitly read these expressions as well.
// Used by access chain Store and Load since we read multiple expressions in this case.
std::vector<uint32_t> implied_read_expressions;
SmallVector<uint32_t> implied_read_expressions;

SPIRV_CROSS_DECLARE_CLONE(SPIRExpression)
};
@@ -632,7 +565,7 @@ struct SPIRFunctionPrototype : IVariant
}

uint32_t return_type;
std::vector<uint32_t> parameter_types;
SmallVector<uint32_t> parameter_types;

SPIRV_CROSS_DECLARE_CLONE(SPIRFunctionPrototype)
};
@@ -716,7 +649,7 @@ struct SPIRBlock : IVariant
uint32_t false_block = 0;
uint32_t default_block = 0;

std::vector<Instruction> ops;
SmallVector<Instruction> ops;

struct Phi
{
@@ -726,22 +659,22 @@ struct SPIRBlock : IVariant
};

// Before entering this block flush out local variables to magical "phi" variables.
std::vector<Phi> phi_variables;
SmallVector<Phi> phi_variables;

// Declare these temporaries before beginning the block.
// Used for handling complex continue blocks which have side effects.
std::vector<std::pair<uint32_t, uint32_t>> declare_temporary;
SmallVector<std::pair<uint32_t, uint32_t>> declare_temporary;

// Declare these temporaries, but only conditionally if this block turns out to be
// a complex loop header.
std::vector<std::pair<uint32_t, uint32_t>> potential_declare_temporary;
SmallVector<std::pair<uint32_t, uint32_t>> potential_declare_temporary;

struct Case
{
uint32_t value;
uint32_t block;
};
std::vector<Case> cases;
SmallVector<Case> cases;

// If we have tried to optimize code for this block but failed,
// keep track of this.
@@ -759,17 +692,17 @@ struct SPIRBlock : IVariant

// All access to these variables are dominated by this block,
// so before branching anywhere we need to make sure that we declare these variables.
std::vector<uint32_t> dominated_variables;
SmallVector<uint32_t> dominated_variables;

// These are variables which should be declared in a for loop header, if we
// fail to use a classic for-loop,
// we remove these variables, and fall back to regular variables outside the loop.
std::vector<uint32_t> loop_variables;
SmallVector<uint32_t> loop_variables;

// Some expressions are control-flow dependent, i.e. any instruction which relies on derivatives or
// sub-group-like operations.
// Make sure that we only use these expressions in the original block.
std::vector<uint32_t> invalidate_expressions;
SmallVector<uint32_t> invalidate_expressions;

SPIRV_CROSS_DECLARE_CLONE(SPIRBlock)
};
@@ -822,16 +755,16 @@ struct SPIRFunction : IVariant

uint32_t return_type;
uint32_t function_type;
std::vector<Parameter> arguments;
SmallVector<Parameter> arguments;

// Can be used by backends to add magic arguments.
// Currently used by combined image/sampler implementation.

std::vector<Parameter> shadow_arguments;
std::vector<uint32_t> local_variables;
SmallVector<Parameter> shadow_arguments;
SmallVector<uint32_t> local_variables;
uint32_t entry_block = 0;
std::vector<uint32_t> blocks;
std::vector<CombinedImageSamplerParameter> combined_parameters;
SmallVector<uint32_t> blocks;
SmallVector<CombinedImageSamplerParameter> combined_parameters;

void add_local_variable(uint32_t id)
{
@@ -847,17 +780,19 @@ struct SPIRFunction : IVariant
// Hooks to be run when the function returns.
// Mostly used for lowering internal data structures onto flattened structures.
// Need to defer this, because they might rely on things which change during compilation.
std::vector<std::function<void()>> fixup_hooks_out;
// Intentionally not a small vector, this one is rare, and std::function can be large.
Vector<std::function<void()>> fixup_hooks_out;

// Hooks to be run when the function begins.
// Mostly used for populating internal data structures from flattened structures.
// Need to defer this, because they might rely on things which change during compilation.
std::vector<std::function<void()>> fixup_hooks_in;
// Intentionally not a small vector, this one is rare, and std::function can be large.
Vector<std::function<void()>> fixup_hooks_in;

// On function entry, make sure to copy a constant array into thread addr space to work around
// the case where we are passing a constant array by value to a function on backends which do not
// consider arrays value types.
std::vector<uint32_t> constant_arrays_needed_on_stack;
SmallVector<uint32_t> constant_arrays_needed_on_stack;

bool active = false;
bool flush_undeclared = true;
@@ -901,7 +836,7 @@ struct SPIRAccessChain : IVariant

// By reading this expression, we implicitly read these expressions as well.
// Used by access chain Store and Load since we read multiple expressions in this case.
std::vector<uint32_t> implied_read_expressions;
SmallVector<uint32_t> implied_read_expressions;

SPIRV_CROSS_DECLARE_CLONE(SPIRAccessChain)
};
@@ -928,7 +863,7 @@ struct SPIRVariable : IVariant
uint32_t initializer = 0;
uint32_t basevariable = 0;

std::vector<uint32_t> dereference_chain;
SmallVector<uint32_t> dereference_chain;
bool compat_builtin = false;

// If a variable is shadowed, we only statically assign to it
@@ -939,7 +874,7 @@ struct SPIRVariable : IVariant
uint32_t static_expression = 0;

// Temporaries which can remain forwarded as long as this variable is not modified.
std::vector<uint32_t> dependees;
SmallVector<uint32_t> dependees;
bool forwardable = true;

bool deferred_declaration = false;
@@ -1178,7 +1113,7 @@ struct SPIRConstant : IVariant
: constant_type(constant_type_)
, specialization(specialized)
{
subconstants.insert(end(subconstants), elements, elements + num_elements);
subconstants.insert(std::end(subconstants), elements, elements + num_elements);
specialization = specialized;
}

@@ -1247,7 +1182,7 @@ struct SPIRConstant : IVariant
bool is_used_as_lut = false;

// For composites which are constant arrays, etc.
std::vector<uint32_t> subconstants;
SmallVector<uint32_t> subconstants;

// Non-Vulkan GLSL, HLSL and sometimes MSL emits defines for each specialization constant,
// and uses them to initialize the constant. This allows the user
@@ -1258,11 +1193,25 @@ struct SPIRConstant : IVariant
SPIRV_CROSS_DECLARE_CLONE(SPIRConstant)
};

// Variants have a very specific allocation scheme.
struct ObjectPoolGroup
{
std::unique_ptr<ObjectPoolBase> pools[TypeCount];
};

class Variant
{
public:
// MSVC 2013 workaround, we shouldn't need these constructors.
Variant() = default;
explicit Variant(ObjectPoolGroup *group_)
: group(group_)
{
}

~Variant()
{
if (holder)
group->pools[type]->free_opaque(holder);
}

// Marking custom move constructor as noexcept is important.
Variant(Variant &&other) SPIRV_CROSS_NOEXCEPT
@@ -1270,19 +1219,23 @@ public:
*this = std::move(other);
}

Variant(const Variant &variant)
{
*this = variant;
}
// We cannot copy from other variant without our own pool group.
// Have to explicitly copy.
Variant(const Variant &variant) = delete;

// Marking custom move constructor as noexcept is important.
Variant &operator=(Variant &&other) SPIRV_CROSS_NOEXCEPT
{
if (this != &other)
{
holder = std::move(other.holder);
if (holder)
group->pools[type]->free_opaque(holder);
holder = other.holder;
group = other.group;
type = other.type;
allow_type_rewrite = other.allow_type_rewrite;

other.holder = nullptr;
other.type = TypeNone;
}
return *this;
@@ -1293,29 +1246,52 @@ public:
// This should never happen.
Variant &operator=(const Variant &other)
{
//#define SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
#ifdef SPIRV_CROSS_COPY_CONSTRUCTOR_SANITIZE
abort();
#endif
if (this != &other)
{
holder.reset();
if (holder)
group->pools[type]->free_opaque(holder);

if (other.holder)
holder = other.holder->clone();
holder = other.holder->clone(group->pools[other.type].get());
else
holder = nullptr;

type = other.type;
allow_type_rewrite = other.allow_type_rewrite;
}
return *this;
}

void set(std::unique_ptr<IVariant> val, Types new_type)
void set(IVariant *val, Types new_type)
{
holder = std::move(val);
if (holder)
group->pools[type]->free_opaque(holder);
holder = nullptr;

if (!allow_type_rewrite && type != TypeNone && type != new_type)
{
if (val)
group->pools[new_type]->free_opaque(val);
SPIRV_CROSS_THROW("Overwriting a variant with new type.");
}

holder = val;
type = new_type;
allow_type_rewrite = false;
}

template <typename T, typename... Ts>
T *allocate_and_set(Types new_type, Ts &&... ts)
{
T *val = static_cast<ObjectPool<T> &>(*group->pools[new_type]).allocate(std::forward<Ts>(ts)...);
set(val, new_type);
return val;
}

template <typename T>
T &get()
{
@@ -1323,7 +1299,7 @@ public:
SPIRV_CROSS_THROW("nullptr");
if (static_cast<Types>(T::type) != type)
SPIRV_CROSS_THROW("Bad cast");
return *static_cast<T *>(holder.get());
return *static_cast<T *>(holder);
}

template <typename T>
@@ -1333,7 +1309,7 @@ public:
SPIRV_CROSS_THROW("nullptr");
if (static_cast<Types>(T::type) != type)
SPIRV_CROSS_THROW("Bad cast");
return *static_cast<const T *>(holder.get());
return *static_cast<const T *>(holder);
}

Types get_type() const
@@ -1353,7 +1329,9 @@ public:

void reset()
{
holder.reset();
if (holder)
group->pools[type]->free_opaque(holder);
holder = nullptr;
type = TypeNone;
}

@@ -1363,7 +1341,8 @@ public:
}

private:
std::unique_ptr<IVariant> holder;
ObjectPoolGroup *group = nullptr;
IVariant *holder = nullptr;
Types type = TypeNone;
bool allow_type_rewrite = false;
};
@@ -1383,9 +1362,7 @@ const T &variant_get(const Variant &var)
template <typename T, typename... P>
T &variant_set(Variant &var, P &&... args)
{
auto uptr = std::unique_ptr<T>(new T(std::forward<P>(args)...));
auto ptr = uptr.get();
var.set(std::move(uptr), static_cast<Types>(T::type));
auto *ptr = var.allocate_and_set<T>(static_cast<Types>(T::type), std::forward<P>(args)...);
return *ptr;
}

@@ -1430,7 +1407,9 @@ struct Meta
};

Decoration decoration;
std::vector<Decoration> members;

// Intentionally not a SmallVector. Decoration is large and somewhat rare.
Vector<Decoration> members;

std::unordered_map<uint32_t, uint32_t> decoration_word_offset;

@@ -1529,6 +1508,6 @@ static inline bool opcode_is_sign_invariant(spv::Op opcode)
return false;
}
}
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif

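The spirv_common.hpp changes above stop cloning IVariant objects through std::unique_ptr and instead hand every Variant a pointer to an ObjectPoolGroup, so allocation and destruction go through type-erased pools (ObjectPool / ObjectPoolBase, declared in the new spirv_cross_containers.hpp). A stripped-down sketch of that pattern with simplified stand-in pool types, not the real implementation (the actual pools add block allocation and a vacant-slot list):

```
#include <cstdlib>
#include <new>
#include <utility>

// Simplified stand-ins for ObjectPoolBase/ObjectPool.
struct PoolBase
{
    virtual ~PoolBase() = default;
    // Type-erased free, matching how Variant::reset() releases its holder.
    virtual void free_opaque(void *ptr) = 0;
};

template <typename T>
struct Pool : PoolBase
{
    template <typename... Ts>
    T *allocate(Ts &&... ts)
    {
        // The real pool reuses slab storage; plain malloc keeps the sketch short.
        void *mem = std::malloc(sizeof(T));
        return new (mem) T(std::forward<Ts>(ts)...);
    }

    void free_opaque(void *ptr) override
    {
        static_cast<T *>(ptr)->~T();
        std::free(ptr);
    }
};
```

The Variant then stores a raw IVariant pointer plus the owning pool group, which is why the copy constructor is deleted in the diff and copies must route through clone(pool).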
4
3rdparty/spirv-cross/spirv_cpp.cpp
vendored
@@ -334,7 +334,7 @@ string CompilerCPP::compile()
reset();

// Move constructor for this type is broken on GCC 4.9 ...
buffer = unique_ptr<ostringstream>(new ostringstream());
buffer.reset();

emit_header();
emit_resources();
@@ -355,7 +355,7 @@ string CompilerCPP::compile()
// Entry point in CPP is always main() for the time being.
get_entry_point().name = "main";

return buffer->str();
return buffer.str();
}

void CompilerCPP::emit_c_linkage()

7
3rdparty/spirv-cross/spirv_cpp.hpp
vendored
@@ -19,7 +19,6 @@

#include "spirv_glsl.hpp"
#include <utility>
#include <vector>

namespace SPIRV_CROSS_NAMESPACE
{
@@ -27,7 +26,7 @@ class CompilerCPP : public CompilerGLSL
{
public:
explicit CompilerCPP(std::vector<uint32_t> spirv_)
: CompilerGLSL(move(spirv_))
: CompilerGLSL(std::move(spirv_))
{
}

@@ -75,13 +74,13 @@ private:

std::string argument_decl(const SPIRFunction::Parameter &arg);

std::vector<std::string> resource_registrations;
SmallVector<std::string> resource_registrations;
std::string impl_type;
std::string resource_type;
uint32_t shared_counter = 0;

std::string interface_name;
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif

83
3rdparty/spirv-cross/spirv_cross.cpp
vendored
@@ -1897,9 +1897,9 @@ bool Compiler::BufferAccessHandler::handle(Op opcode, const uint32_t *args, uint
return true;
}

std::vector<BufferRange> Compiler::get_active_buffer_ranges(uint32_t id) const
SmallVector<BufferRange> Compiler::get_active_buffer_ranges(uint32_t id) const
{
std::vector<BufferRange> ranges;
SmallVector<BufferRange> ranges;
BufferAccessHandler handler(*this, ranges, id);
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), handler);
return ranges;
@@ -2126,9 +2126,9 @@ void Compiler::inherit_expression_dependencies(uint32_t dst, uint32_t source_exp
e_deps.erase(unique(begin(e_deps), end(e_deps)), end(e_deps));
}

vector<EntryPoint> Compiler::get_entry_points_and_stages() const
SmallVector<EntryPoint> Compiler::get_entry_points_and_stages() const
{
vector<EntryPoint> entries;
SmallVector<EntryPoint> entries;
for (auto &entry : ir.entry_points)
entries.push_back({ entry.second.orig_name, entry.second.model });
return entries;
@@ -2715,9 +2715,9 @@ void Compiler::build_combined_image_samplers()
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), handler);
}

vector<SpecializationConstant> Compiler::get_specialization_constants() const
SmallVector<SpecializationConstant> Compiler::get_specialization_constants() const
{
vector<SpecializationConstant> spec_consts;
SmallVector<SpecializationConstant> spec_consts;
ir.for_each_typed_id<SPIRConstant>([&](uint32_t, const SPIRConstant &c) {
if (c.specialization && has_decoration(c.self, DecorationSpecId))
spec_consts.push_back({ c.self, get_decoration(c.self, DecorationSpecId) });
@@ -2874,6 +2874,9 @@ void Compiler::AnalyzeVariableScopeAccessHandler::set_current_block(const SPIRBl

void Compiler::AnalyzeVariableScopeAccessHandler::notify_variable_access(uint32_t id, uint32_t block)
{
if (id == 0)
return;

if (id_is_phi_variable(id))
accessed_variables_to_block[id].insert(block);
else if (id_is_potential_temporary(id))
@@ -2924,6 +2927,8 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
partial_write_variables_to_block[var->self].insert(current_block->self);
}

// args[0] might be an access chain we have to track use of.
notify_variable_access(args[0], current_block->self);
// Might try to store a Phi variable here.
notify_variable_access(args[1], current_block->self);
break;
@@ -2941,9 +2946,16 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
if (var)
accessed_variables_to_block[var->self].insert(current_block->self);

for (uint32_t i = 3; i < length; i++)
// args[2] might be another access chain we have to track use of.
for (uint32_t i = 2; i < length; i++)
notify_variable_access(args[i], current_block->self);

// Also keep track of the access chain pointer itself.
// In exceptionally rare cases, we can end up with a case where
// the access chain is generated in the loop body, but is consumed in continue block.
// This means we need complex loop workarounds, and we must detect this via CFG analysis.
notify_variable_access(args[1], current_block->self);

// The result of an access chain is a fixed expression and is not really considered a temporary.
auto &e = compiler.set<SPIRExpression>(args[1], "", args[0], true);
auto *backing_variable = compiler.maybe_get_backing_variable(ptr);
@@ -2951,6 +2963,7 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3

// Other backends might use SPIRAccessChain for this later.
compiler.ir.ids[args[1]].set_allow_type_rewrite();
access_chain_expressions.insert(args[1]);
break;
}

@@ -2973,6 +2986,10 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
partial_write_variables_to_block[var->self].insert(current_block->self);
}

// args[0:1] might be access chains we have to track use of.
for (uint32_t i = 0; i < 2; i++)
notify_variable_access(args[i], current_block->self);

var = compiler.maybe_get_backing_variable(rhs);
if (var)
accessed_variables_to_block[var->self].insert(current_block->self);
@@ -2988,6 +3005,11 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3
if (var)
accessed_variables_to_block[var->self].insert(current_block->self);

// Might be an access chain which we have to keep track of.
notify_variable_access(args[1], current_block->self);
if (access_chain_expressions.count(args[2]))
access_chain_expressions.insert(args[1]);

// Might try to copy a Phi variable here.
notify_variable_access(args[2], current_block->self);
break;
@@ -3004,6 +3026,9 @@ bool Compiler::AnalyzeVariableScopeAccessHandler::handle(spv::Op op, const uint3

// Loaded value is a temporary.
notify_variable_access(args[1], current_block->self);

// Might be an access chain we have to track use of.
notify_variable_access(args[2], current_block->self);
break;
}

@@ -3370,7 +3395,14 @@ void Compiler::analyze_variable_scope(SPIRFunction &entry, AnalyzeVariableScopeA
// If a temporary is used in more than one block, we might have to lift continue block
// access up to loop header like we did for variables.
if (blocks.size() != 1 && is_continue(block))
builder.add_block(ir.continue_block_to_loop_header[block]);
{
auto &loop_header_block = get<SPIRBlock>(ir.continue_block_to_loop_header[block]);
assert(loop_header_block.merge == SPIRBlock::MergeLoop);

// Only relevant if the loop is not marked as complex.
if (!loop_header_block.complex_continue)
builder.add_block(loop_header_block.self);
}
else if (blocks.size() != 1 && is_single_block_loop(block))
{
// Awkward case, because the loop header is also the continue block.
@@ -3387,14 +3419,27 @@ void Compiler::analyze_variable_scope(SPIRFunction &entry, AnalyzeVariableScopeA

if (!first_use_is_dominator || force_temporary)
{
// This should be very rare, but if we try to declare a temporary inside a loop,
// and that temporary is used outside the loop as well (spirv-opt inliner likes this)
// we should actually emit the temporary outside the loop.
hoisted_temporaries.insert(var.first);
forced_temporaries.insert(var.first);
if (handler.access_chain_expressions.count(var.first))
{
// Exceptionally rare case.
// We cannot declare temporaries of access chains (except on MSL perhaps with pointers).
// Rather than do that, we force a complex loop to make sure access chains are created and consumed
// in expected order.
auto &loop_header_block = get<SPIRBlock>(dominating_block);
assert(loop_header_block.merge == SPIRBlock::MergeLoop);
loop_header_block.complex_continue = true;
}
else
{
// This should be very rare, but if we try to declare a temporary inside a loop,
// and that temporary is used outside the loop as well (spirv-opt inliner likes this)
// we should actually emit the temporary outside the loop.
hoisted_temporaries.insert(var.first);
forced_temporaries.insert(var.first);

auto &block_temporaries = get<SPIRBlock>(dominating_block).declare_temporary;
block_temporaries.emplace_back(handler.result_id_to_type[var.first], var.first);
auto &block_temporaries = get<SPIRBlock>(dominating_block).declare_temporary;
block_temporaries.emplace_back(handler.result_id_to_type[var.first], var.first);
}
}
else if (blocks.size() > 1)
{
@@ -3966,7 +4011,7 @@ void Compiler::make_constant_null(uint32_t id, uint32_t type)
if (!constant_type.array_size_literal.back())
SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal.");

vector<uint32_t> elements(constant_type.array.back());
SmallVector<uint32_t> elements(constant_type.array.back());
for (uint32_t i = 0; i < constant_type.array.back(); i++)
elements[i] = parent_id;
set<SPIRConstant>(id, type, elements.data(), uint32_t(elements.size()), false);
@@ -3974,7 +4019,7 @@ void Compiler::make_constant_null(uint32_t id, uint32_t type)
else if (!constant_type.member_types.empty())
{
uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size()));
vector<uint32_t> elements(constant_type.member_types.size());
SmallVector<uint32_t> elements(constant_type.member_types.size());
for (uint32_t i = 0; i < constant_type.member_types.size(); i++)
{
make_constant_null(member_ids + i, constant_type.member_types[i]);
@@ -3989,12 +4034,12 @@ void Compiler::make_constant_null(uint32_t id, uint32_t type)
}
}

const std::vector<spv::Capability> &Compiler::get_declared_capabilities() const
const SmallVector<spv::Capability> &Compiler::get_declared_capabilities() const
{
return ir.declared_capabilities;
}

const std::vector<std::string> &Compiler::get_declared_extensions() const
const SmallVector<std::string> &Compiler::get_declared_extensions() const
{
return ir.declared_extensions;
}

51
3rdparty/spirv-cross/spirv_cross.hpp
vendored
@@ -54,24 +54,24 @@ struct Resource

struct ShaderResources
{
std::vector<Resource> uniform_buffers;
std::vector<Resource> storage_buffers;
std::vector<Resource> stage_inputs;
std::vector<Resource> stage_outputs;
std::vector<Resource> subpass_inputs;
std::vector<Resource> storage_images;
std::vector<Resource> sampled_images;
std::vector<Resource> atomic_counters;
std::vector<Resource> acceleration_structures;
SmallVector<Resource> uniform_buffers;
SmallVector<Resource> storage_buffers;
SmallVector<Resource> stage_inputs;
SmallVector<Resource> stage_outputs;
SmallVector<Resource> subpass_inputs;
SmallVector<Resource> storage_images;
SmallVector<Resource> sampled_images;
SmallVector<Resource> atomic_counters;
SmallVector<Resource> acceleration_structures;

// There can only be one push constant block,
// but keep the vector in case this restriction is lifted in the future.
std::vector<Resource> push_constant_buffers;
SmallVector<Resource> push_constant_buffers;

// For Vulkan GLSL and HLSL source,
// these correspond to separate texture2D and samplers respectively.
std::vector<Resource> separate_images;
std::vector<Resource> separate_samplers;
SmallVector<Resource> separate_images;
SmallVector<Resource> separate_samplers;
};

struct CombinedImageSampler
@@ -235,7 +235,7 @@ public:
// SPIR-V shader. The granularity of this analysis is per-member of a struct.
// This can be used for Buffer (UBO), BufferBlock/StorageBuffer (SSBO) and PushConstant blocks.
// ID is the Resource::id obtained from get_shader_resources().
std::vector<BufferRange> get_active_buffer_ranges(uint32_t id) const;
SmallVector<BufferRange> get_active_buffer_ranges(uint32_t id) const;

// Returns the effective size of a buffer block.
size_t get_declared_struct_size(const SPIRType &struct_type) const;
@@ -308,7 +308,7 @@ public:
// New variants of entry point query and reflection.
// Names for entry points in the SPIR-V module may alias if they belong to different execution models.
// To disambiguate, we must pass along with the entry point names the execution model.
std::vector<EntryPoint> get_entry_points_and_stages() const;
SmallVector<EntryPoint> get_entry_points_and_stages() const;
void set_entry_point(const std::string &entry, spv::ExecutionModel execution_model);

// Renames an entry point from old_name to new_name.
@@ -392,7 +392,7 @@ public:
void build_combined_image_samplers();

// Gets a remapping for the combined image samplers.
const std::vector<CombinedImageSampler> &get_combined_image_samplers() const
const SmallVector<CombinedImageSampler> &get_combined_image_samplers() const
{
return combined_image_samplers;
}
@@ -417,7 +417,7 @@ public:
// For composite types, the subconstants can be iterated over and modified.
// constant_type is the SPIRType for the specialization constant,
// which can be queried to determine which fields in the unions should be poked at.
std::vector<SpecializationConstant> get_specialization_constants() const;
SmallVector<SpecializationConstant> get_specialization_constants() const;
SPIRConstant &get_constant(uint32_t id);
const SPIRConstant &get_constant(uint32_t id) const;

@@ -468,10 +468,10 @@ public:
bool buffer_get_hlsl_counter_buffer(uint32_t id, uint32_t &counter_id) const;

// Gets the list of all SPIR-V Capabilities which were declared in the SPIR-V module.
const std::vector<spv::Capability> &get_declared_capabilities() const;
const SmallVector<spv::Capability> &get_declared_capabilities() const;

// Gets the list of all SPIR-V extensions which were declared in the SPIR-V module.
const std::vector<std::string> &get_declared_extensions() const;
const SmallVector<std::string> &get_declared_extensions() const;

// When declaring buffer blocks in GLSL, the name declared in the GLSL source
// might not be the same as the name declared in the SPIR-V module due to naming conflicts.
@@ -511,8 +511,8 @@ protected:
ParsedIR ir;
// Marks variables which have global scope and variables which can alias with other variables
// (SSBO, image load store, etc)
std::vector<uint32_t> global_variables;
std::vector<uint32_t> aliased_variables;
SmallVector<uint32_t> global_variables;
SmallVector<uint32_t> aliased_variables;

SPIRFunction *current_function = nullptr;
SPIRBlock *current_block = nullptr;
@@ -686,7 +686,7 @@ protected:
// variable is part of that entry points interface.
bool interface_variable_exists_in_entry_point(uint32_t id) const;

std::vector<CombinedImageSampler> combined_image_samplers;
SmallVector<CombinedImageSampler> combined_image_samplers;

void remap_variable_type_name(const SPIRType &type, const std::string &var_name, std::string &type_name) const
{
@@ -729,7 +729,7 @@ protected:

struct BufferAccessHandler : OpcodeHandler
{
BufferAccessHandler(const Compiler &compiler_, std::vector<BufferRange> &ranges_, uint32_t id_)
BufferAccessHandler(const Compiler &compiler_, SmallVector<BufferRange> &ranges_, uint32_t id_)
: compiler(compiler_)
, ranges(ranges_)
, id(id_)
@@ -739,7 +739,7 @@ protected:
bool handle(spv::Op opcode, const uint32_t *args, uint32_t length) override;

const Compiler &compiler;
std::vector<BufferRange> &ranges;
SmallVector<BufferRange> &ranges;
uint32_t id;

std::unordered_set<uint32_t> seen;
@@ -810,7 +810,7 @@ protected:
bool traverse_all_reachable_opcodes(const SPIRBlock &block, OpcodeHandler &handler) const;
bool traverse_all_reachable_opcodes(const SPIRFunction &block, OpcodeHandler &handler) const;
// This must be an ordered data structure so we always pick the same type aliases.
std::vector<uint32_t> global_struct_cache;
SmallVector<uint32_t> global_struct_cache;

ShaderResources get_shader_resources(const std::unordered_set<uint32_t> *active_variables) const;

@@ -916,6 +916,7 @@ protected:
std::unordered_map<uint32_t, uint32_t> result_id_to_type;
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> complete_write_variables_to_block;
std::unordered_map<uint32_t, std::unordered_set<uint32_t>> partial_write_variables_to_block;
std::unordered_set<uint32_t> access_chain_expressions;
const SPIRBlock *current_block = nullptr;
};

@@ -967,6 +968,6 @@ private:
bool type_is_block_like(const SPIRType &type) const;
bool type_is_opaque_value(const SPIRType &type) const;
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif

50
3rdparty/spirv-cross/spirv_cross_c.cpp
vendored
@@ -34,9 +34,9 @@
#include "spirv_reflect.hpp"
#endif
#include "spirv_parser.hpp"
#include <string.h>
#include <memory>
#include <new>
#include <string.h>

// clang-format off

@@ -88,7 +88,7 @@ struct StringAllocation : ScratchMemoryAllocation
template <typename T>
struct TemporaryBuffer : ScratchMemoryAllocation
{
std::vector<T> buffer;
SmallVector<T> buffer;
};

template <typename T, typename... Ts>
@@ -100,7 +100,7 @@ static inline std::unique_ptr<T> spvc_allocate(Ts &&... ts)
struct spvc_context_s
{
string last_error;
vector<unique_ptr<ScratchMemoryAllocation>> allocations;
SmallVector<unique_ptr<ScratchMemoryAllocation>> allocations;
const char *allocate_name(const std::string &name);

spvc_error_callback callback = nullptr;
@@ -173,20 +173,20 @@ struct spvc_constant_s : SPIRConstant
struct spvc_resources_s : ScratchMemoryAllocation
{
spvc_context context = nullptr;
std::vector<spvc_reflected_resource> uniform_buffers;
std::vector<spvc_reflected_resource> storage_buffers;
std::vector<spvc_reflected_resource> stage_inputs;
std::vector<spvc_reflected_resource> stage_outputs;
std::vector<spvc_reflected_resource> subpass_inputs;
std::vector<spvc_reflected_resource> storage_images;
std::vector<spvc_reflected_resource> sampled_images;
std::vector<spvc_reflected_resource> atomic_counters;
std::vector<spvc_reflected_resource> push_constant_buffers;
std::vector<spvc_reflected_resource> separate_images;
std::vector<spvc_reflected_resource> separate_samplers;
std::vector<spvc_reflected_resource> acceleration_structures;
SmallVector<spvc_reflected_resource> uniform_buffers;
SmallVector<spvc_reflected_resource> storage_buffers;
SmallVector<spvc_reflected_resource> stage_inputs;
SmallVector<spvc_reflected_resource> stage_outputs;
SmallVector<spvc_reflected_resource> subpass_inputs;
SmallVector<spvc_reflected_resource> storage_images;
SmallVector<spvc_reflected_resource> sampled_images;
SmallVector<spvc_reflected_resource> atomic_counters;
SmallVector<spvc_reflected_resource> push_constant_buffers;
SmallVector<spvc_reflected_resource> separate_images;
SmallVector<spvc_reflected_resource> separate_samplers;
SmallVector<spvc_reflected_resource> acceleration_structures;

bool copy_resources(std::vector<spvc_reflected_resource> &outputs, const std::vector<Resource> &inputs);
bool copy_resources(SmallVector<spvc_reflected_resource> &outputs, const SmallVector<Resource> &inputs);
bool copy_resources(const ShaderResources &resources);
};

@@ -634,7 +634,7 @@ spvc_result spvc_compiler_hlsl_set_root_constants_layout(spvc_compiler compiler,
}

auto &hlsl = *static_cast<CompilerHLSL *>(compiler->compiler.get());
std::vector<RootConstants> roots;
vector<RootConstants> roots;
roots.reserve(count);
for (size_t i = 0; i < count; i++)
{
@@ -980,8 +980,8 @@ spvc_result spvc_compiler_compile(spvc_compiler compiler, const char **source)
SPVC_END_SAFE_SCOPE(compiler->context, SPVC_ERROR_UNSUPPORTED_SPIRV)
}

bool spvc_resources_s::copy_resources(std::vector<spvc_reflected_resource> &outputs,
const std::vector<Resource> &inputs)
bool spvc_resources_s::copy_resources(SmallVector<spvc_reflected_resource> &outputs,
const SmallVector<Resource> &inputs)
{
for (auto &i : inputs)
{
@@ -1117,7 +1117,7 @@ spvc_result spvc_resources_get_resource_list_for_type(spvc_resources resources,
const spvc_reflected_resource **resource_list,
size_t *resource_size)
{
const std::vector<spvc_reflected_resource> *list = nullptr;
const SmallVector<spvc_reflected_resource> *list = nullptr;
switch (type)
{
case SPVC_RESOURCE_TYPE_UNIFORM_BUFFER:
@@ -1275,7 +1275,7 @@ spvc_result spvc_compiler_get_entry_points(spvc_compiler compiler, const spvc_en
SPVC_BEGIN_SAFE_SCOPE
{
auto entries = compiler->compiler->get_entry_points_and_stages();
std::vector<spvc_entry_point> translated;
SmallVector<spvc_entry_point> translated;
translated.reserve(entries.size());

for (auto &entry : entries)
@@ -1406,7 +1406,7 @@ unsigned spvc_type_get_bit_width(spvc_type type)
return type->width;
}

unsigned spvc_type_get_vector_size(spvc_type type)
unsigned spvc_type_get_SmallVector_size(spvc_type type)
{
return type->vecsize;
}
@@ -1566,7 +1566,7 @@ spvc_result spvc_compiler_get_combined_image_samplers(spvc_compiler compiler,
SPVC_BEGIN_SAFE_SCOPE
{
auto combined = compiler->compiler->get_combined_image_samplers();
std::vector<spvc_combined_image_sampler> translated;
SmallVector<spvc_combined_image_sampler> translated;
translated.reserve(combined.size());
for (auto &c : combined)
{
@@ -1591,7 +1591,7 @@ spvc_result spvc_compiler_get_specialization_constants(spvc_compiler compiler,
SPVC_BEGIN_SAFE_SCOPE
{
auto spec_constants = compiler->compiler->get_specialization_constants();
std::vector<spvc_specialization_constant> translated;
SmallVector<spvc_specialization_constant> translated;
translated.reserve(spec_constants.size());
for (auto &c : spec_constants)
{
@@ -1743,7 +1743,7 @@ spvc_result spvc_compiler_get_declared_extensions(spvc_compiler compiler, const
SPVC_BEGIN_SAFE_SCOPE
{
auto &exts = compiler->compiler->get_declared_extensions();
std::vector<const char *> duped;
SmallVector<const char *> duped;
duped.reserve(exts.size());
for (auto &ext : exts)
duped.push_back(compiler->context->allocate_name(ext));

712
3rdparty/spirv-cross/spirv_cross_containers.hpp
vendored
Normal file
712
3rdparty/spirv-cross/spirv_cross_containers.hpp
vendored
Normal file
@@ -0,0 +1,712 @@
|
||||
/*
|
||||
* Copyright 2019 Hans-Kristian Arntzen
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#ifndef SPIRV_CROSS_CONTAINERS_HPP
|
||||
#define SPIRV_CROSS_CONTAINERS_HPP
|
||||
|
||||
#include "spirv_cross_error_handling.hpp"
|
||||
#include <algorithm>
|
||||
#include <functional>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <stack>
|
||||
#include <stdint.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <type_traits>
|
||||
#include <unordered_map>
|
||||
#include <unordered_set>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE
|
||||
#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE
|
||||
#else
|
||||
#define SPIRV_CROSS_NAMESPACE spirv_cross
|
||||
#endif
|
||||
|
||||
namespace SPIRV_CROSS_NAMESPACE
|
||||
{
|
||||
#ifndef SPIRV_CROSS_FORCE_STL_TYPES
|
||||
// std::aligned_storage does not support size == 0, so roll our own.
|
||||
template <typename T, size_t N>
|
||||
class AlignedBuffer
|
||||
{
|
||||
public:
|
||||
T *data()
|
||||
{
|
||||
#if defined(_MSC_VER) && _MSC_VER < 1900
|
||||
// MSVC 2013 workarounds, sigh ...
|
||||
// Only use this workaround on MSVC 2013 due to some confusion around default initialized unions.
|
||||
// Spec seems to suggest the memory will be zero-initialized, which is *not* what we want.
|
||||
return reinterpret_cast<T *>(u.aligned_char);
|
||||
#else
|
||||
return reinterpret_cast<T *>(aligned_char);
|
||||
#endif
|
||||
}
|
||||
|
||||
private:
|
||||
#if defined(_MSC_VER) && _MSC_VER < 1900
|
||||
// MSVC 2013 workarounds, sigh ...
|
||||
union {
|
||||
char aligned_char[sizeof(T) * N];
|
||||
double dummy_aligner;
|
||||
} u;
|
||||
#else
|
||||
alignas(T) char aligned_char[sizeof(T) * N];
|
||||
#endif
|
||||
};
|
||||
|
||||
template <typename T>
|
||||
class AlignedBuffer<T, 0>
|
||||
{
|
||||
public:
|
||||
T *data()
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
// An immutable version of SmallVector which erases type information about storage.
template <typename T>
class VectorView
{
public:
    T &operator[](size_t i)
    {
        return ptr[i];
    }

    const T &operator[](size_t i) const
    {
        return ptr[i];
    }

    bool empty() const
    {
        return buffer_size == 0;
    }

    size_t size() const
    {
        return buffer_size;
    }

    T *data()
    {
        return ptr;
    }

    const T *data() const
    {
        return ptr;
    }

    T *begin()
    {
        return ptr;
    }

    T *end()
    {
        return ptr + buffer_size;
    }

    const T *begin() const
    {
        return ptr;
    }

    const T *end() const
    {
        return ptr + buffer_size;
    }

    T &front()
    {
        return ptr[0];
    }

    const T &front() const
    {
        return ptr[0];
    }

    T &back()
    {
        return ptr[buffer_size - 1];
    }

    const T &back() const
    {
        return ptr[buffer_size - 1];
    }

    // Makes it easier to consume SmallVector.
#if defined(_MSC_VER) && _MSC_VER < 1900
    explicit operator std::vector<T>() const
    {
        // Another MSVC 2013 workaround. It does not understand lvalue/rvalue qualified operations.
        return std::vector<T>(ptr, ptr + buffer_size);
    }
#else
    // Makes it easier to consume SmallVector.
    explicit operator std::vector<T>() const &
    {
        return std::vector<T>(ptr, ptr + buffer_size);
    }

    // If we are converting as an r-value, we can pilfer our elements.
    explicit operator std::vector<T>() &&
    {
        return std::vector<T>(std::make_move_iterator(ptr), std::make_move_iterator(ptr + buffer_size));
    }
#endif

    // Avoid sliced copies. Base class should only be read as a reference.
    VectorView(const VectorView &) = delete;
    void operator=(const VectorView &) = delete;

protected:
    VectorView() = default;
    T *ptr = nullptr;
    size_t buffer_size = 0;
};

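Because `SmallVector<T, N>` below derives from `VectorView<T>`, read-only helpers can take the view type and accept any inline capacity. A sketch (the function and values are illustrative only, not taken from the diff):

```
uint32_t sum_ids(const VectorView<uint32_t> &ids)
{
    uint32_t sum = 0;
    for (uint32_t id : ids)
        sum += id;
    return sum;
}

// SmallVector<uint32_t, 8> local_ids; ... ; uint32_t total = sum_ids(local_ids);
```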
// Simple vector which supports up to N elements inline, without malloc/free.
// We use a lot of throwaway vectors all over the place which triggers allocations.
// This class only implements the subset of std::vector we need in SPIRV-Cross.
// It is *NOT* a drop-in replacement in general projects.
template <typename T, size_t N = 8>
class SmallVector : public VectorView<T>
{
public:
    SmallVector()
    {
        this->ptr = stack_storage.data();
        buffer_capacity = N;
    }

    SmallVector(const T *arg_list_begin, const T *arg_list_end)
        : SmallVector()
    {
        auto count = size_t(arg_list_end - arg_list_begin);
        reserve(count);
        for (size_t i = 0; i < count; i++, arg_list_begin++)
            new (&this->ptr[i]) T(*arg_list_begin);
        this->buffer_size = count;
    }

    SmallVector(SmallVector &&other) SPIRV_CROSS_NOEXCEPT : SmallVector()
    {
        *this = std::move(other);
    }

    SmallVector &operator=(SmallVector &&other) SPIRV_CROSS_NOEXCEPT
    {
        clear();
        if (other.ptr != other.stack_storage.data())
        {
            // Pilfer allocated pointer.
            if (this->ptr != stack_storage.data())
                free(this->ptr);
            this->ptr = other.ptr;
            this->buffer_size = other.buffer_size;
            buffer_capacity = other.buffer_capacity;
            other.ptr = nullptr;
            other.buffer_size = 0;
            other.buffer_capacity = 0;
        }
        else
        {
            // Need to move the stack contents individually.
            reserve(other.buffer_size);
            for (size_t i = 0; i < other.buffer_size; i++)
            {
                new (&this->ptr[i]) T(std::move(other.ptr[i]));
                other.ptr[i].~T();
            }
            this->buffer_size = other.buffer_size;
            other.buffer_size = 0;
        }
        return *this;
    }

    SmallVector(const SmallVector &other)
        : SmallVector()
    {
        *this = other;
    }

    SmallVector &operator=(const SmallVector &other)
    {
        clear();
        reserve(other.buffer_size);
        for (size_t i = 0; i < other.buffer_size; i++)
            new (&this->ptr[i]) T(other.ptr[i]);
        this->buffer_size = other.buffer_size;
        return *this;
    }

    explicit SmallVector(size_t count)
        : SmallVector()
    {
        resize(count);
    }

    ~SmallVector()
    {
        clear();
        if (this->ptr != stack_storage.data())
            free(this->ptr);
    }

    void clear()
    {
        for (size_t i = 0; i < this->buffer_size; i++)
            this->ptr[i].~T();
        this->buffer_size = 0;
    }

    void push_back(const T &t)
    {
        reserve(this->buffer_size + 1);
        new (&this->ptr[this->buffer_size]) T(t);
        this->buffer_size++;
    }

    void push_back(T &&t)
    {
        reserve(this->buffer_size + 1);
        new (&this->ptr[this->buffer_size]) T(std::move(t));
        this->buffer_size++;
    }

    void pop_back()
    {
        resize(this->buffer_size - 1);
    }

    template <typename... Ts>
    void emplace_back(Ts &&... ts)
    {
        reserve(this->buffer_size + 1);
        new (&this->ptr[this->buffer_size]) T(std::forward<Ts>(ts)...);
        this->buffer_size++;
    }

    void reserve(size_t count)
    {
        if (count > buffer_capacity)
        {
            size_t target_capacity = buffer_capacity;
            if (target_capacity == 0)
                target_capacity = 1;
            if (target_capacity < N)
                target_capacity = N;

            while (target_capacity < count)
                target_capacity <<= 1u;

            T *new_buffer =
                target_capacity > N ? static_cast<T *>(malloc(target_capacity * sizeof(T))) : stack_storage.data();

            if (!new_buffer)
                SPIRV_CROSS_THROW("Out of memory.");

            // In case for some reason two allocations both come from same stack.
            if (new_buffer != this->ptr)
            {
                // We don't deal with types which can throw in move constructor.
                for (size_t i = 0; i < this->buffer_size; i++)
                {
                    new (&new_buffer[i]) T(std::move(this->ptr[i]));
                    this->ptr[i].~T();
                }
            }

            if (this->ptr != stack_storage.data())
                free(this->ptr);
            this->ptr = new_buffer;
            buffer_capacity = target_capacity;
        }
    }

    void insert(T *itr, const T *insert_begin, const T *insert_end)
    {
        auto count = size_t(insert_end - insert_begin);
        if (itr == this->end())
        {
            reserve(this->buffer_size + count);
            for (size_t i = 0; i < count; i++, insert_begin++)
                new (&this->ptr[this->buffer_size + i]) T(*insert_begin);
            this->buffer_size += count;
        }
        else
        {
            if (this->buffer_size + count > buffer_capacity)
            {
                auto target_capacity = this->buffer_size + count;
                if (target_capacity == 0)
                    target_capacity = 1;
                if (target_capacity < N)
                    target_capacity = N;

                while (target_capacity < count)
                    target_capacity <<= 1u;

                // Need to allocate new buffer. Move everything to a new buffer.
                T *new_buffer =
                    target_capacity > N ? static_cast<T *>(malloc(target_capacity * sizeof(T))) : stack_storage.data();
                if (!new_buffer)
                    SPIRV_CROSS_THROW("Out of memory.");

                // First, move elements from source buffer to new buffer.
                // We don't deal with types which can throw in move constructor.
                auto *target_itr = new_buffer;
                auto *original_source_itr = this->begin();

                if (new_buffer != this->ptr)
                {
                    while (original_source_itr != itr)
                    {
                        new (target_itr) T(std::move(*original_source_itr));
                        original_source_itr->~T();
                        ++original_source_itr;
                        ++target_itr;
                    }
                }

                // Copy-construct new elements.
                for (auto *source_itr = insert_begin; source_itr != insert_end; ++source_itr, ++target_itr)
                    new (target_itr) T(*source_itr);

                // Move over the other half.
                if (new_buffer != this->ptr || insert_begin != insert_end)
                {
                    while (original_source_itr != this->end())
                    {
                        new (target_itr) T(std::move(*original_source_itr));
                        original_source_itr->~T();
                        ++original_source_itr;
                        ++target_itr;
                    }
                }

                if (this->ptr != stack_storage.data())
                    free(this->ptr);
                this->ptr = new_buffer;
                buffer_capacity = target_capacity;
            }
            else
            {
                // Move in place, need to be a bit careful about which elements are constructed and which are not.
                // Move the end and construct the new elements.
                auto *target_itr = this->end() + count;
                auto *source_itr = this->end();
                while (target_itr != this->end() && source_itr != itr)
                {
                    --target_itr;
                    --source_itr;
                    new (target_itr) T(std::move(*source_itr));
                }

                // For already constructed elements we can move-assign.
                std::move_backward(itr, source_itr, target_itr);

                // For the inserts which go to already constructed elements, we can do a plain copy.
                while (itr != this->end() && insert_begin != insert_end)
                    *itr++ = *insert_begin++;

                // For inserts into newly allocated memory, we must copy-construct instead.
                while (insert_begin != insert_end)
                {
                    new (itr) T(*insert_begin);
                    ++itr;
                    ++insert_begin;
                }
            }

            this->buffer_size += count;
        }
    }

    T *erase(T *itr)
    {
        std::move(itr + 1, this->end(), itr);
        this->ptr[--this->buffer_size].~T();
        return itr;
    }

    void erase(T *start_erase, T *end_erase)
    {
        if (end_erase == this->end())
        {
            resize(size_t(start_erase - this->begin()));
        }
        else
        {
            auto new_size = this->buffer_size - (end_erase - start_erase);
            std::move(end_erase, this->end(), start_erase);
            resize(new_size);
        }
    }

    void resize(size_t new_size)
    {
        if (new_size < this->buffer_size)
        {
            for (size_t i = new_size; i < this->buffer_size; i++)
                this->ptr[i].~T();
        }
        else if (new_size > this->buffer_size)
        {
            reserve(new_size);
            for (size_t i = this->buffer_size; i < new_size; i++)
                new (&this->ptr[i]) T();
        }

        this->buffer_size = new_size;
    }

private:
    size_t buffer_capacity = 0;
    AlignedBuffer<T, N> stack_storage;
};

// A vector without stack storage.
// Could also be a typedef-ed to std::vector,
// but might as well use the one we have.
template <typename T>
using Vector = SmallVector<T, 0>;

#else // SPIRV_CROSS_FORCE_STL_TYPES

template <typename T, size_t N = 8>
using SmallVector = std::vector<T>;
template <typename T>
using Vector = std::vector<T>;
template <typename T>
using VectorView = std::vector<T>;

#endif // SPIRV_CROSS_FORCE_STL_TYPES

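A usage sketch of the subset API above, assuming this header is included; growing past the inline capacity N falls back to malloc, and the explicit conversion operators hand the elements over to `std::vector` when interop is needed:

```
SmallVector<std::string, 4> args;
args.push_back("layout(location = 0)");
args.emplace_back("in vec4 position;");

std::string extra[] = { "flat", "centroid" };
args.insert(args.end(), extra, extra + 2); // copy-constructs into place

args.erase(args.begin()); // shifting erase, keeps relative order

// Moving conversion pilfers the elements instead of copying them.
std::vector<std::string> plain(std::move(args));
```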
// An object pool which we use for allocating IVariant-derived objects.
// We know we are going to allocate a bunch of objects of each type,
// so amortize the mallocs.
class ObjectPoolBase
{
public:
    virtual ~ObjectPoolBase() = default;
    virtual void free_opaque(void *ptr) = 0;
};

template <typename T>
class ObjectPool : public ObjectPoolBase
{
public:
    explicit ObjectPool(unsigned start_object_count_ = 16)
        : start_object_count(start_object_count_)
    {
    }

    template <typename... P>
    T *allocate(P &&... p)
    {
        if (vacants.empty())
        {
            unsigned num_objects = start_object_count << memory.size();
            T *ptr = static_cast<T *>(malloc(num_objects * sizeof(T)));
            if (!ptr)
                return nullptr;

            for (unsigned i = 0; i < num_objects; i++)
                vacants.push_back(&ptr[i]);

            memory.emplace_back(ptr);
        }

        T *ptr = vacants.back();
        vacants.pop_back();
        new (ptr) T(std::forward<P>(p)...);
        return ptr;
    }

    void free(T *ptr)
    {
        ptr->~T();
        vacants.push_back(ptr);
    }

    void free_opaque(void *ptr) override
    {
        free(static_cast<T *>(ptr));
    }

    void clear()
    {
        vacants.clear();
        memory.clear();
    }

protected:
    Vector<T *> vacants;

    struct MallocDeleter
    {
        void operator()(T *ptr)
        {
            ::free(ptr);
        }
    };

    SmallVector<std::unique_ptr<T, MallocDeleter>> memory;
    unsigned start_object_count;
};

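A sketch of the pool in isolation, using `std::string` as a stand-in payload: `allocate()` forwards its arguments to the constructor via placement new, and `free()` runs the destructor but keeps the slot on the vacant list, so steady-state allocation stops touching malloc:

```
ObjectPool<std::string> pool;

std::string *s = pool.allocate("hello, pool");
if (s)
{
    // ... use *s ...
    pool.free(s); // destroyed here, memory retained for reuse
}
```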
template <size_t StackSize = 4096, size_t BlockSize = 4096>
class StringStream
{
public:
    StringStream()
    {
        reset();
    }

    ~StringStream()
    {
        reset();
    }

    // Disable copies and moves. Makes it easier to implement, and we don't need it.
    StringStream(const StringStream &) = delete;
    void operator=(const StringStream &) = delete;

    template <typename T, typename std::enable_if<!std::is_floating_point<T>::value, int>::type = 0>
    StringStream &operator<<(const T &t)
    {
        auto s = std::to_string(t);
        append(s.data(), s.size());
        return *this;
    }

    // Only overload this to make float/double conversions ambiguous.
    StringStream &operator<<(uint32_t v)
    {
        auto s = std::to_string(v);
        append(s.data(), s.size());
        return *this;
    }

    StringStream &operator<<(char c)
    {
        append(&c, 1);
        return *this;
    }

    StringStream &operator<<(const std::string &s)
    {
        append(s.data(), s.size());
        return *this;
    }

    StringStream &operator<<(const char *s)
    {
        append(s, strlen(s));
        return *this;
    }

    template <size_t N>
    StringStream &operator<<(const char (&s)[N])
    {
        append(s, strlen(s));
        return *this;
    }

    std::string str() const
    {
        std::string ret;
        size_t target_size = 0;
        for (auto &saved : saved_buffers)
            target_size += saved.offset;
        target_size += current_buffer.offset;
        ret.reserve(target_size);

        for (auto &saved : saved_buffers)
            ret.insert(ret.end(), saved.buffer, saved.buffer + saved.offset);
        ret.insert(ret.end(), current_buffer.buffer, current_buffer.buffer + current_buffer.offset);
        return ret;
    }

    void reset()
    {
        for (auto &saved : saved_buffers)
            if (saved.buffer != stack_buffer)
                free(saved.buffer);
        if (current_buffer.buffer != stack_buffer)
            free(current_buffer.buffer);

        saved_buffers.clear();
        current_buffer.buffer = stack_buffer;
        current_buffer.offset = 0;
        current_buffer.size = sizeof(stack_buffer);
    }

private:
    struct Buffer
    {
        char *buffer = nullptr;
        size_t offset = 0;
        size_t size = 0;
    };
    Buffer current_buffer;
    char stack_buffer[StackSize];
    SmallVector<Buffer> saved_buffers;

    void append(const char *s, size_t len)
    {
        size_t avail = current_buffer.size - current_buffer.offset;
        if (avail < len)
        {
            if (avail > 0)
            {
                memcpy(current_buffer.buffer + current_buffer.offset, s, avail);
                s += avail;
                len -= avail;
                current_buffer.offset += avail;
            }

            saved_buffers.push_back(current_buffer);
            size_t target_size = len > BlockSize ? len : BlockSize;
            current_buffer.buffer = static_cast<char *>(malloc(target_size));
            if (!current_buffer.buffer)
                SPIRV_CROSS_THROW("Out of memory.");

            memcpy(current_buffer.buffer, s, len);
            current_buffer.offset = len;
            current_buffer.size = target_size;
        }
        else
        {
            memcpy(current_buffer.buffer + current_buffer.offset, s, len);
            current_buffer.offset += len;
        }
    }
};

} // namespace SPIRV_CROSS_NAMESPACE

#endif
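A usage sketch for the `StringStream<>` defined above, which replaces `std::ostringstream` in the code generators: small outputs never leave the stack buffer, and only `str()` materializes a heap-allocated `std::string`:

```
StringStream<> out;
out << "layout(location = " << 2u << ") in vec" << 4u << " v;";
std::string line = out.str();

out.reset(); // release any overflow blocks and rewind to the stack buffer
```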
83
3rdparty/spirv-cross/spirv_cross_error_handling.hpp
vendored
Normal file
@@ -0,0 +1,83 @@
/*
 * Copyright 2015-2019 Arm Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SPIRV_CROSS_ERROR_HANDLING
#define SPIRV_CROSS_ERROR_HANDLING

#include <stdexcept>
#include <stdio.h>
#include <stdlib.h>
#include <string>

#ifdef SPIRV_CROSS_NAMESPACE_OVERRIDE
#define SPIRV_CROSS_NAMESPACE SPIRV_CROSS_NAMESPACE_OVERRIDE
#else
#define SPIRV_CROSS_NAMESPACE spirv_cross
#endif

namespace SPIRV_CROSS_NAMESPACE
{
#ifdef SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS
#if !defined(_MSC_VER) || defined(__clang__)
[[noreturn]]
#endif
inline void
report_and_abort(const std::string &msg)
{
#ifdef NDEBUG
    (void)msg;
#else
    fprintf(stderr, "There was a compiler error: %s\n", msg.c_str());
#endif
    fflush(stderr);
    abort();
}

#define SPIRV_CROSS_THROW(x) report_and_abort(x)
#else
class CompilerError : public std::runtime_error
{
public:
    explicit CompilerError(const std::string &str)
        : std::runtime_error(str)
    {
    }
};

#define SPIRV_CROSS_THROW(x) throw CompilerError(x)
#endif

// MSVC 2013 does not have noexcept. We need this for Variant to get move constructor to work correctly
// instead of copy constructor.
// MSVC 2013 ignores that move constructors cannot throw in std::vector, so just don't define it.
#if defined(_MSC_VER) && _MSC_VER < 1900
#define SPIRV_CROSS_NOEXCEPT
#else
#define SPIRV_CROSS_NOEXCEPT noexcept
#endif

#if __cplusplus >= 201402l
#define SPIRV_CROSS_DEPRECATED(reason) [[deprecated(reason)]]
#elif defined(__GNUC__)
#define SPIRV_CROSS_DEPRECATED(reason) __attribute__((deprecated))
#elif defined(_MSC_VER)
#define SPIRV_CROSS_DEPRECATED(reason) __declspec(deprecated(reason))
#else
#define SPIRV_CROSS_DEPRECATED(reason)
#endif
} // namespace SPIRV_CROSS_NAMESPACE

#endif
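With exceptions enabled (the default), `SPIRV_CROSS_THROW` throws `CompilerError`; with `SPIRV_CROSS_EXCEPTIONS_TO_ASSERTIONS` it logs and aborts instead, so the catch below only applies to the default build. A call-site sketch, assuming the default `spirv_cross` namespace and that `spirv_binary` is a `std::vector<uint32_t>` the caller already has:

```
try
{
    spirv_cross::CompilerGLSL compiler(std::move(spirv_binary));
    std::string glsl = compiler.compile();
}
catch (const spirv_cross::CompilerError &e)
{
    fprintf(stderr, "spirv-cross failed: %s\n", e.what());
}
```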
102
3rdparty/spirv-cross/spirv_cross_parsed_ir.cpp
vendored
@@ -23,9 +23,101 @@ using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
ParsedIR::ParsedIR()
{
    // If we move ParsedIR, we need to make sure the pointer stays fixed since the child Variant objects consume a pointer to this group,
    // so need an extra pointer here.
    pool_group.reset(new ObjectPoolGroup);

    pool_group->pools[TypeType].reset(new ObjectPool<SPIRType>);
    pool_group->pools[TypeVariable].reset(new ObjectPool<SPIRVariable>);
    pool_group->pools[TypeConstant].reset(new ObjectPool<SPIRConstant>);
    pool_group->pools[TypeFunction].reset(new ObjectPool<SPIRFunction>);
    pool_group->pools[TypeFunctionPrototype].reset(new ObjectPool<SPIRFunctionPrototype>);
    pool_group->pools[TypeBlock].reset(new ObjectPool<SPIRBlock>);
    pool_group->pools[TypeExtension].reset(new ObjectPool<SPIRExtension>);
    pool_group->pools[TypeExpression].reset(new ObjectPool<SPIRExpression>);
    pool_group->pools[TypeConstantOp].reset(new ObjectPool<SPIRConstantOp>);
    pool_group->pools[TypeCombinedImageSampler].reset(new ObjectPool<SPIRCombinedImageSampler>);
    pool_group->pools[TypeAccessChain].reset(new ObjectPool<SPIRAccessChain>);
    pool_group->pools[TypeUndef].reset(new ObjectPool<SPIRUndef>);
}

// Should have been default-implemented, but need this on MSVC 2013.
ParsedIR::ParsedIR(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT
{
    *this = move(other);
}

ParsedIR &ParsedIR::operator=(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT
{
    if (this != &other)
    {
        pool_group = move(other.pool_group);
        spirv = move(other.spirv);
        meta = move(other.meta);
        for (int i = 0; i < TypeCount; i++)
            ids_for_type[i] = move(other.ids_for_type[i]);
        ids_for_constant_or_type = move(other.ids_for_constant_or_type);
        ids_for_constant_or_variable = move(other.ids_for_constant_or_variable);
        declared_capabilities = move(other.declared_capabilities);
        declared_extensions = move(other.declared_extensions);
        block_meta = move(other.block_meta);
        continue_block_to_loop_header = move(other.continue_block_to_loop_header);
        entry_points = move(other.entry_points);
        ids = move(other.ids);

        default_entry_point = other.default_entry_point;
        source = other.source;
        loop_iteration_depth = other.loop_iteration_depth;
    }
    return *this;
}

ParsedIR::ParsedIR(const ParsedIR &other)
    : ParsedIR()
{
    *this = other;
}

ParsedIR &ParsedIR::operator=(const ParsedIR &other)
{
    if (this != &other)
    {
        spirv = other.spirv;
        meta = other.meta;
        for (int i = 0; i < TypeCount; i++)
            ids_for_type[i] = other.ids_for_type[i];
        ids_for_constant_or_type = other.ids_for_constant_or_type;
        ids_for_constant_or_variable = other.ids_for_constant_or_variable;
        declared_capabilities = other.declared_capabilities;
        declared_extensions = other.declared_extensions;
        block_meta = other.block_meta;
        continue_block_to_loop_header = other.continue_block_to_loop_header;
        entry_points = other.entry_points;
        default_entry_point = other.default_entry_point;
        source = other.source;
        loop_iteration_depth = other.loop_iteration_depth;

        // Very deliberate copying of IDs. There is no default copy constructor, nor a simple default constructor.
        // Construct object first so we have the correct allocator set-up, then we can copy object into our new pool group.
        ids.clear();
        ids.reserve(other.ids.size());
        for (size_t i = 0; i < other.ids.size(); i++)
        {
            ids.emplace_back(pool_group.get());
            ids.back() = other.ids[i];
        }
    }
    return *this;
}

void ParsedIR::set_id_bounds(uint32_t bounds)
{
    ids.resize(bounds);
    ids.reserve(bounds);
    while (ids.size() < bounds)
        ids.emplace_back(pool_group.get());

    block_meta.resize(bounds);
}

@@ -571,7 +663,11 @@ uint32_t ParsedIR::increase_bound_by(uint32_t incr_amount)
{
    auto curr_bound = ids.size();
    auto new_bound = curr_bound + incr_amount;
    ids.resize(new_bound);

    ids.reserve(ids.size() + incr_amount);
    for (uint32_t i = 0; i < incr_amount; i++)
        ids.emplace_back(pool_group.get());

    block_meta.resize(new_bound);
    return uint32_t(curr_bound);
}
@@ -645,4 +741,4 @@ Meta *ParsedIR::find_meta(uint32_t id)
    return nullptr;
}

} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

32
3rdparty/spirv-cross/spirv_cross_parsed_ir.hpp
vendored
@@ -20,7 +20,6 @@
#include "spirv_common.hpp"
#include <stdint.h>
#include <unordered_map>
#include <vector>

namespace SPIRV_CROSS_NAMESPACE
{
@@ -32,7 +31,22 @@ namespace SPIRV_CROSS_NAMESPACE

class ParsedIR
{
private:
    // This must be destroyed after the "ids" vector.
    std::unique_ptr<ObjectPoolGroup> pool_group;

public:
    ParsedIR();

    // Due to custom allocations from object pools, we cannot use a default copy constructor.
    ParsedIR(const ParsedIR &other);
    ParsedIR &operator=(const ParsedIR &other);

    // Moves are unproblematic, but we need to implement it anyways, since MSVC 2013 does not understand
    // how to default-implement these.
    ParsedIR(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT;
    ParsedIR &operator=(ParsedIR &&other) SPIRV_CROSS_NOEXCEPT;

    // Resizes ids, meta and block_meta.
    void set_id_bounds(uint32_t bounds);

@@ -40,7 +54,7 @@ public:
    std::vector<uint32_t> spirv;

    // Holds various data structures which inherit from IVariant.
    std::vector<Variant> ids;
    SmallVector<Variant> ids;

    // Various meta data for IDs, decorations, names, etc.
    std::unordered_map<uint32_t, Meta> meta;
@@ -48,19 +62,19 @@ public:
    // Holds all IDs which have a certain type.
    // This is needed so we can iterate through a specific kind of resource quickly,
    // and in-order of module declaration.
    std::vector<uint32_t> ids_for_type[TypeCount];
    SmallVector<uint32_t> ids_for_type[TypeCount];

    // Special purpose lists which contain a union of types.
    // This is needed so we can declare specialization constants and structs in an interleaved fashion,
    // among other things.
    // Constants can be of struct type, and struct array sizes can use specialization constants.
    std::vector<uint32_t> ids_for_constant_or_type;
    std::vector<uint32_t> ids_for_constant_or_variable;
    SmallVector<uint32_t> ids_for_constant_or_type;
    SmallVector<uint32_t> ids_for_constant_or_variable;

    // Declared capabilities and extensions in the SPIR-V module.
    // Not really used except for reflection at the moment.
    std::vector<spv::Capability> declared_capabilities;
    std::vector<std::string> declared_extensions;
    SmallVector<spv::Capability> declared_capabilities;
    SmallVector<std::string> declared_extensions;

    // Meta data about blocks. The cross-compiler needs to query if a block is either of these types.
    // It is a bitset as there can be more than one tag per block.
@@ -73,7 +87,7 @@ public:
        BLOCK_META_MULTISELECT_MERGE_BIT = 1 << 4
    };
    using BlockMetaFlags = uint8_t;
    std::vector<BlockMetaFlags> block_meta;
    SmallVector<BlockMetaFlags> block_meta;
    std::unordered_map<uint32_t, uint32_t> continue_block_to_loop_header;

    // Normally, we'd stick SPIREntryPoint in ids array, but it conflicts with SPIRFunction.
@@ -181,6 +195,6 @@ private:
    std::string empty_string;
    Bitset cleared_bitset;
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif

4
3rdparty/spirv-cross/spirv_cross_util.cpp
vendored
@@ -22,8 +22,8 @@ using namespace SPIRV_CROSS_NAMESPACE;

namespace spirv_cross_util
{
void rename_interface_variable(Compiler &compiler, const std::vector<Resource> &resources,
                               uint32_t location, const std::string &name)
void rename_interface_variable(Compiler &compiler, const SmallVector<Resource> &resources, uint32_t location,
                               const std::string &name)
{
    for (auto &v : resources)
    {

3
3rdparty/spirv-cross/spirv_cross_util.hpp
vendored
@@ -21,7 +21,8 @@

namespace spirv_cross_util
{
void rename_interface_variable(SPIRV_CROSS_NAMESPACE::Compiler &compiler, const std::vector<SPIRV_CROSS_NAMESPACE::Resource> &resources,
void rename_interface_variable(SPIRV_CROSS_NAMESPACE::Compiler &compiler,
                               const SPIRV_CROSS_NAMESPACE::SmallVector<SPIRV_CROSS_NAMESPACE::Resource> &resources,
                               uint32_t location, const std::string &name);
void inherit_combined_sampler_bindings(SPIRV_CROSS_NAMESPACE::Compiler &compiler);
} // namespace spirv_cross_util

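A call-site sketch for the updated signature: the reflected resource lists returned by `get_shader_resources()` are `SmallVector<Resource>` after this change, which is exactly what the helper now takes (`spirv_binary` is an assumed `std::vector<uint32_t>`):

```
spirv_cross::CompilerGLSL compiler(std::move(spirv_binary));
auto resources = compiler.get_shader_resources();

// Give the vertex input at location 0 a stable name across stages.
spirv_cross_util::rename_interface_variable(compiler, resources.stage_inputs, 0, "vInput0");
```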
230
3rdparty/spirv-cross/spirv_glsl.cpp
vendored
@@ -454,8 +454,7 @@ string CompilerGLSL::compile()
|
||||
|
||||
reset();
|
||||
|
||||
// Move constructor for this type is broken on GCC 4.9 ...
|
||||
buffer = unique_ptr<ostringstream>(new ostringstream());
|
||||
buffer.reset();
|
||||
|
||||
emit_header();
|
||||
emit_resources();
|
||||
@@ -468,15 +467,15 @@ string CompilerGLSL::compile()
|
||||
// Entry point in GLSL is always main().
|
||||
get_entry_point().name = "main";
|
||||
|
||||
return buffer->str();
|
||||
return buffer.str();
|
||||
}
|
||||
|
||||
std::string CompilerGLSL::get_partial_source()
|
||||
{
|
||||
return buffer ? buffer->str() : "No compiled source available yet.";
|
||||
return buffer.str();
|
||||
}
|
||||
|
||||
void CompilerGLSL::build_workgroup_size(vector<string> &arguments, const SpecializationConstant &wg_x,
|
||||
void CompilerGLSL::build_workgroup_size(SmallVector<string> &arguments, const SpecializationConstant &wg_x,
|
||||
const SpecializationConstant &wg_y, const SpecializationConstant &wg_z)
|
||||
{
|
||||
auto &execution = get_entry_point();
|
||||
@@ -573,8 +572,8 @@ void CompilerGLSL::emit_header()
|
||||
for (auto &header : header_lines)
|
||||
statement(header);
|
||||
|
||||
vector<string> inputs;
|
||||
vector<string> outputs;
|
||||
SmallVector<string> inputs;
|
||||
SmallVector<string> outputs;
|
||||
|
||||
switch (execution.model)
|
||||
{
|
||||
@@ -798,7 +797,7 @@ string CompilerGLSL::layout_for_member(const SPIRType &type, uint32_t index)
|
||||
return "";
|
||||
auto &dec = memb[index];
|
||||
|
||||
vector<string> attr;
|
||||
SmallVector<string> attr;
|
||||
|
||||
// We can only apply layouts on members in block interfaces.
|
||||
// This is a bit problematic because in SPIR-V decorations are applied on the struct types directly.
|
||||
@@ -1294,7 +1293,7 @@ string CompilerGLSL::layout_for_variable(const SPIRVariable &var)
|
||||
if (is_legacy())
|
||||
return "";
|
||||
|
||||
vector<string> attr;
|
||||
SmallVector<string> attr;
|
||||
|
||||
auto &dec = ir.meta[var.self].decoration;
|
||||
auto &type = get<SPIRType>(var.basetype);
|
||||
@@ -2325,7 +2324,7 @@ void CompilerGLSL::emit_resources()
|
||||
|
||||
if ((wg_x.id != 0) || (wg_y.id != 0) || (wg_z.id != 0))
|
||||
{
|
||||
vector<string> inputs;
|
||||
SmallVector<string> inputs;
|
||||
build_workgroup_size(inputs, wg_x, wg_y, wg_z);
|
||||
statement("layout(", merge(inputs), ") in;");
|
||||
statement("");
|
||||
@@ -2869,7 +2868,7 @@ string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop)
|
||||
}
|
||||
|
||||
uint32_t bit_width = 0;
|
||||
if (unary || binary)
|
||||
if (unary || binary || cop.opcode == OpSConvert || cop.opcode == OpUConvert)
|
||||
bit_width = expression_type(cop.arguments[0]).width;
|
||||
|
||||
SPIRType::BaseType input_type;
|
||||
@@ -2889,6 +2888,8 @@ string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop)
|
||||
case OpSMod:
|
||||
case OpSDiv:
|
||||
case OpShiftRightArithmetic:
|
||||
case OpSConvert:
|
||||
case OpSNegate:
|
||||
input_type = to_signed_basetype(bit_width);
|
||||
break;
|
||||
|
||||
@@ -2899,6 +2900,7 @@ string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop)
|
||||
case OpUMod:
|
||||
case OpUDiv:
|
||||
case OpShiftRightLogical:
|
||||
case OpUConvert:
|
||||
input_type = to_unsigned_basetype(bit_width);
|
||||
break;
|
||||
|
||||
@@ -2940,6 +2942,21 @@ string CompilerGLSL::constant_op_expression(const SPIRConstantOp &cop)
|
||||
// Works around various casting scenarios in glslang as there is no OpBitcast for specialization constants.
|
||||
return join("(", op, bitcast_glsl(type, cop.arguments[0]), ")");
|
||||
}
|
||||
else if (cop.opcode == OpSConvert || cop.opcode == OpUConvert)
|
||||
{
|
||||
if (cop.arguments.size() < 1)
|
||||
SPIRV_CROSS_THROW("Not enough arguments to OpSpecConstantOp.");
|
||||
|
||||
auto &arg_type = expression_type(cop.arguments[0]);
|
||||
if (arg_type.width < type.width && input_type != arg_type.basetype)
|
||||
{
|
||||
auto expected = arg_type;
|
||||
expected.basetype = input_type;
|
||||
return join(op, "(", bitcast_glsl(expected, cop.arguments[0]), ")");
|
||||
}
|
||||
else
|
||||
return join(op, "(", to_expression(cop.arguments[0]), ")");
|
||||
}
|
||||
else
|
||||
{
|
||||
if (cop.arguments.size() < 1)
|
||||
@@ -3581,6 +3598,41 @@ string CompilerGLSL::constant_expression_vector(const SPIRConstant &c, uint32_t
|
||||
return res;
|
||||
}
|
||||
|
||||
SPIRExpression &CompilerGLSL::emit_uninitialized_temporary_expression(uint32_t type, uint32_t id)
|
||||
{
|
||||
forced_temporaries.insert(id);
|
||||
emit_uninitialized_temporary(type, id);
|
||||
return set<SPIRExpression>(id, to_name(id), type, true);
|
||||
}
|
||||
|
||||
void CompilerGLSL::emit_uninitialized_temporary(uint32_t result_type, uint32_t result_id)
|
||||
{
|
||||
// If we're declaring temporaries inside continue blocks,
|
||||
// we must declare the temporary in the loop header so that the continue block can avoid declaring new variables.
|
||||
if (current_continue_block && !hoisted_temporaries.count(result_id))
|
||||
{
|
||||
auto &header = get<SPIRBlock>(current_continue_block->loop_dominator);
|
||||
if (find_if(begin(header.declare_temporary), end(header.declare_temporary),
|
||||
[result_type, result_id](const pair<uint32_t, uint32_t> &tmp) {
|
||||
return tmp.first == result_type && tmp.second == result_id;
|
||||
}) == end(header.declare_temporary))
|
||||
{
|
||||
header.declare_temporary.emplace_back(result_type, result_id);
|
||||
hoisted_temporaries.insert(result_id);
|
||||
force_recompile();
|
||||
}
|
||||
}
|
||||
else if (hoisted_temporaries.count(result_id) == 0)
|
||||
{
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto &flags = ir.meta[result_id].decoration.decoration_flags;
|
||||
|
||||
// The result_id has not been made into an expression yet, so use flags interface.
|
||||
add_local_variable_name(result_id);
|
||||
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";");
|
||||
}
|
||||
}
|
||||
|
||||
string CompilerGLSL::declare_temporary(uint32_t result_type, uint32_t result_id)
|
||||
{
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
@@ -3788,14 +3840,19 @@ void CompilerGLSL::emit_unary_func_op_cast(uint32_t result_type, uint32_t result
|
||||
SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type)
|
||||
{
|
||||
auto &out_type = get<SPIRType>(result_type);
|
||||
auto &expr_type = expression_type(op0);
|
||||
auto expected_type = out_type;
|
||||
|
||||
// Bit-widths might be different in unary cases because we use it for SConvert/UConvert and friends.
|
||||
expected_type.basetype = input_type;
|
||||
string cast_op = expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_expression(op0);
|
||||
expected_type.width = expr_type.width;
|
||||
string cast_op = expr_type.basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0);
|
||||
|
||||
string expr;
|
||||
if (out_type.basetype != expected_result_type)
|
||||
{
|
||||
expected_type.basetype = expected_result_type;
|
||||
expected_type.width = out_type.width;
|
||||
expr = bitcast_glsl_op(out_type, expected_type);
|
||||
expr += '(';
|
||||
expr += join(op, "(", cast_op, ")");
|
||||
@@ -3810,17 +3867,18 @@ void CompilerGLSL::emit_unary_func_op_cast(uint32_t result_type, uint32_t result
|
||||
inherit_expression_dependencies(result_id, op0);
|
||||
}
|
||||
|
||||
void CompilerGLSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id,
|
||||
uint32_t op0, uint32_t op1, uint32_t op2,
|
||||
const char *op,
|
||||
SPIRType::BaseType input_type)
|
||||
void CompilerGLSL::emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1,
|
||||
uint32_t op2, const char *op, SPIRType::BaseType input_type)
|
||||
{
|
||||
auto &out_type = get<SPIRType>(result_type);
|
||||
auto expected_type = out_type;
|
||||
expected_type.basetype = input_type;
|
||||
string cast_op0 = expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_expression(op0);
|
||||
string cast_op1 = expression_type(op1).basetype != input_type ? bitcast_glsl(expected_type, op1) : to_expression(op1);
|
||||
string cast_op2 = expression_type(op2).basetype != input_type ? bitcast_glsl(expected_type, op2) : to_expression(op2);
|
||||
string cast_op0 =
|
||||
expression_type(op0).basetype != input_type ? bitcast_glsl(expected_type, op0) : to_unpacked_expression(op0);
|
||||
string cast_op1 =
|
||||
expression_type(op1).basetype != input_type ? bitcast_glsl(expected_type, op1) : to_unpacked_expression(op1);
|
||||
string cast_op2 =
|
||||
expression_type(op2).basetype != input_type ? bitcast_glsl(expected_type, op2) : to_unpacked_expression(op2);
|
||||
|
||||
string expr;
|
||||
if (out_type.basetype != input_type)
|
||||
@@ -4276,7 +4334,7 @@ void CompilerGLSL::emit_texture_op(const Instruction &i)
|
||||
auto op = static_cast<Op>(i.op);
|
||||
uint32_t length = i.length;
|
||||
|
||||
vector<uint32_t> inherited_expressions;
|
||||
SmallVector<uint32_t> inherited_expressions;
|
||||
|
||||
uint32_t result_type = ops[0];
|
||||
uint32_t id = ops[1];
|
||||
@@ -4818,7 +4876,18 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
|
||||
emit_unary_func_op(result_type, id, args[0], "degrees");
|
||||
break;
|
||||
case GLSLstd450Fma:
|
||||
emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fma");
|
||||
if ((!options.es && options.version < 400) || (options.es && options.version < 320))
|
||||
{
|
||||
auto expr = join(to_enclosed_expression(args[0]), " * ", to_enclosed_expression(args[1]), " + ",
|
||||
to_enclosed_expression(args[2]));
|
||||
|
||||
emit_op(result_type, id, expr,
|
||||
should_forward(args[0]) && should_forward(args[1]) && should_forward(args[2]));
|
||||
for (uint32_t i = 0; i < 3; i++)
|
||||
inherit_expression_dependencies(id, args[i]);
|
||||
}
|
||||
else
|
||||
emit_trinary_func_op(result_type, id, args[0], args[1], args[2], "fma");
|
||||
break;
|
||||
case GLSLstd450Modf:
|
||||
register_call_out_argument(args[1]);
|
||||
@@ -4830,10 +4899,7 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
|
||||
{
|
||||
forced_temporaries.insert(id);
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto &flags = ir.meta[id].decoration.decoration_flags;
|
||||
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(id)), ";");
|
||||
set<SPIRExpression>(id, to_name(id), result_type, true);
|
||||
|
||||
emit_uninitialized_temporary_expression(result_type, id);
|
||||
statement(to_expression(id), ".", to_member_name(type, 0), " = ", "modf(", to_expression(args[0]), ", ",
|
||||
to_expression(id), ".", to_member_name(type, 1), ");");
|
||||
break;
|
||||
@@ -4973,10 +5039,7 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
|
||||
{
|
||||
forced_temporaries.insert(id);
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto &flags = ir.meta[id].decoration.decoration_flags;
|
||||
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(id)), ";");
|
||||
set<SPIRExpression>(id, to_name(id), result_type, true);
|
||||
|
||||
emit_uninitialized_temporary_expression(result_type, id);
|
||||
statement(to_expression(id), ".", to_member_name(type, 0), " = ", "frexp(", to_expression(args[0]), ", ",
|
||||
to_expression(id), ".", to_member_name(type, 1), ");");
|
||||
break;
|
||||
@@ -5056,7 +5119,8 @@ void CompilerGLSL::emit_glsl_op(uint32_t result_type, uint32_t id, uint32_t eop,
|
||||
break;
|
||||
|
||||
case GLSLstd450FindUMsb:
|
||||
emit_unary_func_op_cast(result_type, id, args[0], "findMSB", uint_type, int_type); // findMSB always returns int.
|
||||
emit_unary_func_op_cast(result_type, id, args[0], "findMSB", uint_type,
|
||||
int_type); // findMSB always returns int.
|
||||
break;
|
||||
|
||||
// Multisampled varying
|
||||
@@ -5580,9 +5644,9 @@ string CompilerGLSL::bitcast_glsl(const SPIRType &result_type, uint32_t argument
|
||||
{
|
||||
auto op = bitcast_glsl_op(result_type, expression_type(argument));
|
||||
if (op.empty())
|
||||
return to_enclosed_expression(argument);
|
||||
return to_enclosed_unpacked_expression(argument);
|
||||
else
|
||||
return join(op, "(", to_expression(argument), ")");
|
||||
return join(op, "(", to_unpacked_expression(argument), ")");
|
||||
}
|
||||
|
||||
std::string CompilerGLSL::bitcast_expression(SPIRType::BaseType target_type, uint32_t arg)
|
||||
@@ -7035,6 +7099,10 @@ uint32_t CompilerGLSL::get_integer_width_for_instruction(const Instruction &inst
|
||||
|
||||
switch (instr.op)
|
||||
{
|
||||
case OpSConvert:
|
||||
case OpConvertSToF:
|
||||
case OpUConvert:
|
||||
case OpConvertUToF:
|
||||
case OpIEqual:
|
||||
case OpINotEqual:
|
||||
case OpSLessThan:
|
||||
@@ -7158,8 +7226,19 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
bool usage_tracking = ptr_expression && flattened_buffer_blocks.count(ptr_expression->loaded_from) != 0 &&
|
||||
(type.basetype == SPIRType::Struct || (type.columns > 1));
|
||||
|
||||
auto &e = emit_op(result_type, id, expr, forward, !usage_tracking);
|
||||
e.need_transpose = need_transpose;
|
||||
SPIRExpression *e = nullptr;
|
||||
if (!backend.array_is_value_type && !type.array.empty() && !forward)
|
||||
{
|
||||
// Complicated load case where we need to make a copy of ptr, but we cannot, because
|
||||
// it is an array, and our backend does not support arrays as value types.
|
||||
// Emit the temporary, and copy it explicitly.
|
||||
e = &emit_uninitialized_temporary_expression(result_type, id);
|
||||
emit_array_copy(to_expression(id), ptr);
|
||||
}
|
||||
else
|
||||
e = &emit_op(result_type, id, expr, forward, !usage_tracking);
|
||||
|
||||
e->need_transpose = need_transpose;
|
||||
register_read(id, ptr, forward);
|
||||
|
||||
// Pass through whether the result is of a packed type.
|
||||
@@ -7172,7 +7251,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
|
||||
inherit_expression_dependencies(id, ptr);
|
||||
if (forward)
|
||||
add_implied_read_expression(e, ptr);
|
||||
add_implied_read_expression(*e, ptr);
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -7290,7 +7369,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
register_impure_function_call();
|
||||
|
||||
string funexpr;
|
||||
vector<string> arglist;
|
||||
SmallVector<string> arglist;
|
||||
funexpr += to_name(func) + "(";
|
||||
|
||||
if (emit_return_value_as_argument)
|
||||
@@ -7419,10 +7498,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
{
|
||||
// We cannot construct array of arrays because we cannot treat the inputs
|
||||
// as value types. Need to declare the array-of-arrays, and copy in elements one by one.
|
||||
forced_temporaries.insert(id);
|
||||
auto &flags = ir.meta[id].decoration.decoration_flags;
|
||||
statement(flags_to_precision_qualifiers_glsl(out_type, flags), variable_decl(out_type, to_name(id)), ";");
|
||||
set<SPIRExpression>(id, to_name(id), result_type, true);
|
||||
emit_uninitialized_temporary_expression(result_type, id);
|
||||
for (uint32_t i = 0; i < length; i++)
|
||||
emit_array_copy(join(to_expression(id), "[", i, "]"), elems[i]);
|
||||
}
|
||||
@@ -7656,7 +7732,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
trivial_forward = !expression_is_forwarded(vec0) && !expression_is_forwarded(vec1);
|
||||
|
||||
// Constructor style and shuffling from two different vectors.
|
||||
vector<string> args;
|
||||
SmallVector<string> args;
|
||||
for (uint32_t i = 0; i < length; i++)
|
||||
{
|
||||
if (elems[i] == 0xffffffffu)
|
||||
@@ -7821,12 +7897,8 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
uint32_t result_id = ops[1];
|
||||
uint32_t op0 = ops[2];
|
||||
uint32_t op1 = ops[3];
|
||||
forced_temporaries.insert(result_id);
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto &flags = ir.meta[result_id].decoration.decoration_flags;
|
||||
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";");
|
||||
set<SPIRExpression>(result_id, to_name(result_id), result_type, true);
|
||||
|
||||
emit_uninitialized_temporary_expression(result_type, result_id);
|
||||
const char *op = opcode == OpIAddCarry ? "uaddCarry" : "usubBorrow";
|
||||
|
||||
statement(to_expression(result_id), ".", to_member_name(type, 0), " = ", op, "(", to_expression(op0), ", ",
|
||||
@@ -7848,10 +7920,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
uint32_t op1 = ops[3];
|
||||
forced_temporaries.insert(result_id);
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto &flags = ir.meta[result_id].decoration.decoration_flags;
|
||||
statement(flags_to_precision_qualifiers_glsl(type, flags), variable_decl(type, to_name(result_id)), ";");
|
||||
set<SPIRExpression>(result_id, to_name(result_id), result_type, true);
|
||||
|
||||
emit_uninitialized_temporary_expression(result_type, result_id);
|
||||
const char *op = opcode == OpUMulExtended ? "umulExtended" : "imulExtended";
|
||||
|
||||
statement(op, "(", to_expression(op0), ", ", to_expression(op1), ", ", to_expression(result_id), ".",
|
||||
@@ -8107,12 +8176,45 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
}
|
||||
|
||||
// Conversion
|
||||
case OpSConvert:
|
||||
case OpConvertSToF:
|
||||
case OpUConvert:
|
||||
case OpConvertUToF:
|
||||
{
|
||||
auto input_type = opcode == OpSConvert || opcode == OpConvertSToF ? int_type : uint_type;
|
||||
uint32_t result_type = ops[0];
|
||||
uint32_t id = ops[1];
|
||||
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto &arg_type = expression_type(ops[2]);
|
||||
auto func = type_to_glsl_constructor(type);
|
||||
|
||||
// If we're sign-extending or zero-extending, we need to make sure we cast from the correct type.
|
||||
// For truncation, it does not matter, so don't emit useless casts.
|
||||
if (arg_type.width < type.width)
|
||||
emit_unary_func_op_cast(result_type, id, ops[2], func.c_str(), input_type, type.basetype);
|
||||
else
|
||||
emit_unary_func_op(result_type, id, ops[2], func.c_str());
|
||||
break;
|
||||
}
|
||||
|
||||
case OpConvertFToU:
|
||||
case OpConvertFToS:
|
||||
case OpConvertSToF:
|
||||
case OpConvertUToF:
|
||||
case OpUConvert:
|
||||
case OpSConvert:
|
||||
{
|
||||
// Cast to expected arithmetic type, then potentially bitcast away to desired signedness.
|
||||
uint32_t result_type = ops[0];
|
||||
uint32_t id = ops[1];
|
||||
auto &type = get<SPIRType>(result_type);
|
||||
auto expected_type = type;
|
||||
auto &float_type = expression_type(ops[2]);
|
||||
expected_type.basetype =
|
||||
opcode == OpConvertFToS ? to_signed_basetype(type.width) : to_unsigned_basetype(type.width);
|
||||
|
||||
auto func = type_to_glsl_constructor(expected_type);
|
||||
emit_unary_func_op_cast(result_type, id, ops[2], func.c_str(), float_type.basetype, expected_type.basetype);
|
||||
break;
|
||||
}
|
||||
|
||||
case OpFConvert:
|
||||
{
|
||||
uint32_t result_type = ops[0];
|
||||
@@ -9191,6 +9293,10 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
statement("executeCallableNV(", to_expression(ops[0]), ", ", to_expression(ops[1]), ");");
|
||||
break;
|
||||
|
||||
case OpUndef:
|
||||
// Undefined value has been declared.
|
||||
break;
|
||||
|
||||
default:
|
||||
statement("// unimplemented op ", instruction.op);
|
||||
break;
|
||||
@@ -9205,7 +9311,7 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction)
|
||||
// access to shader input content from within a function (eg. Metal). Each additional
|
||||
// function args uses the name of the global variable. Function nesting will modify the
|
||||
// functions and function calls all the way up the nesting chain.
|
||||
void CompilerGLSL::append_global_func_args(const SPIRFunction &func, uint32_t index, vector<string> &arglist)
|
||||
void CompilerGLSL::append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector<string> &arglist)
|
||||
{
|
||||
auto &args = func.arguments;
|
||||
uint32_t arg_cnt = uint32_t(args.size());
|
||||
@@ -10040,7 +10146,7 @@ void CompilerGLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret
|
||||
decl += to_name(func.self);
|
||||
|
||||
decl += "(";
|
||||
vector<string> arglist;
|
||||
SmallVector<string> arglist;
|
||||
for (auto &arg : func.arguments)
|
||||
{
|
||||
// Do not pass in separate images or samplers if we're remapping
|
||||
@@ -10498,7 +10604,7 @@ string CompilerGLSL::emit_continue_block(uint32_t continue_block, bool follow_tr
|
||||
// if we have to emit temporaries.
|
||||
current_continue_block = block;
|
||||
|
||||
vector<string> statements;
|
||||
SmallVector<string> statements;
|
||||
|
||||
// Capture all statements into our list.
|
||||
auto *old = redirect_statement;
|
||||
@@ -10821,7 +10927,7 @@ void CompilerGLSL::flush_undeclared_variables(SPIRBlock &block)
|
||||
flush_variable_declaration(v);
|
||||
}
|
||||
|
||||
void CompilerGLSL::emit_hoisted_temporaries(vector<pair<uint32_t, uint32_t>> &temporaries)
|
||||
void CompilerGLSL::emit_hoisted_temporaries(SmallVector<pair<uint32_t, uint32_t>> &temporaries)
|
||||
{
|
||||
// If we need to force temporaries for certain IDs due to continue blocks, do it before starting loop header.
|
||||
// Need to sort these to ensure that reference output is stable.
|
||||
@@ -11196,7 +11302,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block)
|
||||
assert(block.merge == SPIRBlock::MergeSelection);
|
||||
branch_to_continue(block.self, block.next_block);
|
||||
}
|
||||
else
|
||||
else if (block.self != block.next_block)
|
||||
emit_block_chain(get<SPIRBlock>(block.next_block));
|
||||
}
|
||||
|
||||
@@ -11371,8 +11477,7 @@ void CompilerGLSL::unroll_array_from_complex_load(uint32_t target_id, uint32_t s
|
||||
}
|
||||
}
|
||||
|
||||
void CompilerGLSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr,
|
||||
const SPIRType &expr_type)
|
||||
void CompilerGLSL::bitcast_from_builtin_load(uint32_t source_id, std::string &expr, const SPIRType &expr_type)
|
||||
{
|
||||
auto *var = maybe_get_backing_variable(source_id);
|
||||
if (var)
|
||||
@@ -11419,8 +11524,7 @@ void CompilerGLSL::bitcast_from_builtin_load(uint32_t source_id, std::string &ex
|
||||
expr = bitcast_expression(expr_type, expected_type, expr);
|
||||
}
|
||||
|
||||
void CompilerGLSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr,
|
||||
const SPIRType &expr_type)
|
||||
void CompilerGLSL::bitcast_to_builtin_store(uint32_t target_id, std::string &expr, const SPIRType &expr_type)
|
||||
{
|
||||
// Only interested in standalone builtin variables.
|
||||
if (!has_decoration(target_id, DecorationBuiltIn))
|
||||
|
||||
37
3rdparty/spirv-cross/spirv_glsl.hpp
vendored
@@ -17,9 +17,8 @@
#ifndef SPIRV_CROSS_GLSL_HPP
#define SPIRV_CROSS_GLSL_HPP

#include "spirv_cross.hpp"
#include "GLSL.std.450.h"
#include <sstream>
#include "spirv_cross.hpp"
#include <unordered_map>
#include <unordered_set>
#include <utility>
@@ -142,7 +141,7 @@ public:
    }

    explicit CompilerGLSL(std::vector<uint32_t> spirv_)
        : Compiler(move(spirv_))
        : Compiler(std::move(spirv_))
    {
        init();
    }
@@ -230,7 +229,7 @@ protected:
    virtual void emit_spv_amd_gcn_shader_op(uint32_t result_type, uint32_t result_id, uint32_t op, const uint32_t *args,
                                            uint32_t count);
    virtual void emit_header();
    void build_workgroup_size(std::vector<std::string> &arguments, const SpecializationConstant &x,
    void build_workgroup_size(SmallVector<std::string> &arguments, const SpecializationConstant &x,
                              const SpecializationConstant &y, const SpecializationConstant &z);

    virtual void emit_sampled_image_op(uint32_t result_type, uint32_t result_id, uint32_t image_id, uint32_t samp_id);
@@ -260,19 +259,19 @@ protected:
    virtual void emit_uniform(const SPIRVariable &var);
    virtual std::string unpack_expression_type(std::string expr_str, const SPIRType &type, uint32_t packed_type_id);

    std::unique_ptr<std::ostringstream> buffer;
    StringStream<> buffer;

    template <typename T>
    inline void statement_inner(T &&t)
    {
        (*buffer) << std::forward<T>(t);
        buffer << std::forward<T>(t);
        statement_count++;
    }

    template <typename T, typename... Ts>
    inline void statement_inner(T &&t, Ts &&... ts)
    {
        (*buffer) << std::forward<T>(t);
        buffer << std::forward<T>(t);
        statement_count++;
        statement_inner(std::forward<Ts>(ts)...);
    }
@@ -296,9 +295,9 @@ protected:
        else
        {
            for (uint32_t i = 0; i < indent; i++)
                (*buffer) << "\t";
                buffer << "\t";
            statement_inner(std::forward<Ts>(ts)...);
            (*buffer) << '\n';
            buffer << '\n';
        }
    }

@@ -314,7 +313,7 @@ protected:
    // Used for implementing continue blocks where
    // we want to obtain a list of statements we can merge
    // on a single line separated by comma.
    std::vector<std::string> *redirect_statement = nullptr;
    SmallVector<std::string> *redirect_statement = nullptr;
    const SPIRBlock *current_continue_block = nullptr;

    void begin_scope();
@@ -406,7 +405,7 @@ protected:
    void emit_interface_block(const SPIRVariable &type);
    void emit_flattened_io_block(const SPIRVariable &var, const char *qual);
    void emit_block_chain(SPIRBlock &block);
    void emit_hoisted_temporaries(std::vector<std::pair<uint32_t, uint32_t>> &temporaries);
    void emit_hoisted_temporaries(SmallVector<std::pair<uint32_t, uint32_t>> &temporaries);
    std::string constant_value_macro_name(uint32_t id);
    void emit_constant(const SPIRConstant &constant);
    void emit_specialization_constant_op(const SPIRConstantOp &constant);
@@ -437,8 +436,8 @@ protected:
                                 SPIRType::BaseType input_type, SPIRType::BaseType expected_result_type);
    void emit_binary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, const char *op,
                                  SPIRType::BaseType input_type, bool skip_cast_if_equal_type);
    void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2, const char *op,
                                   SPIRType::BaseType input_type);
    void emit_trinary_func_op_cast(uint32_t result_type, uint32_t result_id, uint32_t op0, uint32_t op1, uint32_t op2,
                                   const char *op, SPIRType::BaseType input_type);

    void emit_unary_func_op(uint32_t result_type, uint32_t result_id, uint32_t op0, const char *op);
    void emit_unrolled_unary_op(uint32_t result_type, uint32_t result_id, uint32_t operand, const char *op);
@@ -484,7 +483,9 @@ protected:
    const char *index_to_swizzle(uint32_t index);
    std::string remap_swizzle(const SPIRType &result_type, uint32_t input_components, const std::string &expr);
    std::string declare_temporary(uint32_t type, uint32_t id);
    void append_global_func_args(const SPIRFunction &func, uint32_t index, std::vector<std::string> &arglist);
    void emit_uninitialized_temporary(uint32_t type, uint32_t id);
    SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id);
    void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector<std::string> &arglist);
    std::string to_expression(uint32_t id, bool register_expression_read = true);
    std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true);
    std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true);
@@ -560,15 +561,15 @@ protected:
    std::unordered_map<uint32_t, uint32_t> expression_usage_counts;
    void track_expression_read(uint32_t id);

    std::vector<std::string> forced_extensions;
    std::vector<std::string> header_lines;
    SmallVector<std::string> forced_extensions;
    SmallVector<std::string> header_lines;

    // Used when expressions emit extra opcodes with their own unique IDs,
    // and we need to reuse the IDs across recompilation loops.
    // Currently used by NMin/Max/Clamp implementations.
    std::unordered_map<uint32_t, uint32_t> extra_sub_expressions;

    uint32_t statement_count;
    uint32_t statement_count = 0;

    inline bool is_legacy() const
    {
@@ -653,6 +654,6 @@ protected:
private:
    void init();
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif

40
3rdparty/spirv-cross/spirv_hlsl.cpp
vendored
@@ -723,17 +723,25 @@ string CompilerHLSL::to_interpolation_qualifiers(const Bitset &flags)
return res;
}

std::string CompilerHLSL::to_semantic(uint32_t vertex_location)
std::string CompilerHLSL::to_semantic(uint32_t location, ExecutionModel em, StorageClass sc)
{
for (auto &attribute : remap_vertex_attributes)
if (attribute.location == vertex_location)
return attribute.semantic;
if (em == ExecutionModelVertex && sc == StorageClassInput)
{
// We have a vertex attribute - we should look at remapping it if the user provided
// vertex attribute hints.
for (auto &attribute : remap_vertex_attributes)
if (attribute.location == location)
return attribute.semantic;
}

return join("TEXCOORD", vertex_location);
// Not a vertex attribute, or no remap_vertex_attributes entry.
return join("TEXCOORD", location);
}
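With the rework above, to_semantic() only consults remap_vertex_attributes for vertex-stage inputs; every other stage and storage class keeps the TEXCOORD&lt;location&gt; fallback. A hedged usage sketch, assuming the existing add_vertex_attribute_remap() entry point and a caller-supplied SPIR-V binary:

```
#include "spirv_hlsl.hpp"

#include <string>
#include <utility>
#include <vector>

// spirv_binary is assumed to hold an already-loaded vertex-shader SPIR-V module.
std::string compile_with_remap(std::vector<uint32_t> spirv_binary)
{
	spirv_cross::CompilerHLSL hlsl(std::move(spirv_binary));

	// Remap vertex input location 0 to a D3D semantic. After this change the remap
	// applies only to ExecutionModelVertex / StorageClassInput; other interfaces
	// still receive the default TEXCOORD<location> semantic.
	spirv_cross::HLSLVertexAttributeRemap remap = { 0, "POSITION" };
	hlsl.add_vertex_attribute_remap(remap);

	return hlsl.compile();
}
```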
void CompilerHLSL::emit_io_block(const SPIRVariable &var)
{
auto &execution = get_entry_point();

auto &type = get<SPIRType>(var.basetype);
add_resource_name(type.self);

@@ -749,7 +757,7 @@ void CompilerHLSL::emit_io_block(const SPIRVariable &var)
if (has_member_decoration(type.self, i, DecorationLocation))
{
uint32_t location = get_member_decoration(type.self, i, DecorationLocation);
semantic = join(" : ", to_semantic(location));
semantic = join(" : ", to_semantic(location, execution.model, var.storage));
}
else
{
@@ -757,7 +765,7 @@ void CompilerHLSL::emit_io_block(const SPIRVariable &var)
// There could be a conflict if the block members partially specialize the locations.
// It is unclear how SPIR-V deals with this. Assume this does not happen for now.
uint32_t location = base_location + i;
semantic = join(" : ", to_semantic(location));
semantic = join(" : ", to_semantic(location, execution.model, var.storage));
}

add_member_name(type, i);
@@ -820,7 +828,7 @@ void CompilerHLSL::emit_interface_block_in_struct(const SPIRVariable &var, unord
location_number = get_vacant_location();

// Allow semantic remap if specified.
auto semantic = to_semantic(location_number);
auto semantic = to_semantic(location_number, execution.model, var.storage);

if (need_matrix_unroll && type.columns > 1)
{
@@ -1210,8 +1218,8 @@ void CompilerHLSL::emit_resources()
require_output = false;
unordered_set<uint32_t> active_inputs;
unordered_set<uint32_t> active_outputs;
vector<SPIRVariable *> input_variables;
vector<SPIRVariable *> output_variables;
SmallVector<SPIRVariable *> input_variables;
SmallVector<SPIRVariable *> output_variables;
ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
auto &type = this->get<SPIRType>(var.basetype);
bool block = ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock);
@@ -2024,7 +2032,7 @@ void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret
decl += to_name(func.self);

decl += "(";
vector<string> arglist;
SmallVector<string> arglist;

if (!type.array.empty())
{
@@ -2092,7 +2100,7 @@ void CompilerHLSL::emit_function_prototype(SPIRFunction &func, const Bitset &ret

void CompilerHLSL::emit_hlsl_entry_point()
{
vector<string> arguments;
SmallVector<string> arguments;

if (require_input)
arguments.push_back("SPIRV_Cross_Input stage_input");
@@ -2425,7 +2433,7 @@ void CompilerHLSL::emit_texture_op(const Instruction &i)
auto op = static_cast<Op>(i.op);
uint32_t length = i.length;

vector<uint32_t> inherited_expressions;
SmallVector<uint32_t> inherited_expressions;

uint32_t result_type = ops[0];
uint32_t id = ops[1];
@@ -4565,7 +4573,7 @@ void CompilerHLSL::require_texture_query_variant(const SPIRType &type)
}
}

void CompilerHLSL::set_root_constant_layouts(vector<RootConstants> layout)
void CompilerHLSL::set_root_constant_layouts(std::vector<RootConstants> layout)
{
root_constants_layout = move(layout);
}
@@ -4664,7 +4672,7 @@ string CompilerHLSL::compile()
reset();

// Move constructor for this type is broken on GCC 4.9 ...
buffer = unique_ptr<ostringstream>(new ostringstream());
buffer.reset();

emit_header();
emit_resources();
@@ -4678,7 +4686,7 @@ string CompilerHLSL::compile()
// Entry point in HLSL is always main() for the time being.
get_entry_point().name = "main";

return buffer->str();
return buffer.str();
}

void CompilerHLSL::emit_block_hints(const SPIRBlock &block)
9
3rdparty/spirv-cross/spirv_hlsl.hpp
vendored
@@ -19,7 +19,6 @@

#include "spirv_glsl.hpp"
#include <utility>
#include <vector>

namespace SPIRV_CROSS_NAMESPACE
{
@@ -63,7 +62,7 @@ public:
};

explicit CompilerHLSL(std::vector<uint32_t> spirv_)
: CompilerGLSL(move(spirv_))
: CompilerGLSL(std::move(spirv_))
{
}

@@ -209,12 +208,12 @@ private:
void emit_builtin_variables();
bool require_output = false;
bool require_input = false;
std::vector<HLSLVertexAttributeRemap> remap_vertex_attributes;
SmallVector<HLSLVertexAttributeRemap> remap_vertex_attributes;

uint32_t type_to_consumed_locations(const SPIRType &type) const;

void emit_io_block(const SPIRVariable &var);
std::string to_semantic(uint32_t vertex_location);
std::string to_semantic(uint32_t location, spv::ExecutionModel em, spv::StorageClass sc);

uint32_t num_workgroups_builtin = 0;

@@ -222,6 +221,6 @@ private:
// when translating push constant ranges.
std::vector<RootConstants> root_constants_layout;
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif
55
3rdparty/spirv-cross/spirv_msl.cpp
vendored
@@ -30,7 +30,7 @@ static const uint32_t k_unknown_component = ~0u;

static const uint32_t k_aux_mbr_idx_swizzle_const = 0u;

CompilerMSL::CompilerMSL(vector<uint32_t> spirv_)
CompilerMSL::CompilerMSL(std::vector<uint32_t> spirv_)
: CompilerGLSL(move(spirv_))
{
}
@@ -423,7 +423,7 @@ void CompilerMSL::emit_entry_point_declarations()
if (type.basetype == SPIRType::Sampler)
add_resource_name(samp.first);

vector<string> args;
SmallVector<string> args;
auto &s = samp.second;

if (s.coord != MSL_SAMPLER_COORD_NORMALIZED)
@@ -659,7 +659,7 @@ string CompilerMSL::compile()
next_metal_resource_index_sampler = 0;

// Move constructor for this type is broken on GCC 4.9 ...
buffer = unique_ptr<ostringstream>(new ostringstream());
buffer.reset();

emit_header();
emit_specialization_constants_and_structs();
@@ -670,7 +670,7 @@ string CompilerMSL::compile()
pass_count++;
} while (is_forcing_recompilation());

return buffer->str();
return buffer.str();
}

// Register the need to output any custom functions.
@@ -679,8 +679,7 @@ void CompilerMSL::preprocess_op_codes()
OpCodePreprocessor preproc(*this);
traverse_all_reachable_opcodes(get<SPIRFunction>(ir.default_entry_point), preproc);

if (preproc.suppress_missing_prototypes)
add_pragma_line("#pragma clang diagnostic ignored \"-Wmissing-prototypes\"");
suppress_missing_prototypes = preproc.suppress_missing_prototypes;

if (preproc.uses_atomics)
{
@@ -1823,7 +1822,7 @@ void CompilerMSL::fix_up_interface_member_indices(StorageClass storage, uint32_t
uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch)
{
// Accumulate the variables that should appear in the interface struct
vector<SPIRVariable *> vars;
SmallVector<SPIRVariable *> vars;
bool incl_builtins = (storage == StorageClassOutput || is_tessellation_shader());

ir.for_each_typed_id<SPIRVariable>([&](uint32_t var_id, SPIRVariable &var) {
@@ -2351,10 +2350,13 @@ string CompilerMSL::unpack_expression_type(string expr_str, const SPIRType &type
// Emits the file header info
void CompilerMSL::emit_header()
{
// This particular line can be overridden during compilation, so make it a flag and not a pragma line.
if (suppress_missing_prototypes)
statement("#pragma clang diagnostic ignored \"-Wmissing-prototypes\"");
for (auto &pragma : pragma_lines)
statement(pragma);

if (!pragma_lines.empty())
if (!pragma_lines.empty() || suppress_missing_prototypes)
statement("");

statement("#include <metal_stdlib>");
@@ -2952,8 +2954,8 @@ void CompilerMSL::emit_specialization_constants_and_structs()
// TODO: This can be expressed as a [[threads_per_threadgroup]] input semantic, but we need to know
// the work group size at compile time in SPIR-V, and [[threads_per_threadgroup]] would need to be passed around as a global.
// The work group size may be a specialization constant.
statement("constant uint3 ", builtin_to_glsl(BuiltInWorkgroupSize, StorageClassWorkgroup), " [[maybe_unused]] = ",
constant_expression(get<SPIRConstant>(workgroup_size_id)), ";");
statement("constant uint3 ", builtin_to_glsl(BuiltInWorkgroupSize, StorageClassWorkgroup),
" [[maybe_unused]] = ", constant_expression(get<SPIRConstant>(workgroup_size_id)), ";");
emitted = true;
}
else if (c.specialization)
@@ -3080,7 +3082,7 @@ bool CompilerMSL::emit_tessellation_access_chain(const uint32_t *ops, uint32_t l
get_variable_data_type(*var).basetype == SPIRType::Struct))
{
AccessChainMeta meta;
std::vector<uint32_t> indices;
SmallVector<uint32_t> indices;
uint32_t next_id = ir.increase_bound_by(2);

indices.reserve(length - 3 + 1);
@@ -3961,6 +3963,29 @@ void CompilerMSL::emit_array_copy(const string &lhs, uint32_t rhs_id)
is_constant = true;
}

// For the case where we have OpLoad triggering an array copy,
// we cannot easily detect this case ahead of time since it's
// context dependent. We might have to force a recompile here
// if this is the only use of array copies in our shader.
if (type.array.size() > 1)
{
if (type.array.size() > SPVFuncImplArrayCopyMultidimMax)
SPIRV_CROSS_THROW("Cannot support this many dimensions for arrays of arrays.");
auto func = static_cast<SPVFuncImpl>(SPVFuncImplArrayCopyMultidimBase + type.array.size());
if (spv_function_implementations.count(func) == 0)
{
spv_function_implementations.insert(func);
suppress_missing_prototypes = true;
force_recompile();
}
}
else if (spv_function_implementations.count(SPVFuncImplArrayCopy) == 0)
{
spv_function_implementations.insert(SPVFuncImplArrayCopy);
suppress_missing_prototypes = true;
force_recompile();
}

const char *tag = is_constant ? "FromConstant" : "FromStack";
statement("spvArrayCopy", tag, type.array.size(), "(", lhs, ", ", to_expression(rhs_id), ");");
}
@@ -4943,7 +4968,7 @@ void CompilerMSL::add_convert_row_major_matrix_function(uint32_t cols, uint32_t
auto rslt = spv_function_implementations.insert(spv_func);
if (rslt.second)
{
add_pragma_line("#pragma clang diagnostic ignored \"-Wmissing-prototypes\"");
suppress_missing_prototypes = true;
force_recompile();
}
}
@@ -5667,7 +5692,7 @@ void CompilerMSL::entry_point_args_discrete_descriptors(string &ep_args)
uint32_t index;
};

vector<Resource> resources;
SmallVector<Resource> resources;

ir.for_each_typed_id<SPIRVariable>([&](uint32_t, SPIRVariable &var) {
if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
@@ -7383,7 +7408,7 @@ void CompilerMSL::MemberSorter::sort()
// Create a temporary array of consecutive member indices and sort it based on how
// the members should be reordered, based on builtin and sorting aspect meta info.
size_t mbr_cnt = type.member_types.size();
vector<uint32_t> mbr_idxs(mbr_cnt);
SmallVector<uint32_t> mbr_idxs(mbr_cnt);
iota(mbr_idxs.begin(), mbr_idxs.end(), 0); // Fill with consecutive indices
std::sort(mbr_idxs.begin(), mbr_idxs.end(), *this); // Sort member indices based on sorting aspect

@@ -7577,7 +7602,7 @@ void CompilerMSL::analyze_argument_buffers()
SPIRType::BaseType basetype;
uint32_t index;
};
vector<Resource> resources_in_set[kMaxArgumentBuffers];
SmallVector<Resource> resources_in_set[kMaxArgumentBuffers];

ir.for_each_typed_id<SPIRVariable>([&](uint32_t self, SPIRVariable &var) {
if ((var.storage == StorageClassUniform || var.storage == StorageClassUniformConstant ||
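The emit_array_copy() addition above leans on the existing recompile loop: when a helper such as spvArrayCopy is first required in the middle of a pass, the compiler records it, sets suppress_missing_prototypes, and forces another pass so emit_header() can emit the pragma and helpers up front. Below is a rough sketch of that pattern with invented names; the real code uses force_recompile() and is_forcing_recompilation() as shown in the compile() hunks above.

```
#include <set>
#include <sstream>
#include <string>

struct RecompileSketch
{
	std::set<int> required_helpers; // stand-in for spv_function_implementations
	bool needs_recompile = false;

	void require_helper(int helper)
	{
		// Discovering a helper mid-emission invalidates the current pass.
		if (required_helpers.insert(helper).second)
			needs_recompile = true;
	}

	std::string compile()
	{
		std::string result;
		do
		{
			needs_recompile = false;
			std::ostringstream out;
			// ... emit header from required_helpers, then emit the functions,
			// which may call require_helper() and flag another pass ...
			result = out.str();
		} while (needs_recompile);
		return result;
	}
};
```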
11
3rdparty/spirv-cross/spirv_msl.hpp
vendored
@@ -22,7 +22,6 @@
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>

namespace SPIRV_CROSS_NAMESPACE
{
@@ -495,9 +494,9 @@ protected:
std::unordered_map<MSLStructMemberKey, uint32_t> struct_member_padding;
std::set<std::string> pragma_lines;
std::set<std::string> typedef_lines;
std::vector<uint32_t> vars_needing_early_declaration;
SmallVector<uint32_t> vars_needing_early_declaration;

std::vector<std::pair<MSLResourceBinding, bool>> resource_bindings;
SmallVector<std::pair<MSLResourceBinding, bool>> resource_bindings;
uint32_t next_metal_resource_index_buffer = 0;
uint32_t next_metal_resource_index_texture = 0;
uint32_t next_metal_resource_index_sampler = 0;
@@ -530,7 +529,7 @@ protected:
spv::Op previous_instruction_opcode = spv::OpNop;

std::unordered_map<uint32_t, MSLConstexprSampler> constexpr_samplers;
std::vector<uint32_t> buffer_arrays;
SmallVector<uint32_t> buffer_arrays;

uint32_t argument_buffer_ids[kMaxArgumentBuffers];
uint32_t argument_buffer_discrete_mask = 0;
@@ -540,6 +539,8 @@ protected:
uint32_t get_target_components_for_fragment_location(uint32_t location) const;
uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components);

bool suppress_missing_prototypes = false;

// OpcodeHandler that handles several MSL preprocessing operations.
struct OpCodePreprocessor : OpcodeHandler
{
@@ -595,6 +596,6 @@ protected:
SortAspect sort_aspect;
};
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif
12
3rdparty/spirv-cross/spirv_parser.cpp
vendored
@@ -22,7 +22,7 @@ using namespace spv;

namespace SPIRV_CROSS_NAMESPACE
{
Parser::Parser(std::vector<uint32_t> spirv)
Parser::Parser(vector<uint32_t> spirv)
{
ir.spirv = move(spirv);
}
@@ -88,7 +88,7 @@ void Parser::parse()

uint32_t offset = 5;

vector<Instruction> instructions;
SmallVector<Instruction> instructions;
while (offset < len)
{
Instruction instr = {};
@@ -207,6 +207,8 @@ void Parser::parse(const Instruction &instruction)
uint32_t result_type = ops[0];
uint32_t id = ops[1];
set<SPIRUndef>(id, result_type);
if (current_block)
current_block->ops.push_back(instruction);
break;
}

@@ -1095,7 +1097,7 @@ void Parser::make_constant_null(uint32_t id, uint32_t type)
if (!constant_type.array_size_literal.back())
SPIRV_CROSS_THROW("Array size of OpConstantNull must be a literal.");

vector<uint32_t> elements(constant_type.array.back());
SmallVector<uint32_t> elements(constant_type.array.back());
for (uint32_t i = 0; i < constant_type.array.back(); i++)
elements[i] = parent_id;
set<SPIRConstant>(id, type, elements.data(), uint32_t(elements.size()), false);
@@ -1103,7 +1105,7 @@ void Parser::make_constant_null(uint32_t id, uint32_t type)
else if (!constant_type.member_types.empty())
{
uint32_t member_ids = ir.increase_bound_by(uint32_t(constant_type.member_types.size()));
vector<uint32_t> elements(constant_type.member_types.size());
SmallVector<uint32_t> elements(constant_type.member_types.size());
for (uint32_t i = 0; i < constant_type.member_types.size(); i++)
{
make_constant_null(member_ids + i, constant_type.member_types[i]);
@@ -1118,4 +1120,4 @@ void Parser::make_constant_null(uint32_t id, uint32_t type)
}
}

} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE
5
3rdparty/spirv-cross/spirv_parser.hpp
vendored
@@ -19,7 +19,6 @@

#include "spirv_cross_parsed_ir.hpp"
#include <stdint.h>
#include <vector>

namespace SPIRV_CROSS_NAMESPACE
{
@@ -84,12 +83,12 @@ private:
}

// This must be an ordered data structure so we always pick the same type aliases.
std::vector<uint32_t> global_struct_cache;
SmallVector<uint32_t> global_struct_cache;

bool types_are_logically_equivalent(const SPIRType &a, const SPIRType &b) const;
bool variable_storage_is_aliased(const SPIRVariable &v) const;
void make_constant_null(uint32_t id, uint32_t type);
};
} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif
14
3rdparty/spirv-cross/spirv_reflect.cpp
vendored
@@ -36,10 +36,16 @@ using Stack = std::stack<State>;
class Stream
{
Stack stack;
std::ostringstream buffer;
StringStream<> buffer;
uint32_t indent{ 0 };
char current_locale_radix_character = '.';

public:
void set_current_locale_radix_character(char c)
{
current_locale_radix_character = c;
}

void begin_json_object();
void end_json_object();
void emit_json_key(const std::string &key);
@@ -212,7 +218,7 @@ void Stream::emit_json_key_value(const std::string &key, int32_t value)
void Stream::emit_json_key_value(const std::string &key, float value)
{
emit_json_key(key);
statement_inner(value);
statement_inner(convert_to_string(value, current_locale_radix_character));
}

void Stream::emit_json_key_value(const std::string &key, bool value)
@@ -247,8 +253,8 @@ void CompilerReflection::set_format(const std::string &format)

string CompilerReflection::compile()
{
// Move constructor for this type is broken on GCC 4.9 ...
json_stream = std::make_shared<simple_json::Stream>();
json_stream->set_current_locale_radix_character(current_locale_radix_character);
json_stream->begin_json_object();
emit_entry_points();
emit_types();
@@ -439,7 +445,7 @@ void CompilerReflection::emit_resources()
emit_resources("acceleration_structures", res.acceleration_structures);
}

void CompilerReflection::emit_resources(const char *tag, const vector<Resource> &resources)
void CompilerReflection::emit_resources(const char *tag, const SmallVector<Resource> &resources)
{
if (resources.empty())
{
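The Stream changes above route float output through convert_to_string() with an explicit radix character so the reflection JSON always uses '.' even when the process runs under a locale whose printf-style formatting emits ','. A sketch of the general technique, not the actual convert_to_string() implementation:

```
#include <algorithm>
#include <cstdio>
#include <string>

// Print a float for JSON regardless of the active C locale. locale_radix_point is
// whatever the current locale uses as its decimal separator (e.g. ',').
std::string float_to_json(float value, char locale_radix_point)
{
	char buf[64];
	std::snprintf(buf, sizeof(buf), "%.9g", value);
	std::string str = buf;
	// snprintf honours the locale, so swap its radix point back to '.' if needed.
	if (locale_radix_point != '.')
		std::replace(str.begin(), str.end(), locale_radix_point, '.');
	return str;
}
```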
7
3rdparty/spirv-cross/spirv_reflect.hpp
vendored
@@ -19,7 +19,6 @@

#include "spirv_glsl.hpp"
#include <utility>
#include <vector>

namespace simple_json
{
@@ -34,7 +33,7 @@ class CompilerReflection : public CompilerGLSL

public:
explicit CompilerReflection(std::vector<uint32_t> spirv_)
: Parent(move(spirv_))
: Parent(std::move(spirv_))
{
options.vulkan_semantics = true;
}
@@ -72,13 +71,13 @@ private:
void emit_type_member(const SPIRType &type, uint32_t index);
void emit_type_member_qualifiers(const SPIRType &type, uint32_t index);
void emit_type_array(const SPIRType &type);
void emit_resources(const char *tag, const std::vector<Resource> &resources);
void emit_resources(const char *tag, const SmallVector<Resource> &resources);

std::string to_member_name(const SPIRType &type, uint32_t index) const;

std::shared_ptr<simple_json::Stream> json_stream;
};

} // namespace spirv_cross
} // namespace SPIRV_CROSS_NAMESPACE

#endif
90
3rdparty/spirv-cross/test_shaders.py
vendored
@@ -17,7 +17,8 @@ import errno
from functools import partial

class Paths():
def __init__(self, glslang, spirv_as, spirv_val, spirv_opt):
def __init__(self, spirv_cross, glslang, spirv_as, spirv_val, spirv_opt):
self.spirv_cross = spirv_cross
self.glslang = glslang
self.spirv_as = spirv_as
self.spirv_val = spirv_val
@@ -138,7 +139,7 @@ def validate_shader_msl(shader, opt):
print('Error compiling Metal shader: ' + msl_path)
raise RuntimeError('Failed to compile Metal shader')

def cross_compile_msl(shader, spirv, opt, paths):
def cross_compile_msl(shader, spirv, opt, iterations, paths):
spirv_path = create_temporary()
msl_path = create_temporary(os.path.basename(shader))

@@ -154,9 +155,9 @@ def cross_compile_msl(shader, spirv, opt, paths):
if opt:
subprocess.check_call([paths.spirv_opt, '--skip-validation', '-O', '-o', spirv_path, spirv_path])

spirv_cross_path = './spirv-cross'
spirv_cross_path = paths.spirv_cross

msl_args = [spirv_cross_path, '--entry', 'main', '--output', msl_path, spirv_path, '--msl']
msl_args = [spirv_cross_path, '--entry', 'main', '--output', msl_path, spirv_path, '--msl', '--iterations', str(iterations)]
msl_args.append('--msl-version')
msl_args.append(path_to_msl_standard_cli(shader))
if '.swizzle.' in shader:
@@ -246,7 +247,7 @@ def shader_to_sm(shader):
else:
return '50'

def cross_compile_hlsl(shader, spirv, opt, force_no_external_validation, paths):
def cross_compile_hlsl(shader, spirv, opt, force_no_external_validation, iterations, paths):
spirv_path = create_temporary()
hlsl_path = create_temporary(os.path.basename(shader))

@@ -262,10 +263,10 @@ def cross_compile_hlsl(shader, spirv, opt, force_no_external_validation, paths):
if opt:
subprocess.check_call([paths.spirv_opt, '--skip-validation', '-O', '-o', spirv_path, spirv_path])

spirv_cross_path = './spirv-cross'
spirv_cross_path = paths.spirv_cross

sm = shader_to_sm(shader)
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', hlsl_path, spirv_path, '--hlsl-enable-compat', '--hlsl', '--shader-model', sm])
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', hlsl_path, spirv_path, '--hlsl-enable-compat', '--hlsl', '--shader-model', sm, '--iterations', str(iterations)])

if not shader_is_invalid_spirv(hlsl_path):
subprocess.check_call([paths.spirv_val, '--target-env', 'vulkan1.1', spirv_path])
@@ -274,7 +275,7 @@ def cross_compile_hlsl(shader, spirv, opt, force_no_external_validation, paths):

return (spirv_path, hlsl_path)

def cross_compile_reflect(shader, spirv, opt, paths):
def cross_compile_reflect(shader, spirv, opt, iterations, paths):
spirv_path = create_temporary()
reflect_path = create_temporary(os.path.basename(shader))

@@ -290,10 +291,10 @@ def cross_compile_reflect(shader, spirv, opt, paths):
if opt:
subprocess.check_call([paths.spirv_opt, '--skip-validation', '-O', '-o', spirv_path, spirv_path])

spirv_cross_path = './spirv-cross'
spirv_cross_path = paths.spirv_cross

sm = shader_to_sm(shader)
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', reflect_path, spirv_path, '--reflect'])
subprocess.check_call([spirv_cross_path, '--entry', 'main', '--output', reflect_path, spirv_path, '--reflect', '--iterations', str(iterations)])
return (spirv_path, reflect_path)

def validate_shader(shader, vulkan, paths):
@@ -302,7 +303,7 @@ def validate_shader(shader, vulkan, paths):
else:
subprocess.check_call([paths.glslang, shader])

def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt, push_ubo, paths):
def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt, push_ubo, iterations, paths):
spirv_path = create_temporary()
glsl_path = create_temporary(os.path.basename(shader))

@@ -324,7 +325,7 @@ def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, fl
if not invalid_spirv:
subprocess.check_call([paths.spirv_val, '--target-env', 'vulkan1.1', spirv_path])

extra_args = []
extra_args = ['--iterations', str(iterations)]
if eliminate:
extra_args += ['--remove-unused-variables']
if is_legacy:
@@ -338,7 +339,7 @@ def cross_compile(shader, vulkan, spirv, invalid_spirv, eliminate, is_legacy, fl
if push_ubo:
extra_args += ['--glsl-emit-push-constant-as-ubo']

spirv_cross_path = './spirv-cross'
spirv_cross_path = paths.spirv_cross

# A shader might not be possible to make valid GLSL from, skip validation for this case.
if not ('nocompat' in glsl_path):
@@ -391,8 +392,8 @@ def json_ordered(obj):
def json_compare(json_a, json_b):
return json_ordered(json_a) == json_ordered(json_b)

def regression_check_reflect(shader, json_file, update, keep, opt):
reference = reference_path(shader[0], shader[1], opt) + '.json'
def regression_check_reflect(shader, json_file, args):
reference = reference_path(shader[0], shader[1], args.opt) + '.json'
joined_path = os.path.join(shader[0], shader[1])
print('Reference shader reflection path:', reference)
if os.path.exists(reference):
@@ -404,7 +405,7 @@ def regression_check_reflect(shader, json_file, update, keep, opt):
with open(reference) as f:
expected = json.load(f)
if (json_compare(actual, expected) != True):
if update:
if args.update:
print('Generated reflection json has changed for {}!'.format(reference))
# If we expect changes, update the reference file.
if os.path.exists(reference):
@@ -422,7 +423,7 @@ def regression_check_reflect(shader, json_file, update, keep, opt):
print('')

# Otherwise, fail the test. Keep the shader file around so we can inspect.
if not keep:
if not args.keep:
remove_file(json_file)

raise RuntimeError('Does not match reference')
@@ -433,14 +434,14 @@ def regression_check_reflect(shader, json_file, update, keep, opt):
make_reference_dir(reference)
shutil.move(json_file, reference)

def regression_check(shader, glsl, update, keep, opt):
reference = reference_path(shader[0], shader[1], opt)
def regression_check(shader, glsl, args):
reference = reference_path(shader[0], shader[1], args.opt)
joined_path = os.path.join(shader[0], shader[1])
print('Reference shader path:', reference)

if os.path.exists(reference):
if md5_for_file(glsl) != md5_for_file(reference):
if update:
if args.update:
print('Generated source code has changed for {}!'.format(reference))
# If we expect changes, update the reference file.
if os.path.exists(reference):
@@ -458,7 +459,7 @@ def regression_check(shader, glsl, update, keep, opt):
print('')

# Otherwise, fail the test. Keep the shader file around so we can inspect.
if not keep:
if not args.keep:
remove_file(glsl)
raise RuntimeError('Does not match reference')
else:
@@ -501,7 +502,7 @@ def shader_is_noopt(shader):
def shader_is_push_ubo(shader):
return '.push-ubo.' in shader

def test_shader(stats, shader, update, keep, opt, paths):
def test_shader(stats, shader, args, paths):
joined_path = os.path.join(shader[0], shader[1])
vulkan = shader_is_vulkan(shader[1])
desktop = shader_is_desktop(shader[1])
@@ -516,16 +517,16 @@ def test_shader(stats, shader, update, keep, opt, paths):
push_ubo = shader_is_push_ubo(shader[1])

print('Testing shader:', joined_path)
spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, opt and (not noopt), push_ubo, paths)
spirv, glsl, vulkan_glsl = cross_compile(joined_path, vulkan, is_spirv, invalid_spirv, eliminate, is_legacy, flatten_ubo, sso, flatten_dim, args.opt and (not noopt), push_ubo, args.iterations, paths)

# Only test GLSL stats if we have a shader following GL semantics.
if stats and (not vulkan) and (not is_spirv) and (not desktop):
cross_stats = get_shader_stats(glsl)

if glsl:
regression_check(shader, glsl, update, keep, opt)
regression_check(shader, glsl, args)
if vulkan_glsl:
regression_check((shader[0], shader[1] + '.vk'), vulkan_glsl, update, keep, opt)
regression_check((shader[0], shader[1] + '.vk'), vulkan_glsl, args)

remove_file(spirv)

@@ -540,13 +541,13 @@ def test_shader(stats, shader, update, keep, opt, paths):
a.append(str(i))
print(','.join(a), file = stats)

def test_shader_msl(stats, shader, update, keep, opt, force_no_external_validation, paths):
def test_shader_msl(stats, shader, args, paths):
joined_path = os.path.join(shader[0], shader[1])
print('\nTesting MSL shader:', joined_path)
is_spirv = shader_is_spirv(shader[1])
noopt = shader_is_noopt(shader[1])
spirv, msl = cross_compile_msl(joined_path, is_spirv, opt and (not noopt), paths)
regression_check(shader, msl, update, keep, opt)
spirv, msl = cross_compile_msl(joined_path, is_spirv, args.opt and (not noopt), args.iterations, paths)
regression_check(shader, msl, args)

# Uncomment the following line to print the temp SPIR-V file path.
# This temp SPIR-V file is not deleted until after the Metal validation step below.
@@ -556,40 +557,40 @@ def test_shader_msl(stats, shader, update, keep, opt, force_no_external_validati
# executable from Xcode using args: `--msl --entry main --output msl_path spirv_path`.
# print('SPRIV shader: ' + spirv)

if not force_no_external_validation:
validate_shader_msl(shader, opt)
if not args.force_no_external_validation:
validate_shader_msl(shader, args.opt)

remove_file(spirv)

def test_shader_hlsl(stats, shader, update, keep, opt, force_no_external_validation, paths):
def test_shader_hlsl(stats, shader, args, paths):
joined_path = os.path.join(shader[0], shader[1])
print('Testing HLSL shader:', joined_path)
is_spirv = shader_is_spirv(shader[1])
noopt = shader_is_noopt(shader[1])
spirv, hlsl = cross_compile_hlsl(joined_path, is_spirv, opt and (not noopt), force_no_external_validation, paths)
regression_check(shader, hlsl, update, keep, opt)
spirv, hlsl = cross_compile_hlsl(joined_path, is_spirv, args.opt and (not noopt), args.force_no_external_validation, args.iterations, paths)
regression_check(shader, hlsl, args)
remove_file(spirv)

def test_shader_reflect(stats, shader, update, keep, opt, paths):
def test_shader_reflect(stats, shader, args, paths):
joined_path = os.path.join(shader[0], shader[1])
print('Testing shader reflection:', joined_path)
is_spirv = shader_is_spirv(shader[1])
noopt = shader_is_noopt(shader[1])
spirv, reflect = cross_compile_reflect(joined_path, is_spirv, opt and (not noopt), paths)
regression_check_reflect(shader, reflect, update, keep, opt)
spirv, reflect = cross_compile_reflect(joined_path, is_spirv, args.opt and (not noopt), args.iterations, paths)
regression_check_reflect(shader, reflect, args)
remove_file(spirv)

def test_shader_file(relpath, stats, args, backend):
paths = Paths(args.glslang, args.spirv_as, args.spirv_val, args.spirv_opt)
paths = Paths(args.spirv_cross, args.glslang, args.spirv_as, args.spirv_val, args.spirv_opt)
try:
if backend == 'msl':
test_shader_msl(stats, (args.folder, relpath), args.update, args.keep, args.opt, args.force_no_external_validation, paths)
test_shader_msl(stats, (args.folder, relpath), args, paths)
elif backend == 'hlsl':
test_shader_hlsl(stats, (args.folder, relpath), args.update, args.keep, args.opt, args.force_no_external_validation, paths)
test_shader_hlsl(stats, (args.folder, relpath), args, paths)
elif backend == 'reflect':
test_shader_reflect(stats, (args.folder, relpath), args.update, args.keep, args.opt, paths)
test_shader_reflect(stats, (args.folder, relpath), args, paths)
else:
test_shader(stats, (args.folder, relpath), args.update, args.keep, args.opt, paths)
test_shader(stats, (args.folder, relpath), args, paths)
return None
except Exception as e:
return e
@@ -669,6 +670,9 @@ def main():
parser.add_argument('--parallel',
action = 'store_true',
help = 'Execute tests in parallel. Useful for doing regression quickly, but bad for debugging and stat output.')
parser.add_argument('--spirv-cross',
default = './spirv-cross',
help = 'Explicit path to spirv-cross')
parser.add_argument('--glslang',
default = 'glslangValidator',
help = 'Explicit path to glslangValidator')
@@ -681,6 +685,10 @@ def main():
parser.add_argument('--spirv-opt',
default = 'spirv-opt',
help = 'Explicit path to spirv-opt')
parser.add_argument('--iterations',
default = 1,
type = int,
help = 'Number of iterations to run SPIRV-Cross (benchmarking)')

args = parser.parse_args()
if not args.folder:
28
3rdparty/spirv-cross/test_shaders.sh
vendored
@@ -1,20 +1,24 @@
#!/bin/bash

echo "Building spirv-cross"
make -j$(nproc)
if [ -z "$SPIRV_CROSS_PATH" ]; then
echo "Building spirv-cross"
make -j$(nproc)
SPIRV_CROSS_PATH="./spirv-cross"
fi

export PATH="./external/glslang-build/output/bin:./external/spirv-tools-build/output/bin:.:$PATH"
echo "Using glslangValidation in: $(which glslangValidator)."
echo "Using spirv-opt in: $(which spirv-opt)."
echo "Using SPIRV-Cross in: \"$SPIRV_CROSS_PATH\"."

./test_shaders.py shaders || exit 1
./test_shaders.py shaders --opt || exit 1
./test_shaders.py shaders-no-opt || exit 1
./test_shaders.py shaders-msl --msl || exit 1
./test_shaders.py shaders-msl --msl --opt || exit 1
./test_shaders.py shaders-msl-no-opt --msl || exit 1
./test_shaders.py shaders-hlsl --hlsl || exit 1
./test_shaders.py shaders-hlsl --hlsl --opt || exit 1
./test_shaders.py shaders-hlsl-no-opt --hlsl || exit 1
./test_shaders.py shaders-reflection --reflect || exit 1
./test_shaders.py shaders --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders --opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-no-opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-msl --msl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-msl --msl --opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-msl-no-opt --msl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-hlsl --hlsl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-hlsl --hlsl --opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-hlsl-no-opt --hlsl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-reflection --reflect --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
226
3rdparty/spirv-cross/tests-other/small_vector.cpp
vendored
Normal file
@@ -0,0 +1,226 @@
/*
* Copyright 2019 Hans-Kristian Arntzen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "spirv_cross.hpp"
#include <memory>

using namespace spirv_cross;

// Test the tricky bits of the implementation.
// Running the entire test suite on this implementation should find all other potential issues.

static int allocations = 0;
static int deallocations = 0;

#define SPVC_ASSERT(x) do { \
if (!(x)) SPIRV_CROSS_THROW("Assert: " #x " failed!"); \
} while(0)

struct RAIIInt
{
RAIIInt(int v_) : v(v_) { allocations++; }
~RAIIInt() { deallocations++; }
RAIIInt() { allocations++; }
RAIIInt(const RAIIInt &other) { v = other.v; allocations++; }
RAIIInt(RAIIInt &&other) SPIRV_CROSS_NOEXCEPT { v = other.v; allocations++; }
RAIIInt &operator=(RAIIInt &&) = default;
RAIIInt &operator=(const RAIIInt &) = default;

int v = 0;
};

static void propagate_stack_to_heap()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);
auto *old_data = ints.data();
SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 2);
ints.emplace_back(3);
SPVC_ASSERT(old_data != ints.data());
SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 2);
SPVC_ASSERT(ints[2].v == 3);
SPVC_ASSERT(ints.size() == 3);
}

static void insert_end()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);

const RAIIInt new_ints[3] = { 10, 20, 30 };
ints.insert(ints.end(), new_ints, new_ints + 3);
SPVC_ASSERT(ints.size() == 5);

SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 2);
SPVC_ASSERT(ints[2].v == 10);
SPVC_ASSERT(ints[3].v == 20);
SPVC_ASSERT(ints[4].v == 30);
}

static void insert_begin_realloc()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);

const RAIIInt new_ints[3] = { 10, 20, 30 };
ints.insert(ints.begin(), new_ints, new_ints + 3);
SPVC_ASSERT(ints.size() == 5);

SPVC_ASSERT(ints[0].v == 10);
SPVC_ASSERT(ints[1].v == 20);
SPVC_ASSERT(ints[2].v == 30);
SPVC_ASSERT(ints[3].v == 1);
SPVC_ASSERT(ints[4].v == 2);
}

static void insert_middle_realloc()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);

const RAIIInt new_ints[3] = { 10, 20, 30 };
ints.insert(ints.begin() + 1, new_ints, new_ints + 3);
SPVC_ASSERT(ints.size() == 5);

SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 10);
SPVC_ASSERT(ints[2].v == 20);
SPVC_ASSERT(ints[3].v == 30);
SPVC_ASSERT(ints[4].v == 2);
}

static void insert_begin_no_realloc()
{
SmallVector<RAIIInt, 2> ints;
ints.reserve(10);
ints.emplace_back(1);
ints.emplace_back(2);

const RAIIInt new_ints[3] = { 10, 20, 30 };
ints.insert(ints.begin(), new_ints, new_ints + 3);
SPVC_ASSERT(ints.size() == 5);

SPVC_ASSERT(ints[0].v == 10);
SPVC_ASSERT(ints[1].v == 20);
SPVC_ASSERT(ints[2].v == 30);
SPVC_ASSERT(ints[3].v == 1);
SPVC_ASSERT(ints[4].v == 2);
}

static void insert_middle_no_realloc()
{
SmallVector<RAIIInt, 2> ints;
ints.reserve(10);
ints.emplace_back(1);
ints.emplace_back(2);

const RAIIInt new_ints[3] = { 10, 20, 30 };
ints.insert(ints.begin() + 1, new_ints, new_ints + 3);
SPVC_ASSERT(ints.size() == 5);

SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 10);
SPVC_ASSERT(ints[2].v == 20);
SPVC_ASSERT(ints[3].v == 30);
SPVC_ASSERT(ints[4].v == 2);
}

static void erase_end()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);
ints.emplace_back(3);
ints.emplace_back(4);
ints.erase(ints.begin() + 1, ints.end());

SPVC_ASSERT(ints.size() == 1);
SPVC_ASSERT(ints[0].v == 1);
}

static void erase_middle()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);
ints.emplace_back(3);
ints.emplace_back(4);
ints.erase(ints.begin() + 1, ints.end() - 1);

SPVC_ASSERT(ints.size() == 2);
SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 4);
}

static void erase_start()
{
SmallVector<RAIIInt, 2> ints;
ints.emplace_back(1);
ints.emplace_back(2);
ints.emplace_back(3);
ints.emplace_back(4);
ints.erase(ints.begin(), ints.end() - 2);

SPVC_ASSERT(ints.size() == 2);
SPVC_ASSERT(ints[0].v == 3);
SPVC_ASSERT(ints[1].v == 4);
}

static void convert_to_std_vector()
{
SmallVector<RAIIInt, 4> foo;
foo.push_back(1);
foo.push_back(2);
std::vector<RAIIInt> ints(foo);
SPVC_ASSERT(ints.size() == 2);
SPVC_ASSERT(foo.size() == 2);
SPVC_ASSERT(ints[0].v == 1);
SPVC_ASSERT(ints[1].v == 2);

// This doesn't work on MSVC 2013. Ignore it.
#if !(defined(_MSC_VER) && _MSC_VER < 1900)
SmallVector<std::unique_ptr<RAIIInt>> move_only_buffer;
move_only_buffer.emplace_back(new RAIIInt(40));
std::vector<std::unique_ptr<RAIIInt>> move_only_vector(std::move(move_only_buffer));
SPVC_ASSERT(move_only_vector.size() == 1);
SPVC_ASSERT(move_only_vector[0]->v == 40);
#endif
}

int main()
{
propagate_stack_to_heap();
insert_end();
insert_begin_realloc();
insert_begin_no_realloc();
insert_middle_realloc();
insert_middle_no_realloc();
erase_end();
erase_middle();
erase_start();

convert_to_std_vector();

SPVC_ASSERT(allocations > 0 && deallocations > 0 && deallocations == allocations);
}
28
3rdparty/spirv-cross/update_test_shaders.sh
vendored
@@ -1,21 +1,25 @@
#!/bin/bash

echo "Building spirv-cross"
make -j$(nproc)
if [ -z "$SPIRV_CROSS_PATH" ]; then
echo "Building spirv-cross"
make -j$(nproc)
SPIRV_CROSS_PATH="./spirv-cross"
fi

export PATH="./external/glslang-build/output/bin:./external/spirv-tools-build/output/bin:.:$PATH"
echo "Using glslangValidation in: $(which glslangValidator)."
echo "Using spirv-opt in: $(which spirv-opt)."
echo "Using SPIRV-Cross in: \"$SPIRV_CROSS_PATH\"."

./test_shaders.py shaders --update || exit 1
./test_shaders.py shaders --update --opt || exit 1
./test_shaders.py shaders-no-opt --update || exit 1
./test_shaders.py shaders-msl --update --msl || exit 1
./test_shaders.py shaders-msl --update --msl --opt || exit 1
./test_shaders.py shaders-msl-no-opt --update --msl || exit 1
./test_shaders.py shaders-hlsl --update --hlsl || exit 1
./test_shaders.py shaders-hlsl --update --hlsl --opt || exit 1
./test_shaders.py shaders-hlsl-no-opt --update --hlsl || exit 1
./test_shaders.py shaders-reflection --reflect --update || exit 1
./test_shaders.py shaders --update --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders --update --opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-no-opt --update --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-msl --update --msl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-msl --update --msl --opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-msl-no-opt --update --msl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-hlsl --update --hlsl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-hlsl --update --hlsl --opt --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-hlsl-no-opt --update --hlsl --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
./test_shaders.py shaders-reflection --reflect --update --spirv-cross "$SPIRV_CROSS_PATH" || exit 1
@@ -874,7 +874,7 @@ namespace bgfx { namespace spirv

spirv_cross::ShaderResources resources = msl.get_shader_resources();

std::vector<spirv_cross::EntryPoint> entryPoints = msl.get_entry_points_and_stages();
spirv_cross::SmallVector<spirv_cross::EntryPoint> entryPoints = msl.get_entry_points_and_stages();
if (!entryPoints.empty())
msl.rename_entry_point(entryPoints[0].name, "xlatMtlMain", entryPoints[0].execution_model);
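Since get_entry_points_and_stages() now returns a SmallVector instead of std::vector, call sites either spell out the new type, as the hunk above does, or use auto so they keep compiling across container changes. A hedged sketch of the latter style:

```
#include "spirv_msl.hpp"

// Rename the first entry point, staying agnostic to the container type returned
// by get_entry_points_and_stages().
void rename_entry_point_to_xlatMtlMain(spirv_cross::CompilerMSL &msl)
{
	auto entryPoints = msl.get_entry_points_and_stages();
	if (!entryPoints.empty())
		msl.rename_entry_point(entryPoints[0].name, "xlatMtlMain", entryPoints[0].execution_model);
}
```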