From a24274738eee9e1cd761bbf11d8b25eeacfa8a2a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=D0=91=D1=80=D0=B0=D0=BD=D0=B8=D0=BC=D0=B8=D1=80=20=D0=9A?=
 =?UTF-8?q?=D0=B0=D1=80=D0=B0=D1=9F=D0=B8=D1=9B?=
Date: Thu, 18 Jun 2020 21:50:44 -0700
Subject: [PATCH] Updated spirv-cross.

---
 3rdparty/spirv-cross/main.cpp          |  22 ++-
 3rdparty/spirv-cross/spirv_cross.cpp   |  22 ++-
 3rdparty/spirv-cross/spirv_cross.hpp   |   2 +-
 3rdparty/spirv-cross/spirv_cross_c.cpp |  37 +++++
 3rdparty/spirv-cross/spirv_cross_c.h   |  35 ++++-
 3rdparty/spirv-cross/spirv_glsl.cpp    |  63 +++++++--
 3rdparty/spirv-cross/spirv_glsl.hpp    |   4 +-
 3rdparty/spirv-cross/spirv_hlsl.cpp    |  39 ++++--
 3rdparty/spirv-cross/spirv_msl.cpp     | 180 +++++++++++++------------
 3rdparty/spirv-cross/spirv_msl.hpp     |  57 ++++++--
 10 files changed, 323 insertions(+), 138 deletions(-)

diff --git a/3rdparty/spirv-cross/main.cpp b/3rdparty/spirv-cross/main.cpp
index b2cda3a43..f057d9944 100644
--- a/3rdparty/spirv-cross/main.cpp
+++ b/3rdparty/spirv-cross/main.cpp
@@ -569,6 +569,7 @@ struct CLIArguments
 	SmallVector<uint32_t> msl_device_argument_buffers;
 	SmallVector<pair<uint32_t, uint32_t>> msl_dynamic_buffers;
 	SmallVector<pair<uint32_t, uint32_t>> msl_inline_uniform_blocks;
+	SmallVector<MSLShaderInput> msl_shader_inputs;
 	SmallVector<PLSArg> pls_in;
 	SmallVector<PLSArg> pls_out;
 	SmallVector<Remap> remaps;
@@ -738,7 +739,10 @@ static void print_help_msl()
 	    "\t[--msl-disable-frag-stencil-ref-builtin]:\n\t\tDisable FragStencilRef output. Useful if pipeline does not enable stencil output, as pipeline creation might otherwise fail.\n"
 	    "\t[--msl-enable-frag-output-mask <mask>]:\n\t\tOnly selectively enable fragment outputs. Useful if pipeline does not enable fragment output for certain locations, as pipeline creation might otherwise fail.\n"
 	    "\t[--msl-no-clip-distance-user-varying]:\n\t\tDo not emit user varyings to emulate gl_ClipDistance in fragment shaders.\n"
-	    );
+	    "\t[--msl-shader-input <index> <format> <size>]:\n\t\tSpecify the format of the shader input at <index>.\n"
+	    "\t\t<format> can be 'u16', 'u8', or 'other', to indicate a 16-bit unsigned integer, 8-bit unsigned integer, "
+	    "or other-typed variable. <size> is the vector length of the variable, which must be greater than or equal to that declared in the shader.\n"
+	    "\t\tUseful if shader stage interfaces don't match up, as pipeline creation might otherwise fail.\n");
 }
 
 static void print_help_common()
@@ -975,6 +979,8 @@ static string compile_iteration(const CLIArguments &args, std::vector<uint32_t> spirv_file)
 				msl_comp->add_dynamic_buffer(v.first, v.second, i++);
 			for (auto &v : args.msl_inline_uniform_blocks)
 				msl_comp->add_inline_uniform_block(v.first, v.second);
+			for (auto &v : args.msl_shader_inputs)
+				msl_comp->add_msl_shader_input(v);
 		}
 		else if (args.hlsl)
 			compiler.reset(new CompilerHLSL(move(spirv_parser.get_parsed_ir())));
@@ -1356,6 +1362,20 @@ static int main_inner(int argc, char *argv[])
 	         [&args](CLIParser &parser) { args.msl_enable_frag_output_mask = parser.next_hex_uint(); });
 	cbs.add("--msl-no-clip-distance-user-varying",
 	        [&args](CLIParser &) { args.msl_enable_clip_distance_user_varying = false; });
+	cbs.add("--msl-shader-input", [&args](CLIParser &parser) {
+		MSLShaderInput input;
+		// Make sure next_uint() is called in-order.
+		input.location = parser.next_uint();
+		const char *format = parser.next_value_string("other");
+		if (strcmp(format, "u16") == 0)
+			input.format = MSL_VERTEX_FORMAT_UINT16;
+		else if (strcmp(format, "u8") == 0)
+			input.format = MSL_VERTEX_FORMAT_UINT8;
+		else
+			input.format = MSL_VERTEX_FORMAT_OTHER;
+		input.vecsize = parser.next_uint();
+		args.msl_shader_inputs.push_back(input);
+	});
 	cbs.add("--extension", [&args](CLIParser &parser) { args.extensions.push_back(parser.next_string()); });
 	cbs.add("--rename-entry-point", [&args](CLIParser &parser) {
 		auto old_name = parser.next_string();
diff --git a/3rdparty/spirv-cross/spirv_cross.cpp b/3rdparty/spirv-cross/spirv_cross.cpp
index 7c227a3dc..8abe19a4a 100644
--- a/3rdparty/spirv-cross/spirv_cross.cpp
+++ b/3rdparty/spirv-cross/spirv_cross.cpp
@@ -273,11 +273,27 @@ SPIRVariable *Compiler::maybe_get_backing_variable(uint32_t chain)
 	return var;
 }
 
-StorageClass Compiler::get_backing_variable_storage(uint32_t ptr)
+StorageClass Compiler::get_expression_effective_storage_class(uint32_t ptr)
 {
 	auto *var = maybe_get_backing_variable(ptr);
-	if (var)
-		return var->storage;
+
+	// If the expression has been lowered to a temporary, we need to use the Generic storage class.
+	// We're looking for the effective storage class of a given expression.
+	// An access chain or forwarded OpLoads from such access chains
+	// will generally have the storage class of the underlying variable, but if the load was not forwarded
+	// we have lost any address space qualifiers.
+	bool forced_temporary = ir.ids[ptr].get_type() == TypeExpression &&
+	                        !get<SPIRExpression>(ptr).access_chain &&
+	                        (forced_temporaries.count(ptr) != 0 || forwarded_temporaries.count(ptr) == 0);
+
+	if (var && !forced_temporary)
+	{
+		// Normalize SSBOs to StorageBuffer here.
+		if (var->storage == StorageClassUniform && has_decoration(get<SPIRType>(var->basetype).self, DecorationBufferBlock))
+			return StorageClassStorageBuffer;
+		else
+			return var->storage;
+	}
 	else
 		return expression_type(ptr).storage;
 }
diff --git a/3rdparty/spirv-cross/spirv_cross.hpp b/3rdparty/spirv-cross/spirv_cross.hpp
index 98b58f743..e452ca6f2 100644
--- a/3rdparty/spirv-cross/spirv_cross.hpp
+++ b/3rdparty/spirv-cross/spirv_cross.hpp
@@ -611,7 +611,7 @@ protected:
 	bool expression_is_lvalue(uint32_t id) const;
 	bool variable_storage_is_aliased(const SPIRVariable &var);
 	SPIRVariable *maybe_get_backing_variable(uint32_t chain);
-	spv::StorageClass get_backing_variable_storage(uint32_t ptr);
+	spv::StorageClass get_expression_effective_storage_class(uint32_t ptr);
 
 	void register_read(uint32_t expr, uint32_t chain, bool forwarded);
 	void register_write(uint32_t chain);
diff --git a/3rdparty/spirv-cross/spirv_cross_c.cpp b/3rdparty/spirv-cross/spirv_cross_c.cpp
index 8cc074267..1c42ad01a 100644
--- a/3rdparty/spirv-cross/spirv_cross_c.cpp
+++ b/3rdparty/spirv-cross/spirv_cross_c.cpp
@@ -1032,6 +1032,30 @@ spvc_result spvc_compiler_msl_add_vertex_attribute(spvc_compiler compiler, const
 #endif
 }
 
+spvc_result spvc_compiler_msl_add_shader_input(spvc_compiler compiler, const spvc_msl_shader_input *si)
+{
+#if SPIRV_CROSS_C_API_MSL
+	if (compiler->backend != SPVC_BACKEND_MSL)
+	{
+		compiler->context->report_error("MSL function used on a non-MSL backend.");
+		return SPVC_ERROR_INVALID_ARGUMENT;
+	}
+
+	auto &msl = *static_cast<CompilerMSL *>(compiler->compiler.get());
+	MSLShaderInput input;
+	input.location = si->location;
+	input.format = static_cast<MSLShaderInputFormat>(si->format);
+	input.builtin = static_cast<spv::BuiltIn>(si->builtin);
+	input.vecsize = si->vecsize;
+	msl.add_msl_shader_input(input);
+	return SPVC_SUCCESS;
+#else
+	(void)si;
+	compiler->context->report_error("MSL function used on a non-MSL backend.");
+	return SPVC_ERROR_INVALID_ARGUMENT;
+#endif
+}
+
 spvc_result spvc_compiler_msl_add_resource_binding(spvc_compiler compiler,
                                                    const spvc_msl_resource_binding *binding)
 {
@@ -2267,6 +2291,19 @@ void spvc_msl_vertex_attribute_init(spvc_msl_vertex_attribute *attr)
 #endif
 }
 
+void spvc_msl_shader_input_init(spvc_msl_shader_input *input)
+{
+#if SPIRV_CROSS_C_API_MSL
+	MSLShaderInput input_default;
+	input->location = input_default.location;
+	input->format = static_cast<spvc_msl_shader_input_format>(input_default.format);
+	input->builtin = static_cast<SpvBuiltIn>(input_default.builtin);
+	input->vecsize = input_default.vecsize;
+#else
+	memset(input, 0, sizeof(*input));
+#endif
+}
+
 void spvc_msl_resource_binding_init(spvc_msl_resource_binding *binding)
 {
 #if SPIRV_CROSS_C_API_MSL
diff --git a/3rdparty/spirv-cross/spirv_cross_c.h b/3rdparty/spirv-cross/spirv_cross_c.h
index 1ec154ba9..5900f1239 100644
--- a/3rdparty/spirv-cross/spirv_cross_c.h
+++ b/3rdparty/spirv-cross/spirv_cross_c.h
@@ -33,7 +33,7 @@ extern "C" {
 /* Bumped if ABI or API breaks backwards compatibility. */
 #define SPVC_C_API_VERSION_MAJOR 0
 /* Bumped if APIs or enumerations are added in a backwards compatible way. */
-#define SPVC_C_API_VERSION_MINOR 34
+#define SPVC_C_API_VERSION_MINOR 35
 /* Bumped if internal implementation details change. */
 #define SPVC_C_API_VERSION_PATCH 0
 
@@ -259,14 +259,19 @@ typedef enum spvc_msl_platform
 } spvc_msl_platform;
 
 /* Maps to C++ API.
*/ -typedef enum spvc_msl_vertex_format +typedef enum spvc_msl_shader_input_format { - SPVC_MSL_VERTEX_FORMAT_OTHER = 0, - SPVC_MSL_VERTEX_FORMAT_UINT8 = 1, - SPVC_MSL_VERTEX_FORMAT_UINT16 = 2 -} spvc_msl_vertex_format; + SPVC_MSL_SHADER_INPUT_FORMAT_OTHER = 0, + SPVC_MSL_SHADER_INPUT_FORMAT_UINT8 = 1, + SPVC_MSL_SHADER_INPUT_FORMAT_UINT16 = 2, -/* Maps to C++ API. */ + /* Deprecated names. */ + SPVC_MSL_VERTEX_FORMAT_OTHER = SPVC_MSL_SHADER_INPUT_FORMAT_OTHER, + SPVC_MSL_VERTEX_FORMAT_UINT8 = SPVC_MSL_SHADER_INPUT_FORMAT_UINT8, + SPVC_MSL_VERTEX_FORMAT_UINT16 = SPVC_MSL_SHADER_INPUT_FORMAT_UINT16 +} spvc_msl_shader_input_format, spvc_msl_vertex_format; + +/* Maps to C++ API. Deprecated; use spvc_msl_shader_input. */ typedef struct spvc_msl_vertex_attribute { unsigned location; @@ -289,6 +294,20 @@ typedef struct spvc_msl_vertex_attribute */ SPVC_PUBLIC_API void spvc_msl_vertex_attribute_init(spvc_msl_vertex_attribute *attr); +/* Maps to C++ API. */ +typedef struct spvc_msl_shader_input +{ + unsigned location; + spvc_msl_vertex_format format; + SpvBuiltIn builtin; + unsigned vecsize; +} spvc_msl_shader_input; + +/* + * Initializes the shader input struct. + */ +SPVC_PUBLIC_API void spvc_msl_shader_input_init(spvc_msl_shader_input *input); + /* Maps to C++ API. */ typedef struct spvc_msl_resource_binding { @@ -698,6 +717,8 @@ SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_vertex_attribute(spvc_compiler const spvc_msl_vertex_attribute *attrs); SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_resource_binding(spvc_compiler compiler, const spvc_msl_resource_binding *binding); +SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_shader_input(spvc_compiler compiler, + const spvc_msl_shader_input *input); SPVC_PUBLIC_API spvc_result spvc_compiler_msl_add_discrete_descriptor_set(spvc_compiler compiler, unsigned desc_set); SPVC_PUBLIC_API spvc_result spvc_compiler_msl_set_argument_buffer_device_address_space(spvc_compiler compiler, unsigned desc_set, spvc_bool device_address); SPVC_PUBLIC_API spvc_bool spvc_compiler_msl_is_vertex_attribute_used(spvc_compiler compiler, unsigned location); diff --git a/3rdparty/spirv-cross/spirv_glsl.cpp b/3rdparty/spirv-cross/spirv_glsl.cpp index ab3026988..e4c7c6b8c 100644 --- a/3rdparty/spirv-cross/spirv_glsl.cpp +++ b/3rdparty/spirv-cross/spirv_glsl.cpp @@ -3428,10 +3428,15 @@ string CompilerGLSL::to_rerolled_array_expression(const string &base_expr, const return expr; } -string CompilerGLSL::to_composite_constructor_expression(uint32_t id) +string CompilerGLSL::to_composite_constructor_expression(uint32_t id, bool uses_buffer_offset) { auto &type = expression_type(id); - if (!backend.array_is_value_type && !type.array.empty()) + + bool reroll_array = !type.array.empty() && + (!backend.array_is_value_type || + (uses_buffer_offset && !backend.buffer_offset_array_is_value_type)); + + if (reroll_array) { // For this case, we need to "re-roll" an array initializer from a temporary. 
// We cannot simply pass the array directly, since it decays to a pointer and it cannot @@ -5689,6 +5694,25 @@ bool CompilerGLSL::expression_is_constant_null(uint32_t id) const return c->constant_is_null(); } +bool CompilerGLSL::expression_is_non_value_type_array(uint32_t ptr) +{ + auto &type = expression_type(ptr); + if (type.array.empty()) + return false; + + if (!backend.array_is_value_type) + return true; + + auto *var = maybe_get_backing_variable(ptr); + if (!var) + return false; + + auto &backed_type = get(var->basetype); + return !backend.buffer_offset_array_is_value_type && + backed_type.basetype == SPIRType::Struct && + has_member_decoration(backed_type.self, 0, DecorationOffset); +} + // Returns the function name for a texture sampling function for the specified image and sampling characteristics. // For some subclasses, the function is a method on the specified image. string CompilerGLSL::to_function_name(const TextureFunctionNameArguments &args) @@ -6973,6 +6997,10 @@ string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage) "Cannot implement gl_InstanceID in Vulkan GLSL. This shader was created with GL semantics."); } } + if (!options.es && options.version < 140) + { + require_extension_internal("GL_ARB_draw_instanced"); + } return "gl_InstanceID"; case BuiltInVertexIndex: if (options.vulkan_semantics) @@ -6982,7 +7010,13 @@ string CompilerGLSL::builtin_to_glsl(BuiltIn builtin, StorageClass storage) case BuiltInInstanceIndex: if (options.vulkan_semantics) return "gl_InstanceIndex"; - else if (options.vertex.support_nonzero_base_instance) + + if (!options.es && options.version < 140) + { + require_extension_internal("GL_ARB_draw_instanced"); + } + + if (options.vertex.support_nonzero_base_instance) { if (!options.vulkan_semantics) { @@ -8400,7 +8434,10 @@ string CompilerGLSL::build_composite_combiner(uint32_t return_type, const uint32 if (i) op += ", "; - subop = to_composite_constructor_expression(elems[i]); + + bool uses_buffer_offset = type.basetype == SPIRType::Struct && + has_member_decoration(type.self, i, DecorationOffset); + subop = to_composite_constructor_expression(elems[i], uses_buffer_offset); } base = e ? e->base_expression : ID(0); @@ -8687,15 +8724,23 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction) expr = to_unpacked_expression(ptr); } + auto &type = get(result_type); + auto &expr_type = expression_type(ptr); + + // If the expression has more vector components than the result type, insert + // a swizzle. This shouldn't happen normally on valid SPIR-V, but it might + // happen with e.g. the MSL backend replacing the type of an input variable. + if (expr_type.vecsize > type.vecsize) + expr = enclose_expression(expr + vector_swizzle(type.vecsize, 0)); + // We might need to bitcast in order to load from a builtin. - bitcast_from_builtin_load(ptr, expr, get(result_type)); + bitcast_from_builtin_load(ptr, expr, type); // We might be trying to load a gl_Position[N], where we should be // doing float4[](gl_in[i].gl_Position, ...) instead. // Similar workarounds are required for input arrays in tessellation. unroll_array_from_complex_load(id, ptr, expr); - auto &type = get(result_type); // Shouldn't need to check for ID, but current glslang codegen requires it in some cases // when loading Image/Sampler descriptors. It does not hurt to check ID as well. 
if (has_decoration(id, DecorationNonUniformEXT) || has_decoration(ptr, DecorationNonUniformEXT)) @@ -8714,13 +8759,13 @@ void CompilerGLSL::emit_instruction(const Instruction &instruction) (type.basetype == SPIRType::Struct || (type.columns > 1)); SPIRExpression *e = nullptr; - if (!backend.array_is_value_type && !type.array.empty() && !forward) + if (!forward && expression_is_non_value_type_array(ptr)) { // Complicated load case where we need to make a copy of ptr, but we cannot, because // it is an array, and our backend does not support arrays as value types. // Emit the temporary, and copy it explicitly. e = &emit_uninitialized_temporary_expression(result_type, id); - emit_array_copy(to_expression(id), ptr, StorageClassFunction, get_backing_variable_storage(ptr)); + emit_array_copy(to_expression(id), ptr, StorageClassFunction, get_expression_effective_storage_class(ptr)); } else e = &emit_op(result_type, id, expr, forward, !usage_tracking); @@ -13385,7 +13430,7 @@ void CompilerGLSL::emit_block_chain(SPIRBlock &block) if (ir.ids[block.return_value].get_type() != TypeUndef) { emit_array_copy("SPIRV_Cross_return_value", block.return_value, StorageClassFunction, - get_backing_variable_storage(block.return_value)); + get_expression_effective_storage_class(block.return_value)); } if (!cfg.node_terminates_control_flow_in_sub_graph(current_function->entry_block, block.self) || diff --git a/3rdparty/spirv-cross/spirv_glsl.hpp b/3rdparty/spirv-cross/spirv_glsl.hpp index f27a67424..351adae15 100644 --- a/3rdparty/spirv-cross/spirv_glsl.hpp +++ b/3rdparty/spirv-cross/spirv_glsl.hpp @@ -467,6 +467,7 @@ protected: bool supports_extensions = false; bool supports_empty_struct = false; bool array_is_value_type = true; + bool buffer_offset_array_is_value_type = true; bool comparison_image_samples_scalar = false; bool native_pointers = false; bool support_small_type_sampling_result = false; @@ -585,7 +586,7 @@ protected: SPIRExpression &emit_uninitialized_temporary_expression(uint32_t type, uint32_t id); void append_global_func_args(const SPIRFunction &func, uint32_t index, SmallVector &arglist); std::string to_expression(uint32_t id, bool register_expression_read = true); - std::string to_composite_constructor_expression(uint32_t id); + std::string to_composite_constructor_expression(uint32_t id, bool uses_buffer_offset); std::string to_rerolled_array_expression(const std::string &expr, const SPIRType &type); std::string to_enclosed_expression(uint32_t id, bool register_expression_read = true); std::string to_unpacked_expression(uint32_t id, bool register_expression_read = true); @@ -762,6 +763,7 @@ protected: void disallow_forwarding_in_expression_chain(const SPIRExpression &expr); bool expression_is_constant_null(uint32_t id) const; + bool expression_is_non_value_type_array(uint32_t ptr); virtual void emit_store_statement(uint32_t lhs_expression, uint32_t rhs_expression); uint32_t get_integer_width_for_instruction(const Instruction &instr) const; diff --git a/3rdparty/spirv-cross/spirv_hlsl.cpp b/3rdparty/spirv-cross/spirv_hlsl.cpp index cbe68d0b9..d4074e7c5 100644 --- a/3rdparty/spirv-cross/spirv_hlsl.cpp +++ b/3rdparty/spirv-cross/spirv_hlsl.cpp @@ -2946,24 +2946,37 @@ void CompilerHLSL::emit_texture_op(const Instruction &i, bool sparse) if (proj && hlsl_options.shader_model >= 40) // Legacy HLSL has "proj" operations which do this for us. 
coord_expr = coord_expr + " / " + to_extract_component_expression(coord, coord_components); - if (hlsl_options.shader_model < 40 && lod) + if (hlsl_options.shader_model < 40) { string coord_filler; - for (uint32_t size = coord_components; size < 3; ++size) - { - coord_filler += ", 0.0"; - } - coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(lod) + ")"; - } + uint32_t modifier_count = 0; - if (hlsl_options.shader_model < 40 && bias) - { - string coord_filler; - for (uint32_t size = coord_components; size < 3; ++size) + if (lod) { - coord_filler += ", 0.0"; + for (uint32_t size = coord_components; size < 3; ++size) + coord_filler += ", 0.0"; + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(lod) + ")"; + modifier_count++; } - coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(bias) + ")"; + + if (bias) + { + for (uint32_t size = coord_components; size < 3; ++size) + coord_filler += ", 0.0"; + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_expression(bias) + ")"; + modifier_count++; + } + + if (proj) + { + for (uint32_t size = coord_components; size < 3; ++size) + coord_filler += ", 0.0"; + coord_expr = "float4(" + coord_expr + coord_filler + ", " + to_extract_component_expression(coord, coord_components) + ")"; + modifier_count++; + } + + if (modifier_count > 1) + SPIRV_CROSS_THROW("Legacy HLSL can only use one of lod/bias/proj modifiers."); } if (op == OpImageFetch) diff --git a/3rdparty/spirv-cross/spirv_msl.cpp b/3rdparty/spirv-cross/spirv_msl.cpp index bfe6a3d94..15cd1a303 100644 --- a/3rdparty/spirv-cross/spirv_msl.cpp +++ b/3rdparty/spirv-cross/spirv_msl.cpp @@ -49,11 +49,20 @@ CompilerMSL::CompilerMSL(ParsedIR &&ir_) { } +void CompilerMSL::add_msl_shader_input(const MSLShaderInput &si) +{ + inputs_by_location[si.location] = si; + if (si.builtin != BuiltInMax && !inputs_by_builtin.count(si.builtin)) + inputs_by_builtin[si.builtin] = si; +} + void CompilerMSL::add_msl_vertex_attribute(const MSLVertexAttr &va) { - vtx_attrs_by_location[va.location] = va; - if (va.builtin != BuiltInMax && !vtx_attrs_by_builtin.count(va.builtin)) - vtx_attrs_by_builtin[va.builtin] = va; + MSLShaderInput si; + si.location = va.location; + si.format = va.format; + si.builtin = va.builtin; + add_msl_shader_input(si); } void CompilerMSL::add_msl_resource_binding(const MSLResourceBinding &binding) @@ -93,7 +102,12 @@ void CompilerMSL::set_argument_buffer_device_address_space(uint32_t desc_set, bo bool CompilerMSL::is_msl_vertex_attribute_used(uint32_t location) { - return vtx_attrs_in_use.count(location) != 0; + return is_msl_shader_input_used(location); +} + +bool CompilerMSL::is_msl_shader_input_used(uint32_t location) +{ + return inputs_in_use.count(location) != 0; } bool CompilerMSL::is_msl_resource_binding_used(ExecutionModel model, uint32_t desc_set, uint32_t binding) const @@ -1014,6 +1028,8 @@ string CompilerMSL::compile() // Allow Metal to use the array template unless we force it off. backend.can_return_array = !msl_options.force_native_arrays; backend.array_is_value_type = !msl_options.force_native_arrays; + // Arrays which are part of buffer objects are never considered to be native arrays. 
+ backend.buffer_offset_array_is_value_type = false; capture_output_to_buffer = msl_options.capture_output_to_buffer; is_rasterization_disabled = msl_options.disable_rasterization || capture_output_to_buffer; @@ -1458,11 +1474,11 @@ void CompilerMSL::mark_as_packable(SPIRType &type) } } -// If a vertex attribute exists at the location, it is marked as being used by this shader +// If a shader input exists at the location, it is marked as being used by this shader void CompilerMSL::mark_location_as_used_by_shader(uint32_t location, StorageClass storage) { - if ((get_execution_model() == ExecutionModelVertex || is_tessellation_shader()) && (storage == StorageClassInput)) - vtx_attrs_in_use.insert(location); + if (storage == StorageClassInput) + inputs_in_use.insert(location); } uint32_t CompilerMSL::get_target_components_for_fragment_location(uint32_t location) const @@ -1474,11 +1490,13 @@ uint32_t CompilerMSL::get_target_components_for_fragment_location(uint32_t locat return itr->second; } -uint32_t CompilerMSL::build_extended_vector_type(uint32_t type_id, uint32_t components) +uint32_t CompilerMSL::build_extended_vector_type(uint32_t type_id, uint32_t components, SPIRType::BaseType basetype) { uint32_t new_type_id = ir.increase_bound_by(1); auto &type = set(new_type_id, get(type_id)); type.vecsize = components; + if (basetype != SPIRType::Unknown) + type.basetype = basetype; type.self = new_type_id; type.parent_type = type_id; type.pointer = false; @@ -1634,11 +1652,9 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co if (get_decoration_bitset(var.self).get(DecorationLocation)) { uint32_t locn = get_decoration(var.self, DecorationLocation); - if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + if (storage == StorageClassInput) { - type_id = ensure_correct_attribute_type(var.basetype, locn, - location_meta ? location_meta->num_components : type.vecsize); - + type_id = ensure_correct_input_type(var.basetype, locn, location_meta ? 
location_meta->num_components : 0); if (!location_meta) var.basetype = type_id; @@ -1650,9 +1666,9 @@ void CompilerMSL::add_plain_variable_to_interface_block(StorageClass storage, co set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } - else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + else if (is_builtin && is_tessellation_shader() && inputs_by_builtin.count(builtin)) { - uint32_t locn = vtx_attrs_by_builtin[builtin].location; + uint32_t locn = inputs_by_builtin[builtin].location; set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } @@ -1797,19 +1813,18 @@ void CompilerMSL::add_composite_variable_to_interface_block(StorageClass storage if (get_decoration_bitset(var.self).get(DecorationLocation)) { uint32_t locn = get_decoration(var.self, DecorationLocation) + i; - if (storage == StorageClassInput && - (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + if (storage == StorageClassInput) { - var.basetype = ensure_correct_attribute_type(var.basetype, locn); - uint32_t mbr_type_id = ensure_correct_attribute_type(usable_type->self, locn); + var.basetype = ensure_correct_input_type(var.basetype, locn); + uint32_t mbr_type_id = ensure_correct_input_type(usable_type->self, locn); ib_type.member_types[ib_mbr_idx] = mbr_type_id; } set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } - else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + else if (is_builtin && is_tessellation_shader() && inputs_by_builtin.count(builtin)) { - uint32_t locn = vtx_attrs_by_builtin[builtin].location + i; + uint32_t locn = inputs_by_builtin[builtin].location + i; set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } @@ -1987,9 +2002,9 @@ void CompilerMSL::add_composite_member_variable_to_interface_block(StorageClass set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } - else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + else if (is_builtin && is_tessellation_shader() && inputs_by_builtin.count(builtin)) { - uint32_t locn = vtx_attrs_by_builtin[builtin].location + i; + uint32_t locn = inputs_by_builtin[builtin].location + i; set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } @@ -2114,9 +2129,9 @@ void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass stor if (has_member_decoration(var_type.self, mbr_idx, DecorationLocation)) { uint32_t locn = get_member_decoration(var_type.self, mbr_idx, DecorationLocation); - if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + if (storage == StorageClassInput) { - mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn); + mbr_type_id = ensure_correct_input_type(mbr_type_id, locn); var_type.member_types[mbr_idx] = mbr_type_id; ib_type.member_types[ib_mbr_idx] = mbr_type_id; } @@ -2128,20 +2143,20 @@ void CompilerMSL::add_plain_member_variable_to_interface_block(StorageClass stor // The block itself might have a location and in this case, all members of the block // receive incrementing locations. 
uint32_t locn = get_accumulated_member_location(var, mbr_idx, meta.strip_array); - if (storage == StorageClassInput && (get_execution_model() == ExecutionModelVertex || is_tessellation_shader())) + if (storage == StorageClassInput) { - mbr_type_id = ensure_correct_attribute_type(mbr_type_id, locn); + mbr_type_id = ensure_correct_input_type(mbr_type_id, locn); var_type.member_types[mbr_idx] = mbr_type_id; ib_type.member_types[ib_mbr_idx] = mbr_type_id; } set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); } - else if (is_builtin && is_tessellation_shader() && vtx_attrs_by_builtin.count(builtin)) + else if (is_builtin && is_tessellation_shader() && inputs_by_builtin.count(builtin)) { uint32_t locn = 0; - auto builtin_itr = vtx_attrs_by_builtin.find(builtin); - if (builtin_itr != end(vtx_attrs_by_builtin)) + auto builtin_itr = inputs_by_builtin.find(builtin); + if (builtin_itr != end(inputs_by_builtin)) locn = builtin_itr->second.location; set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, storage); @@ -2222,9 +2237,9 @@ void CompilerMSL::add_tess_level_input_to_interface_block(const std::string &ib_ set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, StorageClassInput); } - else if (vtx_attrs_by_builtin.count(builtin)) + else if (inputs_by_builtin.count(builtin)) { - uint32_t locn = vtx_attrs_by_builtin[builtin].location; + uint32_t locn = inputs_by_builtin[builtin].location; set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, StorageClassInput); } @@ -2283,9 +2298,9 @@ void CompilerMSL::add_tess_level_input_to_interface_block(const std::string &ib_ set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, StorageClassInput); } - else if (vtx_attrs_by_builtin.count(builtin)) + else if (inputs_by_builtin.count(builtin)) { - uint32_t locn = vtx_attrs_by_builtin[builtin].location; + uint32_t locn = inputs_by_builtin[builtin].location; set_member_decoration(ib_type.self, ib_mbr_idx, DecorationLocation, locn); mark_location_as_used_by_shader(locn, StorageClassInput); } @@ -2488,8 +2503,8 @@ uint32_t CompilerMSL::add_interface_block(StorageClass storage, bool patch) // accept them. We can't put them in the struct at all, or otherwise the compiler // complains that the outputs weren't explicitly marked. if (get_execution_model() == ExecutionModelFragment && storage == StorageClassOutput && !patch && - ((is_builtin && ((bi_type == BuiltInFragDepth && !msl_options.enable_frag_depth_builtin) || - (bi_type == BuiltInFragStencilRefEXT && !msl_options.enable_frag_stencil_ref_builtin))) || + ((is_builtin && ((bi_type == BuiltInFragDepth && !msl_options.enable_frag_depth_builtin) || + (bi_type == BuiltInFragStencilRefEXT && !msl_options.enable_frag_stencil_ref_builtin))) || (!is_builtin && !(msl_options.enable_frag_output_mask & (1 << location))))) { hidden = true; @@ -2784,63 +2799,49 @@ uint32_t CompilerMSL::ensure_correct_builtin_type(uint32_t type_id, BuiltIn buil return type_id; } -// Ensure that the type is compatible with the vertex attribute. +// Ensure that the type is compatible with the shader input. // If it is, simply return the given type ID. // Otherwise, create a new type, and return its ID. 
-uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t location, uint32_t num_components) +uint32_t CompilerMSL::ensure_correct_input_type(uint32_t type_id, uint32_t location, uint32_t num_components) { auto &type = get(type_id); - auto p_va = vtx_attrs_by_location.find(location); - if (p_va == end(vtx_attrs_by_location)) + auto p_va = inputs_by_location.find(location); + if (p_va == end(inputs_by_location)) { - if (num_components != 0 && type.vecsize != num_components) + if (num_components > type.vecsize) return build_extended_vector_type(type_id, num_components); else return type_id; } + if (num_components == 0) + num_components = p_va->second.vecsize; + switch (p_va->second.format) { - case MSL_VERTEX_FORMAT_UINT8: + case MSL_SHADER_INPUT_FORMAT_UINT8: { switch (type.basetype) { case SPIRType::UByte: case SPIRType::UShort: case SPIRType::UInt: - if (num_components != 0 && type.vecsize != num_components) + if (num_components > type.vecsize) return build_extended_vector_type(type_id, num_components); else return type_id; case SPIRType::Short: + return build_extended_vector_type(type_id, num_components > type.vecsize ? num_components : type.vecsize, + SPIRType::UShort); case SPIRType::Int: - break; + return build_extended_vector_type(type_id, num_components > type.vecsize ? num_components : type.vecsize, + SPIRType::UInt); default: SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader"); } - - uint32_t next_id = ir.increase_bound_by(type.pointer ? 2 : 1); - uint32_t base_type_id = next_id++; - auto &base_type = set(base_type_id); - base_type = type; - base_type.basetype = type.basetype == SPIRType::Short ? SPIRType::UShort : SPIRType::UInt; - base_type.pointer = false; - if (num_components != 0) - base_type.vecsize = num_components; - - if (!type.pointer) - return base_type_id; - - uint32_t ptr_type_id = next_id++; - auto &ptr_type = set(ptr_type_id); - ptr_type = base_type; - ptr_type.pointer = true; - ptr_type.storage = type.storage; - ptr_type.parent_type = base_type_id; - return ptr_type_id; } case MSL_VERTEX_FORMAT_UINT16: @@ -2849,41 +2850,22 @@ uint32_t CompilerMSL::ensure_correct_attribute_type(uint32_t type_id, uint32_t l { case SPIRType::UShort: case SPIRType::UInt: - if (num_components != 0 && type.vecsize != num_components) + if (num_components > type.vecsize) return build_extended_vector_type(type_id, num_components); else return type_id; case SPIRType::Int: - break; + return build_extended_vector_type(type_id, num_components > type.vecsize ? num_components : type.vecsize, + SPIRType::UInt); default: SPIRV_CROSS_THROW("Vertex attribute type mismatch between host and shader"); } - - uint32_t next_id = ir.increase_bound_by(type.pointer ? 
2 : 1); - uint32_t base_type_id = next_id++; - auto &base_type = set(base_type_id); - base_type = type; - base_type.basetype = SPIRType::UInt; - base_type.pointer = false; - if (num_components != 0) - base_type.vecsize = num_components; - - if (!type.pointer) - return base_type_id; - - uint32_t ptr_type_id = next_id++; - auto &ptr_type = set(ptr_type_id); - ptr_type = base_type; - ptr_type.pointer = true; - ptr_type.storage = type.storage; - ptr_type.parent_type = base_type_id; - return ptr_type_id; } default: - if (num_components != 0 && type.vecsize != num_components) + if (num_components > type.vecsize) type_id = build_extended_vector_type(type_id, num_components); break; } @@ -3840,17 +3822,21 @@ void CompilerMSL::emit_custom_functions() static const char *function_name_tags[] = { "FromConstantToStack", "FromConstantToThreadGroup", "FromStackToStack", "FromStackToThreadGroup", "FromThreadGroupToStack", "FromThreadGroupToThreadGroup", + "FromDeviceToDevice", "FromConstantToDevice", "FromStackToDevice", + "FromThreadGroupToDevice", "FromDeviceToStack", "FromDeviceToThreadGroup", }; static const char *src_address_space[] = { "constant", "constant", "thread const", "thread const", "threadgroup const", "threadgroup const", + "device const", "constant", "thread const", "threadgroup const", "device const", "device const", }; static const char *dst_address_space[] = { "thread", "threadgroup", "thread", "threadgroup", "thread", "threadgroup", + "device", "device", "device", "device", "thread", "threadgroup", }; - for (uint32_t variant = 0; variant < 6; variant++) + for (uint32_t variant = 0; variant < 12; variant++) { uint32_t dimensions = spv_func - SPVFuncImplArrayCopyMultidimBase; string tmp = "templateself); - emit_array_copy(to_expression(id_lhs), id_rhs, get_backing_variable_storage(id_lhs), - get_backing_variable_storage(id_rhs)); + emit_array_copy(to_expression(id_lhs), id_rhs, get_expression_effective_storage_class(id_lhs), + get_expression_effective_storage_class(id_rhs)); register_write(id_lhs); return true; diff --git a/3rdparty/spirv-cross/spirv_msl.hpp b/3rdparty/spirv-cross/spirv_msl.hpp index 1ec688459..41d41902c 100644 --- a/3rdparty/spirv-cross/spirv_msl.hpp +++ b/3rdparty/spirv-cross/spirv_msl.hpp @@ -27,26 +27,45 @@ namespace SPIRV_CROSS_NAMESPACE { -// Indicates the format of the vertex attribute. Currently limited to specifying -// if the attribute is an 8-bit unsigned integer, 16-bit unsigned integer, or +// Indicates the format of a shader input. Currently limited to specifying +// if the input is an 8-bit unsigned integer, 16-bit unsigned integer, or // some other format. -enum MSLVertexFormat +enum MSLShaderInputFormat { - MSL_VERTEX_FORMAT_OTHER = 0, - MSL_VERTEX_FORMAT_UINT8 = 1, - MSL_VERTEX_FORMAT_UINT16 = 2, - MSL_VERTEX_FORMAT_INT_MAX = 0x7fffffff + MSL_SHADER_INPUT_FORMAT_OTHER = 0, + MSL_SHADER_INPUT_FORMAT_UINT8 = 1, + MSL_SHADER_INPUT_FORMAT_UINT16 = 2, + + // Deprecated aliases. + MSL_VERTEX_FORMAT_OTHER = MSL_SHADER_INPUT_FORMAT_OTHER, + MSL_VERTEX_FORMAT_UINT8 = MSL_SHADER_INPUT_FORMAT_UINT8, + MSL_VERTEX_FORMAT_UINT16 = MSL_SHADER_INPUT_FORMAT_UINT16, + + MSL_SHADER_INPUT_FORMAT_INT_MAX = 0x7fffffff }; +typedef SPIRV_CROSS_DEPRECATED("Use MSLShaderInputFormat.") MSLShaderInputFormat MSLVertexFormat; // Defines MSL characteristics of a vertex attribute at a particular location. // After compilation, it is possible to query whether or not this location was used. 
-struct MSLVertexAttr +struct SPIRV_CROSS_DEPRECATED("Use MSLShaderInput.") MSLVertexAttr { uint32_t location = 0; - MSLVertexFormat format = MSL_VERTEX_FORMAT_OTHER; + MSLShaderInputFormat format = MSL_SHADER_INPUT_FORMAT_OTHER; spv::BuiltIn builtin = spv::BuiltInMax; }; +// Defines MSL characteristics of an input variable at a particular location. +// After compilation, it is possible to query whether or not this location was used. +// If vecsize is nonzero, it must be greater than or equal to the vecsize declared in the shader, +// or behavior is undefined. +struct MSLShaderInput +{ + uint32_t location = 0; + MSLShaderInputFormat format = MSL_SHADER_INPUT_FORMAT_OTHER; + spv::BuiltIn builtin = spv::BuiltInMax; + uint32_t vecsize = 0; +}; + // Matches the binding index of a MSL resource for a binding within a descriptor set. // Taken together, the stage, desc_set and binding combine to form a reference to a resource // descriptor used in a particular shading stage. @@ -423,8 +442,14 @@ public: // vertex content locations to MSL attributes. If vertex attributes are provided, // is_msl_vertex_attribute_used() will return true after calling ::compile() if // the location was used by the MSL code. + SPIRV_CROSS_DEPRECATED("Use add_msl_shader_input().") void add_msl_vertex_attribute(const MSLVertexAttr &attr); + // input is a shader input description used to fix up shader input variables. + // If shader inputs are provided, is_msl_shader_input_used() will return true after + // calling ::compile() if the location was used by the MSL code. + void add_msl_shader_input(const MSLShaderInput &attr); + // resource is a resource binding to indicate the MSL buffer, // texture or sampler index to use for a particular SPIR-V description set // and binding. If resource bindings are provided, @@ -456,8 +481,12 @@ public: void set_argument_buffer_device_address_space(uint32_t desc_set, bool device_storage); // Query after compilation is done. This allows you to check if a location or set/binding combination was used by the shader. + SPIRV_CROSS_DEPRECATED("Use is_msl_shader_input_used().") bool is_msl_vertex_attribute_used(uint32_t location); + // Query after compilation is done. This allows you to check if an input location was used by the shader. + bool is_msl_shader_input_used(uint32_t location); + // NOTE: Only resources which are remapped using add_msl_resource_binding will be reported here. // Constexpr samplers are always assumed to be emitted. 
// No specific MSLResourceBinding remapping is required for constexpr samplers as long as they are remapped @@ -686,7 +715,7 @@ protected: void mark_location_as_used_by_shader(uint32_t location, spv::StorageClass storage); uint32_t ensure_correct_builtin_type(uint32_t type_id, spv::BuiltIn builtin); - uint32_t ensure_correct_attribute_type(uint32_t type_id, uint32_t location, uint32_t num_components = 0); + uint32_t ensure_correct_input_type(uint32_t type_id, uint32_t location, uint32_t num_components = 0); void emit_custom_templates(); void emit_custom_functions(); @@ -797,9 +826,9 @@ protected: Options msl_options; std::set spv_function_implementations; - std::unordered_map vtx_attrs_by_location; - std::unordered_map vtx_attrs_by_builtin; - std::unordered_set vtx_attrs_in_use; + std::unordered_map inputs_by_location; + std::unordered_map inputs_by_builtin; + std::unordered_set inputs_in_use; std::unordered_map fragment_output_components; std::set pragma_lines; std::set typedef_lines; @@ -881,7 +910,7 @@ protected: bool descriptor_set_is_argument_buffer(uint32_t desc_set) const; uint32_t get_target_components_for_fragment_location(uint32_t location) const; - uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components); + uint32_t build_extended_vector_type(uint32_t type_id, uint32_t components, SPIRType::BaseType basetype = SPIRType::Unknown); bool suppress_missing_prototypes = false;
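
Usage sketch (not part of the patch): the hunks above add MSLShaderInput, CompilerMSL::add_msl_shader_input(), is_msl_shader_input_used(), and the --msl-shader-input <index> <format> <size> command-line flag. The snippet below shows how a caller might drive the new C++ API to describe a vertex attribute that the pipeline feeds as a 4-component 16-bit unsigned integer. It assumes the updated 3rdparty/spirv-cross sources are on the include path; load_spirv_words() and the "vertex.spv" path are illustrative placeholders, and error handling is omitted.

#include "spirv_msl.hpp"

#include <cstdint>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical helper: read a SPIR-V binary into 32-bit words.
static std::vector<uint32_t> load_spirv_words(const char *path)
{
	std::ifstream file(path, std::ios::binary | std::ios::ate);
	std::vector<uint32_t> words(static_cast<size_t>(file.tellg()) / sizeof(uint32_t));
	file.seekg(0);
	file.read(reinterpret_cast<char *>(words.data()),
	          static_cast<std::streamsize>(words.size() * sizeof(uint32_t)));
	return words;
}

int main()
{
	spirv_cross::CompilerMSL msl(load_spirv_words("vertex.spv"));

	// Declare that the attribute at location 0 arrives as a 4-component u16
	// vector, even if the shader declares fewer components; this mirrors
	// "--msl-shader-input 0 u16 4" on the command line.
	spirv_cross::MSLShaderInput input;
	input.location = 0;
	input.format = spirv_cross::MSL_SHADER_INPUT_FORMAT_UINT16;
	input.vecsize = 4;
	msl.add_msl_shader_input(input);

	std::string source = msl.compile();
	std::cout << source << "\n";
	std::cout << "location 0 used: " << msl.is_msl_shader_input_used(0) << "\n";
	return 0;
}

The C API equivalent is spvc_compiler_msl_add_shader_input() with a spvc_msl_shader_input initialized via spvc_msl_shader_input_init(), as added in the spirv_cross_c.cpp/.h hunks above.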