dep: Add SPIRV-Cross

Stenzek
2023-08-05 20:29:52 +10:00
parent 49a4901c78
commit b42f4a3b85
29 changed files with 65316 additions and 0 deletions


@@ -0,0 +1,430 @@
/*
* Copyright 2016-2021 Arm Limited
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_cfg.hpp"
#include "spirv_cross.hpp"
#include <algorithm>
#include <assert.h>
using namespace std;
namespace SPIRV_CROSS_NAMESPACE
{
CFG::CFG(Compiler &compiler_, const SPIRFunction &func_)
: compiler(compiler_)
, func(func_)
{
build_post_order_visit_order();
build_immediate_dominators();
}
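// Both find_common_dominator() and build_immediate_dominators() below appear to follow the shape of the
// iterative dominance algorithm of Cooper, Harvey and Kennedy ("A Simple, Fast Dominance Algorithm"):
// blocks are processed in reverse post-order, and the dominators of all predecessors are intersected by
// walking up the immediate-dominator chain, using the post-order visit index to decide which side to advance.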
uint32_t CFG::find_common_dominator(uint32_t a, uint32_t b) const
{
while (a != b)
{
if (get_visit_order(a) < get_visit_order(b))
a = get_immediate_dominator(a);
else
b = get_immediate_dominator(b);
}
return a;
}
void CFG::build_immediate_dominators()
{
// Traverse the post-order in reverse and build up the immediate dominator tree.
immediate_dominators.clear();
immediate_dominators[func.entry_block] = func.entry_block;
for (auto i = post_order.size(); i; i--)
{
uint32_t block = post_order[i - 1];
auto &pred = preceding_edges[block];
if (pred.empty()) // This is for the entry block, but we've already set up the dominators.
continue;
for (auto &edge : pred)
{
if (immediate_dominators[block])
{
assert(immediate_dominators[edge]);
immediate_dominators[block] = find_common_dominator(immediate_dominators[block], edge);
}
else
immediate_dominators[block] = edge;
}
}
}
bool CFG::is_back_edge(uint32_t to) const
{
// We have a back edge if the visit order is set with the temporary magic value 0.
// Crossing edges will have already been recorded with a visit order.
auto itr = visit_order.find(to);
return itr != end(visit_order) && itr->second.get() == 0;
}
bool CFG::has_visited_forward_edge(uint32_t to) const
{
// If > 0, we have visited the edge already, and this is not a back edge branch.
auto itr = visit_order.find(to);
return itr != end(visit_order) && itr->second.get() > 0;
}
bool CFG::post_order_visit(uint32_t block_id)
{
// If we have already branched to this block (back edge), stop recursion.
// If our branches are back-edges, we do not record them.
// We have to record crossing edges however.
if (has_visited_forward_edge(block_id))
return true;
else if (is_back_edge(block_id))
return false;
// Block back-edges from recursively revisiting ourselves.
visit_order[block_id].get() = 0;
auto &block = compiler.get<SPIRBlock>(block_id);
// If this is a loop header, add an implied branch to the merge target.
// This is needed to avoid annoying cases with do { ... } while(false) loops often generated by inliners.
// To the CFG, this is linear control flow, but we risk picking the do/while scope as our dominating block.
// This makes sure that if we are accessing a variable outside the do/while, we choose the loop header as dominator.
// We could use has_visited_forward_edge, but this breaks code-gen where the merge block is unreachable in the CFG.
// Make a point out of visiting merge target first. This is to make sure that post visit order outside the loop
// is lower than inside the loop, which is going to be key for some traversal algorithms like post-dominance analysis.
// For selection constructs true/false blocks will end up visiting the merge block directly and it works out fine,
// but for loops, only the header might end up actually branching to merge block.
if (block.merge == SPIRBlock::MergeLoop && post_order_visit(block.merge_block))
add_branch(block_id, block.merge_block);
// First visit our branch targets.
switch (block.terminator)
{
case SPIRBlock::Direct:
if (post_order_visit(block.next_block))
add_branch(block_id, block.next_block);
break;
case SPIRBlock::Select:
if (post_order_visit(block.true_block))
add_branch(block_id, block.true_block);
if (post_order_visit(block.false_block))
add_branch(block_id, block.false_block);
break;
case SPIRBlock::MultiSelect:
{
const auto &cases = compiler.get_case_list(block);
for (const auto &target : cases)
{
if (post_order_visit(target.block))
add_branch(block_id, target.block);
}
if (block.default_block && post_order_visit(block.default_block))
add_branch(block_id, block.default_block);
break;
}
default:
break;
}
// If this is a selection merge, add an implied branch to the merge target.
// This is needed to avoid cases where an inner branch dominates the outer branch.
// This can happen if one of the branches exit early, e.g.:
// if (cond) { ...; break; } else { var = 100 } use_var(var);
// We can use the variable without a Phi since there is only one possible parent here.
// However, in this case, we need to hoist out the inner variable to outside the branch.
// Use same strategy as loops.
if (block.merge == SPIRBlock::MergeSelection && post_order_visit(block.next_block))
{
// If there is only one preceding edge to the merge block and it's not ourselves, we need a fixup.
// Add a fake branch so any dominator in either the if (), or else () block, or a lone case statement
// will be hoisted out to outside the selection merge.
// If size > 1, the variable will be automatically hoisted, so we should not mess with it.
// The exception here is switch blocks, where we can have multiple edges to merge block,
// all coming from same scope, so be more conservative in this case.
// Adding fake branches unconditionally breaks parameter preservation analysis,
// which looks at how variables are accessed through the CFG.
auto pred_itr = preceding_edges.find(block.next_block);
if (pred_itr != end(preceding_edges))
{
auto &pred = pred_itr->second;
auto succ_itr = succeeding_edges.find(block_id);
size_t num_succeeding_edges = 0;
if (succ_itr != end(succeeding_edges))
num_succeeding_edges = succ_itr->second.size();
if (block.terminator == SPIRBlock::MultiSelect && num_succeeding_edges == 1)
{
// Multiple branches can come from the same scope due to "break;", so we need to assume that all branches
// come from same case scope in worst case, even if there are multiple preceding edges.
// If we have more than one succeeding edge from the block header, it should be impossible
// to have a dominator be inside the block.
// The only case where this can go wrong is if we have 2 or more edges from the block header and
// 2 or more edges to the merge block, and still have the dominator be inside a case label.
if (!pred.empty())
add_branch(block_id, block.next_block);
}
else
{
if (pred.size() == 1 && *pred.begin() != block_id)
add_branch(block_id, block.next_block);
}
}
else
{
// If the merge block does not have any preceding edges, i.e. unreachable, hallucinate it.
// We're going to do code-gen for it, and domination analysis requires that we have at least one preceding edge.
add_branch(block_id, block.next_block);
}
}
// Then visit ourselves. Start counting at one, to let 0 be a magic value for testing back vs. crossing edges.
visit_order[block_id].get() = ++visit_count;
post_order.push_back(block_id);
return true;
}
void CFG::build_post_order_visit_order()
{
uint32_t block = func.entry_block;
visit_count = 0;
visit_order.clear();
post_order.clear();
post_order_visit(block);
}
void CFG::add_branch(uint32_t from, uint32_t to)
{
const auto add_unique = [](SmallVector<uint32_t> &l, uint32_t value) {
auto itr = find(begin(l), end(l), value);
if (itr == end(l))
l.push_back(value);
};
add_unique(preceding_edges[to], from);
add_unique(succeeding_edges[from], to);
}
uint32_t CFG::find_loop_dominator(uint32_t block_id) const
{
while (block_id != SPIRBlock::NoDominator)
{
auto itr = preceding_edges.find(block_id);
if (itr == end(preceding_edges))
return SPIRBlock::NoDominator;
if (itr->second.empty())
return SPIRBlock::NoDominator;
uint32_t pred_block_id = SPIRBlock::NoDominator;
bool ignore_loop_header = false;
// If we are a merge block, go directly to the header block.
// Only consider a loop dominator if we are branching from inside a block to a loop header.
// NOTE: In the CFG we forced an edge from header to merge block always to support variable scopes properly.
for (auto &pred : itr->second)
{
auto &pred_block = compiler.get<SPIRBlock>(pred);
if (pred_block.merge == SPIRBlock::MergeLoop && pred_block.merge_block == ID(block_id))
{
pred_block_id = pred;
ignore_loop_header = true;
break;
}
else if (pred_block.merge == SPIRBlock::MergeSelection && pred_block.next_block == ID(block_id))
{
pred_block_id = pred;
break;
}
}
// No merge block means we can just pick any edge. Loop headers dominate the inner loop, so any path we
// take will lead there.
if (pred_block_id == SPIRBlock::NoDominator)
pred_block_id = itr->second.front();
block_id = pred_block_id;
if (!ignore_loop_header && block_id)
{
auto &block = compiler.get<SPIRBlock>(block_id);
if (block.merge == SPIRBlock::MergeLoop)
return block_id;
}
}
return block_id;
}
bool CFG::node_terminates_control_flow_in_sub_graph(BlockID from, BlockID to) const
{
// Walk backwards, starting from "to" block.
// Only follow pred edges if they have a 1:1 relationship, or a merge relationship.
// If we cannot find a path to "from", we must assume that "to" is inside control flow in some way.
auto &from_block = compiler.get<SPIRBlock>(from);
BlockID ignore_block_id = 0;
if (from_block.merge == SPIRBlock::MergeLoop)
ignore_block_id = from_block.merge_block;
while (to != from)
{
auto pred_itr = preceding_edges.find(to);
if (pred_itr == end(preceding_edges))
return false;
DominatorBuilder builder(*this);
for (auto &edge : pred_itr->second)
builder.add_block(edge);
uint32_t dominator = builder.get_dominator();
if (dominator == 0)
return false;
auto &dom = compiler.get<SPIRBlock>(dominator);
bool true_path_ignore = false;
bool false_path_ignore = false;
bool merges_to_nothing = dom.merge == SPIRBlock::MergeNone ||
(dom.merge == SPIRBlock::MergeSelection && dom.next_block &&
compiler.get<SPIRBlock>(dom.next_block).terminator == SPIRBlock::Unreachable) ||
(dom.merge == SPIRBlock::MergeLoop && dom.merge_block &&
compiler.get<SPIRBlock>(dom.merge_block).terminator == SPIRBlock::Unreachable);
if (dom.self == from || merges_to_nothing)
{
// We can only ignore inner branchy paths if there is no merge,
// i.e. no code is generated afterwards. E.g. this allows us to elide continue:
// for (;;) { if (cond) { continue; } else { break; } }.
// Codegen here in SPIR-V will be something like either no merge if one path directly breaks, or
// we merge to Unreachable.
if (ignore_block_id && dom.terminator == SPIRBlock::Select)
{
auto &true_block = compiler.get<SPIRBlock>(dom.true_block);
auto &false_block = compiler.get<SPIRBlock>(dom.false_block);
auto &ignore_block = compiler.get<SPIRBlock>(ignore_block_id);
true_path_ignore = compiler.execution_is_branchless(true_block, ignore_block);
false_path_ignore = compiler.execution_is_branchless(false_block, ignore_block);
}
}
// Cases where we allow traversal. This serves as a proxy for post-dominance in a loop body.
// TODO: Might want to do full post-dominance analysis, but it's a lot of churn for something like this ...
// - We're the merge block of a selection construct. Jump to header.
// - We're the merge block of a loop. Jump to header.
// - Direct branch. Trivial.
// - Allow cases inside a branch if the header cannot merge execution before loop exit.
if ((dom.merge == SPIRBlock::MergeSelection && dom.next_block == to) ||
(dom.merge == SPIRBlock::MergeLoop && dom.merge_block == to) ||
(dom.terminator == SPIRBlock::Direct && dom.next_block == to) ||
(dom.terminator == SPIRBlock::Select && dom.true_block == to && false_path_ignore) ||
(dom.terminator == SPIRBlock::Select && dom.false_block == to && true_path_ignore))
{
// Allow walking selection constructs if the other branch reaches out of a loop construct.
// It cannot be in-scope anymore.
to = dominator;
}
else
return false;
}
return true;
}
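// DominatorBuilder incrementally narrows down a single block which dominates every block fed to it.
// A rough usage sketch (illustrative; the block IDs are hypothetical):
//   DominatorBuilder builder(cfg);
//   builder.add_block(block_reading_variable);
//   builder.add_block(block_writing_variable);
//   uint32_t dom = builder.get_dominator(); // 0 if nothing reachable was added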
DominatorBuilder::DominatorBuilder(const CFG &cfg_)
: cfg(cfg_)
{
}
void DominatorBuilder::add_block(uint32_t block)
{
if (!cfg.get_immediate_dominator(block))
{
// Unreachable block via the CFG; we will never emit this code anyway.
return;
}
if (!dominator)
{
dominator = block;
return;
}
if (block != dominator)
dominator = cfg.find_common_dominator(block, dominator);
}
void DominatorBuilder::lift_continue_block_dominator()
{
// It is possible for a continue block to be the dominator of a variable that is only accessed inside the while block of a do-while loop.
// We cannot safely declare variables inside a continue block, so move any variable declared
// in a continue block to the entry block to simplify.
// It makes very little sense for a continue block to ever be a dominator, so fall back to the simplest
// solution.
if (!dominator)
return;
auto &block = cfg.get_compiler().get<SPIRBlock>(dominator);
auto post_order = cfg.get_visit_order(dominator);
// If we are branching to a block with a higher post-order traversal index (continue blocks), we have a problem
// since we cannot create sensible GLSL code for this, so fall back to the entry block.
bool back_edge_dominator = false;
switch (block.terminator)
{
case SPIRBlock::Direct:
if (cfg.get_visit_order(block.next_block) > post_order)
back_edge_dominator = true;
break;
case SPIRBlock::Select:
if (cfg.get_visit_order(block.true_block) > post_order)
back_edge_dominator = true;
if (cfg.get_visit_order(block.false_block) > post_order)
back_edge_dominator = true;
break;
case SPIRBlock::MultiSelect:
{
auto &cases = cfg.get_compiler().get_case_list(block);
for (auto &target : cases)
{
if (cfg.get_visit_order(target.block) > post_order)
back_edge_dominator = true;
}
if (block.default_block && cfg.get_visit_order(block.default_block) > post_order)
back_edge_dominator = true;
break;
}
default:
break;
}
if (back_edge_dominator)
dominator = cfg.get_function().entry_block;
}
} // namespace SPIRV_CROSS_NAMESPACE


@@ -0,0 +1,553 @@
/*
* Copyright 2015-2021 Arm Limited
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_cpp.hpp"
using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
void CompilerCPP::emit_buffer_block(const SPIRVariable &var)
{
add_resource_name(var.self);
auto &type = get<SPIRType>(var.basetype);
auto instance_name = to_name(var.self);
uint32_t descriptor_set = ir.meta[var.self].decoration.set;
uint32_t binding = ir.meta[var.self].decoration.binding;
emit_block_struct(type);
auto buffer_name = to_name(type.self);
statement("internal::Resource<", buffer_name, type_to_array_glsl(type), "> ", instance_name, "__;");
statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
resource_registrations.push_back(
join("s.register_resource(", instance_name, "__", ", ", descriptor_set, ", ", binding, ");"));
statement("");
}
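// Rough illustration of what emit_buffer_block() above generates (hypothetical names): a buffer block
// type "UBO" with instance name "ubo" at descriptor set 0, binding 1 produces roughly:
//   internal::Resource<UBO> ubo__;
//   #define ubo __res->ubo__.get()
// plus a deferred "s.register_resource(ubo__, 0, 1);" emitted later from Resources::init().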
void CompilerCPP::emit_interface_block(const SPIRVariable &var)
{
add_resource_name(var.self);
auto &type = get<SPIRType>(var.basetype);
const char *qual = var.storage == StorageClassInput ? "StageInput" : "StageOutput";
const char *lowerqual = var.storage == StorageClassInput ? "stage_input" : "stage_output";
auto instance_name = to_name(var.self);
uint32_t location = ir.meta[var.self].decoration.location;
string buffer_name;
auto flags = ir.meta[type.self].decoration.decoration_flags;
if (flags.get(DecorationBlock))
{
emit_block_struct(type);
buffer_name = to_name(type.self);
}
else
buffer_name = type_to_glsl(type);
statement("internal::", qual, "<", buffer_name, type_to_array_glsl(type), "> ", instance_name, "__;");
statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
resource_registrations.push_back(join("s.register_", lowerqual, "(", instance_name, "__", ", ", location, ");"));
statement("");
}
void CompilerCPP::emit_shared(const SPIRVariable &var)
{
add_resource_name(var.self);
auto instance_name = to_name(var.self);
statement(CompilerGLSL::variable_decl(var), ";");
statement_no_indent("#define ", instance_name, " __res->", instance_name);
}
void CompilerCPP::emit_uniform(const SPIRVariable &var)
{
add_resource_name(var.self);
auto &type = get<SPIRType>(var.basetype);
auto instance_name = to_name(var.self);
uint32_t descriptor_set = ir.meta[var.self].decoration.set;
uint32_t binding = ir.meta[var.self].decoration.binding;
uint32_t location = ir.meta[var.self].decoration.location;
string type_name = type_to_glsl(type);
remap_variable_type_name(type, instance_name, type_name);
if (type.basetype == SPIRType::Image || type.basetype == SPIRType::SampledImage ||
type.basetype == SPIRType::AtomicCounter)
{
statement("internal::Resource<", type_name, type_to_array_glsl(type), "> ", instance_name, "__;");
statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
resource_registrations.push_back(
join("s.register_resource(", instance_name, "__", ", ", descriptor_set, ", ", binding, ");"));
}
else
{
statement("internal::UniformConstant<", type_name, type_to_array_glsl(type), "> ", instance_name, "__;");
statement_no_indent("#define ", instance_name, " __res->", instance_name, "__.get()");
resource_registrations.push_back(
join("s.register_uniform_constant(", instance_name, "__", ", ", location, ");"));
}
statement("");
}
void CompilerCPP::emit_push_constant_block(const SPIRVariable &var)
{
add_resource_name(var.self);
auto &type = get<SPIRType>(var.basetype);
auto &flags = ir.meta[var.self].decoration.decoration_flags;
if (flags.get(DecorationBinding) || flags.get(DecorationDescriptorSet))
SPIRV_CROSS_THROW("Push constant blocks cannot be compiled to GLSL with Binding or Set syntax. "
"Remap to location with reflection API first or disable these decorations.");
emit_block_struct(type);
auto buffer_name = to_name(type.self);
auto instance_name = to_name(var.self);
statement("internal::PushConstant<", buffer_name, type_to_array_glsl(type), "> ", instance_name, ";");
statement_no_indent("#define ", instance_name, " __res->", instance_name, ".get()");
resource_registrations.push_back(join("s.register_push_constant(", instance_name, "__", ");"));
statement("");
}
void CompilerCPP::emit_block_struct(SPIRType &type)
{
// C++ can't do interface blocks, so we fake it by emitting a separate struct.
// However, these structs are not allowed to alias anything, so remove the alias before
// emitting the struct.
//
// The type we have here needs to be resolved to the non-pointer type so we can remove aliases.
auto &self = get<SPIRType>(type.self);
self.type_alias = 0;
emit_struct(self);
}
void CompilerCPP::emit_resources()
{
for (auto &id : ir.ids)
{
if (id.get_type() == TypeConstant)
{
auto &c = id.get<SPIRConstant>();
bool needs_declaration = c.specialization || c.is_used_as_lut;
if (needs_declaration)
{
if (!options.vulkan_semantics && c.specialization)
{
c.specialization_constant_macro_name =
constant_value_macro_name(get_decoration(c.self, DecorationSpecId));
}
emit_constant(c);
}
}
else if (id.get_type() == TypeConstantOp)
{
emit_specialization_constant_op(id.get<SPIRConstantOp>());
}
}
// Output all basic struct types which are not Block or BufferBlock, as these are declared in-place
// when such variables are instantiated.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeType)
{
auto &type = id.get<SPIRType>();
if (type.basetype == SPIRType::Struct && type.array.empty() && !type.pointer &&
(!ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) &&
!ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_struct(type);
}
}
}
statement("struct Resources : ", resource_type);
begin_scope();
// Output UBOs and SSBOs
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && type.pointer && type.storage == StorageClassUniform &&
!is_hidden_variable(var) &&
(ir.meta[type.self].decoration.decoration_flags.get(DecorationBlock) ||
ir.meta[type.self].decoration.decoration_flags.get(DecorationBufferBlock)))
{
emit_buffer_block(var);
}
}
}
// Output push constant blocks
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
if (!is_hidden_variable(var) && var.storage != StorageClassFunction && type.pointer &&
type.storage == StorageClassPushConstant)
{
emit_push_constant_block(var);
}
}
}
// Output in/out interfaces.
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && !is_hidden_variable(var) && type.pointer &&
(var.storage == StorageClassInput || var.storage == StorageClassOutput) &&
interface_variable_exists_in_entry_point(var.self))
{
emit_interface_block(var);
}
}
}
// Output Uniform Constants (values, samplers, images, etc).
for (auto &id : ir.ids)
{
if (id.get_type() == TypeVariable)
{
auto &var = id.get<SPIRVariable>();
auto &type = get<SPIRType>(var.basetype);
if (var.storage != StorageClassFunction && !is_hidden_variable(var) && type.pointer &&
(type.storage == StorageClassUniformConstant || type.storage == StorageClassAtomicCounter))
{
emit_uniform(var);
}
}
}
// Global variables.
bool emitted = false;
for (auto global : global_variables)
{
auto &var = get<SPIRVariable>(global);
if (var.storage == StorageClassWorkgroup)
{
emit_shared(var);
emitted = true;
}
}
if (emitted)
statement("");
statement("inline void init(spirv_cross_shader& s)");
begin_scope();
statement(resource_type, "::init(s);");
for (auto &reg : resource_registrations)
statement(reg);
end_scope();
resource_registrations.clear();
end_scope_decl();
statement("");
statement("Resources* __res;");
if (get_entry_point().model == ExecutionModelGLCompute)
statement("ComputePrivateResources __priv_res;");
statement("");
// Emit regular globals which are allocated per invocation.
emitted = false;
for (auto global : global_variables)
{
auto &var = get<SPIRVariable>(global);
if (var.storage == StorageClassPrivate)
{
if (var.storage == StorageClassWorkgroup)
emit_shared(var);
else
statement(CompilerGLSL::variable_decl(var), ";");
emitted = true;
}
}
if (emitted)
statement("");
}
string CompilerCPP::compile()
{
ir.fixup_reserved_names();
// Do not deal with ES-isms like precision, older extensions and such.
options.es = false;
options.version = 450;
backend.float_literal_suffix = true;
backend.double_literal_suffix = false;
backend.long_long_literal_suffix = true;
backend.uint32_t_literal_suffix = true;
backend.basic_int_type = "int32_t";
backend.basic_uint_type = "uint32_t";
backend.swizzle_is_function = true;
backend.shared_is_implied = true;
backend.unsized_array_supported = false;
backend.explicit_struct_type = true;
backend.use_initializer_list = true;
fixup_type_alias();
reorder_type_alias();
build_function_control_flow_graphs_and_analyze();
update_active_builtins();
uint32_t pass_count = 0;
do
{
resource_registrations.clear();
reset(pass_count);
// Move constructor for this type is broken on GCC 4.9 ...
buffer.reset();
emit_header();
emit_resources();
emit_function(get<SPIRFunction>(ir.default_entry_point), Bitset());
pass_count++;
} while (is_forcing_recompilation());
// Match opening scope of emit_header().
end_scope_decl();
// namespace
end_scope();
// Emit C entry points
emit_c_linkage();
// Entry point in CPP is always main() for the time being.
get_entry_point().name = "main";
return buffer.str();
}
void CompilerCPP::emit_c_linkage()
{
statement("");
statement("spirv_cross_shader_t *spirv_cross_construct(void)");
begin_scope();
statement("return new ", impl_type, "();");
end_scope();
statement("");
statement("void spirv_cross_destruct(spirv_cross_shader_t *shader)");
begin_scope();
statement("delete static_cast<", impl_type, "*>(shader);");
end_scope();
statement("");
statement("void spirv_cross_invoke(spirv_cross_shader_t *shader)");
begin_scope();
statement("static_cast<", impl_type, "*>(shader)->invoke();");
end_scope();
statement("");
statement("static const struct spirv_cross_interface vtable =");
begin_scope();
statement("spirv_cross_construct,");
statement("spirv_cross_destruct,");
statement("spirv_cross_invoke,");
end_scope_decl();
statement("");
statement("const struct spirv_cross_interface *",
interface_name.empty() ? string("spirv_cross_get_interface") : interface_name, "(void)");
begin_scope();
statement("return &vtable;");
end_scope();
}
void CompilerCPP::emit_function_prototype(SPIRFunction &func, const Bitset &)
{
if (func.self != ir.default_entry_point)
add_function_overload(func);
local_variable_names = resource_names;
string decl;
auto &type = get<SPIRType>(func.return_type);
decl += "inline ";
decl += type_to_glsl(type);
decl += " ";
if (func.self == ir.default_entry_point)
{
decl += "main";
processing_entry_point = true;
}
else
decl += to_name(func.self);
decl += "(";
for (auto &arg : func.arguments)
{
add_local_variable_name(arg.id);
decl += argument_decl(arg);
if (&arg != &func.arguments.back())
decl += ", ";
// Hold a pointer to the parameter so we can invalidate the readonly field if needed.
auto *var = maybe_get<SPIRVariable>(arg.id);
if (var)
var->parameter = &arg;
}
decl += ")";
statement(decl);
}
string CompilerCPP::argument_decl(const SPIRFunction::Parameter &arg)
{
auto &type = expression_type(arg.id);
bool constref = !type.pointer || arg.write_count == 0;
auto &var = get<SPIRVariable>(arg.id);
string base = type_to_glsl(type);
string variable_name = to_name(var.self);
remap_variable_type_name(type, variable_name, base);
for (uint32_t i = 0; i < type.array.size(); i++)
base = join("std::array<", base, ", ", to_array_size(type, i), ">");
return join(constref ? "const " : "", base, " &", variable_name);
}
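// For example (illustrative): argument_decl() above yields "const vec4 &v" for a read-only vec4
// parameter, and "std::array<float, 2> &v" for a written float[2] parameter.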
string CompilerCPP::variable_decl(const SPIRType &type, const string &name, uint32_t /* id */)
{
string base = type_to_glsl(type);
remap_variable_type_name(type, name, base);
bool runtime = false;
for (uint32_t i = 0; i < type.array.size(); i++)
{
auto &array = type.array[i];
if (!array && type.array_size_literal[i])
{
// Avoid using runtime arrays with std::array since this is undefined.
// Runtime arrays cannot be passed around as values, so this is fine.
runtime = true;
}
else
base = join("std::array<", base, ", ", to_array_size(type, i), ">");
}
base += ' ';
return base + name + (runtime ? "[1]" : "");
}
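// For example (illustrative): variable_decl() above maps "float v[4]" to "std::array<float, 4> v",
// while a runtime-sized array falls back to a one-element C array declaration ("float v[1]").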
void CompilerCPP::emit_header()
{
auto &execution = get_entry_point();
statement("// This C++ shader is autogenerated by spirv-cross.");
statement("#include \"spirv_cross/internal_interface.hpp\"");
statement("#include \"spirv_cross/external_interface.h\"");
// Needed to properly implement GLSL-style arrays.
statement("#include <array>");
statement("#include <stdint.h>");
statement("");
statement("using namespace spirv_cross;");
statement("using namespace glm;");
statement("");
statement("namespace Impl");
begin_scope();
switch (execution.model)
{
case ExecutionModelGeometry:
case ExecutionModelTessellationControl:
case ExecutionModelTessellationEvaluation:
case ExecutionModelGLCompute:
case ExecutionModelFragment:
case ExecutionModelVertex:
statement("struct Shader");
begin_scope();
break;
default:
SPIRV_CROSS_THROW("Unsupported execution model.");
}
switch (execution.model)
{
case ExecutionModelGeometry:
impl_type = "GeometryShader<Impl::Shader, Impl::Shader::Resources>";
resource_type = "GeometryResources";
break;
case ExecutionModelVertex:
impl_type = "VertexShader<Impl::Shader, Impl::Shader::Resources>";
resource_type = "VertexResources";
break;
case ExecutionModelFragment:
impl_type = "FragmentShader<Impl::Shader, Impl::Shader::Resources>";
resource_type = "FragmentResources";
break;
case ExecutionModelGLCompute:
impl_type = join("ComputeShader<Impl::Shader, Impl::Shader::Resources, ", execution.workgroup_size.x, ", ",
execution.workgroup_size.y, ", ", execution.workgroup_size.z, ">");
resource_type = "ComputeResources";
break;
case ExecutionModelTessellationControl:
impl_type = "TessControlShader<Impl::Shader, Impl::Shader::Resources>";
resource_type = "TessControlResources";
break;
case ExecutionModelTessellationEvaluation:
impl_type = "TessEvaluationShader<Impl::Shader, Impl::Shader::Resources>";
resource_type = "TessEvaluationResources";
break;
default:
SPIRV_CROSS_THROW("Unsupported execution model.");
}
}

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,77 @@
/*
* Copyright 2015-2021 Arm Limited
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_cross_util.hpp"
#include "spirv_common.hpp"
using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
namespace spirv_cross_util
{
void rename_interface_variable(Compiler &compiler, const SmallVector<Resource> &resources, uint32_t location,
const std::string &name)
{
for (auto &v : resources)
{
if (!compiler.has_decoration(v.id, spv::DecorationLocation))
continue;
auto loc = compiler.get_decoration(v.id, spv::DecorationLocation);
if (loc != location)
continue;
auto &type = compiler.get_type(v.base_type_id);
// This is more of a friendly variant. If we need to rename interface variables, we might have to rename
// structs as well and make sure all the names match up.
if (type.basetype == SPIRType::Struct)
{
compiler.set_name(v.base_type_id, join("SPIRV_Cross_Interface_Location", location));
for (uint32_t i = 0; i < uint32_t(type.member_types.size()); i++)
compiler.set_member_name(v.base_type_id, i, join("InterfaceMember", i));
}
compiler.set_name(v.id, name);
}
}
void inherit_combined_sampler_bindings(Compiler &compiler)
{
auto &samplers = compiler.get_combined_image_samplers();
for (auto &s : samplers)
{
if (compiler.has_decoration(s.image_id, spv::DecorationDescriptorSet))
{
uint32_t set = compiler.get_decoration(s.image_id, spv::DecorationDescriptorSet);
compiler.set_decoration(s.combined_id, spv::DecorationDescriptorSet, set);
}
if (compiler.has_decoration(s.image_id, spv::DecorationBinding))
{
uint32_t binding = compiler.get_decoration(s.image_id, spv::DecorationBinding);
compiler.set_decoration(s.combined_id, spv::DecorationBinding, binding);
}
}
}
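// Illustrative client-side usage of these utilities (assumes a spirv_cross::CompilerGLSL instance
// named "compiler"; inherit_combined_sampler_bindings() is only meaningful after
// compiler.build_combined_image_samplers() has been run):
//   auto resources = compiler.get_shader_resources();
//   spirv_cross_util::rename_interface_variable(compiler, resources.stage_inputs, 0, "vertex_position");
//   spirv_cross_util::inherit_combined_sampler_bindings(compiler);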
} // namespace spirv_cross_util

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,706 @@
/*
* Copyright 2018-2021 Bradley Austin Davis
* SPDX-License-Identifier: Apache-2.0 OR MIT
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* At your option, you may choose to accept this material under either:
* 1. The Apache License, Version 2.0, found at <http://www.apache.org/licenses/LICENSE-2.0>, or
* 2. The MIT License, found at <http://opensource.org/licenses/MIT>.
*/
#include "spirv_reflect.hpp"
#include "spirv_glsl.hpp"
#include <iomanip>
using namespace spv;
using namespace SPIRV_CROSS_NAMESPACE;
using namespace std;
namespace simple_json
{
enum class Type
{
Object,
Array,
};
using State = std::pair<Type, bool>;
using Stack = std::stack<State>;
class Stream
{
Stack stack;
StringStream<> buffer;
uint32_t indent{ 0 };
char current_locale_radix_character = '.';
public:
void set_current_locale_radix_character(char c)
{
current_locale_radix_character = c;
}
void begin_json_object();
void end_json_object();
void emit_json_key(const std::string &key);
void emit_json_key_value(const std::string &key, const std::string &value);
void emit_json_key_value(const std::string &key, bool value);
void emit_json_key_value(const std::string &key, uint32_t value);
void emit_json_key_value(const std::string &key, int32_t value);
void emit_json_key_value(const std::string &key, float value);
void emit_json_key_object(const std::string &key);
void emit_json_key_array(const std::string &key);
void begin_json_array();
void end_json_array();
void emit_json_array_value(const std::string &value);
void emit_json_array_value(uint32_t value);
void emit_json_array_value(bool value);
std::string str() const
{
return buffer.str();
}
private:
inline void statement_indent()
{
for (uint32_t i = 0; i < indent; i++)
buffer << " ";
}
template <typename T>
inline void statement_inner(T &&t)
{
buffer << std::forward<T>(t);
}
template <typename T, typename... Ts>
inline void statement_inner(T &&t, Ts &&... ts)
{
buffer << std::forward<T>(t);
statement_inner(std::forward<Ts>(ts)...);
}
template <typename... Ts>
inline void statement(Ts &&... ts)
{
statement_indent();
statement_inner(std::forward<Ts>(ts)...);
buffer << '\n';
}
template <typename... Ts>
void statement_no_return(Ts &&... ts)
{
statement_indent();
statement_inner(std::forward<Ts>(ts)...);
}
};
} // namespace simple_json
using namespace simple_json;
// Hackery to emit JSON without using the nlohmann/json C++ library (which requires a
// higher level of compiler compliance than is required by SPIRV-Cross).
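// A minimal usage sketch of the Stream helper (illustrative; key/value names are hypothetical):
//   Stream s;
//   s.begin_json_object();
//   s.emit_json_key_value("entryPoint", std::string("main"));
//   s.emit_json_key_array("workgroup_size");
//   s.emit_json_array_value(uint32_t(64));
//   s.end_json_array();
//   s.end_json_object();
// s.str() then yields roughly { "entryPoint" : "main", "workgroup_size" : [ 64 ] },
// spread over indented lines.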
void Stream::begin_json_array()
{
if (!stack.empty() && stack.top().second)
{
statement_inner(",\n");
}
statement("[");
++indent;
stack.emplace(Type::Array, false);
}
void Stream::end_json_array()
{
if (stack.empty() || stack.top().first != Type::Array)
SPIRV_CROSS_THROW("Invalid JSON state");
if (stack.top().second)
{
statement_inner("\n");
}
--indent;
statement_no_return("]");
stack.pop();
if (!stack.empty())
{
stack.top().second = true;
}
}
void Stream::emit_json_array_value(const std::string &value)
{
if (stack.empty() || stack.top().first != Type::Array)
SPIRV_CROSS_THROW("Invalid JSON state");
if (stack.top().second)
statement_inner(",\n");
statement_no_return("\"", value, "\"");
stack.top().second = true;
}
void Stream::emit_json_array_value(uint32_t value)
{
if (stack.empty() || stack.top().first != Type::Array)
SPIRV_CROSS_THROW("Invalid JSON state");
if (stack.top().second)
statement_inner(",\n");
statement_no_return(std::to_string(value));
stack.top().second = true;
}
void Stream::emit_json_array_value(bool value)
{
if (stack.empty() || stack.top().first != Type::Array)
SPIRV_CROSS_THROW("Invalid JSON state");
if (stack.top().second)
statement_inner(",\n");
statement_no_return(value ? "true" : "false");
stack.top().second = true;
}
void Stream::begin_json_object()
{
if (!stack.empty() && stack.top().second)
{
statement_inner(",\n");
}
statement("{");
++indent;
stack.emplace(Type::Object, false);
}
void Stream::end_json_object()
{
if (stack.empty() || stack.top().first != Type::Object)
SPIRV_CROSS_THROW("Invalid JSON state");
if (stack.top().second)
{
statement_inner("\n");
}
--indent;
statement_no_return("}");
stack.pop();
if (!stack.empty())
{
stack.top().second = true;
}
}
void Stream::emit_json_key(const std::string &key)
{
if (stack.empty() || stack.top().first != Type::Object)
SPIRV_CROSS_THROW("Invalid JSON state");
if (stack.top().second)
statement_inner(",\n");
statement_no_return("\"", key, "\" : ");
stack.top().second = true;
}
void Stream::emit_json_key_value(const std::string &key, const std::string &value)
{
emit_json_key(key);
statement_inner("\"", value, "\"");
}
void Stream::emit_json_key_value(const std::string &key, uint32_t value)
{
emit_json_key(key);
statement_inner(value);
}
void Stream::emit_json_key_value(const std::string &key, int32_t value)
{
emit_json_key(key);
statement_inner(value);
}
void Stream::emit_json_key_value(const std::string &key, float value)
{
emit_json_key(key);
statement_inner(convert_to_string(value, current_locale_radix_character));
}
void Stream::emit_json_key_value(const std::string &key, bool value)
{
emit_json_key(key);
statement_inner(value ? "true" : "false");
}
void Stream::emit_json_key_object(const std::string &key)
{
emit_json_key(key);
statement_inner("{\n");
++indent;
stack.emplace(Type::Object, false);
}
void Stream::emit_json_key_array(const std::string &key)
{
emit_json_key(key);
statement_inner("[\n");
++indent;
stack.emplace(Type::Array, false);
}
void CompilerReflection::set_format(const std::string &format)
{
if (format != "json")
{
SPIRV_CROSS_THROW("Unsupported format");
}
}
string CompilerReflection::compile()
{
json_stream = std::make_shared<simple_json::Stream>();
json_stream->set_current_locale_radix_character(current_locale_radix_character);
json_stream->begin_json_object();
reorder_type_alias();
emit_entry_points();
emit_types();
emit_resources();
emit_specialization_constants();
json_stream->end_json_object();
return json_stream->str();
}
static bool naturally_emit_type(const SPIRType &type)
{
return type.basetype == SPIRType::Struct && !type.pointer && type.array.empty();
}
bool CompilerReflection::type_is_reference(const SPIRType &type) const
{
// Physical pointers and arrays of physical pointers need to refer to the pointee's type.
return type_is_top_level_physical_pointer(type) ||
(type_is_array_of_pointers(type) && type.storage == StorageClassPhysicalStorageBuffer);
}
void CompilerReflection::emit_types()
{
bool emitted_open_tag = false;
SmallVector<uint32_t> physical_pointee_types;
// If we have physical pointers or arrays of physical pointers, it's also helpful to emit the pointee type
// and chain the type hierarchy. For POD, arrays can emit the entire type in-place.
ir.for_each_typed_id<SPIRType>([&](uint32_t self, SPIRType &type) {
if (naturally_emit_type(type))
{
emit_type(self, emitted_open_tag);
}
else if (type_is_reference(type))
{
if (!naturally_emit_type(this->get<SPIRType>(type.parent_type)) &&
find(physical_pointee_types.begin(), physical_pointee_types.end(), type.parent_type) ==
physical_pointee_types.end())
{
physical_pointee_types.push_back(type.parent_type);
}
}
});
for (uint32_t pointee_type : physical_pointee_types)
emit_type(pointee_type, emitted_open_tag);
if (emitted_open_tag)
{
json_stream->end_json_object();
}
}
void CompilerReflection::emit_type(uint32_t type_id, bool &emitted_open_tag)
{
auto &type = get<SPIRType>(type_id);
auto name = type_to_glsl(type);
if (!emitted_open_tag)
{
json_stream->emit_json_key_object("types");
emitted_open_tag = true;
}
json_stream->emit_json_key_object("_" + std::to_string(type_id));
json_stream->emit_json_key_value("name", name);
if (type_is_top_level_physical_pointer(type))
{
json_stream->emit_json_key_value("type", "_" + std::to_string(type.parent_type));
json_stream->emit_json_key_value("physical_pointer", true);
}
else if (!type.array.empty())
{
emit_type_array(type);
json_stream->emit_json_key_value("type", "_" + std::to_string(type.parent_type));
json_stream->emit_json_key_value("array_stride", get_decoration(type_id, DecorationArrayStride));
}
else
{
json_stream->emit_json_key_array("members");
// FIXME ideally we'd like to emit the size of a structure as a
// convenience to people parsing the reflected JSON. The problem
// is that there's no implicit size for a type. Its final size
// will be determined by the top level declaration in which it's
// included. So there might be one size for the struct if it's
// included in a std140 uniform block and another if it's included
// in a std430 uniform block.
// The solution is to include *all* potential sizes as a map of
// layout type name to integer, but that will probably require
// some additional logic being written in this class, or in the
// parent CompilerGLSL class.
auto size = type.member_types.size();
for (uint32_t i = 0; i < size; ++i)
{
emit_type_member(type, i);
}
json_stream->end_json_array();
}
json_stream->end_json_object();
}
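// Illustrative shape of an emitted struct type entry (hypothetical names and ids):
//   "_12" : { "name" : "UBO", "members" : [ { "name" : "mvp", "type" : "mat4", "offset" : 0 } ] }
// Non-struct member types use their GLSL type name; struct and physical-pointer members
// reference another type entry via "_<id>".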
void CompilerReflection::emit_type_member(const SPIRType &type, uint32_t index)
{
auto &membertype = get<SPIRType>(type.member_types[index]);
json_stream->begin_json_object();
auto name = to_member_name(type, index);
// FIXME we'd like to emit the offset of each member, but such offsets are
// context-dependent. See the comment above regarding structure sizes.
json_stream->emit_json_key_value("name", name);
if (type_is_reference(membertype))
{
json_stream->emit_json_key_value("type", "_" + std::to_string(membertype.parent_type));
}
else if (membertype.basetype == SPIRType::Struct)
{
json_stream->emit_json_key_value("type", "_" + std::to_string(membertype.self));
}
else
{
json_stream->emit_json_key_value("type", type_to_glsl(membertype));
}
emit_type_member_qualifiers(type, index);
json_stream->end_json_object();
}
void CompilerReflection::emit_type_array(const SPIRType &type)
{
if (!type_is_top_level_physical_pointer(type) && !type.array.empty())
{
json_stream->emit_json_key_array("array");
// Note that we emit the zeros here as a means of identifying
// unbounded arrays. This is necessary as otherwise there would
// be no way of differentiating between float[4] and float[4][]
for (const auto &value : type.array)
json_stream->emit_json_array_value(value);
json_stream->end_json_array();
json_stream->emit_json_key_array("array_size_is_literal");
for (const auto &value : type.array_size_literal)
json_stream->emit_json_array_value(value);
json_stream->end_json_array();
}
}
void CompilerReflection::emit_type_member_qualifiers(const SPIRType &type, uint32_t index)
{
auto &membertype = get<SPIRType>(type.member_types[index]);
emit_type_array(membertype);
auto &memb = ir.meta[type.self].members;
if (index < memb.size())
{
auto &dec = memb[index];
if (dec.decoration_flags.get(DecorationLocation))
json_stream->emit_json_key_value("location", dec.location);
if (dec.decoration_flags.get(DecorationOffset))
json_stream->emit_json_key_value("offset", dec.offset);
// Array stride is a property of the array type, not the struct.
if (has_decoration(type.member_types[index], DecorationArrayStride))
json_stream->emit_json_key_value("array_stride",
get_decoration(type.member_types[index], DecorationArrayStride));
if (dec.decoration_flags.get(DecorationMatrixStride))
json_stream->emit_json_key_value("matrix_stride", dec.matrix_stride);
if (dec.decoration_flags.get(DecorationRowMajor))
json_stream->emit_json_key_value("row_major", true);
if (type_is_top_level_physical_pointer(membertype))
json_stream->emit_json_key_value("physical_pointer", true);
}
}
string CompilerReflection::execution_model_to_str(spv::ExecutionModel model)
{
switch (model)
{
case ExecutionModelVertex:
return "vert";
case ExecutionModelTessellationControl:
return "tesc";
case ExecutionModelTessellationEvaluation:
return "tese";
case ExecutionModelGeometry:
return "geom";
case ExecutionModelFragment:
return "frag";
case ExecutionModelGLCompute:
return "comp";
case ExecutionModelRayGenerationNV:
return "rgen";
case ExecutionModelIntersectionNV:
return "rint";
case ExecutionModelAnyHitNV:
return "rahit";
case ExecutionModelClosestHitNV:
return "rchit";
case ExecutionModelMissNV:
return "rmiss";
case ExecutionModelCallableNV:
return "rcall";
default:
return "???";
}
}
// FIXME include things like the local_size dimensions, geometry output vertex count, etc
void CompilerReflection::emit_entry_points()
{
auto entries = get_entry_points_and_stages();
if (!entries.empty())
{
// Needed to make output deterministic.
sort(begin(entries), end(entries), [](const EntryPoint &a, const EntryPoint &b) -> bool {
if (a.execution_model < b.execution_model)
return true;
else if (a.execution_model > b.execution_model)
return false;
else
return a.name < b.name;
});
json_stream->emit_json_key_array("entryPoints");
for (auto &e : entries)
{
json_stream->begin_json_object();
json_stream->emit_json_key_value("name", e.name);
json_stream->emit_json_key_value("mode", execution_model_to_str(e.execution_model));
if (e.execution_model == ExecutionModelGLCompute)
{
const auto &spv_entry = get_entry_point(e.name, e.execution_model);
SpecializationConstant spec_x, spec_y, spec_z;
get_work_group_size_specialization_constants(spec_x, spec_y, spec_z);
json_stream->emit_json_key_array("workgroup_size");
json_stream->emit_json_array_value(spec_x.id != ID(0) ? spec_x.constant_id :
spv_entry.workgroup_size.x);
json_stream->emit_json_array_value(spec_y.id != ID(0) ? spec_y.constant_id :
spv_entry.workgroup_size.y);
json_stream->emit_json_array_value(spec_z.id != ID(0) ? spec_z.constant_id :
spv_entry.workgroup_size.z);
json_stream->end_json_array();
json_stream->emit_json_key_array("workgroup_size_is_spec_constant_id");
json_stream->emit_json_array_value(spec_x.id != ID(0));
json_stream->emit_json_array_value(spec_y.id != ID(0));
json_stream->emit_json_array_value(spec_z.id != ID(0));
json_stream->end_json_array();
}
json_stream->end_json_object();
}
json_stream->end_json_array();
}
}
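// Illustrative output for a compute entry point (hypothetical values):
//   "entryPoints" : [ { "name" : "main", "mode" : "comp", "workgroup_size" : [ 8, 8, 1 ],
//                       "workgroup_size_is_spec_constant_id" : [ false, false, false ] } ]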
void CompilerReflection::emit_resources()
{
auto res = get_shader_resources();
emit_resources("subpass_inputs", res.subpass_inputs);
emit_resources("inputs", res.stage_inputs);
emit_resources("outputs", res.stage_outputs);
emit_resources("textures", res.sampled_images);
emit_resources("separate_images", res.separate_images);
emit_resources("separate_samplers", res.separate_samplers);
emit_resources("images", res.storage_images);
emit_resources("ssbos", res.storage_buffers);
emit_resources("ubos", res.uniform_buffers);
emit_resources("push_constants", res.push_constant_buffers);
emit_resources("counters", res.atomic_counters);
emit_resources("acceleration_structures", res.acceleration_structures);
}
void CompilerReflection::emit_resources(const char *tag, const SmallVector<Resource> &resources)
{
if (resources.empty())
{
return;
}
json_stream->emit_json_key_array(tag);
for (auto &res : resources)
{
auto &type = get_type(res.type_id);
auto typeflags = ir.meta[type.self].decoration.decoration_flags;
auto &mask = get_decoration_bitset(res.id);
// If we don't have a name, use the fallback for the type instead of the variable
// for SSBOs and UBOs since those are the only meaningful names to use externally.
// Push constant blocks are still accessed by name and not block name, even though they are technically Blocks.
bool is_push_constant = get_storage_class(res.id) == StorageClassPushConstant;
bool is_block = get_decoration_bitset(type.self).get(DecorationBlock) ||
get_decoration_bitset(type.self).get(DecorationBufferBlock);
ID fallback_id = !is_push_constant && is_block ? ID(res.base_type_id) : ID(res.id);
json_stream->begin_json_object();
if (type.basetype == SPIRType::Struct)
{
json_stream->emit_json_key_value("type", "_" + std::to_string(res.base_type_id));
}
else
{
json_stream->emit_json_key_value("type", type_to_glsl(type));
}
json_stream->emit_json_key_value("name", !res.name.empty() ? res.name : get_fallback_name(fallback_id));
{
bool ssbo_block = type.storage == StorageClassStorageBuffer ||
(type.storage == StorageClassUniform && typeflags.get(DecorationBufferBlock));
Bitset qualifier_mask = ssbo_block ? get_buffer_block_flags(res.id) : mask;
if (qualifier_mask.get(DecorationNonReadable))
json_stream->emit_json_key_value("writeonly", true);
if (qualifier_mask.get(DecorationNonWritable))
json_stream->emit_json_key_value("readonly", true);
if (qualifier_mask.get(DecorationRestrict))
json_stream->emit_json_key_value("restrict", true);
if (qualifier_mask.get(DecorationCoherent))
json_stream->emit_json_key_value("coherent", true);
if (qualifier_mask.get(DecorationVolatile))
json_stream->emit_json_key_value("volatile", true);
}
emit_type_array(type);
{
bool is_sized_block = is_block && (get_storage_class(res.id) == StorageClassUniform ||
get_storage_class(res.id) == StorageClassUniformConstant ||
get_storage_class(res.id) == StorageClassStorageBuffer);
if (is_sized_block)
{
uint32_t block_size = uint32_t(get_declared_struct_size(get_type(res.base_type_id)));
json_stream->emit_json_key_value("block_size", block_size);
}
}
if (type.storage == StorageClassPushConstant)
json_stream->emit_json_key_value("push_constant", true);
if (mask.get(DecorationLocation))
json_stream->emit_json_key_value("location", get_decoration(res.id, DecorationLocation));
if (mask.get(DecorationRowMajor))
json_stream->emit_json_key_value("row_major", true);
if (mask.get(DecorationColMajor))
json_stream->emit_json_key_value("column_major", true);
if (mask.get(DecorationIndex))
json_stream->emit_json_key_value("index", get_decoration(res.id, DecorationIndex));
if (type.storage != StorageClassPushConstant && mask.get(DecorationDescriptorSet))
json_stream->emit_json_key_value("set", get_decoration(res.id, DecorationDescriptorSet));
if (mask.get(DecorationBinding))
json_stream->emit_json_key_value("binding", get_decoration(res.id, DecorationBinding));
if (mask.get(DecorationInputAttachmentIndex))
json_stream->emit_json_key_value("input_attachment_index",
get_decoration(res.id, DecorationInputAttachmentIndex));
if (mask.get(DecorationOffset))
json_stream->emit_json_key_value("offset", get_decoration(res.id, DecorationOffset));
// For images, the type itself adds a layout qualifier.
// Only emit the format for storage images.
if (type.basetype == SPIRType::Image && type.image.sampled == 2)
{
const char *fmt = format_to_glsl(type.image.format);
if (fmt != nullptr)
json_stream->emit_json_key_value("format", std::string(fmt));
}
json_stream->end_json_object();
}
json_stream->end_json_array();
}
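// Illustrative shape of a single resource entry for a UBO (hypothetical values):
//   { "type" : "_12", "name" : "UBO", "block_size" : 64, "set" : 0, "binding" : 1 }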
void CompilerReflection::emit_specialization_constants()
{
auto specialization_constants = get_specialization_constants();
if (specialization_constants.empty())
return;
json_stream->emit_json_key_array("specialization_constants");
for (const auto &spec_const : specialization_constants)
{
auto &c = get<SPIRConstant>(spec_const.id);
auto type = get<SPIRType>(c.constant_type);
json_stream->begin_json_object();
json_stream->emit_json_key_value("name", get_name(spec_const.id));
json_stream->emit_json_key_value("id", spec_const.constant_id);
json_stream->emit_json_key_value("type", type_to_glsl(type));
json_stream->emit_json_key_value("variable_id", spec_const.id);
switch (type.basetype)
{
case SPIRType::UInt:
json_stream->emit_json_key_value("default_value", c.scalar());
break;
case SPIRType::Int:
json_stream->emit_json_key_value("default_value", c.scalar_i32());
break;
case SPIRType::Float:
json_stream->emit_json_key_value("default_value", c.scalar_f32());
break;
case SPIRType::Boolean:
json_stream->emit_json_key_value("default_value", c.scalar() != 0);
break;
default:
break;
}
json_stream->end_json_object();
}
json_stream->end_json_array();
}
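// Illustrative shape of one specialization constant entry (hypothetical values):
//   { "name" : "SCALE", "id" : 3, "type" : "float", "variable_id" : 25, "default_value" : 1.0 }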
string CompilerReflection::to_member_name(const SPIRType &type, uint32_t index) const
{
auto *type_meta = ir.find_meta(type.self);
if (type_meta)
{
auto &memb = type_meta->members;
if (index < memb.size() && !memb[index].alias.empty())
return memb[index].alias;
else
return join("_m", index);
}
else
return join("_m", index);
}