On 29 October 2013 16:37, Francisco Jerez <curroje...@riseup.net> wrote:
> v2: Add comments on the purpose of the auxiliary data structures.
>     Check for atomic counter overlaps.  Use the contains_atomic()
>     convenience method.  Add static assert with the number of expected
>     shader stages.
>
> v3: Don't resize atomic arrays.
> ---
>  src/glsl/Makefile.sources |   1 +
>  src/glsl/link_atomics.cpp | 237 ++++++++++++++++++++++++++++++++++++++++++++++
>  src/glsl/linker.cpp       |  17 +++-
>  src/glsl/linker.h         |   7 ++
>  4 files changed, 261 insertions(+), 1 deletion(-)
>  create mode 100644 src/glsl/link_atomics.cpp
>
> diff --git a/src/glsl/Makefile.sources b/src/glsl/Makefile.sources
> index 2f7bfa1..197d081 100644
> --- a/src/glsl/Makefile.sources
> +++ b/src/glsl/Makefile.sources
> @@ -47,6 +47,7 @@ LIBGLSL_FILES = \
>  	$(GLSL_SRCDIR)/ir_validate.cpp \
>  	$(GLSL_SRCDIR)/ir_variable_refcount.cpp \
>  	$(GLSL_SRCDIR)/linker.cpp \
> +	$(GLSL_SRCDIR)/link_atomics.cpp \
>  	$(GLSL_SRCDIR)/link_functions.cpp \
>  	$(GLSL_SRCDIR)/link_interface_blocks.cpp \
>  	$(GLSL_SRCDIR)/link_uniforms.cpp \
> diff --git a/src/glsl/link_atomics.cpp b/src/glsl/link_atomics.cpp
> new file mode 100644
> index 0000000..5a3d769
> --- /dev/null
> +++ b/src/glsl/link_atomics.cpp
> @@ -0,0 +1,237 @@
> +/*
> + * Copyright © 2013 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
> + * DEALINGS IN THE SOFTWARE.
> + */
> +
> +#include <map>
> +
> +#include "ir.h"
> +#include "ir_uniform.h"
> +#include "linker.h"
> +#include "program/hash_table.h"
> +
> +namespace {
> +   /*
> +    * Atomic counter as seen by the program.
> +    */
> +   struct active_atomic_counter {
> +      active_atomic_counter() : id(), var() {}
> +      active_atomic_counter(unsigned id, ir_variable *var) :
> +         id(id), var(var) {}
> +
> +      unsigned id;
> +      ir_variable *var;
> +   };
> +
> +   /**
> +    * Active atomic buffer indexed by offset.
> +    */
> +   typedef std::map<unsigned, active_atomic_counter> active_atomic_counters_t;
> +
> +   /*
> +    * Atomic counter buffer referenced by the program.  There is a one
> +    * to one correspondence between these and the objects that can be
> +    * queried using glGetActiveAtomicCounterBufferiv().
> +    */
> +   struct active_atomic_buffer {
> +      active_atomic_buffer() : stage_references(), size(0) {}
> +
> +      active_atomic_counters_t counters;
> +      unsigned stage_references[MESA_SHADER_TYPES];
> +      unsigned size;
> +   };
> +
> +   /**
> +    * Active atomic buffer indexed by binding point.
> +    */
> +   typedef std::map<unsigned, active_atomic_buffer> active_atomic_buffers_t;
> +
> +   bool
> +   check_atomic_counters_overlap(const ir_variable *x, const ir_variable *y)
> +   {
> +      return ((x->atomic.offset >= y->atomic.offset &&
> +               x->atomic.offset < y->atomic.offset + y->type->atomic_size()) ||
> +              (y->atomic.offset >= x->atomic.offset &&
> +               y->atomic.offset < x->atomic.offset + x->type->atomic_size()));
> +   }
> +
> +   bool
> +   check_atomic_counter_location(const active_atomic_buffer &ab,
> +                                 const ir_variable *var)
> +   {
> +      active_atomic_counters_t::const_iterator adjacent =
> +         ab.counters.upper_bound(var->atomic.offset);
> +
> +      if ((adjacent != ab.counters.end() &&
> +           check_atomic_counters_overlap(adjacent->second.var, var)) ||
> +          (adjacent != ab.counters.begin() &&
> +           check_atomic_counters_overlap((--adjacent)->second.var, var))) {
> +         /* Overlapping counter found, it must be a reference to the
> +          * same counter from a different shader stage.
> +          */
> +         return !strcmp(var->name, adjacent->second.var->name);

Personally I prefer "strcmp(...) == 0" to "!strcmp(...)" because with the
latter, I'm liable to accidentally turn it around in my brain and think it
means "strings not equal".  But I won't be a stickler about it.

> +      } else {
> +         return true;
> +      }
> +   }
> +
> +   active_atomic_buffers_t
> +   find_active_atomic_counters(struct gl_shader_program *prog)
> +   {
> +      active_atomic_buffers_t abs;
> +
> +      for (unsigned i = 0; i < MESA_SHADER_TYPES; ++i) {
> +         struct gl_shader *sh = prog->_LinkedShaders[i];
> +         if (sh == NULL)
> +            continue;
> +
> +         foreach_list(node, sh->ir) {
> +            ir_variable *var = ((ir_instruction *)node)->as_variable();
> +
> +            if (var && var->type->contains_atomic()) {
> +               unsigned id;
> +               bool found = prog->UniformHash->get(id, var->name);
> +               assert(found);
> +               active_atomic_buffer &ab = abs[var->binding];
> +
> +               if (check_atomic_counter_location(ab, var)) {
> +                  ab.counters[var->atomic.offset] =
> +                     active_atomic_counter(id, var);
> +                  ab.stage_references[i]++;
> +                  ab.size = std::max(ab.size, var->atomic.offset +
> +                                     var->type->atomic_size());
> +               } else {
> +                  linker_error(prog, "Atomic counter %s declared at offset %d "
> +                               "which is already in use.", var->name,
> +                               var->atomic.offset);
> +               }
> +            }
> +         }
> +      }
> +
> +      return abs;
> +   }
> +}
> +
> +void
> +link_assign_atomic_counter_resources(struct gl_shader_program *prog)
> +{
> +   active_atomic_buffers_t abs = find_active_atomic_counters(prog);
> +
> +   prog->AtomicBuffers = rzalloc_array(prog, gl_active_atomic_buffer,
> +                                       abs.size());
> +   prog->NumAtomicBuffers = abs.size();
> +
> +   unsigned i = 0;
> +   for (active_atomic_buffers_t::iterator it = abs.begin();
> +        it != abs.end(); ++it, ++i) {
> +      active_atomic_buffer &ab = it->second;
> +      gl_active_atomic_buffer &mab = prog->AtomicBuffers[i];
> +
> +      /* Assign buffer-specific fields. */
> +      mab.Binding = it->first;
> +      mab.MinimumSize = ab.size;
> +      mab.Uniforms = rzalloc_array(prog->AtomicBuffers, GLuint,
> +                                   ab.counters.size());
> +      mab.NumUniforms = ab.counters.size();
> +
> +      /* Assign counter-specific fields.
> +       */
> +      unsigned j = 0;
> +      for (active_atomic_counters_t::iterator jt = ab.counters.begin();
> +           jt != ab.counters.end(); ++jt, ++j) {
> +         ir_variable *var = jt->second.var;
> +         gl_uniform_storage &storage = prog->UniformStorage[jt->second.id];
> +
> +         mab.Uniforms[j] = jt->second.id;
> +         var->atomic.buffer_index = i;
> +         storage.atomic_buffer_index = i;
> +         storage.offset = var->atomic.offset;
> +         storage.array_stride = (var->type->is_array() ?
> +                                 var->type->element_type()->atomic_size() : 0);
> +      }
> +
> +      /* Assign stage-specific fields. */
> +      for (j = 0; j < MESA_SHADER_TYPES; ++j)
> +         mab.StageReferences[j] =
> +            (ab.stage_references[j] ? GL_TRUE : GL_FALSE);
> +   }
> +}
> +
> +void
> +link_check_atomic_counter_resources(struct gl_context *ctx,
> +                                    struct gl_shader_program *prog)
> +{
> +   STATIC_ASSERT(MESA_SHADER_TYPES == 3);
> +   static const char *shader_names[MESA_SHADER_TYPES] = {
> +      "vertex", "geometry", "fragment"
> +   };
> +   const unsigned max_atomic_counters[MESA_SHADER_TYPES] = {
> +      ctx->Const.VertexProgram.MaxAtomicCounters,
> +      ctx->Const.GeometryProgram.MaxAtomicCounters,
> +      ctx->Const.FragmentProgram.MaxAtomicCounters
> +   };
> +   const unsigned max_atomic_buffers[MESA_SHADER_TYPES] = {
> +      ctx->Const.VertexProgram.MaxAtomicBuffers,
> +      ctx->Const.GeometryProgram.MaxAtomicBuffers,
> +      ctx->Const.FragmentProgram.MaxAtomicBuffers
> +   };
> +   active_atomic_buffers_t abs = find_active_atomic_counters(prog);
> +   unsigned atomic_counters[MESA_SHADER_TYPES] = {};
> +   unsigned atomic_buffers[MESA_SHADER_TYPES] = {};
> +   unsigned total_atomic_counters = 0;
> +   unsigned total_atomic_buffers = 0;
> +
> +   /* Sum the required resources.  Note that this counts buffers and
> +    * counters referenced by several shader stages multiple times
> +    * against the combined limit -- That's the behavior the spec
> +    * requires.
> +    */
> +   for (active_atomic_buffers_t::iterator it = abs.begin();
> +        it != abs.end(); ++it) {
> +      for (unsigned j = 0; j < MESA_SHADER_TYPES; ++j) {
> +         const unsigned n = it->second.stage_references[j];
> +
> +         if (n) {
> +            atomic_counters[j] += n;
> +            total_atomic_counters += n;
> +            atomic_buffers[j]++;
> +            total_atomic_buffers++;
> +         }
> +      }
> +   }
> +
> +   /* Check that they are within the supported limits. */
> +   for (unsigned i = 0; i < MESA_SHADER_TYPES; i++) {
> +      if (atomic_counters[i] > max_atomic_counters[i])
> +         linker_error(prog, "Too many %s shader atomic counters",
> +                      shader_names[i]);
> +
> +      if (atomic_buffers[i] > max_atomic_buffers[i])
> +         linker_error(prog, "Too many %s shader atomic counter buffers",
> +                      shader_names[i]);
> +   }
> +
> +   if (total_atomic_counters > ctx->Const.MaxCombinedAtomicCounters)
> +      linker_error(prog, "Too many combined atomic counters");
> +
> +   if (total_atomic_buffers > ctx->Const.MaxCombinedAtomicBuffers)
> +      linker_error(prog, "Too many combined atomic buffers");
> +}
> diff --git a/src/glsl/linker.cpp b/src/glsl/linker.cpp
> index 495a2ab..bbff2b7 100644
> --- a/src/glsl/linker.cpp
> +++ b/src/glsl/linker.cpp
> @@ -674,6 +674,14 @@ cross_validate_globals(struct gl_shader_program *prog,
>            existing->explicit_binding = true;
>         }
>
> +      if (var->type->contains_atomic() &&
> +          var->atomic.offset != existing->atomic.offset) {
> +         linker_error(prog, "offset specifications for %s "
> +                      "`%s' have differing values\n",
> +                      mode_string(var), var->name);
> +         return;
> +      }
> +
>        /* Validate layout qualifiers for gl_FragDepth.
>         *
>         * From the AMD/ARB_conservative_depth specs:
> @@ -1509,7 +1517,7 @@ update_array_sizes(struct gl_shader_program *prog)
>         * will not be eliminated.  Since we always do std140, just
>         * don't resize arrays in UBOs.
>         */
> -      if (var->is_in_uniform_block())
> +      if (var->is_in_uniform_block() || var->type->contains_atomic())

Can you please update the comment above this "if" statement to explain why
we're not resizing arrays containing atomics?  With that change, the patch
is:

Reviewed-by: Paul Berry <stereotype...@gmail.com>

As with the previous patch, I don't think we can land this until we resolve
the open question about whether STL is allowed in Mesa.

>          continue;
>
>        unsigned int size = var->max_array_access;
> @@ -2014,6 +2022,10 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
>        prog->UniformBlockStageIndex[i] = NULL;
>     }
>
> +   ralloc_free(prog->AtomicBuffers);
> +   prog->AtomicBuffers = NULL;
> +   prog->NumAtomicBuffers = 0;
> +
>     /* Separate the shaders into groups based on their type.
>      */
>     struct gl_shader **vert_shader_list;
> @@ -2365,9 +2377,12 @@ link_shaders(struct gl_context *ctx, struct gl_shader_program *prog)
>
>     update_array_sizes(prog);
>     link_assign_uniform_locations(prog);
> +   link_assign_atomic_counter_resources(prog);
>     store_fragdepth_layout(prog);
>
>     check_resources(ctx, prog);
> +   link_check_atomic_counter_resources(ctx, prog);
> +
>     if (!prog->LinkStatus)
>        goto done;
>
> diff --git a/src/glsl/linker.h b/src/glsl/linker.h
> index 887cd33..3380151 100644
> --- a/src/glsl/linker.h
> +++ b/src/glsl/linker.h
> @@ -69,6 +69,13 @@ validate_interstage_interface_blocks(struct gl_shader_program *prog,
>                                       const gl_shader *producer,
>                                       const gl_shader *consumer);
>
> +extern void
> +link_assign_atomic_counter_resources(struct gl_shader_program *prog);
> +
> +extern void
> +link_check_atomic_counter_resources(struct gl_context *ctx,
> +                                    struct gl_shader_program *prog);
> +
>  /**
>   * Class for processing all of the leaf fields of a variable that corresponds
>   * to a program resource.
> --
> 1.8.3.4
>
> _______________________________________________
> mesa-dev mailing list
> mesa-dev@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/mesa-dev
>
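
To illustrate the strcmp nit above: all I'm suggesting for the return in
check_atomic_counter_location() is the equivalent form (untested, behaviour
unchanged):

         /* Overlapping counter found, it must be a reference to the
          * same counter from a different shader stage.
          */
         return strcmp(var->name, adjacent->second.var->name) == 0;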
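
And for the update_array_sizes() change, something along these lines is
roughly what I had in mind -- just a sketch of possible wording, assuming
I've understood the rationale correctly (the declared array size of an
atomic counter determines the implicit offsets of any counters that follow
it and the buffer's minimum size, so shrinking it behind the application's
back would change the layout it expects):

      /* GL_ARB_uniform_buffer_object says that std140 uniforms
       * will not be eliminated.  Since we always do std140, just
       * don't resize arrays in UBOs.
       *
       * Atomic counters live at fixed offsets within their backing
       * buffer: the declared array size determines the implicit
       * offsets of subsequent counters and the buffer's minimum
       * size, so don't resize those either.
       */
      if (var->is_in_uniform_block() || var->type->contains_atomic())
         continue;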