--- nir-pass.log 2019-03-19 20:00:58.333930764 +0100
+++ nir-fail.log 2019-03-19 20:02:15.510792066 +0100
@@ -1,1633 +1,1486 @@
 Use master branches /home/gerddie/Testbed-master/lib
 gl_version 45 - core profile enabled
 Mesa: User error: GL_INVALID_VALUE in glTexImage2D(internalFormat=GL_ALPHA8)
 Mesa: User error: GL_INVALID_VALUE in glTexImage2D(internalFormat=GL_LUMINANCE8_ALPHA8)
 Mesa: User error: GL_INVALID_VALUE in glTexImage2D(internalFormat=GL_ALPHA16)
 Mesa: User error: GL_INVALID_VALUE in glTexImage2D(internalFormat=GL_LUMINANCE16_ALPHA16)
 GLSL IR for native compute shader 2:
 (
 (declare (binding=2 location=0 shader_storage ) (array uint 0) csssbocontents2)
 (declare (binding=1 location=0 shader_storage ) (array uint 0) csssbocontents1)
 (declare (location=0 shader_storage ) (array uint 0) csssbocontents0)
 (declare (location=35 sys ) uvec3 gl_NumWorkGroups)
 (declare (location=34 sys ) uvec3 gl_WorkGroupID)
 (declare (location=30 sys ) uvec3 gl_LocalInvocationID)
 (declare (temporary ) vec4 temp0_0)
 (declare (temporary ) vec4 temp0_1)
 (declare (temporary ) vec4 temp0_2)
 (declare (temporary ) vec4 temp0_3)
 (declare (temporary ) vec4 temp0_4)
 (declare () uint ssbo_addr_temp)
 ( function main (signature void (parameters ) (
 (assign (xy) (var_ref temp0_0) (expression vec2 bitcast_u2f (swiz xy (expression uvec4 + (expression uvec4 * (swiz xyzz (var_ref gl_WorkGroupID) )(constant uvec4 (16 8 1 1)) ) (swiz xyzz (var_ref gl_LocalInvocationID) )) )) )
 (assign (x) (var_ref temp0_1) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz yyyy (var_ref temp0_0) )) (swiz xxxx (var_ref gl_NumWorkGroups) )) ) ))
 (assign (x) (var_ref temp0_0) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_0) )) ) )) )
 (assign (x) (var_ref temp0_1) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) )
 (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) )
 (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) ))
 (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) ))
 (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) ))
 (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) )
 (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) ))
 (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4
bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) ) )) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp) - (declare (temporary ) uint ssbo_store_temp_offset) - (assign (x) (var_ref ssbo_store_temp_offset) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset) (constant uint (0)) ) (var_ref ssbo_store_temp) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (1 1 1 1)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@2) - (declare (temporary ) uint ssbo_store_temp_offset@3) - (assign (x) (var_ref ssbo_store_temp_offset@3) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@2) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@3) (constant uint (0)) ) (var_ref ssbo_store_temp@2) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) 
) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (2 2 2 2)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@4) - (declare (temporary ) uint ssbo_store_temp_offset@5) - (assign (x) (var_ref ssbo_store_temp_offset@5) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@4) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@5) (constant uint (0)) ) (var_ref ssbo_store_temp@4) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) 
)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (3 3 3 3)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@6) - (declare (temporary ) uint ssbo_store_temp_offset@7) - (assign (x) (var_ref ssbo_store_temp_offset@7) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@6) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@7) (constant uint (0)) ) (var_ref ssbo_store_temp@6) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (4 4 4 4)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@8) - (declare (temporary ) uint ssbo_store_temp_offset@9) - (assign (x) (var_ref ssbo_store_temp_offset@9) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@8) (constant 
uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@9) (constant uint (0)) ) (var_ref ssbo_store_temp@8) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (5 5 5 5)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@10) - (declare (temporary ) uint ssbo_store_temp_offset@11) - (assign (x) (var_ref ssbo_store_temp_offset@11) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@10) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@11) (constant uint (0)) ) (var_ref ssbo_store_temp@10) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f 
(expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (6 6 6 6)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@12) - (declare (temporary ) uint ssbo_store_temp_offset@13) - (assign (x) (var_ref ssbo_store_temp_offset@13) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@12) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@13) (constant uint (0)) ) (var_ref ssbo_store_temp@12) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (7 7 7 7)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 
(4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@14) - (declare (temporary ) uint ssbo_store_temp_offset@15) - (assign (x) (var_ref ssbo_store_temp_offset@15) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@14) (constant uint (456)) ) - (call __intrinsic_store_ssbo ((constant uint (1)) (expression uint + (var_ref ssbo_store_temp_offset@15) (constant uint (0)) ) (var_ref ssbo_store_temp@14) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents1) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (constant uint (456)) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) ) )) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@16) - (declare (temporary ) uint ssbo_store_temp_offset@17) - (assign (x) (var_ref ssbo_store_temp_offset@17) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@16) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@17) (constant uint (0)) ) (var_ref ssbo_store_temp@16) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref 
gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (1 1 1 1)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@18) - (declare (temporary ) uint ssbo_store_temp_offset@19) - (assign (x) (var_ref ssbo_store_temp_offset@19) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@18) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@19) (constant uint (0)) ) (var_ref ssbo_store_temp@18) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 
bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (2 2 2 2)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@20) - (declare (temporary ) uint ssbo_store_temp_offset@21) - (assign (x) (var_ref ssbo_store_temp_offset@21) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@20) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@21) (constant uint (0)) ) (var_ref ssbo_store_temp@20) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (3 3 3 3)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@22) - (declare (temporary ) uint ssbo_store_temp_offset@23) - (assign (x) (var_ref ssbo_store_temp_offset@23) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign 
(x) (var_ref ssbo_store_temp@22) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@23) (constant uint (0)) ) (var_ref ssbo_store_temp@22) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (4 4 4 4)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@24) - (declare (temporary ) uint ssbo_store_temp_offset@25) - (assign (x) (var_ref ssbo_store_temp_offset@25) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@24) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@25) (constant uint (0)) ) (var_ref ssbo_store_temp@24) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) 
) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (5 5 5 5)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@26) - (declare (temporary ) uint ssbo_store_temp_offset@27) - (assign (x) (var_ref ssbo_store_temp_offset@27) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@26) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@27) (constant uint (0)) ) (var_ref ssbo_store_temp@26) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz 
x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (6 6 6 6)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@28) - (declare (temporary ) uint ssbo_store_temp_offset@29) - (assign (x) (var_ref ssbo_store_temp_offset@29) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@28) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@29) (constant uint (0)) ) (var_ref ssbo_store_temp@28) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (7 7 7 7)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@30) - (declare (temporary ) uint ssbo_store_temp_offset@31) - (assign (x) (var_ref ssbo_store_temp_offset@31) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@30) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (2)) (expression uint + (var_ref ssbo_store_temp_offset@31) (constant uint (0)) ) 
(var_ref ssbo_store_temp@30) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents0) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) ) )) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp) - (declare (temporary ) uint ubo_load_temp_offset) - (assign (x) (var_ref ubo_load_temp_offset) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp) (var_ref ssbo_load_result) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) 
)) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (1 1 1 1)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@32) - (declare (temporary ) uint ubo_load_temp_offset@33) - (assign (x) (var_ref ubo_load_temp_offset@33) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result@34) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@34) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@33) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@32) (var_ref ssbo_load_result@34) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@32) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_2) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x 
(expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (2 2 2 2)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@35) - (declare (temporary ) uint ubo_load_temp_offset@36) - (assign (x) (var_ref ubo_load_temp_offset@36) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result@37) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@37) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@36) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@35) (var_ref ssbo_load_result@37) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@35) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_2) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) 
(expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (3 3 3 3)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@38) - (declare (temporary ) uint ubo_load_temp_offset@39) - (assign (x) (var_ref ubo_load_temp_offset@39) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result@40) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@40) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@39) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@38) (var_ref ssbo_load_result@40) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@38) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_2) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression 
uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (4 4 4 4)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@41) - (declare (temporary ) uint ubo_load_temp_offset@42) - (assign (x) (var_ref ubo_load_temp_offset@42) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result@43) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@43) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@42) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@41) (var_ref ssbo_load_result@43) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@41) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_2) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (5 5 5 5)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x 
(expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@44) - (declare (temporary ) uint ubo_load_temp_offset@45) - (assign (x) (var_ref ubo_load_temp_offset@45) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result@46) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@46) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@45) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@44) (var_ref ssbo_load_result@46) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@44) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_2) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (6 6 6 6)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@47) - (declare (temporary ) uint ubo_load_temp_offset@48) - (assign (x) (var_ref ubo_load_temp_offset@48) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint 
ssbo_load_result@49) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@49) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@48) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@47) (var_ref ssbo_load_result@49) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@47) ) ) + (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_2) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (swiz xxxx (var_ref gl_NumWorkGroups) )(swiz yyyy (var_ref gl_WorkGroupID) )) (swiz xxxx (var_ref gl_WorkGroupID) )) )) ) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_LocalInvocationID) )(swiz xxxx (var_ref gl_NumWorkGroups) )) ) )) (assign (x) (var_ref temp0_4) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (swiz yyyy (var_ref gl_NumWorkGroups) )(constant uvec4 (16 16 16 16)) ) ) )) (assign (x) (var_ref temp0_3) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_4) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (16 16 16 16)) ) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_3) )) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_i2f (expression ivec4 u2i (expression uvec4 + (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (swiz xxxx (var_ref gl_LocalInvocationID) )) ) ) )) (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (swiz x (expression uvec4 + (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (8 8 8 8)) ) (constant uvec4 (7 7 7 7)) ) )) ) (assign (x) (var_ref temp0_2) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (constant uvec4 (4 4 4 4)) ) ) )) (assign (x) (var_ref ssbo_addr_temp) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) )(constant int (2)) ) ) - (declare (temporary ) uint ubo_load_temp@50) - (declare (temporary ) uint ubo_load_temp_offset@51) - (assign (x) (var_ref ubo_load_temp_offset@51) (expression uint + (constant uint (0)) (expression uint * (var_ref ssbo_addr_temp) (constant uint (4)) ) ) ) - (declare (temporary ) uint ssbo_load_result@52) - (call __intrinsic_load_ssbo (var_ref ssbo_load_result@52) ((constant uint (1)) (expression uint + (var_ref ubo_load_temp_offset@51) (constant uint (0)) ) (constant uint (0)) )) - - (assign (x) (var_ref ubo_load_temp@50) (var_ref ssbo_load_result@52) ) - (assign (x) (var_ref temp0_2) (expression float bitcast_u2f (var_ref ubo_load_temp@50) ) ) + (assign (x) (var_ref 
temp0_2) (expression float bitcast_u2f (array_ref (var_ref csssbocontents1) (var_ref ssbo_addr_temp) ) ) ) (assign (x) (var_ref temp0_1) (expression float bitcast_u2f (expression uint neg (expression uint i2u (expression int b2i (swiz x (expression bvec4 == (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_2) )) (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) )) ) ) ) ) ) ( (assign (x) (var_ref temp0_1) (constant float (0.000000)) ) )) (if (expression bool any_nequal (expression bvec4 i2b (expression ivec4 u2i (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) ) ) (constant bvec4 (0 0 0 0)) ) ( (assign (x) (var_ref temp0_1) (constant float (0x1p-149)) ) ) ( (assign (x) (var_ref temp0_1) (constant float (0.000000)) ) )) (assign (x) (var_ref temp0_0) (swiz x (expression vec4 bitcast_u2f (expression uvec4 * (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_0) )) (constant uvec4 (4 4 4 4)) ) ) )) - (declare (temporary ) uint ssbo_store_temp@53) - (declare (temporary ) uint ssbo_store_temp_offset@54) - (assign (x) (var_ref ssbo_store_temp_offset@54) (expression uint + (constant uint (0)) (expression uint * (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_0) )) )(constant int (2)) ) (constant uint (4)) ) ) ) - (assign (x) (var_ref ssbo_store_temp@53) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) - (call __intrinsic_store_ssbo ((constant uint (0)) (expression uint + (var_ref ssbo_store_temp_offset@54) (constant uint (0)) ) (var_ref ssbo_store_temp@53) (constant uint (1)) (constant uint (0)) )) - + (assign (x) (array_ref (var_ref csssbocontents2) (expression uint >> (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_0) )) )(constant int (2)) ) ) (swiz x (expression uvec4 bitcast_f2u (swiz xxxx (var_ref temp0_1) )) )) )) ) ) NIR (SSA form) for compute shader: shader: MESA_SHADER_COMPUTE name: GLSL2 local-size: 16, 8, 1 shared-size: 0 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents2 (0, 0, 2) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents1 (0, 0, 1) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents0 (0, 0, 0) decl_function main (0 params) impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000004 /* 0.000000 */) - vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) + vec1 32 ssa_1 = load_const (0x000001c8 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000002 /* 0.000000 */) - vec1 32 ssa_3 = load_const (0x000001c8 /* 0.000000 */) - vec1 32 ssa_4 = load_const (0x00000001 /* 0.000000 */) - vec1 32 ssa_5 = load_const (0x00000003 /* 0.000000 */) - vec1 32 ssa_6 = load_const (0x00000005 /* 0.000000 */) - vec1 32 ssa_7 = load_const (0x00000006 /* 0.000000 */) - vec1 32 ssa_8 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_3 = load_const (0x00000001 /* 0.000000 */) + vec1 32 ssa_4 = load_const (0x00000003 /* 0.000000 */) + vec1 32 ssa_5 = load_const (0x00000005 /* 0.000000 */) + vec1 32 ssa_6 = load_const (0x00000006 /* 0.000000 */) + vec1 32 ssa_7 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_8 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_9 = intrinsic load_work_group_id () () vec1 32 ssa_10 = ishl ssa_9.x, ssa_0 - vec1 32 ssa_11 = ishl ssa_9.y, ssa_5 + vec1 32 ssa_11 = ishl ssa_9.y, ssa_4 vec1 32 ssa_12 = intrinsic load_subgroup_id () () - vec1 32 ssa_13 = ishl ssa_12, ssa_5 + vec1 32 ssa_13 = ishl ssa_12, ssa_4 vec1 32 ssa_14 = intrinsic load_subgroup_invocation () () vec1 32 ssa_15 = 
iadd ssa_14, ssa_13 vec1 32 ssa_16 = load_const (0x0000000f /* 0.000000 */) vec1 32 ssa_17 = iand ssa_15, ssa_16 vec1 32 ssa_18 = ushr ssa_15, ssa_0 - vec1 32 ssa_19 = iand ssa_18, ssa_8 + vec1 32 ssa_19 = iand ssa_18, ssa_7 vec1 32 ssa_20 = iadd ssa_10, ssa_17 vec1 32 ssa_21 = iadd ssa_11, ssa_19 vec3 32 ssa_22 = intrinsic load_num_work_groups () () vec1 32 ssa_23 = imul ssa_21, ssa_22.x vec1 32 ssa_24 = ishl ssa_23, ssa_0 vec1 32 ssa_25 = iadd ssa_24, ssa_20 vec1 32 ssa_26 = imul ssa_22.x, ssa_9.y vec1 32 ssa_27 = iadd ssa_26, ssa_9.x vec1 32 ssa_28 = imul ssa_19, ssa_22.x vec1 32 ssa_29 = ishl ssa_22.y, ssa_0 vec1 32 ssa_30 = imul ssa_28, ssa_29 vec1 32 ssa_31 = ishl ssa_27, ssa_0 vec1 32 ssa_32 = iadd ssa_31, ssa_30 vec1 32 ssa_33 = iadd ssa_32, ssa_17 - vec1 32 ssa_34 = ishl ssa_33, ssa_5 + vec1 32 ssa_34 = ishl ssa_33, ssa_4 vec1 32 ssa_35 = ishl ssa_34, ssa_2 vec1 32 ssa_36 = ushr ssa_35, ssa_2 vec1 32 ssa_37 = ishl ssa_36, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_38 = iadd ssa_34, ssa_4 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_38 = iadd ssa_34, ssa_3 vec1 32 ssa_39 = ishl ssa_38, ssa_2 vec1 32 ssa_40 = ushr ssa_39, ssa_2 vec1 32 ssa_41 = ishl ssa_40, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_42 = iadd ssa_34, ssa_2 vec1 32 ssa_43 = ishl ssa_42, ssa_2 vec1 32 ssa_44 = ushr ssa_43, ssa_2 vec1 32 ssa_45 = ishl ssa_44, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_46 = iadd ssa_34, ssa_5 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_46 = iadd ssa_34, ssa_4 vec1 32 ssa_47 = ishl ssa_46, ssa_2 vec1 32 ssa_48 = ushr ssa_47, ssa_2 vec1 32 ssa_49 = ishl ssa_48, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_50 = iadd ssa_34, ssa_0 vec1 32 ssa_51 = ishl ssa_50, ssa_2 vec1 32 ssa_52 = ushr ssa_51, ssa_2 vec1 32 ssa_53 = ishl ssa_52, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_54 = iadd ssa_34, ssa_6 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_54 = iadd ssa_34, ssa_5 vec1 32 ssa_55 = ishl ssa_54, ssa_2 vec1 32 ssa_56 = ushr ssa_55, ssa_2 vec1 32 ssa_57 = ishl ssa_56, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_58 = iadd ssa_34, ssa_7 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_58 = iadd ssa_34, ssa_6 vec1 32 ssa_59 = ishl ssa_58, ssa_2 vec1 32 ssa_60 = ushr ssa_59, ssa_2 vec1 32 ssa_61 = ishl ssa_60, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, 
ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_62 = iadd ssa_34, ssa_8 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_62 = iadd ssa_34, ssa_7 vec1 32 ssa_63 = ishl ssa_62, ssa_2 vec1 32 ssa_64 = ushr ssa_63, ssa_2 vec1 32 ssa_65 = ishl ssa_64, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_66 = intrinsic load_ssbo (ssa_4, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_66 = intrinsic load_ssbo (ssa_3, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_67 = ieq32 ssa_66, ssa_27 /* succs: block_1 block_2 */ if ssa_67 { block block_1: /* preds: block_0 */ - vec1 32 ssa_68 = intrinsic load_ssbo (ssa_4, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_68 = intrinsic load_ssbo (ssa_3, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_69 = ieq32 ssa_68, ssa_27 vec1 32 ssa_70 = b2i32 ssa_69 vec1 32 ssa_71 = imov -ssa_70 /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ - vec1 32 ssa_72 = phi block_1: ssa_71, block_2: ssa_1 - vec1 32 ssa_73 = ine32 ssa_72, ssa_1 + vec1 32 ssa_72 = phi block_1: ssa_71, block_2: ssa_8 + vec1 32 ssa_73 = ine32 ssa_72, ssa_8 /* succs: block_4 block_5 */ if ssa_73 { block block_4: /* preds: block_3 */ - vec1 32 ssa_74 = intrinsic load_ssbo (ssa_4, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_74 = intrinsic load_ssbo (ssa_3, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_75 = ieq32 ssa_74, ssa_27 vec1 32 ssa_76 = b2i32 ssa_75 vec1 32 ssa_77 = imov -ssa_76 /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ - vec1 32 ssa_78 = phi block_4: ssa_77, block_5: ssa_1 - vec1 32 ssa_79 = ine32 ssa_78, ssa_1 + vec1 32 ssa_78 = phi block_4: ssa_77, block_5: ssa_8 + vec1 32 ssa_79 = ine32 ssa_78, ssa_8 /* succs: block_7 block_8 */ if ssa_79 { block block_7: /* preds: block_6 */ - vec1 32 ssa_80 = intrinsic load_ssbo (ssa_4, ssa_49) (0, 
4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_80 = intrinsic load_ssbo (ssa_3, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_81 = ieq32 ssa_80, ssa_27 vec1 32 ssa_82 = b2i32 ssa_81 vec1 32 ssa_83 = imov -ssa_82 /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ - vec1 32 ssa_84 = phi block_7: ssa_83, block_8: ssa_1 - vec1 32 ssa_85 = ine32 ssa_84, ssa_1 + vec1 32 ssa_84 = phi block_7: ssa_83, block_8: ssa_8 + vec1 32 ssa_85 = ine32 ssa_84, ssa_8 /* succs: block_10 block_11 */ if ssa_85 { block block_10: /* preds: block_9 */ - vec1 32 ssa_86 = intrinsic load_ssbo (ssa_4, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_86 = intrinsic load_ssbo (ssa_3, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_87 = ieq32 ssa_86, ssa_27 vec1 32 ssa_88 = b2i32 ssa_87 vec1 32 ssa_89 = imov -ssa_88 /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ - vec1 32 ssa_90 = phi block_10: ssa_89, block_11: ssa_1 - vec1 32 ssa_91 = ine32 ssa_90, ssa_1 + vec1 32 ssa_90 = phi block_10: ssa_89, block_11: ssa_8 + vec1 32 ssa_91 = ine32 ssa_90, ssa_8 /* succs: block_13 block_14 */ if ssa_91 { block block_13: /* preds: block_12 */ - vec1 32 ssa_92 = intrinsic load_ssbo (ssa_4, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_92 = intrinsic load_ssbo (ssa_3, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_93 = ieq32 ssa_92, ssa_27 vec1 32 ssa_94 = b2i32 ssa_93 vec1 32 ssa_95 = imov -ssa_94 /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ - vec1 32 ssa_96 = phi block_13: ssa_95, block_14: ssa_1 - vec1 32 ssa_97 = ine32 ssa_96, ssa_1 + vec1 32 ssa_96 = phi block_13: ssa_95, block_14: ssa_8 + vec1 32 ssa_97 = ine32 ssa_96, ssa_8 /* succs: block_16 block_17 */ if ssa_97 { block block_16: /* preds: block_15 */ - vec1 32 ssa_98 = intrinsic load_ssbo (ssa_4, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_98 = intrinsic load_ssbo (ssa_3, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_99 = ieq32 ssa_98, ssa_27 vec1 32 ssa_100 = b2i32 ssa_99 vec1 32 ssa_101 = imov -ssa_100 /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ - vec1 32 ssa_102 = phi block_16: ssa_101, block_17: ssa_1 - vec1 32 ssa_103 = ine32 ssa_102, ssa_1 + vec1 32 ssa_102 = phi block_16: ssa_101, block_17: ssa_8 + vec1 32 ssa_103 = ine32 ssa_102, ssa_8 /* succs: block_19 block_20 */ if ssa_103 { block block_19: /* preds: block_18 */ - vec1 32 ssa_104 = intrinsic load_ssbo (ssa_4, ssa_65) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_105 = ieq32 ssa_104, ssa_27 - vec1 32 ssa_106 = b2i32 ssa_105 - vec1 32 ssa_107 = imov -ssa_106 + vec1 32 ssa_104 = ieq32 ssa_1, ssa_27 + vec1 32 ssa_105 = b2i32 ssa_104 + vec1 32 ssa_106 = imov -ssa_105 /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */ - vec1 32 ssa_108 = phi block_19: ssa_107, block_20: ssa_1 - vec1 32 ssa_109 = ine32 ssa_108, ssa_1 - vec1 32 ssa_110 = b32csel ssa_109, 
ssa_4, ssa_1 - vec1 32 ssa_111 = ishl ssa_25, ssa_2 - vec1 32 ssa_112 = ushr ssa_111, ssa_2 - vec1 32 ssa_113 = ishl ssa_112, ssa_2 - intrinsic store_ssbo (ssa_110, ssa_1, ssa_113) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_107 = phi block_19: ssa_106, block_20: ssa_8 + vec1 32 ssa_108 = ine32 ssa_107, ssa_8 + vec1 32 ssa_109 = b32csel ssa_108, ssa_3, ssa_8 + vec1 32 ssa_110 = ishl ssa_25, ssa_2 + vec1 32 ssa_111 = ushr ssa_110, ssa_2 + vec1 32 ssa_112 = ishl ssa_111, ssa_2 + intrinsic store_ssbo (ssa_109, ssa_8, ssa_112) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_22 */ block block_22: } NIR (final form) for compute shader: shader: MESA_SHADER_COMPUTE name: GLSL2 local-size: 16, 8, 1 shared-size: 0 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents2 (0, 0, 2) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents1 (0, 0, 1) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents0 (0, 0, 0) decl_function main (0 params) impl main { decl_reg vec1 32 r0 decl_reg vec1 32 r1 decl_reg vec1 32 r2 decl_reg vec1 32 r3 decl_reg vec1 32 r4 decl_reg vec1 32 r5 decl_reg vec1 32 r6 block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000004 /* 0.000000 */) - vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) + vec1 32 ssa_1 = load_const (0x000001c8 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000002 /* 0.000000 */) - vec1 32 ssa_3 = load_const (0x000001c8 /* 0.000000 */) - vec1 32 ssa_4 = load_const (0x00000001 /* 0.000000 */) - vec1 32 ssa_5 = load_const (0x00000003 /* 0.000000 */) - vec1 32 ssa_6 = load_const (0x00000005 /* 0.000000 */) - vec1 32 ssa_7 = load_const (0x00000006 /* 0.000000 */) - vec1 32 ssa_8 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_3 = load_const (0x00000001 /* 0.000000 */) + vec1 32 ssa_4 = load_const (0x00000003 /* 0.000000 */) + vec1 32 ssa_5 = load_const (0x00000005 /* 0.000000 */) + vec1 32 ssa_6 = load_const (0x00000006 /* 0.000000 */) + vec1 32 ssa_7 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_8 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_9 = intrinsic load_work_group_id () () vec1 32 ssa_10 = ishl ssa_9.x, ssa_0 - vec1 32 ssa_11 = ishl ssa_9.y, ssa_5 + vec1 32 ssa_11 = ishl ssa_9.y, ssa_4 vec1 32 ssa_12 = intrinsic load_subgroup_id () () - vec1 32 ssa_13 = ishl ssa_12, ssa_5 + vec1 32 ssa_13 = ishl ssa_12, ssa_4 vec1 32 ssa_14 = intrinsic load_subgroup_invocation () () vec1 32 ssa_15 = iadd ssa_14, ssa_13 vec1 32 ssa_16 = load_const (0x0000000f /* 0.000000 */) vec1 32 ssa_17 = iand ssa_15, ssa_16 vec1 32 ssa_18 = ushr ssa_15, ssa_0 - vec1 32 ssa_19 = iand ssa_18, ssa_8 + vec1 32 ssa_19 = iand ssa_18, ssa_7 vec1 32 ssa_20 = iadd ssa_10, ssa_17 vec1 32 ssa_21 = iadd ssa_11, ssa_19 vec3 32 ssa_22 = intrinsic load_num_work_groups () () vec1 32 ssa_23 = imul ssa_21, ssa_22.x vec1 32 ssa_24 = ishl ssa_23, ssa_0 vec1 32 ssa_25 = iadd ssa_24, ssa_20 vec1 32 ssa_26 = imul ssa_22.x, ssa_9.y vec1 32 ssa_27 = iadd ssa_26, ssa_9.x vec1 32 ssa_28 = imul ssa_19, ssa_22.x vec1 32 ssa_29 = ishl ssa_22.y, ssa_0 vec1 32 ssa_30 = imul ssa_28, ssa_29 vec1 32 ssa_31 = ishl ssa_27, ssa_0 vec1 32 ssa_32 = iadd ssa_31, ssa_30 vec1 32 ssa_33 = iadd ssa_32, ssa_17 - vec1 32 ssa_34 = ishl ssa_33, ssa_5 + vec1 32 ssa_34 = ishl ssa_33, ssa_4 vec1 32 ssa_35 = ishl ssa_34, ssa_2 vec1 32 ssa_36 = ushr ssa_35, ssa_2 vec1 32 ssa_37 = ishl ssa_36, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_37) (1, 0, 4, 0) /* 
wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_38 = iadd ssa_34, ssa_4 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_38 = iadd ssa_34, ssa_3 vec1 32 ssa_39 = ishl ssa_38, ssa_2 vec1 32 ssa_40 = ushr ssa_39, ssa_2 vec1 32 ssa_41 = ishl ssa_40, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_42 = iadd ssa_34, ssa_2 vec1 32 ssa_43 = ishl ssa_42, ssa_2 vec1 32 ssa_44 = ushr ssa_43, ssa_2 vec1 32 ssa_45 = ishl ssa_44, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_46 = iadd ssa_34, ssa_5 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_46 = iadd ssa_34, ssa_4 vec1 32 ssa_47 = ishl ssa_46, ssa_2 vec1 32 ssa_48 = ushr ssa_47, ssa_2 vec1 32 ssa_49 = ishl ssa_48, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_50 = iadd ssa_34, ssa_0 vec1 32 ssa_51 = ishl ssa_50, ssa_2 vec1 32 ssa_52 = ushr ssa_51, ssa_2 vec1 32 ssa_53 = ishl ssa_52, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_54 = iadd ssa_34, ssa_6 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_54 = iadd ssa_34, ssa_5 vec1 32 ssa_55 = ishl ssa_54, ssa_2 vec1 32 ssa_56 = ushr ssa_55, ssa_2 vec1 32 ssa_57 = ishl ssa_56, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_58 = iadd ssa_34, ssa_7 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_58 = iadd ssa_34, ssa_6 vec1 32 ssa_59 = ishl ssa_58, ssa_2 vec1 32 ssa_60 = ushr ssa_59, ssa_2 vec1 32 ssa_61 = ishl ssa_60, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_62 = iadd ssa_34, ssa_8 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_62 = iadd ssa_34, ssa_7 vec1 32 ssa_63 = ishl ssa_62, ssa_2 vec1 32 ssa_64 = ushr ssa_63, ssa_2 vec1 32 ssa_65 = ishl ssa_64, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ 
/* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_66 = intrinsic load_ssbo (ssa_4, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_66 = intrinsic load_ssbo (ssa_3, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_67 = ieq32 ssa_66, ssa_27 /* succs: block_1 block_2 */ if ssa_67 { block block_1: /* preds: block_0 */ - vec1 32 ssa_68 = intrinsic load_ssbo (ssa_4, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_68 = intrinsic load_ssbo (ssa_3, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_69 = ieq32 ssa_68, ssa_27 vec1 32 ssa_70 = b2i32 ssa_69 r0 = imov -ssa_70 /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ - r0 = imov ssa_1 + r0 = imov ssa_8 /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ - vec1 32 ssa_73 = ine32 r0, ssa_1 + vec1 32 ssa_73 = ine32 r0, ssa_8 /* succs: block_4 block_5 */ if ssa_73 { block block_4: /* preds: block_3 */ - vec1 32 ssa_74 = intrinsic load_ssbo (ssa_4, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_74 = intrinsic load_ssbo (ssa_3, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_75 = ieq32 ssa_74, ssa_27 vec1 32 ssa_76 = b2i32 ssa_75 r1 = imov -ssa_76 /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ - r1 = imov ssa_1 + r1 = imov ssa_8 /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ - vec1 32 ssa_79 = ine32 r1, ssa_1 + vec1 32 ssa_79 = ine32 r1, ssa_8 /* succs: block_7 block_8 */ if ssa_79 { block block_7: /* preds: block_6 */ - vec1 32 ssa_80 = intrinsic load_ssbo (ssa_4, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_80 = intrinsic load_ssbo (ssa_3, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_81 = ieq32 ssa_80, ssa_27 vec1 32 ssa_82 = b2i32 ssa_81 r2 = imov -ssa_82 /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ - r2 = imov ssa_1 + r2 = imov ssa_8 /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ - vec1 32 ssa_85 = ine32 r2, ssa_1 + vec1 32 ssa_85 = ine32 r2, ssa_8 /* succs: block_10 block_11 */ if ssa_85 { block block_10: /* preds: block_9 */ - vec1 32 ssa_86 = intrinsic load_ssbo (ssa_4, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_86 = intrinsic load_ssbo (ssa_3, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_87 = ieq32 ssa_86, ssa_27 vec1 32 ssa_88 = b2i32 ssa_87 r3 = imov -ssa_88 /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ - r3 = imov ssa_1 + r3 = imov ssa_8 /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ - vec1 32 ssa_91 = ine32 r3, ssa_1 + vec1 32 ssa_91 = ine32 r3, ssa_8 /* succs: block_13 block_14 */ if ssa_91 { block 
block_13: /* preds: block_12 */ - vec1 32 ssa_92 = intrinsic load_ssbo (ssa_4, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_92 = intrinsic load_ssbo (ssa_3, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_93 = ieq32 ssa_92, ssa_27 vec1 32 ssa_94 = b2i32 ssa_93 r4 = imov -ssa_94 /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ - r4 = imov ssa_1 + r4 = imov ssa_8 /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ - vec1 32 ssa_97 = ine32 r4, ssa_1 + vec1 32 ssa_97 = ine32 r4, ssa_8 /* succs: block_16 block_17 */ if ssa_97 { block block_16: /* preds: block_15 */ - vec1 32 ssa_98 = intrinsic load_ssbo (ssa_4, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_98 = intrinsic load_ssbo (ssa_3, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_99 = ieq32 ssa_98, ssa_27 vec1 32 ssa_100 = b2i32 ssa_99 r5 = imov -ssa_100 /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ - r5 = imov ssa_1 + r5 = imov ssa_8 /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ - vec1 32 ssa_103 = ine32 r5, ssa_1 + vec1 32 ssa_103 = ine32 r5, ssa_8 /* succs: block_19 block_20 */ if ssa_103 { block block_19: /* preds: block_18 */ - vec1 32 ssa_104 = intrinsic load_ssbo (ssa_4, ssa_65) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_105 = ieq32 ssa_104, ssa_27 - vec1 32 ssa_106 = b2i32 ssa_105 - r6 = imov -ssa_106 + vec1 32 ssa_104 = ieq32 ssa_1, ssa_27 + vec1 32 ssa_105 = b2i32 ssa_104 + r6 = imov -ssa_105 /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ - r6 = imov ssa_1 + r6 = imov ssa_8 /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */ - vec1 32 ssa_109 = ine32 r6, ssa_1 - vec1 32 ssa_110 = b32csel ssa_109, ssa_4, ssa_1 - vec1 32 ssa_111 = ishl ssa_25, ssa_2 - vec1 32 ssa_112 = ushr ssa_111, ssa_2 - vec1 32 ssa_113 = ishl ssa_112, ssa_2 - intrinsic store_ssbo (ssa_110, ssa_1, ssa_113) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_108 = ine32 r6, ssa_8 + vec1 32 ssa_109 = b32csel ssa_108, ssa_3, ssa_8 + vec1 32 ssa_110 = ishl ssa_25, ssa_2 + vec1 32 ssa_111 = ushr ssa_110, ssa_2 + vec1 32 ssa_112 = ishl ssa_111, ssa_2 + intrinsic store_ssbo (ssa_109, ssa_8, ssa_112) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_22 */ block block_22: } NIR (SSA form) for compute shader: shader: MESA_SHADER_COMPUTE name: GLSL2 local-size: 16, 8, 1 shared-size: 0 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents2 (0, 0, 2) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents1 (0, 0, 1) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents0 (0, 0, 0) decl_function main (0 params) impl main { block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000004 /* 0.000000 */) - vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) + vec1 32 ssa_1 = load_const (0x000001c8 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000002 /* 0.000000 */) - vec1 32 ssa_3 = load_const (0x000001c8 /* 0.000000 */) - vec1 32 ssa_4 = load_const (0x00000001 /* 0.000000 */) - vec1 32 ssa_5 = load_const (0x00000003 /* 0.000000 */) - vec1 32 ssa_6 = load_const (0x00000005 /* 0.000000 */) - vec1 32 ssa_7 = load_const (0x00000006 /* 0.000000 */) - vec1 32 ssa_8 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_3 = load_const 
(0x00000001 /* 0.000000 */) + vec1 32 ssa_4 = load_const (0x00000003 /* 0.000000 */) + vec1 32 ssa_5 = load_const (0x00000005 /* 0.000000 */) + vec1 32 ssa_6 = load_const (0x00000006 /* 0.000000 */) + vec1 32 ssa_7 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_8 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_9 = intrinsic load_work_group_id () () vec1 32 ssa_10 = ishl ssa_9.x, ssa_0 - vec1 32 ssa_11 = ishl ssa_9.y, ssa_5 + vec1 32 ssa_11 = ishl ssa_9.y, ssa_4 vec1 32 ssa_12 = intrinsic load_subgroup_id () () vec1 32 ssa_13 = ishl ssa_12, ssa_0 vec1 32 ssa_14 = intrinsic load_subgroup_invocation () () vec1 32 ssa_15 = iadd ssa_14, ssa_13 vec1 32 ssa_16 = load_const (0x0000000f /* 0.000000 */) vec1 32 ssa_17 = iand ssa_15, ssa_16 vec1 32 ssa_18 = ushr ssa_15, ssa_0 - vec1 32 ssa_19 = iand ssa_18, ssa_8 + vec1 32 ssa_19 = iand ssa_18, ssa_7 vec1 32 ssa_20 = iadd ssa_10, ssa_17 vec1 32 ssa_21 = iadd ssa_11, ssa_19 vec3 32 ssa_22 = intrinsic load_num_work_groups () () vec1 32 ssa_23 = imul ssa_21, ssa_22.x vec1 32 ssa_24 = ishl ssa_23, ssa_0 vec1 32 ssa_25 = iadd ssa_24, ssa_20 vec1 32 ssa_26 = imul ssa_22.x, ssa_9.y vec1 32 ssa_27 = iadd ssa_26, ssa_9.x vec1 32 ssa_28 = imul ssa_19, ssa_22.x vec1 32 ssa_29 = ishl ssa_22.y, ssa_0 vec1 32 ssa_30 = imul ssa_28, ssa_29 vec1 32 ssa_31 = ishl ssa_27, ssa_0 vec1 32 ssa_32 = iadd ssa_31, ssa_30 vec1 32 ssa_33 = iadd ssa_32, ssa_17 - vec1 32 ssa_34 = ishl ssa_33, ssa_5 + vec1 32 ssa_34 = ishl ssa_33, ssa_4 vec1 32 ssa_35 = ishl ssa_34, ssa_2 vec1 32 ssa_36 = ushr ssa_35, ssa_2 vec1 32 ssa_37 = ishl ssa_36, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_38 = iadd ssa_34, ssa_4 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_38 = iadd ssa_34, ssa_3 vec1 32 ssa_39 = ishl ssa_38, ssa_2 vec1 32 ssa_40 = ushr ssa_39, ssa_2 vec1 32 ssa_41 = ishl ssa_40, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_42 = iadd ssa_34, ssa_2 vec1 32 ssa_43 = ishl ssa_42, ssa_2 vec1 32 ssa_44 = ushr ssa_43, ssa_2 vec1 32 ssa_45 = ishl ssa_44, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_46 = iadd ssa_34, ssa_5 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_46 = iadd ssa_34, ssa_4 vec1 32 ssa_47 = ishl ssa_46, ssa_2 vec1 32 ssa_48 = ushr ssa_47, ssa_2 vec1 32 ssa_49 = ishl ssa_48, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_50 = iadd ssa_34, ssa_0 vec1 32 ssa_51 = ishl ssa_50, ssa_2 vec1 32 ssa_52 = ushr ssa_51, ssa_2 vec1 32 ssa_53 = ishl ssa_52, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_54 = iadd ssa_34, ssa_6 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* 
align_offset=0 */ + vec1 32 ssa_54 = iadd ssa_34, ssa_5 vec1 32 ssa_55 = ishl ssa_54, ssa_2 vec1 32 ssa_56 = ushr ssa_55, ssa_2 vec1 32 ssa_57 = ishl ssa_56, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_58 = iadd ssa_34, ssa_7 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_58 = iadd ssa_34, ssa_6 vec1 32 ssa_59 = ishl ssa_58, ssa_2 vec1 32 ssa_60 = ushr ssa_59, ssa_2 vec1 32 ssa_61 = ishl ssa_60, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_62 = iadd ssa_34, ssa_8 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_62 = iadd ssa_34, ssa_7 vec1 32 ssa_63 = ishl ssa_62, ssa_2 vec1 32 ssa_64 = ushr ssa_63, ssa_2 vec1 32 ssa_65 = ishl ssa_64, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ intrinsic store_ssbo (ssa_27, ssa_2, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_66 = intrinsic load_ssbo (ssa_4, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_66 = intrinsic load_ssbo (ssa_3, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_67 = ieq32 ssa_66, ssa_27 /* succs: block_1 block_2 */ if ssa_67 { block block_1: /* preds: block_0 */ - vec1 32 ssa_68 = intrinsic load_ssbo (ssa_4, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_68 = intrinsic load_ssbo (ssa_3, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_69 = ieq32 ssa_68, ssa_27 vec1 32 ssa_70 = b2i32 ssa_69 vec1 32 ssa_71 = imov -ssa_70 /* succs: block_3 */ } else { block block_2: /* preds: block_0 */ /* succs: block_3 */ } block block_3: /* preds: block_1 block_2 */ - vec1 32 ssa_72 = phi block_1: ssa_71, block_2: ssa_1 - vec1 32 ssa_73 = ine32 ssa_72, ssa_1 + vec1 32 ssa_72 = phi block_1: ssa_71, block_2: ssa_8 + vec1 32 ssa_73 = ine32 ssa_72, ssa_8 /* succs: block_4 block_5 */ if ssa_73 { block block_4: /* preds: block_3 */ - vec1 32 ssa_74 = intrinsic load_ssbo (ssa_4, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_74 = intrinsic 
load_ssbo (ssa_3, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_75 = ieq32 ssa_74, ssa_27 vec1 32 ssa_76 = b2i32 ssa_75 vec1 32 ssa_77 = imov -ssa_76 /* succs: block_6 */ } else { block block_5: /* preds: block_3 */ /* succs: block_6 */ } block block_6: /* preds: block_4 block_5 */ - vec1 32 ssa_78 = phi block_4: ssa_77, block_5: ssa_1 - vec1 32 ssa_79 = ine32 ssa_78, ssa_1 + vec1 32 ssa_78 = phi block_4: ssa_77, block_5: ssa_8 + vec1 32 ssa_79 = ine32 ssa_78, ssa_8 /* succs: block_7 block_8 */ if ssa_79 { block block_7: /* preds: block_6 */ - vec1 32 ssa_80 = intrinsic load_ssbo (ssa_4, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_80 = intrinsic load_ssbo (ssa_3, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_81 = ieq32 ssa_80, ssa_27 vec1 32 ssa_82 = b2i32 ssa_81 vec1 32 ssa_83 = imov -ssa_82 /* succs: block_9 */ } else { block block_8: /* preds: block_6 */ /* succs: block_9 */ } block block_9: /* preds: block_7 block_8 */ - vec1 32 ssa_84 = phi block_7: ssa_83, block_8: ssa_1 - vec1 32 ssa_85 = ine32 ssa_84, ssa_1 + vec1 32 ssa_84 = phi block_7: ssa_83, block_8: ssa_8 + vec1 32 ssa_85 = ine32 ssa_84, ssa_8 /* succs: block_10 block_11 */ if ssa_85 { block block_10: /* preds: block_9 */ - vec1 32 ssa_86 = intrinsic load_ssbo (ssa_4, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_86 = intrinsic load_ssbo (ssa_3, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_87 = ieq32 ssa_86, ssa_27 vec1 32 ssa_88 = b2i32 ssa_87 vec1 32 ssa_89 = imov -ssa_88 /* succs: block_12 */ } else { block block_11: /* preds: block_9 */ /* succs: block_12 */ } block block_12: /* preds: block_10 block_11 */ - vec1 32 ssa_90 = phi block_10: ssa_89, block_11: ssa_1 - vec1 32 ssa_91 = ine32 ssa_90, ssa_1 + vec1 32 ssa_90 = phi block_10: ssa_89, block_11: ssa_8 + vec1 32 ssa_91 = ine32 ssa_90, ssa_8 /* succs: block_13 block_14 */ if ssa_91 { block block_13: /* preds: block_12 */ - vec1 32 ssa_92 = intrinsic load_ssbo (ssa_4, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_92 = intrinsic load_ssbo (ssa_3, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_93 = ieq32 ssa_92, ssa_27 vec1 32 ssa_94 = b2i32 ssa_93 vec1 32 ssa_95 = imov -ssa_94 /* succs: block_15 */ } else { block block_14: /* preds: block_12 */ /* succs: block_15 */ } block block_15: /* preds: block_13 block_14 */ - vec1 32 ssa_96 = phi block_13: ssa_95, block_14: ssa_1 - vec1 32 ssa_97 = ine32 ssa_96, ssa_1 + vec1 32 ssa_96 = phi block_13: ssa_95, block_14: ssa_8 + vec1 32 ssa_97 = ine32 ssa_96, ssa_8 /* succs: block_16 block_17 */ if ssa_97 { block block_16: /* preds: block_15 */ - vec1 32 ssa_98 = intrinsic load_ssbo (ssa_4, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_98 = intrinsic load_ssbo (ssa_3, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_99 = ieq32 ssa_98, ssa_27 vec1 32 ssa_100 = b2i32 ssa_99 vec1 32 ssa_101 = imov -ssa_100 /* succs: block_18 */ } else { block block_17: /* preds: block_15 */ /* succs: block_18 */ } block block_18: /* preds: block_16 block_17 */ - vec1 32 ssa_102 = phi block_16: ssa_101, block_17: ssa_1 - vec1 32 ssa_103 = ine32 ssa_102, ssa_1 + vec1 32 ssa_102 = phi block_16: ssa_101, block_17: ssa_8 + vec1 32 ssa_103 = ine32 ssa_102, ssa_8 /* succs: block_19 block_20 */ if ssa_103 { block 
block_19: /* preds: block_18 */ - vec1 32 ssa_104 = intrinsic load_ssbo (ssa_4, ssa_65) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_105 = ieq32 ssa_104, ssa_27 - vec1 32 ssa_106 = b2i32 ssa_105 - vec1 32 ssa_107 = imov -ssa_106 + vec1 32 ssa_104 = ieq32 ssa_1, ssa_27 + vec1 32 ssa_105 = b2i32 ssa_104 + vec1 32 ssa_106 = imov -ssa_105 /* succs: block_21 */ } else { block block_20: /* preds: block_18 */ /* succs: block_21 */ } block block_21: /* preds: block_19 block_20 */ - vec1 32 ssa_108 = phi block_19: ssa_107, block_20: ssa_1 - vec1 32 ssa_109 = ine32 ssa_108, ssa_1 - vec1 32 ssa_110 = b32csel ssa_109, ssa_4, ssa_1 - vec1 32 ssa_111 = ishl ssa_25, ssa_2 - vec1 32 ssa_112 = ushr ssa_111, ssa_2 - vec1 32 ssa_113 = ishl ssa_112, ssa_2 - intrinsic store_ssbo (ssa_110, ssa_1, ssa_113) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_107 = phi block_19: ssa_106, block_20: ssa_8 + vec1 32 ssa_108 = ine32 ssa_107, ssa_8 + vec1 32 ssa_109 = b32csel ssa_108, ssa_3, ssa_8 + vec1 32 ssa_110 = ishl ssa_25, ssa_2 + vec1 32 ssa_111 = ushr ssa_110, ssa_2 + vec1 32 ssa_112 = ishl ssa_111, ssa_2 + intrinsic store_ssbo (ssa_109, ssa_8, ssa_112) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ /* succs: block_22 */ block block_22: } NIR (final form) for compute shader: shader: MESA_SHADER_COMPUTE name: GLSL2 local-size: 16, 8, 1 shared-size: 0 inputs: 0 outputs: 0 uniforms: 0 shared: 0 decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents2 (0, 0, 2) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents1 (0, 0, 1) decl_var ssbo INTERP_MODE_NONE uint[] csssbocontents0 (0, 0, 0) decl_function main (0 params) impl main { decl_reg vec1 32 r0 decl_reg vec1 32 r1 decl_reg vec1 32 r2 decl_reg vec1 32 r3 decl_reg vec1 32 r4 decl_reg vec1 32 r5 decl_reg vec1 32 r6 block block_0: /* preds: */ vec1 32 ssa_0 = load_const (0x00000004 /* 0.000000 */) - vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */) + vec1 32 ssa_1 = load_const (0x000001c8 /* 0.000000 */) vec1 32 ssa_2 = load_const (0x00000002 /* 0.000000 */) - vec1 32 ssa_3 = load_const (0x000001c8 /* 0.000000 */) - vec1 32 ssa_4 = load_const (0x00000001 /* 0.000000 */) - vec1 32 ssa_5 = load_const (0x00000003 /* 0.000000 */) - vec1 32 ssa_6 = load_const (0x00000005 /* 0.000000 */) - vec1 32 ssa_7 = load_const (0x00000006 /* 0.000000 */) - vec1 32 ssa_8 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_3 = load_const (0x00000001 /* 0.000000 */) + vec1 32 ssa_4 = load_const (0x00000003 /* 0.000000 */) + vec1 32 ssa_5 = load_const (0x00000005 /* 0.000000 */) + vec1 32 ssa_6 = load_const (0x00000006 /* 0.000000 */) + vec1 32 ssa_7 = load_const (0x00000007 /* 0.000000 */) + vec1 32 ssa_8 = load_const (0x00000000 /* 0.000000 */) vec3 32 ssa_9 = intrinsic load_work_group_id () () vec1 32 ssa_10 = ishl ssa_9.x, ssa_0 - vec1 32 ssa_11 = ishl ssa_9.y, ssa_5 + vec1 32 ssa_11 = ishl ssa_9.y, ssa_4 vec1 32 ssa_12 = intrinsic load_subgroup_id () () vec1 32 ssa_13 = ishl ssa_12, ssa_0 vec1 32 ssa_14 = intrinsic load_subgroup_invocation () () vec1 32 ssa_15 = iadd ssa_14, ssa_13 vec1 32 ssa_16 = load_const (0x0000000f /* 0.000000 */) vec1 32 ssa_17 = iand ssa_15, ssa_16 vec1 32 ssa_18 = ushr ssa_15, ssa_0 - vec1 32 ssa_19 = iand ssa_18, ssa_8 + vec1 32 ssa_19 = iand ssa_18, ssa_7 vec1 32 ssa_20 = iadd ssa_10, ssa_17 vec1 32 ssa_21 = iadd ssa_11, ssa_19 vec3 32 ssa_22 = intrinsic load_num_work_groups () () vec1 32 ssa_23 = imul ssa_21, ssa_22.x vec1 32 ssa_24 = 
ishl ssa_23, ssa_0 vec1 32 ssa_25 = iadd ssa_24, ssa_20 vec1 32 ssa_26 = imul ssa_22.x, ssa_9.y vec1 32 ssa_27 = iadd ssa_26, ssa_9.x vec1 32 ssa_28 = imul ssa_19, ssa_22.x vec1 32 ssa_29 = ishl ssa_22.y, ssa_0 vec1 32 ssa_30 = imul ssa_28, ssa_29 vec1 32 ssa_31 = ishl ssa_27, ssa_0 vec1 32 ssa_32 = iadd ssa_31, ssa_30 vec1 32 ssa_33 = iadd ssa_32, ssa_17 - vec1 32 ssa_34 = ishl ssa_33, ssa_5 + vec1 32 ssa_34 = ishl ssa_33, ssa_4 vec1 32 ssa_35 = ishl ssa_34, ssa_2 vec1 32 ssa_36 = ushr ssa_35, ssa_2 vec1 32 ssa_37 = ishl ssa_36, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_38 = iadd ssa_34, ssa_4 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_38 = iadd ssa_34, ssa_3 vec1 32 ssa_39 = ishl ssa_38, ssa_2 vec1 32 ssa_40 = ushr ssa_39, ssa_2 vec1 32 ssa_41 = ishl ssa_40, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_42 = iadd ssa_34, ssa_2 vec1 32 ssa_43 = ishl ssa_42, ssa_2 vec1 32 ssa_44 = ushr ssa_43, ssa_2 vec1 32 ssa_45 = ishl ssa_44, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_46 = iadd ssa_34, ssa_5 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_46 = iadd ssa_34, ssa_4 vec1 32 ssa_47 = ishl ssa_46, ssa_2 vec1 32 ssa_48 = ushr ssa_47, ssa_2 vec1 32 ssa_49 = ishl ssa_48, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + intrinsic store_ssbo (ssa_1, ssa_3, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ vec1 32 ssa_50 = iadd ssa_34, ssa_0 vec1 32 ssa_51 = ishl ssa_50, ssa_2 vec1 32 ssa_52 = ushr ssa_51, ssa_2 vec1 32 ssa_53 = ishl ssa_52, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_54 = iadd ssa_34, ssa_6 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_54 = iadd ssa_34, ssa_5 vec1 32 ssa_55 = ishl ssa_54, ssa_2 vec1 32 ssa_56 = ushr ssa_55, ssa_2 vec1 32 ssa_57 = ishl ssa_56, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_58 = iadd ssa_34, ssa_7 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_58 = iadd ssa_34, ssa_6 vec1 32 ssa_59 = ishl ssa_58, ssa_2 vec1 32 ssa_60 = ushr ssa_59, ssa_2 vec1 32 ssa_61 = ishl ssa_60, ssa_2 - intrinsic store_ssbo (ssa_3, ssa_4, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ - vec1 32 ssa_62 = iadd ssa_34, ssa_8 + intrinsic store_ssbo (ssa_1, ssa_3, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */ + vec1 32 ssa_62 = iadd ssa_34, ssa_7 vec1 32 ssa_63 = ishl ssa_62, ssa_2 vec1 32 ssa_64 = ushr ssa_63, ssa_2 vec1 32 ssa_65 = ishl ssa_64, ssa_2 - intrinsic 
store_ssbo (ssa_3, ssa_4, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ intrinsic store_ssbo (ssa_1, ssa_3, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_37) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_41) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_45) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_49) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_53) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_57) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_61) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
intrinsic store_ssbo (ssa_27, ssa_2, ssa_65) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
- vec1 32 ssa_66 = intrinsic load_ssbo (ssa_4, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_66 = intrinsic load_ssbo (ssa_3, ssa_37) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_67 = ieq32 ssa_66, ssa_27
/* succs: block_1 block_2 */
if ssa_67 {
block block_1: /* preds: block_0 */
- vec1 32 ssa_68 = intrinsic load_ssbo (ssa_4, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_68 = intrinsic load_ssbo (ssa_3, ssa_41) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_69 = ieq32 ssa_68, ssa_27
vec1 32 ssa_70 = b2i32 ssa_69
r0 = imov -ssa_70
/* succs: block_3 */
} else {
block block_2: /* preds: block_0 */
- r0 = imov ssa_1
+ r0 = imov ssa_8
/* succs: block_3 */
}
block block_3: /* preds: block_1 block_2 */
- vec1 32 ssa_73 = ine32 r0, ssa_1
+ vec1 32 ssa_73 = ine32 r0, ssa_8
/* succs: block_4 block_5 */
if ssa_73 {
block block_4: /* preds: block_3 */
- vec1 32 ssa_74 = intrinsic load_ssbo (ssa_4, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_74 = intrinsic load_ssbo (ssa_3, ssa_45) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_75 = ieq32 ssa_74, ssa_27
vec1 32 ssa_76 = b2i32 ssa_75
r1 = imov -ssa_76
/* succs: block_6 */
} else {
block block_5: /* preds: block_3 */
- r1 = imov ssa_1
+ r1 = imov ssa_8
/* succs: block_6 */
}
block block_6: /* preds: block_4 block_5 */
- vec1 32 ssa_79 = ine32 r1, ssa_1
+ vec1 32 ssa_79 = ine32 r1, ssa_8
/* succs: block_7 block_8 */
if ssa_79 {
block block_7: /* preds: block_6 */
- vec1 32 ssa_80 = intrinsic load_ssbo (ssa_4, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_80 = intrinsic load_ssbo (ssa_3, ssa_49) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_81 = ieq32 ssa_80, ssa_27
vec1 32 ssa_82 = b2i32 ssa_81
r2 = imov -ssa_82
/* succs: block_9 */
} else {
block block_8: /* preds: block_6 */
- r2 = imov ssa_1
+ r2 = imov ssa_8
/* succs: block_9 */
}
block block_9: /* preds: block_7 block_8 */
- vec1 32 ssa_85 = ine32 r2, ssa_1
+ vec1 32 ssa_85 = ine32 r2, ssa_8
/* succs: block_10 block_11 */
if ssa_85 {
block block_10: /* preds: block_9 */
- vec1 32 ssa_86 = intrinsic load_ssbo (ssa_4, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_86 = intrinsic load_ssbo (ssa_3, ssa_53) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_87 = ieq32 ssa_86, ssa_27
vec1 32 ssa_88 = b2i32 ssa_87
r3 = imov -ssa_88
/* succs: block_12 */
} else {
block block_11: /* preds: block_9 */
- r3 = imov ssa_1
+ r3 = imov ssa_8
/* succs: block_12 */
}
block block_12: /* preds: block_10 block_11 */
- vec1 32 ssa_91 = ine32 r3, ssa_1
+ vec1 32 ssa_91 = ine32 r3, ssa_8
/* succs: block_13 block_14 */
if ssa_91 {
block block_13: /* preds: block_12 */
- vec1 32 ssa_92 = intrinsic load_ssbo (ssa_4, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_92 = intrinsic load_ssbo (ssa_3, ssa_57) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_93 = ieq32 ssa_92, ssa_27
vec1 32 ssa_94 = b2i32 ssa_93
r4 = imov -ssa_94
/* succs: block_15 */
} else {
block block_14: /* preds: block_12 */
- r4 = imov ssa_1
+ r4 = imov ssa_8
/* succs: block_15 */
}
block block_15: /* preds: block_13 block_14 */
- vec1 32 ssa_97 = ine32 r4, ssa_1
+ vec1 32 ssa_97 = ine32 r4, ssa_8
/* succs: block_16 block_17 */
if ssa_97 {
block block_16: /* preds: block_15 */
- vec1 32 ssa_98 = intrinsic load_ssbo (ssa_4, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_98 = intrinsic load_ssbo (ssa_3, ssa_61) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
vec1 32 ssa_99 = ieq32 ssa_98, ssa_27
vec1 32 ssa_100 = b2i32 ssa_99
r5 = imov -ssa_100
/* succs: block_18 */
} else {
block block_17: /* preds: block_15 */
- r5 = imov ssa_1
+ r5 = imov ssa_8
/* succs: block_18 */
}
block block_18: /* preds: block_16 block_17 */
- vec1 32 ssa_103 = ine32 r5, ssa_1
+ vec1 32 ssa_103 = ine32 r5, ssa_8
/* succs: block_19 block_20 */
if ssa_103 {
block block_19: /* preds: block_18 */
- vec1 32 ssa_104 = intrinsic load_ssbo (ssa_4, ssa_65) (0, 4, 0) /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
- vec1 32 ssa_105 = ieq32 ssa_104, ssa_27
- vec1 32 ssa_106 = b2i32 ssa_105
- r6 = imov -ssa_106
+ vec1 32 ssa_104 = ieq32 ssa_1, ssa_27
+ vec1 32 ssa_105 = b2i32 ssa_104
+ r6 = imov -ssa_105
/* succs: block_21 */
} else {
block block_20: /* preds: block_18 */
- r6 = imov ssa_1
+ r6 = imov ssa_8
/* succs: block_21 */
}
block block_21: /* preds: block_19 block_20 */
- vec1 32 ssa_109 = ine32 r6, ssa_1
- vec1 32 ssa_110 = b32csel ssa_109, ssa_4, ssa_1
- vec1 32 ssa_111 = ishl ssa_25, ssa_2
- vec1 32 ssa_112 = ushr ssa_111, ssa_2
- vec1 32 ssa_113 = ishl ssa_112, ssa_2
- intrinsic store_ssbo (ssa_110, ssa_1, ssa_113) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
+ vec1 32 ssa_108 = ine32 r6, ssa_8
+ vec1 32 ssa_109 = b32csel ssa_108, ssa_3, ssa_8
+ vec1 32 ssa_110 = ishl ssa_25, ssa_2
+ vec1 32 ssa_111 = ushr ssa_110, ssa_2
+ vec1 32 ssa_112 = ishl ssa_111, ssa_2
+ intrinsic store_ssbo (ssa_109, ssa_8, ssa_112) (1, 0, 4, 0) /* wrmask=x */ /* access=0 */ /* align_mul=4 */ /* align_offset=0 */
/* succs: block_22 */
block block_22:
}
Native code for unnamed compute shader GLSL2
-SIMD16 shader: 137 instructions. 0 loops. 3940 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 2192 to 1728 bytes (21%)
+SIMD16 shader: 136 instructions. 0 loops. 3636 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 2176 to 1712 bytes (21%)
START B0 (1486 cycles)
mov(8) g2<1>UW 0x76543210V { align1 WE_all 1Q };
-mov(16) g103<1>UD g0.1<0,1,0>UD { align1 1H compacted };
-mov(16) g105<1>UD g0.6<0,1,0>UD { align1 1H };
+mov(16) g102<1>UD g0.1<0,1,0>UD { align1 1H compacted };
+mov(16) g104<1>UD g0.6<0,1,0>UD { align1 1H };
mov(16) g11<1>UD 0x00000000UD { align1 1H compacted };
shl(16) g17<1>D g1<0,1,0>D 0x00000004UD { align1 1H };
-mov(16) g110<1>UD 0x00000000UD { align1 1H compacted };
+mov(16) g109<1>UD 0x00000000UD { align1 1H compacted };
add(8) g2.8<1>UW g2<8,8,1>UW 0x0008UW { align1 WE_all 1Q };
-shl(16) g13<1>D g103<8,8,1>D 0x00000004UD { align1 1H };
-shl(16) g15<1>D g105<8,8,1>D 0x00000003UD { align1 1H };
+shl(16) g13<1>D g102<8,8,1>D 0x00000004UD { align1 1H };
+shl(16) g15<1>D g104<8,8,1>D 0x00000003UD { align1 1H };
mov(16) g19<1>D g2<8,8,1>UW { align1 1H };
add(16) g21<1>D g19<8,8,1>D g17<8,8,1>D { align1 1H compacted };
and(16) g23<1>UD g21<8,8,1>UD 0x0000000fUD { align1 1H compacted };
shr(16) g25<1>UD g21<8,8,1>UD 0x00000004UD { align1 1H compacted };
add(16) g29<1>D g13<8,8,1>D g23<8,8,1>D { align1 1H compacted };
and(16) g27<1>UD g25<8,8,1>UD 0x00000007UD { align1 1H compacted };
add(16) g31<1>D g15<8,8,1>D g27<8,8,1>D { align1 1H compacted };
-send(16) g107<1>UW g110<8,8,1>UD 0x04205e00
+send(16) g106<1>UW g109<8,8,1>UD 0x04205e00
dp data 1 MsgDesc: ( untyped surface read, Surface = 0, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
-mov(16) g111<1>UD 0x00000004UD { align1 1H compacted };
-send(16) g109<1>UW g111<8,8,1>UD 0x04205e00
+mov(16) g110<1>UD 0x00000004UD { align1 1H compacted };
+send(16) g108<1>UW g110<8,8,1>UD 0x04205e00
dp data 1 MsgDesc: ( untyped surface read, Surface = 0, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
-mov(16) g112<1>UD 0x000001c8UD { align1 1H compacted };
-mul(16) g33<1>D g31<8,8,1>D g107<8,8,1>D { align1 1H compacted };
-mul(16) g38<1>D g107<8,8,1>D g105<8,8,1>D { align1 1H compacted };
-mul(16) g40<1>D g27<8,8,1>D g107<8,8,1>D { align1 1H compacted };
+mov(16) g111<1>UD 0x000001c8UD { align1 1H compacted };
+mul(16) g33<1>D g31<8,8,1>D g106<8,8,1>D { align1 1H compacted };
+mul(16) g38<1>D g106<8,8,1>D g104<8,8,1>D { align1 1H compacted };
+mul(16) g40<1>D g27<8,8,1>D g106<8,8,1>D { align1 1H compacted };
shl(16) g35<1>D g33<8,8,1>D 0x00000004UD { align1 1H };
-add(16) g1<1>D g38<8,8,1>D g103<8,8,1>D { align1 1H compacted };
-shl(16) g42<1>D g109<8,8,1>D 0x00000004UD { align1 1H };
+add(16) g1<1>D g38<8,8,1>D g102<8,8,1>D { align1 1H compacted };
+shl(16) g42<1>D g108<8,8,1>D 0x00000004UD { align1 1H };
add(16) g37<1>D g35<8,8,1>D g29<8,8,1>D { align1 1H compacted };
shl(16) g46<1>D g1<8,8,1>D 0x00000004UD { align1 1H };
mul(16) g44<1>D g40<8,8,1>D g42<8,8,1>D { align1 1H compacted };
add(16) g48<1>D g46<8,8,1>D g44<8,8,1>D { align1 1H compacted };
add(16) g50<1>D g48<8,8,1>D g23<8,8,1>D { align1 1H compacted };
shl(16) g52<1>D g50<8,8,1>D 0x00000003UD { align1 1H };
shl(16) g54<1>D g52<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g56<1>UD g54<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g114<1>D g56<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g114UD g112UD 0x04025e02 0x00000080
+shl(16) g115<1>D g56<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g115UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g57<1>D g52<8,8,1>D 1D { align1 1H compacted };
shl(16) g59<1>D g57<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g61<1>UD g59<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g116<1>D g61<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g116UD g112UD 0x04025e02 0x00000080
+shl(16) g117<1>D g61<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g117UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g62<1>D g52<8,8,1>D 2D { align1 1H compacted };
shl(16) g64<1>D g62<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g66<1>UD g64<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g118<1>D g66<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g118UD g112UD 0x04025e02 0x00000080
+shl(16) g119<1>D g66<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g119UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g67<1>D g52<8,8,1>D 3D { align1 1H compacted };
shl(16) g69<1>D g67<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g71<1>UD g69<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g120<1>D g71<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g120UD g112UD 0x04025e02 0x00000080
+shl(16) g121<1>D g71<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g121UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g72<1>D g52<8,8,1>D 4D { align1 1H compacted };
shl(16) g74<1>D g72<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g76<1>UD g74<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g122<1>D g76<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g122UD g112UD 0x04025e02 0x00000080
+shl(16) g123<1>D g76<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g123UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g77<1>D g52<8,8,1>D 5D { align1 1H compacted };
shl(16) g79<1>D g77<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g81<1>UD g79<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g124<1>D g81<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g124UD g112UD 0x04025e02 0x00000080
+shl(16) g125<1>D g81<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g125UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g82<1>D g52<8,8,1>D 6D { align1 1H compacted };
shl(16) g84<1>D g82<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g86<1>UD g84<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g126<1>D g86<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g126UD g112UD 0x04025e02 0x00000080
+shl(16) g9<1>D g86<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g9UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
add(16) g87<1>D g52<8,8,1>D 7D { align1 1H compacted };
shl(16) g89<1>D g87<8,8,1>D 0x00000002UD { align1 1H };
shr(16) g91<1>UD g89<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g13<1>D g91<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g13UD g112UD 0x04025e02 0x00000080
+shl(16) g113<1>D g91<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g113UD g111UD 0x04025e02 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 2, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g114UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g115UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g116UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g117UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g118UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g119UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g120UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g121UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g122UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g123UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g124UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g125UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g126UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g9UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-sends(16) nullUD g13UD g1UD 0x04025e03 0x00000080
+sends(16) nullUD g113UD g1UD 0x04025e03 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 3, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
-send(16) g92<1>UW g114<8,8,1>UD 0x04205e02
+send(16) g92<1>UW g115<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) null<1>D g92<8,8,1>D g1<8,8,1>D { align1 1H compacted };
(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
END B0 ->B1 ->B2
START B1 <-B0 (312 cycles)
-send(16) g93<1>UW g116<8,8,1>UD 0x04205e02
+send(16) g93<1>UW g117<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) g3<1>D g93<8,8,1>D g1<8,8,1>D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B1 ->B3
START B2 <-B0 (4 cycles)
mov(16) g3<1>UD 0x00000000UD { align1 1H compacted };
END B2 ->B3
START B3 <-B2 <-B1 (26 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g3<8,8,1>D 0D { align1 1H compacted };
(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
END B3 ->B4 ->B5
START B4 <-B3 (312 cycles)
-send(16) g94<1>UW g118<8,8,1>UD 0x04205e02
+send(16) g94<1>UW g119<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) g4<1>D g94<8,8,1>D g1<8,8,1>D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B4 ->B6
START B5 <-B3 (4 cycles)
mov(16) g4<1>UD 0x00000000UD { align1 1H compacted };
END B5 ->B6
START B6 <-B5 <-B4 (26 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g4<8,8,1>D 0D { align1 1H compacted };
(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
END B6 ->B7 ->B8
START B7 <-B6 (312 cycles)
-send(16) g95<1>UW g120<8,8,1>UD 0x04205e02
+send(16) g95<1>UW g121<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) g5<1>D g95<8,8,1>D g1<8,8,1>D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B7 ->B9
START B8 <-B6 (4 cycles)
mov(16) g5<1>UD 0x00000000UD { align1 1H compacted };
END B8 ->B9
START B9 <-B8 <-B7 (26 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g5<8,8,1>D 0D { align1 1H compacted };
(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
END B9 ->B10 ->B11
START B10 <-B9 (312 cycles)
-send(16) g96<1>UW g122<8,8,1>UD 0x04205e02
+send(16) g96<1>UW g123<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) g6<1>D g96<8,8,1>D g1<8,8,1>D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B10 ->B12
START B11 <-B9 (4 cycles)
mov(16) g6<1>UD 0x00000000UD { align1 1H compacted };
END B11 ->B12
START B12 <-B11 <-B10 (26 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g6<8,8,1>D 0D { align1 1H compacted };
(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
END B12 ->B13 ->B14
START B13 <-B12 (312 cycles)
-send(16) g97<1>UW g124<8,8,1>UD 0x04205e02
+send(16) g97<1>UW g125<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) g7<1>D g97<8,8,1>D g1<8,8,1>D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B13 ->B15
START B14 <-B12 (4 cycles)
mov(16) g7<1>UD 0x00000000UD { align1 1H compacted };
END B14 ->B15
START B15 <-B14 <-B13 (26 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g7<8,8,1>D 0D { align1 1H compacted };
(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
END B15 ->B16 ->B17
START B16 <-B15 (312 cycles)
-send(16) g98<1>UW g126<8,8,1>UD 0x04205e02
+send(16) g98<1>UW g9<8,8,1>UD 0x04205e02
dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
cmp.z.f0.0(16) g8<1>D g98<8,8,1>D g1<8,8,1>D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B16 ->B18
START B17 <-B15 (4 cycles)
mov(16) g8<1>UD 0x00000000UD { align1 1H compacted };
END B17 ->B18
START B18 <-B17 <-B16 (26 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g8<8,8,1>D 0D { align1 1H compacted };
-(+f0.0) if(16) JIP: 56 UIP: 64 { align1 1H };
+(+f0.0) if(16) JIP: 40 UIP: 48 { align1 1H };
END B18 ->B19 ->B20
- START B19 <-B18 (312 cycles)
-send(16) g99<1>UW g13<8,8,1>UD 0x04205e02
- dp data 1 MsgDesc: ( untyped surface read, Surface = 2, SIMD16, Mask = 0xe) mlen 2 rlen 2 { align1 1H };
-cmp.z.f0.0(16) g9<1>D g99<8,8,1>D g1<8,8,1>D { align1 1H compacted };
+ START B19 <-B18 (8 cycles)
+cmp.z.f0.0(16) g9<1>D g1<8,8,1>D 456D { align1 1H compacted };
else(16) JIP: 24 UIP: 24 { align1 1H };
END B19 ->B21
START B20 <-B18 (4 cycles)
mov(16) g9<1>UD 0x00000000UD { align1 1H compacted };
END B20 ->B21
START B21 <-B20 <-B19 (86 cycles)
endif(16) JIP: 16 { align1 1H };
cmp.nz.f0.0(16) null<1>D g9<8,8,1>D 0D { align1 1H compacted };
-shl(16) g100<1>D g37<8,8,1>D 0x00000002UD { align1 1H };
-(-f0.0) sel(16) g16<1>UD g11<8,8,1>UD 0x00000001UD { align1 1H };
-shr(16) g102<1>UD g100<8,8,1>UD 0x00000002UD { align1 1H compacted };
-shl(16) g14<1>D g102<8,8,1>D 0x00000002UD { align1 1H };
-sends(16) nullUD g14UD g16UD 0x04025e01 0x00000080
+shl(16) g99<1>D g37<8,8,1>D 0x00000002UD { align1 1H };
+(-f0.0) sel(16) g13<1>UD g11<8,8,1>UD 0x00000001UD { align1 1H };
+shr(16) g101<1>UD g99<8,8,1>UD 0x00000002UD { align1 1H compacted };
+shl(16) g10<1>D g101<8,8,1>D 0x00000002UD { align1 1H };
+sends(16) nullUD g10UD g13UD 0x04025e01 0x00000080
dp data 1 MsgDesc: ( DC untyped surface write, Surface = 1, SIMD16, Mask = 0xe) mlen 2 ex_mlen 2 rlen 0 { align1 1H };
mov(8) g127<1>UD g0<8,8,1>UD { align1 WE_all 1Q compacted };
send(16) null<1>UW g127<8,8,1>UW 0x82000010
thread_spawner MsgDesc: mlen 1 rlen 0 { align1 WE_all 1H EOT };
END B21
socket failed (2) - closing renderer
vtest_main_run_renderer: vtest initialized.