jenkins@otc-gfxtest-icl-01:~/workspace/Leeroy/repos/mesa$ INTEL_DEBUG=fs LD_LIBRARY_PATH=/tmp/build_root/m64/lib:/tmp/build_root/m64/lib64:/tmp/build_root/m64/lib/x86_64-linux-gnu:/usr/lib:/usr/lib64:/usr/lib/x86_64-linux-gnu:/tmp/build_root/m64/lib/dri:/tmp/build_root/m64/lib64/dri:/tmp/build_root/m64/lib/x86_64-linux-gnu/dri:/usr/lib/dri:/usr/lib64/dri:/usr/lib/x86_64-linux-gnu/dri:/tmp/build_root/m64/lib/piglit/lib LIBGL_DRIVERS_PATH=/tmp/build_root/m64/lib/dri:/tmp/build_root/m64/lib64/dri:/tmp/build_root/m64/lib/x86_64-linux-gnu/dri:/usr/lib/dri:/usr/lib64/dri:/usr/lib/x86_64-linux-gnu/dri VK_ICD_FILENAMES=/tmp/build_root/m64/share/vulkan/icd.d/intel_icd.x86_64.json PIGLIT_DEBUG=1 PATH=/tmp/build_root/m64/bin:/usr/local/bin:/usr/bin:/bin:/usr/games MESA_EXTENSION_OVERRIDE="+GL_ARB_gpu_shader_fp64 +GL_ARB_vertex_attrib_64bit +GL_ARB_gpu_shader_int64 +GL_ARB_shader_ballot" DISPLAY=:0 /tmp/build_root/m64/lib/piglit/bin/shader_runner /tmp/build_root/m64/lib/piglit/tests/spec/arb_gpu_shader_int64/execution/fs-ishl-then-ushr.shader_test -auto -fbo
WARNING: i965 does not fully support Gen11 yet. Instability or lower performance might occur.
piglit: debug: Requested an OpenGL 4.0 Core Context, and received a matching 4.5 context

GLSL IR for native fragment shader 0:
(
(declare (location=1 shader_in ) vec4 gl_Color)
(declare (location=2 shader_out ) vec4 gl_FragColor)
( function main
  (signature void
    (parameters
    )
    (
      (assign (xyzw) (var_ref gl_FragColor) (var_ref gl_Color) )
    ))
)
)

NIR (SSA form) for fragment shader:
shader: MESA_SHADER_FRAGMENT
name: GLSL0
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_var shader_in INTERP_MODE_SMOOTH vec4 gl_Color (VARYING_SLOT_COL0, 1, 0)
decl_var shader_out INTERP_MODE_NONE vec4 gl_FragColor (FRAG_RESULT_COLOR, 4, 0)
decl_function main (0 params)

impl main {
    block block_0:
    /* preds: */
    vec2 32 ssa_0 = intrinsic load_barycentric_pixel () (1) /* interp_mode=1 */
    vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */)
    vec4 32 ssa_2 = intrinsic load_interpolated_input (ssa_0, ssa_1) (1, 0) /* base=1 */ /* component=0 */
    intrinsic store_output (ssa_2, ssa_1) (4, 15, 0) /* base=4 */ /* wrmask=xyzw */ /* component=0 */ /* gl_FragColor */
    /* succs: block_1 */
    block block_1:
}

NIR (final form) for fragment shader:
shader: MESA_SHADER_FRAGMENT
name: GLSL0
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_var shader_in INTERP_MODE_SMOOTH vec4 gl_Color (VARYING_SLOT_COL0, 1, 0)
decl_var shader_out INTERP_MODE_NONE vec4 gl_FragColor (FRAG_RESULT_COLOR, 4, 0)
decl_function main (0 params)

impl main {
    block block_0:
    /* preds: */
    vec2 32 ssa_0 = intrinsic load_barycentric_pixel () (1) /* interp_mode=1 */
    vec1 32 ssa_1 = load_const (0x00000000 /* 0.000000 */)
    vec4 32 ssa_2 = intrinsic load_interpolated_input (ssa_0, ssa_1) (1, 0) /* base=1 */ /* component=0 */
    intrinsic store_output (ssa_2, ssa_1) (4, 15, 0) /* base=4 */ /* wrmask=xyzw */ /* component=0 */ /* gl_FragColor */
    /* succs: block_1 */
    block block_1:
}
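(Note: the GLSL0 IR and NIR above are just a color pass-through; this helper shader is generated internally by Mesa/piglit rather than coming from the .shader_test file. As legacy-style GLSL it would read roughly like the following sketch, shown only to make the dump easier to follow:)

void main()
{
    /* smooth-interpolated vertex color written straight to the render target */
    gl_FragColor = gl_Color;
}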
Native code for unnamed fragment shader GLSL0
SIMD8 shader: 9 instructions. 0 loops. 66 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 144 to 144 bytes (0%)
START B0 (66 cycles)
mad(8) acc0<1>NF g4.3<0,1,0>F g2<8,8,1>F g4.0<0,1,0>F { align1 1Q };
mad(8) g124<1>F acc0<8,8,1>NF g3<8,8,1>F g4.1<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g4.7<0,1,0>F g2<8,8,1>F g4.4<0,1,0>F { align1 1Q };
mad(8) g125<1>F acc0<8,8,1>NF g3<8,8,1>F g4.5<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g5.3<0,1,0>F g2<8,8,1>F g5.0<0,1,0>F { align1 1Q };
mad(8) g126<1>F acc0<8,8,1>NF g3<8,8,1>F g5.1<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g5.7<0,1,0>F g2<8,8,1>F g5.4<0,1,0>F { align1 1Q };
mad(8) g127<1>F acc0<8,8,1>NF g3<8,8,1>F g5.5<0,1,0>F { align1 1Q };
sendc(8) null<1>UW g124<0,1,0>F 0x88031400 render MsgDesc: RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
END B0

Native code for unnamed fragment shader GLSL0
SIMD16 shader: 17 instructions. 0 loops. 76 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 272 to 272 bytes (0%)
START B0 (76 cycles)
mad(8) acc0<1>NF g6.3<0,1,0>F g2<8,8,1>F g6.0<0,1,0>F { align1 1Q };
mad(8) g120<1>F acc0<8,8,1>NF g3<8,8,1>F g6.1<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g6.3<0,1,0>F g4<8,8,1>F g6.0<0,1,0>F { align1 2Q };
mad(8) g121<1>F acc0<8,8,1>NF g5<8,8,1>F g6.1<0,1,0>F { align1 2Q };
mad(8) acc0<1>NF g6.7<0,1,0>F g2<8,8,1>F g6.4<0,1,0>F { align1 1Q };
mad(8) g122<1>F acc0<8,8,1>NF g3<8,8,1>F g6.5<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g6.7<0,1,0>F g4<8,8,1>F g6.4<0,1,0>F { align1 2Q };
mad(8) g123<1>F acc0<8,8,1>NF g5<8,8,1>F g6.5<0,1,0>F { align1 2Q };
mad(8) acc0<1>NF g7.3<0,1,0>F g2<8,8,1>F g7.0<0,1,0>F { align1 1Q };
mad(8) g124<1>F acc0<8,8,1>NF g3<8,8,1>F g7.1<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g7.3<0,1,0>F g4<8,8,1>F g7.0<0,1,0>F { align1 2Q };
mad(8) g125<1>F acc0<8,8,1>NF g5<8,8,1>F g7.1<0,1,0>F { align1 2Q };
mad(8) acc0<1>NF g7.7<0,1,0>F g2<8,8,1>F g7.4<0,1,0>F { align1 1Q };
mad(8) g126<1>F acc0<8,8,1>NF g3<8,8,1>F g7.5<0,1,0>F { align1 1Q };
mad(8) acc0<1>NF g7.7<0,1,0>F g4<8,8,1>F g7.4<0,1,0>F { align1 2Q };
mad(8) g127<1>F acc0<8,8,1>NF g5<8,8,1>F g7.5<0,1,0>F { align1 2Q };
sendc(16) null<1>UW g120<0,1,0>F 0x90031000 render MsgDesc: RT write SIMD16 LastRT Surface = 0 mlen 8 rlen 0 { align1 1H EOT };
END B0

NIR (SSA form) for fragment shader:
shader: MESA_SHADER_FRAGMENT
name: BLORP-clear
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_var shader_in INTERP_MODE_FLAT vec4 clear_color (VARYING_SLOT_VAR0, 31, 0)
decl_var shader_out INTERP_MODE_NONE vec4 gl_FragColor (FRAG_RESULT_COLOR, 4, 0)
decl_function main (0 params)

impl main {
    block block_0:
    /* preds: */
    vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
    vec4 32 ssa_1 = intrinsic load_input (ssa_0) (31, 0) /* base=31 */ /* component=0 */ /* clear_color */
    intrinsic store_output (ssa_1, ssa_0) (4, 15, 0) /* base=4 */ /* wrmask=xyzw */ /* component=0 */ /* gl_FragColor */
    /* succs: block_1 */
    block block_1:
}

NIR (final form) for fragment shader:
shader: MESA_SHADER_FRAGMENT
name: BLORP-clear
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_var shader_in INTERP_MODE_FLAT vec4 clear_color (VARYING_SLOT_VAR0, 31, 0)
decl_var shader_out INTERP_MODE_NONE vec4 gl_FragColor (FRAG_RESULT_COLOR, 4, 0)
decl_function main (0 params)

impl main {
    block block_0:
    /* preds: */
    vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
    vec4 32 ssa_1 = intrinsic load_input (ssa_0) (31, 0) /* base=31 */ /* component=0 */ /* clear_color */
    intrinsic store_output (ssa_1, ssa_0) (4, 15, 0) /* base=4 */ /* wrmask=xyzw */ /* component=0 */ /* gl_FragColor */
    /* succs: block_1 */
    block block_1:
}

Native code for unnamed fragment shader BLORP-clear
SIMD16 shader: 2 instructions. 0 loops. 0 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 32 to 32 bytes (0%)
START B0 (0 cycles)
mov(4) g114<1>F g2.3<8,2,4>F { align1 WE_all 1N };
sendc(16) null<1>UW g114<0,1,0>F 0x82031100 render MsgDesc: RT write SIMD16/RepData LastRT Surface = 0 mlen 1 rlen 0 { align1 1H EOT };
END B0
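(Note: BLORP-clear is the driver's internal fast-clear shader. BLORP builds it directly in NIR when the framebuffer is cleared, so it never exists as GLSL; conceptually it is just a flat-shaded write of the clear color, roughly the sketch below. The variable names follow the NIR declarations above; the GLSL itself is only illustrative.)

flat in vec4 clear_color;
out vec4 frag_color;

void main()
{
    /* replicate the flat clear color to every pixel of the cleared region */
    frag_color = clear_color;
}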
NIR (SSA form) for fragment shader:
shader: MESA_SHADER_FRAGMENT
name: GLSL3
inputs: 0
outputs: 0
uniforms: 64
shared: 0
decl_var uniform INTERP_MODE_NONE uint64_t[8] ival (0, 0, 0) = { { 0xbaddc0dedeadbeef }, { 0xdeadbea7ba5eba11 }, { 0xf0f1f2f3f4f5f6f7 }, { 0xf1f2f3f4f5f6f7f }, { 0x7071727374757677 }, { 0x717273747576777 }, { 0x1f2e3d4c5b6a7988 }, { 0xbadb100ddeadc0de } }
decl_var shader_out INTERP_MODE_NONE vec4 piglit_fragcolor (FRAG_RESULT_DATA0, 8, 0)
decl_function main (0 params)

impl main {
    block block_0:
    /* preds: */
    vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
    vec1 32 ssa_1 = load_const (0x00000001 /* 0.000000 */)
    vec1 32 ssa_2 = load_const (0x00000002 /* 0.000000 */)
    vec1 32 ssa_3 = load_const (0x00000004 /* 0.000000 */)
    vec1 32 ssa_4 = load_const (0x00000003 /* 0.000000 */)
    vec1 32 ssa_5 = load_const (0x00000020 /* 0.000000 */)
    vec1 32 ssa_6 = load_const (0x00000008 /* 0.000000 */)
    vec1 32 ssa_7 = load_const (0x00000018 /* 0.000000 */)
    vec1 32 ssa_8 = load_const (0x00000010 /* 0.000000 */)
    vec1 32 ssa_9 = load_const (0x00000040 /* 0.000000 */)
    vec1 32 ssa_10 = load_const (0x00000080 /* 0.000000 */)
    vec1 64 ssa_11 = intrinsic load_uniform (ssa_0) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_12 = unpack_64_2x32_split_x ssa_11
    vec1 32 ssa_13 = extract_u8 ssa_12, ssa_0
    vec1 32 ssa_14 = load_const (0x000000ef /* 0.000000 */)
    vec1 32 ssa_15 = ine32 ssa_13, ssa_14
    vec1 32 ssa_16 = b32csel ssa_15, ssa_1, ssa_0
    vec1 64 ssa_17 = intrinsic load_uniform (ssa_6) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_18 = unpack_64_2x32_split_x ssa_17
    vec1 32 ssa_19 = extract_u8 ssa_18, ssa_1
    vec1 32 ssa_20 = load_const (0x000000ba /* 0.000000 */)
    vec1 32 ssa_21 = ine32 ssa_19, ssa_20
    vec1 32 ssa_22 = b32csel ssa_21, ssa_2, ssa_0
    vec1 32 ssa_23 = ior ssa_16, ssa_22
    vec1 64 ssa_24 = intrinsic load_uniform (ssa_8) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_25 = unpack_64_2x32_split_x ssa_24
    vec1 32 ssa_26 = extract_u8 ssa_25, ssa_2
    vec1 32 ssa_27 = load_const (0x000000f5 /* 0.000000 */)
    vec1 32 ssa_28 = ine32 ssa_26, ssa_27
    vec1 32 ssa_29 = b32csel ssa_28, ssa_3, ssa_0
    vec1 32 ssa_30 = ior ssa_23, ssa_29
    vec1 64 ssa_31 = intrinsic load_uniform (ssa_7) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_32 = unpack_64_2x32_split_x ssa_31
    vec1 32 ssa_33 = extract_u8 ssa_32, ssa_4
    vec1 32 ssa_34 = load_const (0x0000004f /* 0.000000 */)
    vec1 32 ssa_35 = ine32 ssa_33, ssa_34
    vec1 32 ssa_36 = b32csel ssa_35, ssa_6, ssa_0
    vec1 32 ssa_37 = ior ssa_30, ssa_36
    vec1 64 ssa_38 = intrinsic load_uniform (ssa_5) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_39 = unpack_64_2x32_split_x ssa_38
    vec1 32 ssa_40 = unpack_64_2x32_split_y ssa_38
    vec1 32 ssa_41 = ishl ssa_40, ssa_7
    vec1 32 ssa_42 = ushr ssa_39, ssa_6
    vec1 32 ssa_43 = ior ssa_41, ssa_42
    vec1 32 ssa_44 = extract_u8 ssa_43, ssa_4
    vec1 32 ssa_45 = load_const (0x00000073 /* 0.000000 */)
    vec1 32 ssa_46 = ine32 ssa_44, ssa_45
    vec1 32 ssa_47 = b32csel ssa_46, ssa_8, ssa_0
    vec1 32 ssa_48 = ior ssa_37, ssa_47
    vec1 32 ssa_49 = load_const (0x00000028 /* 0.000000 */)
    vec1 64 ssa_50 = intrinsic load_uniform (ssa_49) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_51 = unpack_64_2x32_split_x ssa_50
    vec1 32 ssa_52 = unpack_64_2x32_split_y ssa_50
    vec1 32 ssa_53 = ishl ssa_52, ssa_8
    vec1 32 ssa_54 = extract_u16 ssa_51, ssa_1
    vec1 32 ssa_55 = ior ssa_53, ssa_54
    vec1 32 ssa_56 = extract_u8 ssa_55, ssa_4
    vec1 32 ssa_57 = load_const (0x00000027 /* 0.000000 */)
    vec1 32 ssa_58 = ine32 ssa_56, ssa_57
    vec1 32 ssa_59 = b32csel ssa_58, ssa_5, ssa_0
    vec1 32 ssa_60 = ior ssa_48, ssa_59
    vec1 32 ssa_61 = load_const (0x00000030 /* 0.000000 */)
    vec1 64 ssa_62 = intrinsic load_uniform (ssa_61) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_63 = unpack_64_2x32_split_x ssa_62
    vec1 32 ssa_64 = unpack_64_2x32_split_y ssa_62
    vec1 32 ssa_65 = ishl ssa_64, ssa_6
    vec1 32 ssa_66 = extract_u8 ssa_63, ssa_4
    vec1 32 ssa_67 = ior ssa_65, ssa_66
    vec1 32 ssa_68 = extract_u8 ssa_67, ssa_4
    vec1 32 ssa_69 = load_const (0x0000002e /* 0.000000 */)
    vec1 32 ssa_70 = ine32 ssa_68, ssa_69
    vec1 32 ssa_71 = b32csel ssa_70, ssa_9, ssa_0
    vec1 32 ssa_72 = ior ssa_60, ssa_71
    vec1 32 ssa_73 = load_const (0x00000038 /* 0.000000 */)
    vec1 64 ssa_74 = intrinsic load_uniform (ssa_73) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_75 = unpack_64_2x32_split_y ssa_74
    vec1 32 ssa_76 = extract_u8 ssa_75, ssa_4
    vec1 32 ssa_77 = ine32 ssa_76, ssa_20
    vec1 32 ssa_78 = b32csel ssa_77, ssa_10, ssa_0
    vec1 32 ssa_79 = ior ssa_72, ssa_78
    vec1 32 ssa_80 = ieq32 ssa_79, ssa_0
    /* succs: block_1 block_2 */
    if ssa_80 {
        block block_1:
        /* preds: block_0 */
        vec1 32 ssa_81 = load_const (0x3f800000 /* 1.000000 */)
        /* succs: block_3 */
    } else {
        block block_2:
        /* preds: block_0 */
        vec1 32 ssa_82 = u2f32 ssa_79
        vec1 32 ssa_83 = load_const (0x3b808081 /* 0.003922 */)
        vec1 32 ssa_84 = fmul ssa_82, ssa_83
        vec1 32 ssa_85 = load_const (0x3f800000 /* 1.000000 */)
        /* succs: block_3 */
    }
    block block_3:
    /* preds: block_1 block_2 */
    vec1 32 ssa_86 = phi block_1: ssa_0, block_2: ssa_84
    vec1 32 ssa_87 = phi block_1: ssa_81, block_2: ssa_0
    vec1 32 ssa_88 = phi block_1: ssa_81, block_2: ssa_85
    vec4 32 ssa_89 = vec4 ssa_86, ssa_87, ssa_0, ssa_88
    intrinsic store_output (ssa_89, ssa_0) (8, 15, 0) /* base=8 */ /* wrmask=xyzw */ /* component=0 */ /* piglit_fragcolor */
    /* succs: block_4 */
    block block_4:
}

NIR (final form) for fragment shader:
shader: MESA_SHADER_FRAGMENT
name: GLSL3
inputs: 0
outputs: 0
uniforms: 64
shared: 0
decl_var uniform INTERP_MODE_NONE uint64_t[8] ival (0, 0, 0) = { { 0xbaddc0dedeadbeef }, { 0xdeadbea7ba5eba11 }, { 0xf0f1f2f3f4f5f6f7 }, { 0xf1f2f3f4f5f6f7f }, { 0x7071727374757677 }, { 0x717273747576777 }, { 0x1f2e3d4c5b6a7988 }, { 0xbadb100ddeadc0de } }
decl_var shader_out INTERP_MODE_NONE vec4 piglit_fragcolor (FRAG_RESULT_DATA0, 8, 0)
decl_function main (0 params)

impl main {
    decl_reg vec1 32 r0
    decl_reg vec1 32 r1
    decl_reg vec1 32 r2
    block block_0:
    /* preds: */
    vec1 32 ssa_0 = load_const (0x00000000 /* 0.000000 */)
    vec1 32 ssa_1 = load_const (0x00000001 /* 0.000000 */)
    vec1 32 ssa_2 = load_const (0x00000002 /* 0.000000 */)
    vec1 32 ssa_3 = load_const (0x00000004 /* 0.000000 */)
    vec1 32 ssa_4 = load_const (0x00000003 /* 0.000000 */)
    vec1 32 ssa_5 = load_const (0x00000020 /* 0.000000 */)
    vec1 32 ssa_6 = load_const (0x00000008 /* 0.000000 */)
    vec1 32 ssa_7 = load_const (0x00000018 /* 0.000000 */)
    vec1 32 ssa_8 = load_const (0x00000010 /* 0.000000 */)
    vec1 32 ssa_9 = load_const (0x00000040 /* 0.000000 */)
    vec1 32 ssa_10 = load_const (0x00000080 /* 0.000000 */)
    vec1 64 ssa_11 = intrinsic load_uniform (ssa_0) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_12 = unpack_64_2x32_split_x ssa_11
    vec1 32 ssa_13 = extract_u8 ssa_12, ssa_0
    vec1 32 ssa_14 = load_const (0x000000ef /* 0.000000 */)
    vec1 32 ssa_15 = ine32 ssa_13, ssa_14
    vec1 32 ssa_16 = b32csel ssa_15, ssa_1, ssa_0
    vec1 64 ssa_17 = intrinsic load_uniform (ssa_6) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_18 = unpack_64_2x32_split_x ssa_17
    vec1 32 ssa_19 = extract_u8 ssa_18, ssa_1
    vec1 32 ssa_20 = load_const (0x000000ba /* 0.000000 */)
    vec1 32 ssa_21 = ine32 ssa_19, ssa_20
    vec1 32 ssa_22 = b32csel ssa_21, ssa_2, ssa_0
    vec1 32 ssa_23 = ior ssa_16, ssa_22
    vec1 64 ssa_24 = intrinsic load_uniform (ssa_8) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_25 = unpack_64_2x32_split_x ssa_24
    vec1 32 ssa_26 = extract_u8 ssa_25, ssa_2
    vec1 32 ssa_27 = load_const (0x000000f5 /* 0.000000 */)
    vec1 32 ssa_28 = ine32 ssa_26, ssa_27
    vec1 32 ssa_29 = b32csel ssa_28, ssa_3, ssa_0
    vec1 32 ssa_30 = ior ssa_23, ssa_29
    vec1 64 ssa_31 = intrinsic load_uniform (ssa_7) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_32 = unpack_64_2x32_split_x ssa_31
    vec1 32 ssa_33 = extract_u8 ssa_32, ssa_4
    vec1 32 ssa_34 = load_const (0x0000004f /* 0.000000 */)
    vec1 32 ssa_35 = ine32 ssa_33, ssa_34
    vec1 32 ssa_36 = b32csel ssa_35, ssa_6, ssa_0
    vec1 32 ssa_37 = ior ssa_30, ssa_36
    vec1 64 ssa_38 = intrinsic load_uniform (ssa_5) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_39 = unpack_64_2x32_split_x ssa_38
    vec1 32 ssa_40 = unpack_64_2x32_split_y ssa_38
    vec1 32 ssa_41 = ishl ssa_40, ssa_7
    vec1 32 ssa_42 = ushr ssa_39, ssa_6
    vec1 32 ssa_43 = ior ssa_41, ssa_42
    vec1 32 ssa_44 = extract_u8 ssa_43, ssa_4
    vec1 32 ssa_45 = load_const (0x00000073 /* 0.000000 */)
    vec1 32 ssa_46 = ine32 ssa_44, ssa_45
    vec1 32 ssa_47 = b32csel ssa_46, ssa_8, ssa_0
    vec1 32 ssa_48 = ior ssa_37, ssa_47
    vec1 32 ssa_49 = load_const (0x00000028 /* 0.000000 */)
    vec1 64 ssa_50 = intrinsic load_uniform (ssa_49) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_51 = unpack_64_2x32_split_x ssa_50
    vec1 32 ssa_52 = unpack_64_2x32_split_y ssa_50
    vec1 32 ssa_53 = ishl ssa_52, ssa_8
    vec1 32 ssa_54 = extract_u16 ssa_51, ssa_1
    vec1 32 ssa_55 = ior ssa_53, ssa_54
    vec1 32 ssa_56 = extract_u8 ssa_55, ssa_4
    vec1 32 ssa_57 = load_const (0x00000027 /* 0.000000 */)
    vec1 32 ssa_58 = ine32 ssa_56, ssa_57
    vec1 32 ssa_59 = b32csel ssa_58, ssa_5, ssa_0
    vec1 32 ssa_60 = ior ssa_48, ssa_59
    vec1 32 ssa_61 = load_const (0x00000030 /* 0.000000 */)
    vec1 64 ssa_62 = intrinsic load_uniform (ssa_61) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_63 = unpack_64_2x32_split_x ssa_62
    vec1 32 ssa_64 = unpack_64_2x32_split_y ssa_62
    vec1 32 ssa_65 = ishl ssa_64, ssa_6
    vec1 32 ssa_66 = extract_u8 ssa_63, ssa_4
    vec1 32 ssa_67 = ior ssa_65, ssa_66
    vec1 32 ssa_68 = extract_u8 ssa_67, ssa_4
    vec1 32 ssa_69 = load_const (0x0000002e /* 0.000000 */)
    vec1 32 ssa_70 = ine32 ssa_68, ssa_69
    vec1 32 ssa_71 = b32csel ssa_70, ssa_9, ssa_0
    vec1 32 ssa_72 = ior ssa_60, ssa_71
    vec1 32 ssa_73 = load_const (0x00000038 /* 0.000000 */)
    vec1 64 ssa_74 = intrinsic load_uniform (ssa_73) (0, 64) /* base=0 */ /* range=64 */ /* ival */
    vec1 32 ssa_75 = unpack_64_2x32_split_y ssa_74
    vec1 32 ssa_76 = extract_u8 ssa_75, ssa_4
    vec1 32 ssa_77 = ine32 ssa_76, ssa_20
    vec1 32 ssa_78 = b32csel ssa_77, ssa_10, ssa_0
    vec1 32 ssa_79 = ior ssa_72, ssa_78
    vec1 32 ssa_80 = ieq32 ssa_79, ssa_0
    /* succs: block_1 block_2 */
    if ssa_80 {
        block block_1:
        /* preds: block_0 */
        vec1 32 ssa_81 = load_const (0x3f800000 /* 1.000000 */)
        r2 = imov ssa_81
        r1 = imov r2
        r0 = imov ssa_0
        /* succs: block_3 */
    } else {
        block block_2:
        /* preds: block_0 */
        vec1 32 ssa_82 = u2f32 ssa_79
        vec1 32 ssa_83 = load_const (0x3b808081 /* 0.003922 */)
        r0 = fmul ssa_82, ssa_83
        vec1 32 ssa_85 = load_const (0x3f800000 /* 1.000000 */)
        r2 = imov ssa_85
        r1 = imov ssa_0
        /* succs: block_3 */
    }
    block block_3:
    /* preds: block_1 block_2 */
    vec4 32 ssa_89 = vec4 r0, r1, ssa_0, r2
    intrinsic store_output (ssa_89, ssa_0) (8, 15, 0) /* base=8 */ /* wrmask=xyzw */ /* component=0 */ /* piglit_fragcolor */
    /* succs: block_4 */
    block block_4:
}

Native code for unnamed fragment shader GLSL3
SIMD8 shader: 76 instructions. 0 loops. 400 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 1216 to 1040 bytes (14%)
START B0 (354 cycles)
mov(8) g4<1>UD 0x00000000UD { align1 1Q compacted };
mov(8) g5.1<2>UD g2.1<0,1,0>UD { align1 1Q };
mov(8) g7.1<2>UD g2.3<0,1,0>UD { align1 1Q };
mov(8) g9.1<2>UD g2.5<0,1,0>UD { align1 1Q };
mov(8) g11.1<2>UD g2.7<0,1,0>UD { align1 1Q };
mov(8) g13.1<2>UD g3.1<0,1,0>UD { align1 1Q };
mov(8) g15.1<2>UD g3.3<0,1,0>UD { align1 1Q };
mov(8) g17.1<2>UD g3.5<0,1,0>UD { align1 1Q };
mov(8) g20.1<2>UD g3.7<0,1,0>UD { align1 1Q };
mov(8) g5<2>UD g2<0,1,0>UD { align1 1Q };
mov(8) g7<2>UD g2.2<0,1,0>UD { align1 1Q };
mov(8) g9<2>UD g2.4<0,1,0>UD { align1 1Q };
mov(8) g11<2>UD g2.6<0,1,0>UD { align1 1Q };
mov(8) g13<2>UD g3<0,1,0>UD { align1 1Q };
mov(8) g15<2>UD g3.2<0,1,0>UD { align1 1Q };
mov(8) g17<2>UD g3.4<0,1,0>UD { align1 1Q };
mov(8) g20<2>UD g3.6<0,1,0>UD { align1 1Q };
mov(8) g2<1>UD g5<8,4,2>UD { align1 1Q };
mov(8) g5<1>UD g7<8,4,2>UD { align1 1Q };
mov(8) g7<1>UD g9<8,4,2>UD { align1 1Q };
mov(8) g8<1>UD g11<8,4,2>UD { align1 1Q };
shr(8) g11<1>UD g13<8,4,2>UD 0x00000008UD { align1 1Q };
shl(8) g10<1>D g13.1<8,4,2>D 0x00000018UD { align1 1Q };
mov(8) g14<1>UD g15<8,4,2>UD { align1 1Q };
shl(8) g19<1>D g17.1<8,4,2>D 0x00000008UD { align1 1Q };
shl(8) g16<1>D g15.1<8,4,2>D 0x00000010UD { align1 1Q };
mov(8) g3<1>UD g2<32,8,4>UB { align1 1Q };
mov(8) g15<1>UD g17<8,4,2>UD { align1 1Q };
mov(8) g6<1>UD g5.1<32,8,4>UB { align1 1Q };
mov(8) g9<1>UD g7.2<32,8,4>UB { align1 1Q };
mov(8) g18<1>UD g20.1<8,4,2>UD { align1 1Q };
mov(8) g12<1>UD g8.3<32,8,4>UB { align1 1Q };
or(8) g13<1>UD g10<8,8,1>UD g11<8,8,1>UD { align1 1Q compacted };
mov(8) g17<1>UD g14.1<16,8,2>UW { align1 1Q };
cmp.nz.f0.0(8) null<1>D g3<8,8,1>D 239D { align1 1Q compacted };
mov(8) g20<1>UD g15.3<32,8,4>UB { align1 1Q };
mov(8) g25<1>UD g18.3<32,8,4>UB { align1 1Q };
mov(8) g15<1>UD g13.3<32,8,4>UB { align1 1Q };
or(8) g18<1>UD g16<8,8,1>UD g17<8,8,1>UD { align1 1Q compacted };
(-f0.0) sel(8) g5<1>UD g4<8,8,1>UD 0x00000001UD { align1 1Q };
or(8) g21<1>UD g19<8,8,1>UD g20<8,8,1>UD { align1 1Q compacted };
mov(8) g19<1>UD g18.3<32,8,4>UB { align1 1Q };
cmp.nz.f0.0(8) null<1>D g6<8,8,1>D 186D { align1 1Q compacted };
mov(8) g22<1>UD g21.3<32,8,4>UB { align1 1Q };
(-f0.0) sel(8) g7<1>UD g4<8,8,1>UD 0x00000002UD { align1 1Q };
or(8) g8<1>UD g5<8,8,1>UD g7<8,8,1>UD { align1 1Q compacted };
cmp.nz.f0.0(8) null<1>D g9<8,8,1>D 245D { align1 1Q compacted };
(-f0.0) sel(8) g10<1>UD g4<8,8,1>UD 0x00000004UD { align1 1Q };
or(8) g11<1>UD g8<8,8,1>UD g10<8,8,1>UD { align1 1Q compacted };
cmp.nz.f0.0(8) null<1>D g12<8,8,1>D 79D { align1 1Q compacted };
(-f0.0) sel(8) g13<1>UD g4<8,8,1>UD 0x00000008UD { align1 1Q };
or(8) g14<1>UD g11<8,8,1>UD g13<8,8,1>UD { align1 1Q compacted };
cmp.nz.f0.0(8) null<1>D g15<8,8,1>D 115D { align1 1Q compacted };
(-f0.0) sel(8) g16<1>UD g4<8,8,1>UD 0x00000010UD { align1 1Q };
or(8) g17<1>UD g14<8,8,1>UD g16<8,8,1>UD { align1 1Q compacted };
cmp.nz.f0.0(8) null<1>D g19<8,8,1>D 39D { align1 1Q compacted };
(-f0.0) sel(8) g20<1>UD g4<8,8,1>UD 0x00000020UD { align1 1Q };
or(8) g21<1>UD g17<8,8,1>UD g20<8,8,1>UD { align1 1Q compacted };
cmp.nz.f0.0(8) null<1>D g22<8,8,1>D 46D { align1 1Q compacted };
(-f0.0) sel(8) g23<1>UD g4<8,8,1>UD 0x00000040UD { align1 1Q };
or(8) g24<1>UD g21<8,8,1>UD g23<8,8,1>UD { align1 1Q compacted };
cmp.nz.f0.0(8) null<1>D g25<8,8,1>D 186D { align1 1Q compacted };
(-f0.0) sel(8) g26<1>UD g4<8,8,1>UD 0x00000080UD { align1 1Q };
or.z.f0.0(8) g27<1>UD g24<8,8,1>UD g26<8,8,1>UD { align1 1Q compacted };
(+f0.0) if(8) JIP: 72 UIP: 120 { align1 1Q };
END B0 ->B1 ->B2
START B1 <-B0 (8 cycles)
mov(8) g127<1>D 1065353216D { align1 1Q };
mov(8) g125<1>D 1065353216D { align1 1Q };
mov(8) g124<1>UD 0x00000000UD { align1 1Q compacted };
else(8) JIP: 64 UIP: 64 { align1 1Q };
END B1 ->B3
START B2 <-B0 (18 cycles)
mov(8) g28<1>F g27<8,8,1>UD { align1 1Q compacted };
mov(8) g127<1>D 1065353216D { align1 1Q };
mov(8) g125<1>UD 0x00000000UD { align1 1Q compacted };
mul(8) g124<1>F g28<8,8,1>F 0x3b808081F /* 0.00392157F */ { align1 1Q };
END B2 ->B3
START B3 <-B2 <-B1 (20 cycles)
endif(8) JIP: 16 { align1 1Q };
mov(8) g126<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1Q compacted };
sendc(8) null<1>UW g124<0,1,0>F 0x88031400 render MsgDesc: RT write SIMD8 LastRT Surface = 0 mlen 4 rlen 0 { align1 1Q EOT };
END B3

Native code for unnamed fragment shader GLSL3
SIMD16 shader: 103 instructions. 0 loops. 542 cycles. 0:0 spills:fills. Promoted 0 constants. Compacted 1648 to 1472 bytes (11%)
START B0 (478 cycles)
mov(16) g4<1>UD 0x00000000UD { align1 1H compacted };
mov(8) g87.1<2>UD g2.1<0,1,0>UD { align1 1Q };
mov(8) g6.1<2>UD g2.1<0,1,0>UD { align1 2Q };
mov(8) g89.1<2>UD g2.3<0,1,0>UD { align1 1Q };
mov(8) g12.1<2>UD g2.3<0,1,0>UD { align1 2Q };
mov(8) g91.1<2>UD g2.5<0,1,0>UD { align1 1Q };
mov(8) g20.1<2>UD g2.5<0,1,0>UD { align1 2Q };
mov(8) g93.1<2>UD g2.7<0,1,0>UD { align1 1Q };
mov(8) g28.1<2>UD g2.7<0,1,0>UD { align1 2Q };
mov(8) g95.1<2>UD g3.1<0,1,0>UD { align1 1Q };
mov(8) g36.1<2>UD g3.1<0,1,0>UD { align1 2Q };
mov(8) g97.1<2>UD g3.3<0,1,0>UD { align1 1Q };
mov(8) g48.1<2>UD g3.3<0,1,0>UD { align1 2Q };
mov(8) g99.1<2>UD g3.5<0,1,0>UD { align1 1Q };
mov(8) g62.1<2>UD g3.5<0,1,0>UD { align1 2Q };
mov(8) g101.1<2>UD g3.7<0,1,0>UD { align1 1Q };
mov(8) g76.1<2>UD g3.7<0,1,0>UD { align1 2Q };
mov(8) g87<2>UD g2<0,1,0>UD { align1 1Q };
mov(8) g6<2>UD g2<0,1,0>UD { align1 2Q };
mov(8) g89<2>UD g2.2<0,1,0>UD { align1 1Q };
mov(8) g12<2>UD g2.2<0,1,0>UD { align1 2Q };
mov(8) g91<2>UD g2.4<0,1,0>UD { align1 1Q };
mov(8) g20<2>UD g2.4<0,1,0>UD { align1 2Q };
mov(8) g93<2>UD g2.6<0,1,0>UD { align1 1Q };
mov(8) g28<2>UD g2.6<0,1,0>UD { align1 2Q };
mov(8) g95<2>UD g3<0,1,0>UD { align1 1Q };
mov(8) g36<2>UD g3<0,1,0>UD { align1 2Q };
mov(8) g97<2>UD g3.2<0,1,0>UD { align1 1Q };
mov(8) g48<2>UD g3.2<0,1,0>UD { align1 2Q };
mov(8) g99<2>UD g3.4<0,1,0>UD { align1 1Q };
mov(8) g62<2>UD g3.4<0,1,0>UD { align1 2Q };
mov(8) g101<2>UD g3.6<0,1,0>UD { align1 1Q };
mov(8) g76<2>UD g3.6<0,1,0>UD { align1 2Q };
mov(8) g8<1>UD g87<8,4,2>UD { align1 1Q };
mov(8) g9<1>UD g6<8,4,2>UD { align1 2Q };
mov(8) g14<1>UD g89<8,4,2>UD { align1 1Q };
mov(8) g15<1>UD g12<8,4,2>UD { align1 2Q };
mov(8) g22<1>UD g91<8,4,2>UD { align1 1Q };
mov(8) g23<1>UD g20<8,4,2>UD { align1 2Q };
mov(8) g30<1>UD g93<8,4,2>UD { align1 1Q };
mov(8) g31<1>UD g28<8,4,2>UD { align1 2Q };
shl(8) g38<1>D g95.1<8,4,2>D 0x00000018UD { align1 1Q };
shr(8) g40<1>UD g95<8,4,2>UD 0x00000008UD { align1 1Q };
shl(8) g39<1>D g36.1<8,4,2>D 0x00000018UD { align1 2Q };
shr(8) g41<1>UD g36<8,4,2>UD 0x00000008UD { align1 2Q };
mov(8) g50<1>UD g97<8,4,2>UD { align1 1Q };
shl(8) g52<1>D g97.1<8,4,2>D 0x00000010UD { align1 1Q };
mov(8) g51<1>UD g48<8,4,2>UD { align1 2Q };
shl(8) g53<1>D g48.1<8,4,2>D 0x00000010UD { align1 2Q };
mov(8) g64<1>UD g99<8,4,2>UD { align1 1Q };
shl(8) g66<1>D g99.1<8,4,2>D 0x00000008UD { align1 1Q };
mov(8) g65<1>UD g62<8,4,2>UD { align1 2Q };
shl(8) g67<1>D g62.1<8,4,2>D 0x00000008UD { align1 2Q };
mov(8) g78<1>UD g101.1<8,4,2>UD { align1 1Q };
mov(8) g79<1>UD g76.1<8,4,2>UD { align1 2Q };
mov(16) g10<1>UD g8<32,8,4>UB { align1 1H };
mov(16) g16<1>UD g14.1<32,8,4>UB { align1 1H };
mov(16) g24<1>UD g22.2<32,8,4>UB { align1 1H };
mov(16) g32<1>UD g30.3<32,8,4>UB { align1 1H };
or(16) g42<1>UD g38<8,8,1>UD g40<8,8,1>UD { align1 1H compacted };
mov(16) g54<1>UD g50.1<16,8,2>UW { align1 1H };
mov(16) g68<1>UD g64.3<32,8,4>UB { align1 1H };
mov(16) g80<1>UD g78.3<32,8,4>UB { align1 1H };
cmp.nz.f0.0(16) null<1>D g10<8,8,1>D 239D { align1 1H compacted };
mov(16) g44<1>UD g42.3<32,8,4>UB { align1 1H };
or(16) g56<1>UD g52<8,8,1>UD g54<8,8,1>UD { align1 1H compacted };
or(16) g70<1>UD g66<8,8,1>UD g68<8,8,1>UD { align1 1H compacted };
(-f0.0) sel(16) g11<1>UD g4<8,8,1>UD 0x00000001UD { align1 1H };
mov(16) g58<1>UD g56.3<32,8,4>UB { align1 1H };
mov(16) g72<1>UD g70.3<32,8,4>UB { align1 1H };
cmp.nz.f0.0(16) null<1>D g16<8,8,1>D 186D { align1 1H compacted };
(-f0.0) sel(16) g17<1>UD g4<8,8,1>UD 0x00000002UD { align1 1H };
or(16) g19<1>UD g11<8,8,1>UD g17<8,8,1>UD { align1 1H compacted };
cmp.nz.f0.0(16) null<1>D g24<8,8,1>D 245D { align1 1H compacted };
(-f0.0) sel(16) g25<1>UD g4<8,8,1>UD 0x00000004UD { align1 1H };
or(16) g27<1>UD g19<8,8,1>UD g25<8,8,1>UD { align1 1H compacted };
cmp.nz.f0.0(16) null<1>D g32<8,8,1>D 79D { align1 1H compacted };
(-f0.0) sel(16) g33<1>UD g4<8,8,1>UD 0x00000008UD { align1 1H };
or(16) g35<1>UD g27<8,8,1>UD g33<8,8,1>UD { align1 1H compacted };
cmp.nz.f0.0(16) null<1>D g44<8,8,1>D 115D { align1 1H compacted };
(-f0.0) sel(16) g45<1>UD g4<8,8,1>UD 0x00000010UD { align1 1H };
or(16) g47<1>UD g35<8,8,1>UD g45<8,8,1>UD { align1 1H compacted };
cmp.nz.f0.0(16) null<1>D g58<8,8,1>D 39D { align1 1H compacted };
(-f0.0) sel(16) g59<1>UD g4<8,8,1>UD 0x00000020UD { align1 1H };
or(16) g61<1>UD g47<8,8,1>UD g59<8,8,1>UD { align1 1H compacted };
cmp.nz.f0.0(16) null<1>D g72<8,8,1>D 46D { align1 1H compacted };
(-f0.0) sel(16) g73<1>UD g4<8,8,1>UD 0x00000040UD { align1 1H };
or(16) g75<1>UD g61<8,8,1>UD g73<8,8,1>UD { align1 1H compacted };
cmp.nz.f0.0(16) null<1>D g80<8,8,1>D 186D { align1 1H compacted };
(-f0.0) sel(16) g81<1>UD g4<8,8,1>UD 0x00000080UD { align1 1H };
or.z.f0.0(16) g83<1>UD g75<8,8,1>UD g81<8,8,1>UD { align1 1H compacted };
(+f0.0) if(16) JIP: 72 UIP: 120 { align1 1H };
END B0 ->B1 ->B2
START B1 <-B0 (16 cycles)
mov(16) g126<1>D 1065353216D { align1 1H };
mov(16) g122<1>D 1065353216D { align1 1H };
mov(16) g120<1>UD 0x00000000UD { align1 1H compacted };
else(16) JIP: 64 UIP: 64 { align1 1H };
END B1 ->B3
START B2 <-B0 (22 cycles)
mov(16) g85<1>F g83<8,8,1>UD { align1 1H compacted };
mov(16) g126<1>D 1065353216D { align1 1H };
mov(16) g122<1>UD 0x00000000UD { align1 1H compacted };
mul(16) g120<1>F g85<8,8,1>F 0x3b808081F /* 0.00392157F */ { align1 1H };
END B2 ->B3
START B3 <-B2 <-B1 (26 cycles)
endif(16) JIP: 16 { align1 1H };
mov(16) g124<1>F 0x0VF /* [0F, 0F, 0F, 0F]VF */ { align1 1H compacted };
sendc(16) null<1>UW g120<0,1,0>F 0x90031000 render MsgDesc: RT write SIMD16 LastRT Surface = 0 mlen 8 rlen 0 { align1 1H EOT };
END B3

PIGLIT: {"result": "pass" }
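(Note on reading the GLSL3 dumps: the test shader is the only part of fs-ishl-then-ushr.shader_test that shows up here, and only in compiled form. The GLSL below is a rough reconstruction inferred from the NIR and the test name, not the actual .shader_test source; the expected byte values (0xef, 0xba, 0xf5, 0x4f, 0x73, 0x27, 0x2e, 0xba) and the uint64_t[8] ival uniform are taken from the dump, while the exact shift amounts are guesses.)

#version 400
#extension GL_ARB_gpu_shader_int64 : require

uniform uint64_t ival[8];        /* the 64 bytes of push constants seen in the dump */
out vec4 piglit_fragcolor;

void main()
{
    uint fail_mask = 0u;

    /* Each check shifts left, then logically shifts right, and compares the
     * surviving byte; the shift amounts here are illustrative guesses. */
    if (((ival[0] << 56) >> 56) != 0xeful) fail_mask |= 0x01u;
    if (((ival[1] << 48) >> 56) != 0xbaul) fail_mask |= 0x02u;
    if (((ival[2] << 40) >> 56) != 0xf5ul) fail_mask |= 0x04u;
    if (((ival[3] << 32) >> 56) != 0x4ful) fail_mask |= 0x08u;
    if (((ival[4] << 24) >> 56) != 0x73ul) fail_mask |= 0x10u;
    if (((ival[5] << 16) >> 56) != 0x27ul) fail_mask |= 0x20u;
    if (((ival[6] <<  8) >> 56) != 0x2eul) fail_mask |= 0x40u;
    if (((ival[7] <<  0) >> 56) != 0xbaul) fail_mask |= 0x80u;

    /* Green on success, otherwise the failure mask encoded in the red channel,
     * matching the phi web in block_3 of the NIR above. */
    piglit_fragcolor = (fail_mask == 0u)
        ? vec4(0.0, 1.0, 0.0, 1.0)
        : vec4(float(fail_mask) / 255.0, 0.0, 0.0, 1.0);
}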