NIR validation failed after nir_opt_peephole_select
3 errors:
shader: MESA_SHADER_COMPUTE
name: tfb_query
local-size: 64, 1, 1
shared-size: 0
inputs: 0
outputs: 0
uniforms: 0
shared: 0
decl_function main (0 params)

impl main {
    block block_0:
    /* preds: */
    vec1 64 ssa_0 = load_const (0x0 /* 0.000000 */)
    vec1 32 ssa_4 = load_const (0x00000000 /* 0.000000 */)
    /* flags */ vec1 32 ssa_7 = intrinsic load_push_constant (ssa_4) (0, 16) /* base=0 */ /* range=16 */
    vec1 32 ssa_9 = intrinsic vulkan_resource_index (ssa_4) (0, 0) /* desc-set=0 */ /* binding=0 */
    vec1 32 ssa_11 = intrinsic vulkan_resource_index (ssa_4) (0, 1) /* desc-set=0 */ /* binding=1 */
    vec3 32 ssa_12 = intrinsic load_local_invocation_id () ()
    vec3 32 ssa_13 = intrinsic load_work_group_id () ()
    vec1 32 ssa_14 = load_const (0x00000040 /* 0.000000 */)
    vec1 32 ssa_91 = imul ssa_13.x, ssa_14
    vec1 32 ssa_96 = iadd ssa_91, ssa_12.x
    vec1 32 ssa_22 = load_const (0x00000020 /* 0.000000 */)
    vec1 32 ssa_23 = imul ssa_22, ssa_96
    vec1 32 ssa_24 = load_const (0x00000004 /* 0.000000 */)
    /* output_stride */ vec1 32 ssa_25 = intrinsic load_push_constant (ssa_24) (0, 16) /* base=0 */ /* range=16 */
    vec1 32 ssa_26 = imul /* output_stride */ ssa_25, ssa_96
    vec4 32 ssa_27 = intrinsic load_ssbo (ssa_11, ssa_23) (0, 0, 0) /* access=0 */ /* align_mul=0 */ /* align_offset=0 */
    vec1 32 ssa_28 = load_const (0x00000010 /* 0.000000 */)
    vec1 32 ssa_29 = iadd ssa_23, ssa_28
    vec4 32 ssa_30 = intrinsic load_ssbo (ssa_11, ssa_29) (0, 0, 0) /* access=0 */ /* align_mul=0 */ /* align_offset=0 */
    vec1 32 ssa_33 = iand ssa_27.y, ssa_27.w
    vec1 32 ssa_36 = iand ssa_30.y, ssa_30.w
    vec1 32 ssa_37 = load_const (0x80000000 /* -0.000000 */)
    vec1 32 ssa_38 = iand ssa_33, ssa_36
    vec1 32 ssa_39 = iand ssa_38, ssa_37
    vec1 64 ssa_45 = pack_64_2x32_split ssa_27.x, ssa_27.y
    vec1 64 ssa_51 = pack_64_2x32_split ssa_27.z, ssa_27.w
    vec1 64 ssa_57 = pack_64_2x32_split ssa_30.x, ssa_30.y
    vec1 64 ssa_63 = pack_64_2x32_split ssa_30.z, ssa_30.w
    vec1 64 ssa_64 = isub ssa_63, ssa_51
    vec1 64 ssa_65 = isub ssa_57, ssa_45
    vec1 32 ssa_68 = load_const (0x00000001 /* 0.000000 */)
    vec1 32 ssa_120 = bcsel ssa_39, ssa_68, ssa_4
    error: src_bit_size == nir_alu_type_get_type_size(src_type) (../src/compiler/nir/nir_validate.c:360)

    vec1 64 ssa_121 = bcsel ssa_39, ssa_64, ssa_0
    error: src_bit_size == nir_alu_type_get_type_size(src_type) (../src/compiler/nir/nir_validate.c:360)

    vec1 64 ssa_122 = bcsel ssa_39, ssa_65, ssa_0
    error: src_bit_size == nir_alu_type_get_type_size(src_type) (../src/compiler/nir/nir_validate.c:360)

    vec1 32 ssa_72 = load_const (0x00000001 /* 0.000000 */)
    vec1 32 ssa_73 = iand /* flags */ ssa_7, ssa_72
    vec1 32 ssa_74 = load_const (0x00000008 /* 0.000000 */)
    vec1 32 ssa_76 = b32csel ssa_73, ssa_28, ssa_74
    vec1 32 ssa_79 = iand /* flags */ ssa_7, ssa_74
    vec1 32 ssa_80 = ior ssa_79, ssa_120
    /* succs: block_1 block_5 */
    if ssa_80 {
        block block_1:
        /* preds: block_0 */
        /* succs: block_2 block_3 */
        if ssa_73 {
            block block_2:
            /* preds: block_1 */
            vec2 64 ssa_106 = vec2 ssa_121, ssa_122
            intrinsic store_ssbo (ssa_106, ssa_9, ssa_26) (3, 0, 0, 0) /* wrmask=xy */ /* access=0 */ /* align_mul=0 */ /* align_offset=0 */
            /* succs: block_4 */
        } else {
            block block_3:
            /* preds: block_1 */
            vec1 32 ssa_110 = u2u32 ssa_121
            vec1 32 ssa_111 = u2u32 ssa_122
            vec2 32 ssa_112 = vec2 ssa_110, ssa_111
            intrinsic store_ssbo (ssa_112, ssa_9, ssa_26) (3, 0, 0, 0) /* wrmask=xy */ /* access=0 */ /* align_mul=0 */ /* align_offset=0 */
            /* succs: block_4 */
        }
        block block_4:
        /* preds: block_2 block_3 */
        /* succs: block_6 */
    } else {
        block block_5:
        /* preds: block_0 */
        /* succs: block_6 */
    }
    block block_6:
    /* preds: block_4 block_5 */
    vec1 32 ssa_85 = iand /* flags */ ssa_7, ssa_24
    /* succs: block_7 block_8 */
    if ssa_85 {
        block block_7:
        /* preds: block_6 */
        vec1 32 ssa_87 = iadd ssa_76, ssa_26
        intrinsic store_ssbo (ssa_120, ssa_9, ssa_87) (1, 0, 0, 0) /* wrmask=x */ /* access=0 */ /* align_mul=0 */ /* align_offset=0 */
        /* succs: block_9 */
    } else {
        block block_8:
        /* preds: block_6 */
        /* succs: block_9 */
    }
    block block_9:
    /* preds: block_7 block_8 */
    /* succs: block_10 */
    block block_10:
}
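
Note on the three validation errors above: they all quote the same check in nir_validate, which requires that the bit size of each ALU source match the size encoded in the opcode's declared input type (unsized input types are exempt). Since bcsel's two value sources are unsized, the source that trips the check here is presumably the 32-bit condition ssa_39. The snippet below is a minimal, hypothetical restatement of that invariant in C, not the code from nir_validate.c; the helper name check_alu_src_size is made up, and it assumes only the standard NIR helpers nir_src_bit_size, nir_op_infos and nir_alu_type_get_type_size, so it would have to be built inside the Mesa source tree.

#include <stdio.h>
#include "nir.h" /* Mesa NIR headers; compile inside the Mesa tree */

/* Hypothetical sketch of the invariant behind the quoted error: for ALU
 * source i, if the opcode declares an explicitly sized input type, the SSA
 * value feeding that source must have exactly that bit size. */
static void
check_alu_src_size(const nir_alu_instr *instr, unsigned i)
{
   unsigned src_bit_size = nir_src_bit_size(instr->src[i].src);
   nir_alu_type src_type = nir_op_infos[instr->op].input_types[i];
   unsigned expected = nir_alu_type_get_type_size(src_type);

   /* expected == 0 means the declared input type is unsized and any bit
    * size is allowed; a nonzero mismatch is what nir_validate reports
    * above for the bcsel instructions. */
   if (expected != 0 && src_bit_size != expected)
      fprintf(stderr, "ALU src %u: bit size %u != declared size %u\n",
              i, src_bit_size, expected);
}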