Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- ; Function Attrs: nounwind
- define i32 @memory_profiler_mandelbrot(float %x_min, float %x_max, float %y_min, float %y_max, float %c_real, float %c_imag, i32 %iters, i32 %w, i32 %h, %struct.halide_buffer_t* noalias %f1.buffer) local_unnamed_addr #8 {
- entry:
- %__cilkrts_sf = alloca %__cilkrts_stack_frame, align 8
- destructor_block: ; preds = %entry, %"assert failed30", %"assert failed26", %"assert failed24", %"assert failed22", %"assert failed20", %"assert failed16", %"assert failed14", %"assert failed12", %"assert failed10", %"assert failed8", %"assert failed6", %"assert failed1"
- %8 = phi i32 [ %13, %"assert failed1" ], [ %64, %"assert failed6" ], [ %69, %"assert failed8" ], [ %71, %"assert failed10" ], [ %76, %"assert failed12" ], [ %78, %"assert failed14" ], [ %80, %"assert failed16" ], [ %88, %"assert failed20" ], [ %90, %"assert failed22" ], [ %233, %"assert failed24" ], [ %236, %"assert failed26" ], [ %239, %"assert failed30" ], [ %profiler_token, %entry ]
- if.then.i: ; preds = %destructor_block
- br label %call_destructor.exit
- call_destructor.exit: ; preds = %destructor_block, %if.then.i
- if.then.i63: ; preds = %call_destructor.exit
- br label %call_destructor.exit65
- call_destructor.exit65: ; preds = %call_destructor.exit, %call_destructor.exit.thread, %if.then.i63
- if.then.i67: ; preds = %call_destructor.exit65
- br label %call_destructor.exit69
- call_destructor.exit69: ; preds = %call_destructor.exit65, %if.then.i67
- "assert succeeded": ; preds = %entry
- call void asm sideeffect "", ""() #9, !srcloc !177
- call void asm sideeffect "", ""() #9, !srcloc !178
- call void @halide_profiler_stack_peak_update(i8* null, i8* %11, i64* nonnull %profiling_func_stack_peak_buf55.sub) #9
- %12 = icmp eq %struct.halide_buffer_t* %f1.buffer, null
- "assert failed1": ; preds = %"assert succeeded"
- br label %destructor_block
- "assert succeeded2": ; preds = %"assert succeeded"
- %18 = load %struct.halide_dimension_t*, %struct.halide_dimension_t** %dim.i, align 8, !tbaa !67
- _halide_buffer_is_bounds_query.exit: ; preds = %"assert succeeded2"
- after_bb: ; preds = %_halide_buffer_is_bounds_query.exit
- %.pre.i = load %struct.halide_dimension_t*, %struct.halide_dimension_t** %dim.i, align 8, !tbaa !67
- %arrayidx12.i = getelementptr inbounds %struct.halide_dimension_t, %struct.halide_dimension_t* %.pre.i, i64 1
- _halide_buffer_is_bounds_query.exit101: ; preds = %_halide_buffer_is_bounds_query.exit, %after_bb
- true_bb3: ; preds = %"assert succeeded2", %after_bb, %_halide_buffer_is_bounds_query.exit101
- call_destructor.exit.thread: ; preds = %_halide_buffer_is_bounds_query.exit101, %"sync pfor f1.s0.v1.v18"
- call void asm sideeffect "", ""() #9, !srcloc !179
- call void asm sideeffect "", ""() #9, !srcloc !180
- br label %call_destructor.exit65
- "assert failed6": ; preds = %true_bb3
- br label %destructor_block
- "assert succeeded7": ; preds = %true_bb3
- "assert failed8": ; preds = %"assert succeeded7"
- br label %destructor_block
- "assert succeeded9": ; preds = %"assert succeeded7"
- "assert failed10": ; preds = %"assert succeeded9"
- br label %destructor_block
- "assert succeeded11": ; preds = %"assert succeeded9"
- "assert failed12": ; preds = %"assert succeeded11"
- br label %destructor_block
- "assert succeeded13": ; preds = %"assert succeeded11"
- "assert failed14": ; preds = %"assert succeeded13"
- br label %destructor_block
- "assert succeeded15": ; preds = %"assert succeeded13"
- "assert failed16": ; preds = %"assert succeeded15"
- br label %destructor_block
- "assert succeeded19": ; preds = %"assert succeeded15"
- "assert failed20": ; preds = %"assert succeeded19"
- br label %destructor_block
- "assert succeeded21": ; preds = %"assert succeeded19"
- "assert failed22": ; preds = %"assert succeeded21"
- br label %destructor_block
- "produce f1": ; preds = %"assert succeeded21"
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- call void asm sideeffect "", ""() #9, !srcloc !179
- call void asm sideeffect "", ""() #9, !srcloc !180
- "pfor f1.s0.v1.v18.preheader": ; preds = %"produce f1"
- br label %"pfor f1.s0.v1.v18"
- "pfor f1.s0.v1.v18": ; preds = %"pfor f1.s0.v1.v18.preheader", %"platch f1.s0.v1.v18"
- %indvars.iv214 = phi i32 [ %117, %"pfor f1.s0.v1.v18.preheader" ], [ %indvars.iv.next215, %"platch f1.s0.v1.v18" ]
- detach label %"pbody f1.s0.v1.v18", label %"platch f1.s0.v1.v18"
- "pbody f1.s0.v1.v18": ; preds = %"pfor f1.s0.v1.v18"
- call void asm sideeffect "", ""() #9, !srcloc !177
- call void asm sideeffect "", ""() #9, !srcloc !178
- "for f1.s0.v0.v17.preheader": ; preds = %"pbody f1.s0.v1.v18"
- %154 = fdiv float %153, %107
- %155 = fmul float %154, %y_max
- %156 = fsub float 1.000000e+00, %154
- %157 = fmul float %156, %y_min
- %158 = fadd float %155, %157
- %163 = fdiv float %162, %107
- %164 = fmul float %163, %y_max
- %165 = fsub float 1.000000e+00, %163
- %166 = fmul float %165, %y_min
- %167 = fadd float %164, %166
- %172 = fdiv float %171, %107
- %173 = fmul float %172, %y_max
- %174 = fsub float 1.000000e+00, %172
- %175 = fmul float %174, %y_min
- %176 = fadd float %173, %175
- %181 = fdiv float %180, %107
- %182 = fmul float %181, %y_max
- %183 = fsub float 1.000000e+00, %181
- %184 = fmul float %183, %y_min
- %185 = fadd float %182, %184
- %190 = fdiv float %189, %107
- %191 = fmul float %190, %y_max
- %192 = fsub float 1.000000e+00, %190
- %193 = fmul float %192, %y_min
- %194 = fadd float %191, %193
- %199 = fdiv float %198, %107
- %200 = fmul float %199, %y_max
- %201 = fsub float 1.000000e+00, %199
- %202 = fmul float %201, %y_min
- %203 = fadd float %200, %202
- %208 = fdiv float %207, %107
- %209 = fmul float %208, %y_max
- %210 = fsub float 1.000000e+00, %208
- %211 = fmul float %210, %y_min
- %212 = fadd float %209, %211
- %217 = fdiv float %216, %107
- %218 = fmul float %217, %y_max
- %219 = fsub float 1.000000e+00, %217
- %220 = fmul float %219, %y_min
- %221 = fadd float %218, %220
- br label %"for f1.s0.v0.v17"
- "platch f1.s0.v1.v18": ; preds = %"end for f1.s0.v0.v17", %"pfor f1.s0.v1.v18"
- "end pfor f1.s0.v1.v18": ; preds = %"platch f1.s0.v1.v18", %"produce f1"
- sync label %"sync pfor f1.s0.v1.v18"
- "sync pfor f1.s0.v1.v18": ; preds = %"end pfor f1.s0.v1.v18"
- call void asm sideeffect "", ""() #9, !srcloc !177
- call void asm sideeffect "", ""() #9, !srcloc !178
- br label %call_destructor.exit.thread
- "for f1.s0.v0.v17": ; preds = %"for f1.s0.v0.v17.preheader", %call_destructor.exit124
- %indvars.iv209 = phi i32 [ %114, %"for f1.s0.v0.v17.preheader" ], [ %indvars.iv.next210, %call_destructor.exit124 ]
- "end for f1.s0.v0.v17.loopexit": ; preds = %call_destructor.exit124
- br label %"end for f1.s0.v0.v17"
- "end for f1.s0.v0.v17": ; preds = %"end for f1.s0.v0.v17.loopexit", %"pbody f1.s0.v1.v18"
- call void asm sideeffect "", ""() #9, !srcloc !179
- call void asm sideeffect "", ""() #9, !srcloc !180
- br label %"platch f1.s0.v1.v18"
- "assert failed24": ; preds = %"for f1.s0.v0.v17"
- br label %destructor_block
- "assert succeeded25": ; preds = %"for f1.s0.v0.v17"
- "assert failed26": ; preds = %"assert succeeded25"
- br label %destructor_block
- "assert succeeded27": ; preds = %"assert succeeded25"
- "assert failed30": ; preds = %"assert succeeded27"
- br label %destructor_block
- "assert succeeded31": ; preds = %"assert succeeded27"
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- "for f0.s0.v2.preheader": ; preds = %"assert succeeded31"
- %242 = fdiv float %241, %106
- %243 = fmul float %242, %x_max
- %244 = fsub float 1.000000e+00, %242
- %245 = fmul float %244, %x_min
- %246 = fadd float %243, %245
- %249 = fdiv float %248, %106
- %250 = fmul float %249, %x_max
- %251 = fsub float 1.000000e+00, %249
- %252 = fmul float %251, %x_min
- %253 = fadd float %250, %252
- %256 = fdiv float %255, %106
- %257 = fmul float %256, %x_max
- %258 = fsub float 1.000000e+00, %256
- %259 = fmul float %258, %x_min
- %260 = fadd float %257, %259
- %263 = fdiv float %262, %106
- %264 = fmul float %263, %x_max
- %265 = fsub float 1.000000e+00, %263
- %266 = fmul float %265, %x_min
- %267 = fadd float %264, %266
- %270 = fdiv float %269, %106
- %271 = fmul float %270, %x_max
- %272 = fsub float 1.000000e+00, %270
- %273 = fmul float %272, %x_min
- %274 = fadd float %271, %273
- %277 = fdiv float %276, %106
- %278 = fmul float %277, %x_max
- %279 = fsub float 1.000000e+00, %277
- %280 = fmul float %279, %x_min
- %281 = fadd float %278, %280
- %284 = fdiv float %283, %106
- %285 = fmul float %284, %x_max
- %286 = fsub float 1.000000e+00, %284
- %287 = fmul float %286, %x_min
- %288 = fadd float %285, %287
- %291 = fdiv float %290, %106
- %292 = fmul float %291, %x_max
- %293 = fsub float 1.000000e+00, %291
- %294 = fmul float %293, %x_min
- %295 = fadd float %292, %294
- br label %"for f0.s0.v2"
- "for f0.s1.v1.preheader": ; preds = %"for f0.s0.v2", %"assert succeeded31"
- "for f0.s1.v1.us.preheader": ; preds = %"for f0.s1.v1.preheader"
- br label %"for f0.s1.v1.us"
- "for f0.s1.v1.us": ; preds = %"for f0.s1.v1.us.preheader", %"end for f0.s1.r4$x.loopexit.us.us.7"
- %indvars.iv217 = phi i64 [ %147, %"for f0.s1.v1.us.preheader" ], [ %indvars.iv.next218, %"end for f0.s1.r4$x.loopexit.us.us.7" ]
- "for f0.s1.r4$x.us.us.prol": ; preds = %"for f0.s1.v1.us"
- %t326.us.us.prol = load float, float* %303, align 4, !tbaa !215
- %t327.us.us.prol = load float, float* %304, align 4, !tbaa !217
- %305 = fmul float %t327.us.us.prol, %t327.us.us.prol
- %306 = fmul float %t326.us.us.prol, %t326.us.us.prol
- %307 = fsub float %306, %305
- %308 = fadd float %307, %c_real
- store float %308, float* %310, align 4, !tbaa !215
- %311 = load float, float* %303, align 4, !tbaa !215
- %312 = fmul float %t327.us.us.prol, %311
- %313 = fmul float %312, 2.000000e+00
- %314 = fadd float %313, %c_imag
- store float %314, float* %315, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.prol.loopexit"
- "for f0.s1.r4$x.us.us.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.prol", %"for f0.s1.v1.us"
- "end for f0.s1.r4$x.loopexit.us.us": ; preds = %"for f0.s1.r4$x.us.us", %"for f0.s1.r4$x.us.us.prol.loopexit"
- br i1 %lcmp.mod300, label %"for f0.s1.r4$x.us.us.1.prol", label %"for f0.s1.r4$x.us.us.1.prol.loopexit"
- "for f0.s1.r4$x.us.us.1.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us"
- %t326.us.us.1.prol = load float, float* %320, align 4, !tbaa !215
- %t327.us.us.1.prol = load float, float* %321, align 4, !tbaa !217
- %322 = fmul float %t327.us.us.1.prol, %t327.us.us.1.prol
- %323 = fmul float %t326.us.us.1.prol, %t326.us.us.1.prol
- %324 = fsub float %323, %322
- %325 = fadd float %324, %c_real
- store float %325, float* %327, align 4, !tbaa !215
- %328 = load float, float* %320, align 4, !tbaa !215
- %329 = fmul float %t327.us.us.1.prol, %328
- %330 = fmul float %329, 2.000000e+00
- %331 = fadd float %330, %c_imag
- store float %331, float* %332, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.1.prol.loopexit"
- "for f0.s1.r4$x.us.us.1.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.1.prol", %"end for f0.s1.r4$x.loopexit.us.us"
- %indvars.iv207.1.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.1.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us" ]
- "for f0.s1.r4$x.us.us": ; preds = %"for f0.s1.r4$x.us.us.prol.loopexit", %"for f0.s1.r4$x.us.us"
- %indvars.iv207 = phi i64 [ %indvars.iv.next208.1297, %"for f0.s1.r4$x.us.us" ], [ %indvars.iv207.unr.ph, %"for f0.s1.r4$x.us.us.prol.loopexit" ]
- %t326.us.us = load float, float* %337, align 4, !tbaa !215
- %t327.us.us = load float, float* %338, align 4, !tbaa !217
- %339 = fmul float %t327.us.us, %t327.us.us
- %340 = fmul float %t326.us.us, %t326.us.us
- %341 = fsub float %340, %339
- %342 = fadd float %341, %c_real
- store float %342, float* %345, align 4, !tbaa !215
- %346 = load float, float* %337, align 4, !tbaa !215
- %347 = fmul float %t327.us.us, %346
- %348 = fmul float %347, 2.000000e+00
- %349 = fadd float %348, %c_imag
- store float %349, float* %350, align 4, !tbaa !217
- %t326.us.us.1295 = load float, float* %355, align 4, !tbaa !215
- %t327.us.us.1296 = load float, float* %356, align 4, !tbaa !217
- %357 = fmul float %t327.us.us.1296, %t327.us.us.1296
- %358 = fmul float %t326.us.us.1295, %t326.us.us.1295
- %359 = fsub float %358, %357
- %360 = fadd float %359, %c_real
- store float %360, float* %363, align 4, !tbaa !215
- %364 = load float, float* %355, align 4, !tbaa !215
- %365 = fmul float %t327.us.us.1296, %364
- %366 = fmul float %365, 2.000000e+00
- %367 = fadd float %366, %c_imag
- store float %367, float* %368, align 4, !tbaa !217
- "for f0.s0.v2": ; preds = %"for f0.s0.v2.preheader", %"for f0.s0.v2"
- store float %246, float* %373, align 4, !tbaa !215
- store float %158, float* %374, align 4, !tbaa !217
- store float %253, float* %376, align 4, !tbaa !215
- store float %158, float* %377, align 4, !tbaa !217
- store float %260, float* %379, align 4, !tbaa !215
- store float %158, float* %380, align 4, !tbaa !217
- store float %267, float* %382, align 4, !tbaa !215
- store float %158, float* %383, align 4, !tbaa !217
- store float %274, float* %385, align 4, !tbaa !215
- store float %158, float* %386, align 4, !tbaa !217
- store float %281, float* %388, align 4, !tbaa !215
- store float %158, float* %389, align 4, !tbaa !217
- store float %288, float* %391, align 4, !tbaa !215
- store float %158, float* %392, align 4, !tbaa !217
- store float %295, float* %394, align 4, !tbaa !215
- store float %158, float* %395, align 4, !tbaa !217
- store float %246, float* %398, align 4, !tbaa !215
- store float %167, float* %399, align 4, !tbaa !217
- store float %253, float* %401, align 4, !tbaa !215
- store float %167, float* %402, align 4, !tbaa !217
- store float %260, float* %404, align 4, !tbaa !215
- store float %167, float* %405, align 4, !tbaa !217
- store float %267, float* %407, align 4, !tbaa !215
- store float %167, float* %408, align 4, !tbaa !217
- store float %274, float* %410, align 4, !tbaa !215
- store float %167, float* %411, align 4, !tbaa !217
- store float %281, float* %413, align 4, !tbaa !215
- store float %167, float* %414, align 4, !tbaa !217
- store float %288, float* %416, align 4, !tbaa !215
- store float %167, float* %417, align 4, !tbaa !217
- store float %295, float* %419, align 4, !tbaa !215
- store float %167, float* %420, align 4, !tbaa !217
- store float %246, float* %423, align 4, !tbaa !215
- store float %176, float* %424, align 4, !tbaa !217
- store float %253, float* %426, align 4, !tbaa !215
- store float %176, float* %427, align 4, !tbaa !217
- store float %260, float* %429, align 4, !tbaa !215
- store float %176, float* %430, align 4, !tbaa !217
- store float %267, float* %432, align 4, !tbaa !215
- store float %176, float* %433, align 4, !tbaa !217
- store float %274, float* %435, align 4, !tbaa !215
- store float %176, float* %436, align 4, !tbaa !217
- store float %281, float* %438, align 4, !tbaa !215
- store float %176, float* %439, align 4, !tbaa !217
- store float %288, float* %441, align 4, !tbaa !215
- store float %176, float* %442, align 4, !tbaa !217
- store float %295, float* %444, align 4, !tbaa !215
- store float %176, float* %445, align 4, !tbaa !217
- store float %246, float* %448, align 4, !tbaa !215
- store float %185, float* %449, align 4, !tbaa !217
- store float %253, float* %451, align 4, !tbaa !215
- store float %185, float* %452, align 4, !tbaa !217
- store float %260, float* %454, align 4, !tbaa !215
- store float %185, float* %455, align 4, !tbaa !217
- store float %267, float* %457, align 4, !tbaa !215
- store float %185, float* %458, align 4, !tbaa !217
- store float %274, float* %460, align 4, !tbaa !215
- store float %185, float* %461, align 4, !tbaa !217
- store float %281, float* %463, align 4, !tbaa !215
- store float %185, float* %464, align 4, !tbaa !217
- store float %288, float* %466, align 4, !tbaa !215
- store float %185, float* %467, align 4, !tbaa !217
- store float %295, float* %469, align 4, !tbaa !215
- store float %185, float* %470, align 4, !tbaa !217
- store float %246, float* %473, align 4, !tbaa !215
- store float %194, float* %474, align 4, !tbaa !217
- store float %253, float* %476, align 4, !tbaa !215
- store float %194, float* %477, align 4, !tbaa !217
- store float %260, float* %479, align 4, !tbaa !215
- store float %194, float* %480, align 4, !tbaa !217
- store float %267, float* %482, align 4, !tbaa !215
- store float %194, float* %483, align 4, !tbaa !217
- store float %274, float* %485, align 4, !tbaa !215
- store float %194, float* %486, align 4, !tbaa !217
- store float %281, float* %488, align 4, !tbaa !215
- store float %194, float* %489, align 4, !tbaa !217
- store float %288, float* %491, align 4, !tbaa !215
- store float %194, float* %492, align 4, !tbaa !217
- store float %295, float* %494, align 4, !tbaa !215
- store float %194, float* %495, align 4, !tbaa !217
- store float %246, float* %498, align 4, !tbaa !215
- store float %203, float* %499, align 4, !tbaa !217
- store float %253, float* %501, align 4, !tbaa !215
- store float %203, float* %502, align 4, !tbaa !217
- store float %260, float* %504, align 4, !tbaa !215
- store float %203, float* %505, align 4, !tbaa !217
- store float %267, float* %507, align 4, !tbaa !215
- store float %203, float* %508, align 4, !tbaa !217
- store float %274, float* %510, align 4, !tbaa !215
- store float %203, float* %511, align 4, !tbaa !217
- store float %281, float* %513, align 4, !tbaa !215
- store float %203, float* %514, align 4, !tbaa !217
- store float %288, float* %516, align 4, !tbaa !215
- store float %203, float* %517, align 4, !tbaa !217
- store float %295, float* %519, align 4, !tbaa !215
- store float %203, float* %520, align 4, !tbaa !217
- store float %246, float* %523, align 4, !tbaa !215
- store float %212, float* %524, align 4, !tbaa !217
- store float %253, float* %526, align 4, !tbaa !215
- store float %212, float* %527, align 4, !tbaa !217
- store float %260, float* %529, align 4, !tbaa !215
- store float %212, float* %530, align 4, !tbaa !217
- store float %267, float* %532, align 4, !tbaa !215
- store float %212, float* %533, align 4, !tbaa !217
- store float %274, float* %535, align 4, !tbaa !215
- store float %212, float* %536, align 4, !tbaa !217
- store float %281, float* %538, align 4, !tbaa !215
- store float %212, float* %539, align 4, !tbaa !217
- store float %288, float* %541, align 4, !tbaa !215
- store float %212, float* %542, align 4, !tbaa !217
- store float %295, float* %544, align 4, !tbaa !215
- store float %212, float* %545, align 4, !tbaa !217
- store float %246, float* %548, align 4, !tbaa !215
- store float %221, float* %549, align 4, !tbaa !217
- store float %253, float* %551, align 4, !tbaa !215
- store float %221, float* %552, align 4, !tbaa !217
- store float %260, float* %554, align 4, !tbaa !215
- store float %221, float* %555, align 4, !tbaa !217
- store float %267, float* %557, align 4, !tbaa !215
- store float %221, float* %558, align 4, !tbaa !217
- store float %274, float* %560, align 4, !tbaa !215
- store float %221, float* %561, align 4, !tbaa !217
- store float %281, float* %563, align 4, !tbaa !215
- store float %221, float* %564, align 4, !tbaa !217
- store float %288, float* %566, align 4, !tbaa !215
- store float %221, float* %567, align 4, !tbaa !217
- store float %295, float* %569, align 4, !tbaa !215
- store float %221, float* %570, align 4, !tbaa !217
- "consume f0": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.7", %"for f0.s1.v1.preheader"
- %.lcssa.lcssa = phi i1 [ false, %"for f0.s1.v1.preheader" ], [ true, %"end for f0.s1.r4$x.loopexit.us.us.7" ]
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- br label %"for f1.s0.v1.v16.v16"
- "for f1.s0.v1.v16.v16": ; preds = %"consume argmin52", %"consume f0"
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- "for argmin.s1.r4$x.preheader": ; preds = %"for f1.s0.v1.v16.v16"
- "for argmin.s1.r4$x.prol": ; preds = %"for argmin.s1.r4$x.preheader"
- %578 = bitcast float* %577 to <4 x float>*
- %t330.prol = load <4 x float>, <4 x float>* %578, align 32, !tbaa !215
- %580 = bitcast float* %579 to <4 x float>*
- %t331.prol = load <4 x float>, <4 x float>* %580, align 32, !tbaa !217
- %581 = fsub <4 x float> zeroinitializer, %t331.prol
- %582 = fmul <4 x float> %t331.prol, %581
- %583 = fadd <4 x float> %582, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %584 = fmul <4 x float> %t330.prol, %t330.prol
- %585 = fcmp ogt <4 x float> %583, %584
- %586 = shufflevector <4 x i1> %585, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp59.prol = xor <128 x i1> %586, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %587 = and <128 x i1> %tmp59.prol, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %589 = select <128 x i1> %587, <128 x i1> %586, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- br label %"for argmin.s1.r4$x.prol.loopexit"
- "for argmin.s1.r4$x.prol.loopexit": ; preds = %"for argmin.s1.r4$x.prol", %"for argmin.s1.r4$x.preheader"
- %argmin.0.value.x4.lcssa.unr.ph = phi <4 x i32> [ %argmin.0.value.x4.prol, %"for argmin.s1.r4$x.prol" ], [ undef, %"for argmin.s1.r4$x.preheader" ]
- %.lcssa.unr.ph = phi <4 x i8> [ %590, %"for argmin.s1.r4$x.prol" ], [ undef, %"for argmin.s1.r4$x.preheader" ]
- %indvars.iv219.unr.ph = phi i64 [ 2, %"for argmin.s1.r4$x.prol" ], [ 1, %"for argmin.s1.r4$x.preheader" ]
- %argmin.054.0.unr.ph = phi <4 x i32> [ %argmin.0.value.x4.prol, %"for argmin.s1.r4$x.prol" ], [ zeroinitializer, %"for argmin.s1.r4$x.preheader" ]
- call_destructor.exit124: ; preds = %"consume argmin52"
- "for argmin.s1.r4$x": ; preds = %"for argmin.s1.r4$x.prol.loopexit", %"for argmin.s1.r4$x"
- %indvars.iv219 = phi i64 [ %indvars.iv.next220.1, %"for argmin.s1.r4$x" ], [ %indvars.iv219.unr.ph, %"for argmin.s1.r4$x.prol.loopexit" ]
- %argmin.153.0 = phi <4 x i8> [ %631, %"for argmin.s1.r4$x" ], [ %argmin.153.0.unr.ph, %"for argmin.s1.r4$x.prol.loopexit" ]
- %argmin.054.0 = phi <4 x i32> [ %argmin.0.value.x4.1, %"for argmin.s1.r4$x" ], [ %argmin.054.0.unr.ph, %"for argmin.s1.r4$x.prol.loopexit" ]
- %597 = bitcast float* %596 to <4 x float>*
- %t330 = load <4 x float>, <4 x float>* %597, align 32, !tbaa !215
- %599 = bitcast float* %598 to <4 x float>*
- %t331 = load <4 x float>, <4 x float>* %599, align 32, !tbaa !217
- %601 = fsub <4 x float> zeroinitializer, %t331
- %602 = fmul <4 x float> %t331, %601
- %603 = fadd <4 x float> %602, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %604 = fmul <4 x float> %t330, %t330
- %605 = fcmp ogt <4 x float> %603, %604
- %606 = shufflevector <4 x i1> %600, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %607 = shufflevector <4 x i1> %605, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp59 = xor <128 x i1> %607, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %617 = bitcast float* %616 to <4 x float>*
- %t330.1 = load <4 x float>, <4 x float>* %617, align 32, !tbaa !215
- %619 = bitcast float* %618 to <4 x float>*
- %t331.1 = load <4 x float>, <4 x float>* %619, align 32, !tbaa !217
- %620 = fsub <4 x float> zeroinitializer, %t331.1
- %621 = fmul <4 x float> %t331.1, %620
- %622 = fadd <4 x float> %621, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %623 = fmul <4 x float> %t330.1, %t330.1
- %624 = fcmp ogt <4 x float> %622, %623
- %625 = shufflevector <4 x i1> %624, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp59.1 = xor <128 x i1> %625, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- "consume argmin": ; preds = %"for argmin.s1.r4$x.prol.loopexit", %"for argmin.s1.r4$x", %"for f1.s0.v1.v16.v16"
- %argmin.054.1 = phi <4 x i32> [ zeroinitializer, %"for f1.s0.v1.v16.v16" ], [ %argmin.0.value.x4.lcssa.unr.ph, %"for argmin.s1.r4$x.prol.loopexit" ], [ %argmin.0.value.x4.1, %"for argmin.s1.r4$x" ]
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- "for argmin.s1.r4$x33.preheader": ; preds = %"consume argmin"
- "for argmin.s1.r4$x33.prol": ; preds = %"for argmin.s1.r4$x33.preheader"
- %643 = bitcast float* %642 to <4 x float>*
- %t338.prol = load <4 x float>, <4 x float>* %643, align 16, !tbaa !215
- %645 = bitcast float* %644 to <4 x float>*
- %t339.prol = load <4 x float>, <4 x float>* %645, align 16, !tbaa !217
- %646 = fsub <4 x float> zeroinitializer, %t339.prol
- %647 = fmul <4 x float> %t339.prol, %646
- %648 = fadd <4 x float> %647, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %649 = fmul <4 x float> %t338.prol, %t338.prol
- %650 = fcmp ogt <4 x float> %648, %649
- %651 = shufflevector <4 x i1> %650, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp58.prol = xor <128 x i1> %651, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %652 = and <128 x i1> %tmp58.prol, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %654 = select <128 x i1> %652, <128 x i1> %651, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- br label %"for argmin.s1.r4$x33.prol.loopexit"
- "for argmin.s1.r4$x33.prol.loopexit": ; preds = %"for argmin.s1.r4$x33.prol", %"for argmin.s1.r4$x33.preheader"
- %argmin.0.value.x436.lcssa.unr.ph = phi <4 x i32> [ %argmin.0.value.x436.prol, %"for argmin.s1.r4$x33.prol" ], [ undef, %"for argmin.s1.r4$x33.preheader" ]
- %.lcssa275.unr.ph = phi <4 x i8> [ %655, %"for argmin.s1.r4$x33.prol" ], [ undef, %"for argmin.s1.r4$x33.preheader" ]
- %indvars.iv221.unr.ph = phi i64 [ 2, %"for argmin.s1.r4$x33.prol" ], [ 1, %"for argmin.s1.r4$x33.preheader" ]
- %argmin.054.2.unr.ph = phi <4 x i32> [ %argmin.0.value.x436.prol, %"for argmin.s1.r4$x33.prol" ], [ zeroinitializer, %"for argmin.s1.r4$x33.preheader" ]
- "for argmin.s1.r4$x33": ; preds = %"for argmin.s1.r4$x33.prol.loopexit", %"for argmin.s1.r4$x33"
- %indvars.iv221 = phi i64 [ %indvars.iv.next222.1, %"for argmin.s1.r4$x33" ], [ %indvars.iv221.unr.ph, %"for argmin.s1.r4$x33.prol.loopexit" ]
- %argmin.153.2 = phi <4 x i8> [ %696, %"for argmin.s1.r4$x33" ], [ %argmin.153.2.unr.ph, %"for argmin.s1.r4$x33.prol.loopexit" ]
- %argmin.054.2 = phi <4 x i32> [ %argmin.0.value.x436.1, %"for argmin.s1.r4$x33" ], [ %argmin.054.2.unr.ph, %"for argmin.s1.r4$x33.prol.loopexit" ]
- %661 = bitcast float* %660 to <4 x float>*
- %t338 = load <4 x float>, <4 x float>* %661, align 16, !tbaa !215
- %663 = bitcast float* %662 to <4 x float>*
- %t339 = load <4 x float>, <4 x float>* %663, align 16, !tbaa !217
- %665 = fsub <4 x float> zeroinitializer, %t339
- %666 = fmul <4 x float> %t339, %665
- %667 = fadd <4 x float> %666, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %668 = fmul <4 x float> %t338, %t338
- %669 = fcmp ogt <4 x float> %667, %668
- %670 = shufflevector <4 x i1> %664, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %671 = shufflevector <4 x i1> %669, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp58 = xor <128 x i1> %671, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %682 = bitcast float* %681 to <4 x float>*
- %t338.1 = load <4 x float>, <4 x float>* %682, align 16, !tbaa !215
- %684 = bitcast float* %683 to <4 x float>*
- %t339.1 = load <4 x float>, <4 x float>* %684, align 16, !tbaa !217
- %685 = fsub <4 x float> zeroinitializer, %t339.1
- %686 = fmul <4 x float> %t339.1, %685
- %687 = fadd <4 x float> %686, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %688 = fmul <4 x float> %t338.1, %t338.1
- %689 = fcmp ogt <4 x float> %687, %688
- %690 = shufflevector <4 x i1> %689, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp58.1 = xor <128 x i1> %690, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- "consume argmin38": ; preds = %"for argmin.s1.r4$x33.prol.loopexit", %"for argmin.s1.r4$x33", %"consume argmin"
- %argmin.054.3 = phi <4 x i32> [ zeroinitializer, %"consume argmin" ], [ %argmin.0.value.x436.lcssa.unr.ph, %"for argmin.s1.r4$x33.prol.loopexit" ], [ %argmin.0.value.x436.1, %"for argmin.s1.r4$x33" ]
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- "for argmin.s1.r4$x40.preheader": ; preds = %"consume argmin38"
- "for argmin.s1.r4$x40.prol": ; preds = %"for argmin.s1.r4$x40.preheader"
- %707 = bitcast float* %706 to <4 x float>*
- %t346.prol = load <4 x float>, <4 x float>* %707, align 32, !tbaa !215
- %709 = bitcast float* %708 to <4 x float>*
- %t347.prol = load <4 x float>, <4 x float>* %709, align 32, !tbaa !217
- %710 = fsub <4 x float> zeroinitializer, %t347.prol
- %711 = fmul <4 x float> %t347.prol, %710
- %712 = fadd <4 x float> %711, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %713 = fmul <4 x float> %t346.prol, %t346.prol
- %714 = fcmp ogt <4 x float> %712, %713
- %715 = shufflevector <4 x i1> %714, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp57.prol = xor <128 x i1> %715, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %716 = and <128 x i1> %tmp57.prol, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %718 = select <128 x i1> %716, <128 x i1> %715, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- br label %"for argmin.s1.r4$x40.prol.loopexit"
- "for argmin.s1.r4$x40.prol.loopexit": ; preds = %"for argmin.s1.r4$x40.prol", %"for argmin.s1.r4$x40.preheader"
- %argmin.0.value.x443.lcssa.unr.ph = phi <4 x i32> [ %argmin.0.value.x443.prol, %"for argmin.s1.r4$x40.prol" ], [ undef, %"for argmin.s1.r4$x40.preheader" ]
- %.lcssa276.unr.ph = phi <4 x i8> [ %719, %"for argmin.s1.r4$x40.prol" ], [ undef, %"for argmin.s1.r4$x40.preheader" ]
- %indvars.iv223.unr.ph = phi i64 [ 2, %"for argmin.s1.r4$x40.prol" ], [ 1, %"for argmin.s1.r4$x40.preheader" ]
- %argmin.054.4.unr.ph = phi <4 x i32> [ %argmin.0.value.x443.prol, %"for argmin.s1.r4$x40.prol" ], [ zeroinitializer, %"for argmin.s1.r4$x40.preheader" ]
- "for argmin.s1.r4$x40": ; preds = %"for argmin.s1.r4$x40.prol.loopexit", %"for argmin.s1.r4$x40"
- %indvars.iv223 = phi i64 [ %indvars.iv.next224.1, %"for argmin.s1.r4$x40" ], [ %indvars.iv223.unr.ph, %"for argmin.s1.r4$x40.prol.loopexit" ]
- %argmin.153.4 = phi <4 x i8> [ %760, %"for argmin.s1.r4$x40" ], [ %argmin.153.4.unr.ph, %"for argmin.s1.r4$x40.prol.loopexit" ]
- %argmin.054.4 = phi <4 x i32> [ %argmin.0.value.x443.1, %"for argmin.s1.r4$x40" ], [ %argmin.054.4.unr.ph, %"for argmin.s1.r4$x40.prol.loopexit" ]
- %725 = bitcast float* %724 to <4 x float>*
- %t346 = load <4 x float>, <4 x float>* %725, align 32, !tbaa !215
- %727 = bitcast float* %726 to <4 x float>*
- %t347 = load <4 x float>, <4 x float>* %727, align 32, !tbaa !217
- %729 = fsub <4 x float> zeroinitializer, %t347
- %730 = fmul <4 x float> %t347, %729
- %731 = fadd <4 x float> %730, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %732 = fmul <4 x float> %t346, %t346
- %733 = fcmp ogt <4 x float> %731, %732
- %734 = shufflevector <4 x i1> %728, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %735 = shufflevector <4 x i1> %733, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp57 = xor <128 x i1> %735, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %746 = bitcast float* %745 to <4 x float>*
- %t346.1 = load <4 x float>, <4 x float>* %746, align 32, !tbaa !215
- %748 = bitcast float* %747 to <4 x float>*
- %t347.1 = load <4 x float>, <4 x float>* %748, align 32, !tbaa !217
- %749 = fsub <4 x float> zeroinitializer, %t347.1
- %750 = fmul <4 x float> %t347.1, %749
- %751 = fadd <4 x float> %750, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %752 = fmul <4 x float> %t346.1, %t346.1
- %753 = fcmp ogt <4 x float> %751, %752
- %754 = shufflevector <4 x i1> %753, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp57.1 = xor <128 x i1> %754, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- "consume argmin45": ; preds = %"for argmin.s1.r4$x40.prol.loopexit", %"for argmin.s1.r4$x40", %"consume argmin38"
- %argmin.054.5 = phi <4 x i32> [ zeroinitializer, %"consume argmin38" ], [ %argmin.0.value.x443.lcssa.unr.ph, %"for argmin.s1.r4$x40.prol.loopexit" ], [ %argmin.0.value.x443.1, %"for argmin.s1.r4$x40" ]
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- "for argmin.s1.r4$x47.preheader": ; preds = %"consume argmin45"
- "for argmin.s1.r4$x47.prol": ; preds = %"for argmin.s1.r4$x47.preheader"
- %773 = bitcast float* %772 to <4 x float>*
- %t354.prol = load <4 x float>, <4 x float>* %773, align 16, !tbaa !215
- %775 = bitcast float* %774 to <4 x float>*
- %t355.prol = load <4 x float>, <4 x float>* %775, align 16, !tbaa !217
- %776 = fsub <4 x float> zeroinitializer, %t355.prol
- %777 = fmul <4 x float> %t355.prol, %776
- %778 = fadd <4 x float> %777, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %779 = fmul <4 x float> %t354.prol, %t354.prol
- %780 = fcmp ogt <4 x float> %778, %779
- %781 = shufflevector <4 x i1> %780, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp.prol = xor <128 x i1> %781, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %782 = and <128 x i1> %tmp.prol, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %784 = select <128 x i1> %782, <128 x i1> %781, <128 x i1> <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- br label %"for argmin.s1.r4$x47.prol.loopexit"
- "for argmin.s1.r4$x47.prol.loopexit": ; preds = %"for argmin.s1.r4$x47.prol", %"for argmin.s1.r4$x47.preheader"
- %argmin.0.value.x450.lcssa.unr.ph = phi <4 x i32> [ %argmin.0.value.x450.prol, %"for argmin.s1.r4$x47.prol" ], [ undef, %"for argmin.s1.r4$x47.preheader" ]
- %.lcssa277.unr.ph = phi <4 x i8> [ %785, %"for argmin.s1.r4$x47.prol" ], [ undef, %"for argmin.s1.r4$x47.preheader" ]
- %indvars.iv225.unr.ph = phi i64 [ 2, %"for argmin.s1.r4$x47.prol" ], [ 1, %"for argmin.s1.r4$x47.preheader" ]
- %argmin.054.6.unr.ph = phi <4 x i32> [ %argmin.0.value.x450.prol, %"for argmin.s1.r4$x47.prol" ], [ zeroinitializer, %"for argmin.s1.r4$x47.preheader" ]
- "for argmin.s1.r4$x47": ; preds = %"for argmin.s1.r4$x47.prol.loopexit", %"for argmin.s1.r4$x47"
- %indvars.iv225 = phi i64 [ %indvars.iv.next226.1, %"for argmin.s1.r4$x47" ], [ %indvars.iv225.unr.ph, %"for argmin.s1.r4$x47.prol.loopexit" ]
- %argmin.153.6 = phi <4 x i8> [ %826, %"for argmin.s1.r4$x47" ], [ %argmin.153.6.unr.ph, %"for argmin.s1.r4$x47.prol.loopexit" ]
- %argmin.054.6 = phi <4 x i32> [ %argmin.0.value.x450.1, %"for argmin.s1.r4$x47" ], [ %argmin.054.6.unr.ph, %"for argmin.s1.r4$x47.prol.loopexit" ]
- %791 = bitcast float* %790 to <4 x float>*
- %t354 = load <4 x float>, <4 x float>* %791, align 16, !tbaa !215
- %793 = bitcast float* %792 to <4 x float>*
- %t355 = load <4 x float>, <4 x float>* %793, align 16, !tbaa !217
- %795 = fsub <4 x float> zeroinitializer, %t355
- %796 = fmul <4 x float> %t355, %795
- %797 = fadd <4 x float> %796, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %798 = fmul <4 x float> %t354, %t354
- %799 = fcmp ogt <4 x float> %797, %798
- %800 = shufflevector <4 x i1> %794, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %801 = shufflevector <4 x i1> %799, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp = xor <128 x i1> %801, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- %812 = bitcast float* %811 to <4 x float>*
- %t354.1 = load <4 x float>, <4 x float>* %812, align 16, !tbaa !215
- %814 = bitcast float* %813 to <4 x float>*
- %t355.1 = load <4 x float>, <4 x float>* %814, align 16, !tbaa !217
- %815 = fsub <4 x float> zeroinitializer, %t355.1
- %816 = fmul <4 x float> %t355.1, %815
- %817 = fadd <4 x float> %816, <float 4.000000e+00, float 4.000000e+00, float 4.000000e+00, float 4.000000e+00>
- %818 = fmul <4 x float> %t354.1, %t354.1
- %819 = fcmp ogt <4 x float> %817, %818
- %820 = shufflevector <4 x i1> %819, <4 x i1> undef, <128 x i32> <i32 0, i32 1, i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
- %tmp.1 = xor <128 x i1> %820, <i1 true, i1 true, i1 true, i1 true, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
- "consume argmin52": ; preds = %"for argmin.s1.r4$x47.prol.loopexit", %"for argmin.s1.r4$x47", %"consume argmin45"
- %argmin.054.7 = phi <4 x i32> [ zeroinitializer, %"consume argmin45" ], [ %argmin.0.value.x450.lcssa.unr.ph, %"for argmin.s1.r4$x47.prol.loopexit" ], [ %argmin.0.value.x450.1, %"for argmin.s1.r4$x47" ]
- call void asm sideeffect "", ""() #9, !srcloc !175
- call void asm sideeffect "", ""() #9, !srcloc !176
- "for f0.s1.r4$x.us.us.1": ; preds = %"for f0.s1.r4$x.us.us.1.prol.loopexit", %"for f0.s1.r4$x.us.us.1"
- %indvars.iv207.1 = phi i64 [ %indvars.iv.next208.1.1, %"for f0.s1.r4$x.us.us.1" ], [ %indvars.iv207.1.unr.ph, %"for f0.s1.r4$x.us.us.1.prol.loopexit" ]
- %t326.us.us.1 = load float, float* %838, align 4, !tbaa !215
- %t327.us.us.1 = load float, float* %839, align 4, !tbaa !217
- %840 = fmul float %t327.us.us.1, %t327.us.us.1
- %841 = fmul float %t326.us.us.1, %t326.us.us.1
- %842 = fsub float %841, %840
- %843 = fadd float %842, %c_real
- store float %843, float* %846, align 4, !tbaa !215
- %847 = load float, float* %838, align 4, !tbaa !215
- %848 = fmul float %t327.us.us.1, %847
- %849 = fmul float %848, 2.000000e+00
- %850 = fadd float %849, %c_imag
- store float %850, float* %851, align 4, !tbaa !217
- %t326.us.us.1.1 = load float, float* %856, align 4, !tbaa !215
- %t327.us.us.1.1 = load float, float* %857, align 4, !tbaa !217
- %858 = fmul float %t327.us.us.1.1, %t327.us.us.1.1
- %859 = fmul float %t326.us.us.1.1, %t326.us.us.1.1
- %860 = fsub float %859, %858
- %861 = fadd float %860, %c_real
- store float %861, float* %864, align 4, !tbaa !215
- %865 = load float, float* %856, align 4, !tbaa !215
- %866 = fmul float %t327.us.us.1.1, %865
- %867 = fmul float %866, 2.000000e+00
- %868 = fadd float %867, %c_imag
- store float %868, float* %869, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.1": ; preds = %"for f0.s1.r4$x.us.us.1", %"for f0.s1.r4$x.us.us.1.prol.loopexit"
- br i1 %lcmp.mod302, label %"for f0.s1.r4$x.us.us.2.prol", label %"for f0.s1.r4$x.us.us.2.prol.loopexit"
- "for f0.s1.r4$x.us.us.2.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.1"
- %t326.us.us.2.prol = load float, float* %875, align 4, !tbaa !215
- %t327.us.us.2.prol = load float, float* %876, align 4, !tbaa !217
- %877 = fmul float %t327.us.us.2.prol, %t327.us.us.2.prol
- %878 = fmul float %t326.us.us.2.prol, %t326.us.us.2.prol
- %879 = fsub float %878, %877
- %880 = fadd float %879, %c_real
- store float %880, float* %882, align 4, !tbaa !215
- %883 = load float, float* %875, align 4, !tbaa !215
- %884 = fmul float %t327.us.us.2.prol, %883
- %885 = fmul float %884, 2.000000e+00
- %886 = fadd float %885, %c_imag
- store float %886, float* %887, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.2.prol.loopexit"
- "for f0.s1.r4$x.us.us.2.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.2.prol", %"end for f0.s1.r4$x.loopexit.us.us.1"
- %indvars.iv207.2.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.2.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us.1" ]
- "for f0.s1.r4$x.us.us.2": ; preds = %"for f0.s1.r4$x.us.us.2.prol.loopexit", %"for f0.s1.r4$x.us.us.2"
- %indvars.iv207.2 = phi i64 [ %indvars.iv.next208.2.1, %"for f0.s1.r4$x.us.us.2" ], [ %indvars.iv207.2.unr.ph, %"for f0.s1.r4$x.us.us.2.prol.loopexit" ]
- %t326.us.us.2 = load float, float* %892, align 4, !tbaa !215
- %t327.us.us.2 = load float, float* %893, align 4, !tbaa !217
- %894 = fmul float %t327.us.us.2, %t327.us.us.2
- %895 = fmul float %t326.us.us.2, %t326.us.us.2
- %896 = fsub float %895, %894
- %897 = fadd float %896, %c_real
- store float %897, float* %900, align 4, !tbaa !215
- %901 = load float, float* %892, align 4, !tbaa !215
- %902 = fmul float %t327.us.us.2, %901
- %903 = fmul float %902, 2.000000e+00
- %904 = fadd float %903, %c_imag
- store float %904, float* %905, align 4, !tbaa !217
- %t326.us.us.2.1 = load float, float* %910, align 4, !tbaa !215
- %t327.us.us.2.1 = load float, float* %911, align 4, !tbaa !217
- %912 = fmul float %t327.us.us.2.1, %t327.us.us.2.1
- %913 = fmul float %t326.us.us.2.1, %t326.us.us.2.1
- %914 = fsub float %913, %912
- %915 = fadd float %914, %c_real
- store float %915, float* %918, align 4, !tbaa !215
- %919 = load float, float* %910, align 4, !tbaa !215
- %920 = fmul float %t327.us.us.2.1, %919
- %921 = fmul float %920, 2.000000e+00
- %922 = fadd float %921, %c_imag
- store float %922, float* %923, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.2": ; preds = %"for f0.s1.r4$x.us.us.2", %"for f0.s1.r4$x.us.us.2.prol.loopexit"
- br i1 %lcmp.mod304, label %"for f0.s1.r4$x.us.us.3.prol", label %"for f0.s1.r4$x.us.us.3.prol.loopexit"
- "for f0.s1.r4$x.us.us.3.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.2"
- %t326.us.us.3.prol = load float, float* %929, align 4, !tbaa !215
- %t327.us.us.3.prol = load float, float* %930, align 4, !tbaa !217
- %931 = fmul float %t327.us.us.3.prol, %t327.us.us.3.prol
- %932 = fmul float %t326.us.us.3.prol, %t326.us.us.3.prol
- %933 = fsub float %932, %931
- %934 = fadd float %933, %c_real
- store float %934, float* %936, align 4, !tbaa !215
- %937 = load float, float* %929, align 4, !tbaa !215
- %938 = fmul float %t327.us.us.3.prol, %937
- %939 = fmul float %938, 2.000000e+00
- %940 = fadd float %939, %c_imag
- store float %940, float* %941, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.3.prol.loopexit"
- "for f0.s1.r4$x.us.us.3.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.3.prol", %"end for f0.s1.r4$x.loopexit.us.us.2"
- %indvars.iv207.3.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.3.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us.2" ]
- "for f0.s1.r4$x.us.us.3": ; preds = %"for f0.s1.r4$x.us.us.3.prol.loopexit", %"for f0.s1.r4$x.us.us.3"
- %indvars.iv207.3 = phi i64 [ %indvars.iv.next208.3.1, %"for f0.s1.r4$x.us.us.3" ], [ %indvars.iv207.3.unr.ph, %"for f0.s1.r4$x.us.us.3.prol.loopexit" ]
- %t326.us.us.3 = load float, float* %946, align 4, !tbaa !215
- %t327.us.us.3 = load float, float* %947, align 4, !tbaa !217
- %948 = fmul float %t327.us.us.3, %t327.us.us.3
- %949 = fmul float %t326.us.us.3, %t326.us.us.3
- %950 = fsub float %949, %948
- %951 = fadd float %950, %c_real
- store float %951, float* %954, align 4, !tbaa !215
- %955 = load float, float* %946, align 4, !tbaa !215
- %956 = fmul float %t327.us.us.3, %955
- %957 = fmul float %956, 2.000000e+00
- %958 = fadd float %957, %c_imag
- store float %958, float* %959, align 4, !tbaa !217
- %t326.us.us.3.1 = load float, float* %964, align 4, !tbaa !215
- %t327.us.us.3.1 = load float, float* %965, align 4, !tbaa !217
- %966 = fmul float %t327.us.us.3.1, %t327.us.us.3.1
- %967 = fmul float %t326.us.us.3.1, %t326.us.us.3.1
- %968 = fsub float %967, %966
- %969 = fadd float %968, %c_real
- store float %969, float* %972, align 4, !tbaa !215
- %973 = load float, float* %964, align 4, !tbaa !215
- %974 = fmul float %t327.us.us.3.1, %973
- %975 = fmul float %974, 2.000000e+00
- %976 = fadd float %975, %c_imag
- store float %976, float* %977, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.3": ; preds = %"for f0.s1.r4$x.us.us.3", %"for f0.s1.r4$x.us.us.3.prol.loopexit"
- br i1 %lcmp.mod306, label %"for f0.s1.r4$x.us.us.4.prol", label %"for f0.s1.r4$x.us.us.4.prol.loopexit"
- "for f0.s1.r4$x.us.us.4.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.3"
- %t326.us.us.4.prol = load float, float* %983, align 4, !tbaa !215
- %t327.us.us.4.prol = load float, float* %984, align 4, !tbaa !217
- %985 = fmul float %t327.us.us.4.prol, %t327.us.us.4.prol
- %986 = fmul float %t326.us.us.4.prol, %t326.us.us.4.prol
- %987 = fsub float %986, %985
- %988 = fadd float %987, %c_real
- store float %988, float* %990, align 4, !tbaa !215
- %991 = load float, float* %983, align 4, !tbaa !215
- %992 = fmul float %t327.us.us.4.prol, %991
- %993 = fmul float %992, 2.000000e+00
- %994 = fadd float %993, %c_imag
- store float %994, float* %995, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.4.prol.loopexit"
- "for f0.s1.r4$x.us.us.4.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.4.prol", %"end for f0.s1.r4$x.loopexit.us.us.3"
- %indvars.iv207.4.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.4.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us.3" ]
- "for f0.s1.r4$x.us.us.4": ; preds = %"for f0.s1.r4$x.us.us.4.prol.loopexit", %"for f0.s1.r4$x.us.us.4"
- %indvars.iv207.4 = phi i64 [ %indvars.iv.next208.4.1, %"for f0.s1.r4$x.us.us.4" ], [ %indvars.iv207.4.unr.ph, %"for f0.s1.r4$x.us.us.4.prol.loopexit" ]
- %t326.us.us.4 = load float, float* %1000, align 4, !tbaa !215
- %t327.us.us.4 = load float, float* %1001, align 4, !tbaa !217
- %1002 = fmul float %t327.us.us.4, %t327.us.us.4
- %1003 = fmul float %t326.us.us.4, %t326.us.us.4
- %1004 = fsub float %1003, %1002
- %1005 = fadd float %1004, %c_real
- store float %1005, float* %1008, align 4, !tbaa !215
- %1009 = load float, float* %1000, align 4, !tbaa !215
- %1010 = fmul float %t327.us.us.4, %1009
- %1011 = fmul float %1010, 2.000000e+00
- %1012 = fadd float %1011, %c_imag
- store float %1012, float* %1013, align 4, !tbaa !217
- %t326.us.us.4.1 = load float, float* %1018, align 4, !tbaa !215
- %t327.us.us.4.1 = load float, float* %1019, align 4, !tbaa !217
- %1020 = fmul float %t327.us.us.4.1, %t327.us.us.4.1
- %1021 = fmul float %t326.us.us.4.1, %t326.us.us.4.1
- %1022 = fsub float %1021, %1020
- %1023 = fadd float %1022, %c_real
- store float %1023, float* %1026, align 4, !tbaa !215
- %1027 = load float, float* %1018, align 4, !tbaa !215
- %1028 = fmul float %t327.us.us.4.1, %1027
- %1029 = fmul float %1028, 2.000000e+00
- %1030 = fadd float %1029, %c_imag
- store float %1030, float* %1031, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.4": ; preds = %"for f0.s1.r4$x.us.us.4", %"for f0.s1.r4$x.us.us.4.prol.loopexit"
- br i1 %lcmp.mod308, label %"for f0.s1.r4$x.us.us.5.prol", label %"for f0.s1.r4$x.us.us.5.prol.loopexit"
- "for f0.s1.r4$x.us.us.5.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.4"
- %t326.us.us.5.prol = load float, float* %1037, align 4, !tbaa !215
- %t327.us.us.5.prol = load float, float* %1038, align 4, !tbaa !217
- %1039 = fmul float %t327.us.us.5.prol, %t327.us.us.5.prol
- %1040 = fmul float %t326.us.us.5.prol, %t326.us.us.5.prol
- %1041 = fsub float %1040, %1039
- %1042 = fadd float %1041, %c_real
- store float %1042, float* %1044, align 4, !tbaa !215
- %1045 = load float, float* %1037, align 4, !tbaa !215
- %1046 = fmul float %t327.us.us.5.prol, %1045
- %1047 = fmul float %1046, 2.000000e+00
- %1048 = fadd float %1047, %c_imag
- store float %1048, float* %1049, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.5.prol.loopexit"
- "for f0.s1.r4$x.us.us.5.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.5.prol", %"end for f0.s1.r4$x.loopexit.us.us.4"
- %indvars.iv207.5.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.5.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us.4" ]
- "for f0.s1.r4$x.us.us.5": ; preds = %"for f0.s1.r4$x.us.us.5.prol.loopexit", %"for f0.s1.r4$x.us.us.5"
- %indvars.iv207.5 = phi i64 [ %indvars.iv.next208.5.1, %"for f0.s1.r4$x.us.us.5" ], [ %indvars.iv207.5.unr.ph, %"for f0.s1.r4$x.us.us.5.prol.loopexit" ]
- %t326.us.us.5 = load float, float* %1054, align 4, !tbaa !215
- %t327.us.us.5 = load float, float* %1055, align 4, !tbaa !217
- %1056 = fmul float %t327.us.us.5, %t327.us.us.5
- %1057 = fmul float %t326.us.us.5, %t326.us.us.5
- %1058 = fsub float %1057, %1056
- %1059 = fadd float %1058, %c_real
- store float %1059, float* %1062, align 4, !tbaa !215
- %1063 = load float, float* %1054, align 4, !tbaa !215
- %1064 = fmul float %t327.us.us.5, %1063
- %1065 = fmul float %1064, 2.000000e+00
- %1066 = fadd float %1065, %c_imag
- store float %1066, float* %1067, align 4, !tbaa !217
- %t326.us.us.5.1 = load float, float* %1072, align 4, !tbaa !215
- %t327.us.us.5.1 = load float, float* %1073, align 4, !tbaa !217
- %1074 = fmul float %t327.us.us.5.1, %t327.us.us.5.1
- %1075 = fmul float %t326.us.us.5.1, %t326.us.us.5.1
- %1076 = fsub float %1075, %1074
- %1077 = fadd float %1076, %c_real
- store float %1077, float* %1080, align 4, !tbaa !215
- %1081 = load float, float* %1072, align 4, !tbaa !215
- %1082 = fmul float %t327.us.us.5.1, %1081
- %1083 = fmul float %1082, 2.000000e+00
- %1084 = fadd float %1083, %c_imag
- store float %1084, float* %1085, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.5": ; preds = %"for f0.s1.r4$x.us.us.5", %"for f0.s1.r4$x.us.us.5.prol.loopexit"
- br i1 %lcmp.mod310, label %"for f0.s1.r4$x.us.us.6.prol", label %"for f0.s1.r4$x.us.us.6.prol.loopexit"
- "for f0.s1.r4$x.us.us.6.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.5"
- %t326.us.us.6.prol = load float, float* %1091, align 4, !tbaa !215
- %t327.us.us.6.prol = load float, float* %1092, align 4, !tbaa !217
- %1093 = fmul float %t327.us.us.6.prol, %t327.us.us.6.prol
- %1094 = fmul float %t326.us.us.6.prol, %t326.us.us.6.prol
- %1095 = fsub float %1094, %1093
- %1096 = fadd float %1095, %c_real
- store float %1096, float* %1098, align 4, !tbaa !215
- %1099 = load float, float* %1091, align 4, !tbaa !215
- %1100 = fmul float %t327.us.us.6.prol, %1099
- %1101 = fmul float %1100, 2.000000e+00
- %1102 = fadd float %1101, %c_imag
- store float %1102, float* %1103, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.6.prol.loopexit"
- "for f0.s1.r4$x.us.us.6.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.6.prol", %"end for f0.s1.r4$x.loopexit.us.us.5"
- %indvars.iv207.6.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.6.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us.5" ]
- "for f0.s1.r4$x.us.us.6": ; preds = %"for f0.s1.r4$x.us.us.6.prol.loopexit", %"for f0.s1.r4$x.us.us.6"
- %indvars.iv207.6 = phi i64 [ %indvars.iv.next208.6.1, %"for f0.s1.r4$x.us.us.6" ], [ %indvars.iv207.6.unr.ph, %"for f0.s1.r4$x.us.us.6.prol.loopexit" ]
- %t326.us.us.6 = load float, float* %1108, align 4, !tbaa !215
- %t327.us.us.6 = load float, float* %1109, align 4, !tbaa !217
- %1110 = fmul float %t327.us.us.6, %t327.us.us.6
- %1111 = fmul float %t326.us.us.6, %t326.us.us.6
- %1112 = fsub float %1111, %1110
- %1113 = fadd float %1112, %c_real
- store float %1113, float* %1116, align 4, !tbaa !215
- %1117 = load float, float* %1108, align 4, !tbaa !215
- %1118 = fmul float %t327.us.us.6, %1117
- %1119 = fmul float %1118, 2.000000e+00
- %1120 = fadd float %1119, %c_imag
- store float %1120, float* %1121, align 4, !tbaa !217
- %t326.us.us.6.1 = load float, float* %1126, align 4, !tbaa !215
- %t327.us.us.6.1 = load float, float* %1127, align 4, !tbaa !217
- %1128 = fmul float %t327.us.us.6.1, %t327.us.us.6.1
- %1129 = fmul float %t326.us.us.6.1, %t326.us.us.6.1
- %1130 = fsub float %1129, %1128
- %1131 = fadd float %1130, %c_real
- store float %1131, float* %1134, align 4, !tbaa !215
- %1135 = load float, float* %1126, align 4, !tbaa !215
- %1136 = fmul float %t327.us.us.6.1, %1135
- %1137 = fmul float %1136, 2.000000e+00
- %1138 = fadd float %1137, %c_imag
- store float %1138, float* %1139, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.6": ; preds = %"for f0.s1.r4$x.us.us.6", %"for f0.s1.r4$x.us.us.6.prol.loopexit"
- br i1 %lcmp.mod312, label %"for f0.s1.r4$x.us.us.7.prol", label %"for f0.s1.r4$x.us.us.7.prol.loopexit"
- "for f0.s1.r4$x.us.us.7.prol": ; preds = %"end for f0.s1.r4$x.loopexit.us.us.6"
- %t326.us.us.7.prol = load float, float* %1145, align 4, !tbaa !215
- %t327.us.us.7.prol = load float, float* %1146, align 4, !tbaa !217
- %1147 = fmul float %t327.us.us.7.prol, %t327.us.us.7.prol
- %1148 = fmul float %t326.us.us.7.prol, %t326.us.us.7.prol
- %1149 = fsub float %1148, %1147
- %1150 = fadd float %1149, %c_real
- store float %1150, float* %1152, align 4, !tbaa !215
- %1153 = load float, float* %1145, align 4, !tbaa !215
- %1154 = fmul float %t327.us.us.7.prol, %1153
- %1155 = fmul float %1154, 2.000000e+00
- %1156 = fadd float %1155, %c_imag
- store float %1156, float* %1157, align 4, !tbaa !217
- br label %"for f0.s1.r4$x.us.us.7.prol.loopexit"
- "for f0.s1.r4$x.us.us.7.prol.loopexit": ; preds = %"for f0.s1.r4$x.us.us.7.prol", %"end for f0.s1.r4$x.loopexit.us.us.6"
- %indvars.iv207.7.unr.ph = phi i64 [ 2, %"for f0.s1.r4$x.us.us.7.prol" ], [ 1, %"end for f0.s1.r4$x.loopexit.us.us.6" ]
- "for f0.s1.r4$x.us.us.7": ; preds = %"for f0.s1.r4$x.us.us.7.prol.loopexit", %"for f0.s1.r4$x.us.us.7"
- %indvars.iv207.7 = phi i64 [ %indvars.iv.next208.7.1, %"for f0.s1.r4$x.us.us.7" ], [ %indvars.iv207.7.unr.ph, %"for f0.s1.r4$x.us.us.7.prol.loopexit" ]
- %t326.us.us.7 = load float, float* %1162, align 4, !tbaa !215
- %t327.us.us.7 = load float, float* %1163, align 4, !tbaa !217
- %1164 = fmul float %t327.us.us.7, %t327.us.us.7
- %1165 = fmul float %t326.us.us.7, %t326.us.us.7
- %1166 = fsub float %1165, %1164
- %1167 = fadd float %1166, %c_real
- store float %1167, float* %1170, align 4, !tbaa !215
- %1171 = load float, float* %1162, align 4, !tbaa !215
- %1172 = fmul float %t327.us.us.7, %1171
- %1173 = fmul float %1172, 2.000000e+00
- %1174 = fadd float %1173, %c_imag
- store float %1174, float* %1175, align 4, !tbaa !217
- %t326.us.us.7.1 = load float, float* %1180, align 4, !tbaa !215
- %t327.us.us.7.1 = load float, float* %1181, align 4, !tbaa !217
- %1182 = fmul float %t327.us.us.7.1, %t327.us.us.7.1
- %1183 = fmul float %t326.us.us.7.1, %t326.us.us.7.1
- %1184 = fsub float %1183, %1182
- %1185 = fadd float %1184, %c_real
- store float %1185, float* %1188, align 4, !tbaa !215
- %1189 = load float, float* %1180, align 4, !tbaa !215
- %1190 = fmul float %t327.us.us.7.1, %1189
- %1191 = fmul float %1190, 2.000000e+00
- %1192 = fadd float %1191, %c_imag
- store float %1192, float* %1193, align 4, !tbaa !217
- "end for f0.s1.r4$x.loopexit.us.us.7": ; preds = %"for f0.s1.r4$x.us.us.7", %"for f0.s1.r4$x.us.us.7.prol.loopexit"
- }
- memory_profiler_mandelbrot.generator: /home/ubuntu/Parallel-IR/lib/Transforms/Tapir/CilkABI.cpp:1056: bool llvm::cilk::populateDetachedCFG(const llvm::DetachInst&, llvm::DominatorTree&, llvm::SmallPtrSetImpl<llvm::BasicBlock*>&, llvm::SmallVectorImpl<llvm::BasicBlock*>&, llvm::SmallPtrSetImpl<llvm::BasicBlock*>&, bool, bool): Assertion `!isa<ReturnInst>(BB->getTerminator()) && "EH block terminated by return."' failed.
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement