Advertisement
Not a member of Pastebin yet?
Sign up —
it unlocks many cool features!
- .section __TEXT,__text,regular,pure_instructions
- .build_version macos, 10, 14
- ## ---------------------------------------------------------------------------
- ## void payload(void)  -- SysV AMD64, Mach-O
- ## Benchmark kernel: 100000 loop iterations, each executing 48 vdivpd
- ## instructions via inline asm.  The divide destinations rotate through
- ## ymm7..ymm2, so the divides are mutually independent -- this variant
- ## stresses divider THROUGHPUT (contrast _payload2, which chains every
- ## divide through ymm0 and so measures LATENCY).
- ## Unoptimized compiler output: all locals live in the stack frame.
- ## Clobbers: rax, rcx, ymm0-ymm7, flags.
- ## ---------------------------------------------------------------------------
- .globl _payload ## -- Begin function payload
- .p2align 4, 0x90
- _payload: ## @payload
- .cfi_startproc
- ## %bb.0:
- pushq %rbp
- .cfi_def_cfa_offset 16
- .cfi_offset %rbp, -16
- movq %rsp, %rbp
- .cfi_def_cfa_register %rbp
- andq $-32, %rsp ## 32-byte-align rsp: the aligned vmovapd stores below require it
- subq $256, %rsp ## imm = 0x100
- ## Build the first 4-double vector one 64-bit literal at a time.
- ## 0x4024000000000000 = 10.0 and 0x408F400000000000 = 1000.0; the other
- ## two bit patterns are larger doubles (values per the C source).
- movabsq $4621819117588971520, %rax ## imm = 0x4024000000000000
- movq %rax, 152(%rsp)
- movabsq $4652007308841189376, %rax ## imm = 0x408F400000000000
- movq %rax, 144(%rsp)
- movabsq $4666723172467343360, %rax ## imm = 0x40C3880000000000
- movq %rax, 136(%rsp)
- movabsq $4672076419705077760, %rax ## imm = 0x40D68CC000000000
- movq %rax, 128(%rsp)
- ## Pack the four scalars: two vunpcklpd pairs -> xmm0/xmm1, then insert
- ## the high pair into the upper lane of ymm3.
- vmovsd 152(%rsp), %xmm0 ## xmm0 = mem[0],zero
- vmovsd 144(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vunpcklpd %xmm0, %xmm1, %xmm0 ## xmm0 = xmm1[0],xmm0[0]
- vmovsd 136(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vmovsd 128(%rsp), %xmm2 ## xmm2 = mem[0],zero
- vunpcklpd %xmm1, %xmm2, %xmm1 ## xmm1 = xmm2[0],xmm1[0]
- ## implicit-def: %ymm3
- vmovaps %xmm1, %xmm3
- vinsertf128 $1, %xmm0, %ymm3, %ymm3
- ## Spill the packed vector to 96(%rsp), then copy it to its local slot at
- ## 64(%rsp) (redundant store/reload typical of -O0 output).
- vmovapd %ymm3, 96(%rsp)
- vmovapd 96(%rsp), %ymm3
- vmovapd %ymm3, 64(%rsp)
- ## Second 4-double vector, same pattern.  Note the bare "movq %rax" below:
- ## rax still holds the 4th literal (0x40D68CC000000000) from the group above.
- movabsq $4636807660098813952, %rcx ## imm = 0x4059400000000000
- movq %rcx, 232(%rsp)
- movabsq $4667261933164953600, %rcx ## imm = 0x40C5720000000000
- movq %rcx, 224(%rsp)
- movq %rax, 216(%rsp)
- movabsq $4642542712749293568, %rax ## imm = 0x406DA00000000000
- movq %rax, 208(%rsp)
- vmovsd 232(%rsp), %xmm0 ## xmm0 = mem[0],zero
- vmovsd 224(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vunpcklpd %xmm0, %xmm1, %xmm0 ## xmm0 = xmm1[0],xmm0[0]
- vmovsd 216(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vmovsd 208(%rsp), %xmm2 ## xmm2 = mem[0],zero
- vunpcklpd %xmm1, %xmm2, %xmm1 ## xmm1 = xmm2[0],xmm1[0]
- ## implicit-def: %ymm3
- vmovaps %xmm1, %xmm3
- vinsertf128 $1, %xmm0, %ymm3, %ymm3
- vmovapd %ymm3, 160(%rsp)
- vmovapd 160(%rsp), %ymm3
- vmovapd %ymm3, 32(%rsp)
- ## Loop counter i at 28(%rsp); run the divide block 100000 times.
- movl $0, 28(%rsp)
- LBB0_1: ## =>This Inner Loop Header: Depth=1
- cmpl $100000, 28(%rsp) ## imm = 0x186A0
- jge LBB0_4
- ## %bb.2: ## in Loop: Header=BB0_1 Depth=1
- ## 48 independent divides per iteration; destinations rotate ymm7..ymm2 so
- ## no divide depends on a previous one (throughput measurement).
- ## NOTE(review): the inline asm reads ymm0/ymm1 as left by the shuffle code
- ## above (upper lanes zeroed by the VEX xmm writes), not the 32-byte vectors
- ## stored at 64/32(%rsp) -- presumably fine for a pure timing kernel, but
- ## confirm against the C source's asm constraints.
- ## InlineAsm Start
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- vdivpd %ymm0, %ymm1, %ymm7
- vdivpd %ymm0, %ymm1, %ymm6
- vdivpd %ymm0, %ymm1, %ymm5
- vdivpd %ymm0, %ymm1, %ymm4
- vdivpd %ymm0, %ymm1, %ymm3
- vdivpd %ymm0, %ymm1, %ymm2
- ## InlineAsm End
- ## %bb.3: ## in Loop: Header=BB0_1 Depth=1
- ## i++ and loop back.
- movl 28(%rsp), %eax
- addl $1, %eax
- movl %eax, 28(%rsp)
- jmp LBB0_1
- LBB0_4:
- movq %rbp, %rsp
- popq %rbp
- vzeroupper ## clear upper ymm state before returning to possibly-SSE callers
- retq
- .cfi_endproc
- ## -- End function
- ## ---------------------------------------------------------------------------
- ## void payload2(void)  -- SysV AMD64, Mach-O
- ## Companion kernel to _payload: identical setup, but the inline-asm block is
- ## 50 vdivpd instructions that ALL write ymm0 while also reading it as the
- ## divisor, forming one serial dependency chain -- this variant stresses
- ## divider LATENCY rather than throughput.
- ## Clobbers: rax, rcx, ymm0-ymm3, flags.
- ## ---------------------------------------------------------------------------
- .globl _payload2 ## -- Begin function payload2
- .p2align 4, 0x90
- _payload2: ## @payload2
- .cfi_startproc
- ## %bb.0:
- pushq %rbp
- .cfi_def_cfa_offset 16
- .cfi_offset %rbp, -16
- movq %rsp, %rbp
- .cfi_def_cfa_register %rbp
- andq $-32, %rsp ## 32-byte-align rsp for the aligned vmovapd stores below
- subq $256, %rsp ## imm = 0x100
- ## Materialize the same two 4-double constant vectors as _payload
- ## (0x4024... = 10.0, 0x408F4... = 1000.0, plus two larger doubles).
- movabsq $4621819117588971520, %rax ## imm = 0x4024000000000000
- movq %rax, 152(%rsp)
- movabsq $4652007308841189376, %rax ## imm = 0x408F400000000000
- movq %rax, 144(%rsp)
- movabsq $4666723172467343360, %rax ## imm = 0x40C3880000000000
- movq %rax, 136(%rsp)
- movabsq $4672076419705077760, %rax ## imm = 0x40D68CC000000000
- movq %rax, 128(%rsp)
- ## Pack the four scalars into ymm3 via two vunpcklpd pairs + vinsertf128.
- vmovsd 152(%rsp), %xmm0 ## xmm0 = mem[0],zero
- vmovsd 144(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vunpcklpd %xmm0, %xmm1, %xmm0 ## xmm0 = xmm1[0],xmm0[0]
- vmovsd 136(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vmovsd 128(%rsp), %xmm2 ## xmm2 = mem[0],zero
- vunpcklpd %xmm1, %xmm2, %xmm1 ## xmm1 = xmm2[0],xmm1[0]
- ## implicit-def: %ymm3
- vmovaps %xmm1, %xmm3
- vinsertf128 $1, %xmm0, %ymm3, %ymm3
- vmovapd %ymm3, 96(%rsp)
- vmovapd 96(%rsp), %ymm3
- vmovapd %ymm3, 64(%rsp)
- ## Second constant vector; the bare "movq %rax" reuses the 4th literal
- ## (0x40D68CC000000000) still live in rax from the group above.
- movabsq $4636807660098813952, %rcx ## imm = 0x4059400000000000
- movq %rcx, 232(%rsp)
- movabsq $4667261933164953600, %rcx ## imm = 0x40C5720000000000
- movq %rcx, 224(%rsp)
- movq %rax, 216(%rsp)
- movabsq $4642542712749293568, %rax ## imm = 0x406DA00000000000
- movq %rax, 208(%rsp)
- vmovsd 232(%rsp), %xmm0 ## xmm0 = mem[0],zero
- vmovsd 224(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vunpcklpd %xmm0, %xmm1, %xmm0 ## xmm0 = xmm1[0],xmm0[0]
- vmovsd 216(%rsp), %xmm1 ## xmm1 = mem[0],zero
- vmovsd 208(%rsp), %xmm2 ## xmm2 = mem[0],zero
- vunpcklpd %xmm1, %xmm2, %xmm1 ## xmm1 = xmm2[0],xmm1[0]
- ## implicit-def: %ymm3
- vmovaps %xmm1, %xmm3
- vinsertf128 $1, %xmm0, %ymm3, %ymm3
- vmovapd %ymm3, 160(%rsp)
- vmovapd 160(%rsp), %ymm3
- vmovapd %ymm3, 32(%rsp)
- ## Loop counter i at 28(%rsp); run the divide chain 100000 times.
- movl $0, 28(%rsp)
- LBB1_1: ## =>This Inner Loop Header: Depth=1
- cmpl $100000, 28(%rsp) ## imm = 0x186A0
- jge LBB1_4
- ## %bb.2: ## in Loop: Header=BB1_1 Depth=1
- ## 50 chained divides: each vdivpd consumes the ymm0 produced by the
- ## previous one (ymm0 = ymm1 / ymm0), so the block executes serially at
- ## divider latency.  NOTE(review): as in _payload, ymm0/ymm1 hold whatever
- ## the shuffle code above left in them, not the vectors stored to the
- ## stack -- presumably intentional; confirm against the C source.
- ## InlineAsm Start
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- vdivpd %ymm0, %ymm1, %ymm0
- ## InlineAsm End
- ## %bb.3: ## in Loop: Header=BB1_1 Depth=1
- ## i++ and loop back.
- movl 28(%rsp), %eax
- addl $1, %eax
- movl %eax, 28(%rsp)
- jmp LBB1_1
- LBB1_4:
- movq %rbp, %rsp
- popq %rbp
- vzeroupper ## clear upper ymm state before returning to possibly-SSE callers
- retq
- .cfi_endproc
- ## -- End function
- .section __TEXT,__literal8,8byte_literals
- .p2align 3 ## -- Begin function main
- ## Divisors for the ticks-per-divide figure:
- ## LCPI2_0 = 5.0e6  = 100000 iterations * 50 divides in _payload2
- ## LCPI2_3 = 4.8e6  = 100000 iterations * 48 divides in _payload
- LCPI2_0:
- .quad 4707126720094797824 ## double 5.0E+6
- LCPI2_3:
- .quad 4706911971729997824 ## double 4.8E+6
- .section __TEXT,__literal16,16byte_literals
- .p2align 4
- ## LCPI2_1/LCPI2_2: magic constants for the branch-free unsigned 64-bit ->
- ## double conversion idiom used below (vpunpckldq / vsubpd / vhaddpd).
- LCPI2_1:
- .long 1127219200 ## 0x43300000
- .long 1160773632 ## 0x45300000
- .long 0 ## 0x0
- .long 0 ## 0x0
- LCPI2_2:
- .quad 4841369599423283200 ## double 4503599627370496
- .quad 4985484787499139072 ## double 1.9342813113834067E+25
- .section __TEXT,__text,regular,pure_instructions
- ## ---------------------------------------------------------------------------
- ## int main(int argc, char **argv)
- ## Driver: warms up with 200 calls to _payload, then times one call each of
- ## _payload and _payload2 with rdtsc, converts the elapsed tick counts to
- ## double, divides by the number of vdivpd instructions executed, and prints
- ## both "ticks"/"tpc" pairs via printf.  Returns 0.
- ## ---------------------------------------------------------------------------
- .globl _main
- .p2align 4, 0x90
- _main: ## @main
- .cfi_startproc
- ## %bb.0:
- pushq %rbp
- .cfi_def_cfa_offset 16
- .cfi_offset %rbp, -16
- movq %rsp, %rbp
- .cfi_def_cfa_register %rbp
- subq $112, %rsp
- movl $0, -4(%rbp)
- movl %edi, -8(%rbp) ## spill argc (unused afterwards)
- movq %rsi, -16(%rbp) ## spill argv (unused afterwards)
- ## Warm-up: 200 calls to _payload before taking any measurement.
- movl $0, -20(%rbp)
- LBB2_1: ## =>This Inner Loop Header: Depth=1
- cmpl $200, -20(%rbp)
- jge LBB2_4
- ## %bb.2: ## in Loop: Header=BB2_1 Depth=1
- callq _payload
- ## %bb.3: ## in Loop: Header=BB2_1 Depth=1
- movl -20(%rbp), %eax
- addl $1, %eax
- movl %eax, -20(%rbp)
- jmp LBB2_1
- LBB2_4:
- ## Preload the SECOND measurement's format string ("ticks2...") and its
- ## 5.0e6 divisor, and spill both across the intervening calls.
- leaq L_.str.1(%rip), %rdi
- vmovsd LCPI2_0(%rip), %xmm0 ## xmm0 = mem[0],zero
- movq %rdi, -48(%rbp) ## 8-byte Spill
- vmovsd %xmm0, -56(%rbp) ## 8-byte Spill
- ## Time one _payload call: tick delta = rdtsc(after) - rdtsc(before).
- callq _rdtsc
- movq %rax, -32(%rbp)
- callq _payload
- callq _rdtsc
- movq %rax, -40(%rbp)
- movq -40(%rbp), %rax
- movq -32(%rbp), %rdi
- subq %rdi, %rax ## rax = elapsed ticks (u64)
- ## Convert the unsigned 64-bit tick count to double via the
- ## punpckldq/subpd/haddpd bit-trick, then divide by 4.8e6 divides.
- vmovq %rax, %xmm0
- vmovdqa LCPI2_1(%rip), %xmm1 ## xmm1 = [1127219200,1160773632,0,0]
- vpunpckldq %xmm1, %xmm0, %xmm0 ## xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
- vmovapd LCPI2_2(%rip), %xmm2 ## xmm2 = [4.503600e+15,1.934281e+25]
- vsubpd %xmm2, %xmm0, %xmm0
- vhaddpd %xmm0, %xmm0, %xmm0
- vmovsd LCPI2_3(%rip), %xmm3 ## xmm3 = mem[0],zero
- vdivsd %xmm3, %xmm0, %xmm0 ## xmm0 = ticks / 4.8e6 ("tpc" for _payload)
- ## printf(L_.str, ticks, tpc): SysV variadic call, al = 1 vector reg used.
- leaq L_.str(%rip), %rdi
- movb $1, %cl
- movq %rax, %rsi
- movb %cl, %al
- vmovaps %xmm2, -80(%rbp) ## 16-byte Spill
- vmovaps %xmm1, -96(%rbp) ## 16-byte Spill
- callq _printf
- movl %eax, -100(%rbp) ## 4-byte Spill
- ## Second measurement: a single warm-up call to _payload2 (NOTE(review):
- ## asymmetric with the 200-call warm-up above), then the timed call.
- callq _payload2
- callq _rdtsc
- movq %rax, -32(%rbp)
- callq _payload2
- callq _rdtsc
- movq %rax, -40(%rbp)
- movq -40(%rbp), %rax
- movq -32(%rbp), %rsi
- subq %rsi, %rax ## rax = elapsed ticks (u64)
- ## Same u64 -> double conversion, reusing the spilled constants, then
- ## divide by 5.0e6 divides and print with the spilled "ticks2" format.
- vmovq %rax, %xmm0
- vmovaps -96(%rbp), %xmm1 ## 16-byte Reload
- vpunpckldq %xmm1, %xmm0, %xmm0 ## xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
- vmovaps -80(%rbp), %xmm2 ## 16-byte Reload
- vsubpd %xmm2, %xmm0, %xmm0
- vhaddpd %xmm0, %xmm0, %xmm0
- vmovsd -56(%rbp), %xmm3 ## 8-byte Reload
- ## xmm3 = mem[0],zero
- vdivsd %xmm3, %xmm0, %xmm0 ## xmm0 = ticks / 5.0e6 ("tpc2" for _payload2)
- movq -48(%rbp), %rdi ## 8-byte Reload
- movq %rax, %rsi
- movb $1, %al ## variadic call: 1 vector register argument
- callq _printf
- ## return 0
- xorl %edx, %edx
- movl %eax, -104(%rbp) ## 4-byte Spill
- movl %edx, %eax
- addq $112, %rsp
- popq %rbp
- retq
- .cfi_endproc
- ## -- End function
- ## ---------------------------------------------------------------------------
- ## uint64_t rdtsc(void)
- ## Reads the time-stamp counter.  The rdtsc instruction returns the counter
- ## split as EDX:EAX; this helper recombines it into a single 64-bit value in
- ## rax.  Leaf function: the -4/-8(%rbp) stores use the SysV red zone (no rsp
- ## adjustment needed).  Clobbers: rcx, rdx, rsi, flags.
- ## NOTE(review): rdtsc is not a serializing instruction, so it can be
- ## reordered around the measured code; benchmarks commonly pair it with
- ## lfence or use rdtscp -- confirm whether that matters here.
- ## ---------------------------------------------------------------------------
- .p2align 4, 0x90 ## -- Begin function rdtsc
- _rdtsc: ## @rdtsc
- .cfi_startproc
- ## %bb.0:
- pushq %rbp
- .cfi_def_cfa_offset 16
- .cfi_offset %rbp, -16
- movq %rsp, %rbp
- .cfi_def_cfa_register %rbp
- ## InlineAsm Start
- rdtsc ## edx:eax = time-stamp counter
- ## InlineAsm End
- movl %eax, -8(%rbp) ## low 32 bits
- movl %edx, -4(%rbp) ## high 32 bits
- ## Recombine: rax = (high << 32) | low.
- movl -8(%rbp), %eax
- movl %eax, %ecx
- movl -4(%rbp), %eax
- movl %eax, %esi
- shlq $32, %rsi
- orq %rsi, %rcx
- movq %rcx, %rax
- popq %rbp
- retq
- .cfi_endproc
- ## -- End function
- .section __TEXT,__cstring,cstring_literals
- ## printf format strings: L_.str is used for the _payload measurement,
- ## L_.str.1 for the _payload2 measurement (see _main).
- L_.str: ## @.str
- .asciz "ticks: %llu\ntpc: %lf\n"
- L_.str.1: ## @.str.1
- .asciz "ticks2: %llu\ntpc2: %lf\n"
- .subsections_via_symbols
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement