Advertisement
Not a member of Pastebin yet?
Sign up — it unlocks many cool features!
- # ---------------------------------------------------------------------
- # void initPerlinNoise(void) — cdecl, GCC-generated i386 (MinGW/COFF).
- # Copies a 1024-byte (256 x 4-byte int) table at LC3 — defined elsewhere
- # in the file; presumably Perlin's reference permutation, TODO confirm —
- # into a stack buffer via memcpy, then replicates it into the global
- # _perm array twice: _perm[i] and _perm[256+i] (byte offset 1024),
- # producing the 512-entry doubled permutation table that _noise indexes
- # without wrapping. Clobbers eax, ecx, edx; ebx is saved/restored.
- # ---------------------------------------------------------------------
- .globl _initPerlinNoise
- .def _initPerlinNoise; .scl 2; .type 32; .endef
- _initPerlinNoise:
- pushl %ebx
- movl $1024, %ecx # memcpy size argument (256 ints)
- subl $1048, %esp # 1024-byte buffer at 16(%esp) + outgoing args
- movl %ecx, 8(%esp) # arg2: n = 1024
- leal 16(%esp), %eax
- movl $LC3, %edx
- movl %edx, 4(%esp) # arg1: src = LC3 (source permutation table)
- movl $_perm, %ebx # ebx = &_perm, reused for the +1024 stores below
- movl %eax, (%esp) # arg0: dest = stack buffer
- call _memcpy
- xorl %ecx, %ecx # i = 0
- .p2align 4,,15
- # copy loop, unrolled x8: while (i <= 255) copy buf[i] to _perm[i]
- # and _perm[256+i] (1024(%ebx,...)), for i, i+1, ..., i+7; i += 8.
- L242:
- movl 16(%esp,%ecx,4), %eax
- movl %eax, _perm(,%ecx,4)
- movl %eax, 1024(%ebx,%ecx,4)
- leal 1(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- leal 2(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- leal 3(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- leal 4(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- leal 5(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- leal 6(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- leal 7(%ecx), %eax
- movl 16(%esp,%eax,4), %edx
- addl $8, %ecx
- cmpl $255, %ecx
- movl %edx, _perm(,%eax,4)
- movl %edx, 1024(%ebx,%eax,4)
- jle L242
- addl $1048, %esp
- popl %ebx
- ret
- # IEEE-754 single-precision constants for _fade's polynomial
- # 6t^5 - 15t^4 + 10t^3 (values given as raw bit patterns):
- .section .rdata,"dr"
- .align 4
- LC4:
- .long 1086324736 # 0x40C00000 = 6.0f
- .align 4
- LC5:
- .long 1097859072 # 0x41700000 = 15.0f
- .align 4
- LC6:
- .long 1092616192 # 0x41200000 = 10.0f
- # ---------------------------------------------------------------------
- # float fade(float t) = 6t^5 - 15t^4 + 10t^3
- # Perlin's quintic smoothstep. cdecl: t at 8(%esp) after the sub;
- # computed in SSE scalar math, returned in st(0) as the i386 ABI
- # requires. Clobbers eax, xmm0-xmm2.
- # ---------------------------------------------------------------------
- .text
- .p2align 4,,15
- .globl _fade
- .def _fade; .scl 2; .type 32; .endef
- _fade:
- subl $4, %esp # scratch slot for the SSE->x87 handoff
- movss 8(%esp), %xmm0 # xmm0 = t
- movaps %xmm0, %xmm1
- movaps %xmm0, %xmm2
- mulss LC4, %xmm1 # xmm1 = 6t
- mulss %xmm0, %xmm2 # xmm2 = t^2
- mulss %xmm0, %xmm2 # xmm2 = t^3
- subss LC5, %xmm1 # xmm1 = 6t - 15
- mulss %xmm0, %xmm1 # xmm1 = 6t^2 - 15t
- addss LC6, %xmm1 # xmm1 = 6t^2 - 15t + 10
- mulss %xmm1, %xmm2 # xmm2 = t^3 * (6t^2 - 15t + 10)
- movss %xmm2, (%esp)
- flds (%esp) # return value in st(0)
- popl %eax # discard scratch slot (shorter than addl $4,%esp)
- ret
- # ---------------------------------------------------------------------
- # float lerp(float t, float a, float b) = a + t*(b - a)
- # cdecl: t at 8(%esp), a at 12(%esp), b at 16(%esp) after the sub.
- # Result returned in st(0). Clobbers eax, xmm0, xmm1.
- # ---------------------------------------------------------------------
- .p2align 4,,15
- .globl _lerp
- .def _lerp; .scl 2; .type 32; .endef
- _lerp:
- subl $4, %esp
- movss 12(%esp), %xmm1 # xmm1 = a
- movss 16(%esp), %xmm0 # xmm0 = b
- subss %xmm1, %xmm0 # xmm0 = b - a
- mulss 8(%esp), %xmm0 # xmm0 = t*(b - a)
- addss %xmm1, %xmm0 # xmm0 = a + t*(b - a)
- movss %xmm0, (%esp)
- flds (%esp) # return value in st(0)
- popl %eax
- ret
- # 16-byte xmm constant: sign-bit mask in the low lane, used by _grad's
- # "xorps LC9, %xmm0" to negate a scalar float (flip bit 31).
- .section .rdata,"dr"
- .align 16
- LC9:
- .long -2147483648 # 0x80000000 = sign bit
- .long 0
- .long 0
- .long 0
- # ---------------------------------------------------------------------
- # float grad(int hash, float x, float y, float z)
- # Perlin's gradient function:
- #   h = hash & 15
- #   u = h < 8 ? x : y
- #   v = h < 4 ? y : (h == 12 || h == 14 ? x : z)
- #   return ((h&1) ? -u : u) + ((h&2) ? -v : v)
- # cdecl: hash at 8(%esp), x/y/z at 12/16/20(%esp) after the sub.
- # Selection is done with fcmove/fcmovne on the x87 stack; the sign of
- # u is flipped with xorps LC9; the final +/-v choice picks between the
- # precomputed (u' + v) and (u' - v). Result in st(0).
- # ---------------------------------------------------------------------
- .text
- .p2align 4,,15
- .globl _grad
- .def _grad; .scl 2; .type 32; .endef
- _grad:
- subl $4, %esp
- movl 16(%esp), %edx # edx = bits of y
- movl 8(%esp), %ecx # ecx = hash
- flds 12(%esp) # st0 = x
- movl %edx, (%esp)
- andl $15, %ecx # h = hash & 15
- cmpl $7, %ecx
- flds (%esp) # st0 = y, st1 = x
- setg %al # al = (h > 7)
- testb %al, %al # ZF = (h <= 7)
- movss (%esp), %xmm1 # xmm1 = y (default v for h < 4)
- fcmove %st(1), %st # st0 = u = (h < 8 ? x : y)
- cmpl $3, %ecx
- jle L266 # h < 4: v stays y
- flds 20(%esp) # st0 = z, st1 = u, st2 = x
- cmpl $12, %ecx
- sete %al
- cmpl $14, %ecx
- sete %dl
- orl %edx, %eax # (h == 12) || (h == 14)
- testb $1, %al
- fcmovne %st(2), %st # st0 = v = (h==12||h==14 ? x : z)
- fstp %st(2) # squeeze stack: st0 = u, st1 = v
- fxch %st(1) # st0 = v, st1 = u
- fstps (%esp)
- movss (%esp), %xmm1 # xmm1 = v
- jmp L260
- .p2align 4,,7
- L266:
- fstp %st(1) # drop x; st0 = u (xmm1 already holds v = y)
- L260:
- fstps (%esp)
- testb $1, %cl # h & 1: negate u?
- movss (%esp), %xmm0 # xmm0 = u
- je L263
- xorps LC9, %xmm0 # u = -u (flip sign bit)
- L263:
- movaps %xmm0, %xmm2
- subss %xmm1, %xmm0 # xmm0 = u' - v
- addss %xmm1, %xmm2 # xmm2 = u' + v
- movss %xmm2, (%esp)
- testb $2, %cl # ZF = ((h & 2) == 0)
- flds (%esp) # st0 = u' + v
- movss %xmm0, (%esp)
- flds (%esp) # st0 = u' - v, st1 = u' + v
- popl %eax
- fcmove %st(1), %st # (h&2)==0 -> take u' + v
- fstp %st(1)
- ret
- # Constants for _noise (raw IEEE-754 single bit patterns):
- .section .rdata,"dr"
- .align 4
- LC11:
- .long 1065353216 # 0x3F800000 = 1.0f (corner offset)
- .align 16
- LC12:
- .long -2147483648 # 0x80000000 = sign-bit mask for xorps negation
- .long 0
- .long 0
- .long 0
- .align 4
- LC13:
- .long 1086324736 # 0x40C00000 = 6.0f  (fade polynomial)
- .align 4
- LC14:
- .long 1097859072 # 0x41700000 = 15.0f (fade polynomial)
- .align 4
- LC15:
- .long 1092616192 # 0x41200000 = 10.0f (fade polynomial)
- # ---------------------------------------------------------------------
- # float noise(float x, float y, float z) — improved 3D Perlin noise.
- # cdecl: x/y/z at 56/60/64(%esp) after the prologue; result in st(0).
- #
- # Structure (matches Perlin's reference implementation):
- #   1. Truncate each coordinate, keep the fractional parts and hash the
- #      unit-cube corner indices through the doubled _perm table.
- #   2. Evaluate grad() at all 8 corners. grad() is inlined 8 times
- #      below — each repetition of the "andl $15 / cmpl $7 / cmpl $3 /
- #      testb $1 / xorps LC12 / testb $2" pattern is one corner
- #      (see _grad above for the selection logic it reproduces).
- #   3. Compute fade() for each fractional coordinate and combine the 8
- #      gradients with trilinear interpolation.
- #
- # Approximate register roles throughout: xmm5 = x - floor(x),
- # xmm6 = y - floor(y), 8(%esp) = z - floor(z), xmm2 = 1.0f (LC11).
- # 4(%esp) is a scratch slot used to shuttle values between the integer,
- # SSE, and x87 register files (GCC's SSE<->x87 spill idiom).
- # NOTE(review): cvttss2si truncates toward zero, not floor — for
- # negative inputs this differs from Perlin's (int)Math.floor(); the
- # caller below only passes non-negative coordinates.
- # ---------------------------------------------------------------------
- .text
- .p2align 4,,15
- .globl _noise
- .def _noise; .scl 2; .type 32; .endef
- _noise:
- pushl %esi
- pushl %ebx
- subl $44, %esp
- movss 56(%esp), %xmm1 # x
- movss 60(%esp), %xmm2 # y
- movss 64(%esp), %xmm4 # z
- # --- step 1: truncate coordinates, hash the 8 corner indices ---------
- cvttss2si %xmm1, %ebx # X = (int)x
- movaps %xmm1, %xmm5
- cvtsi2ss %ebx, %xmm6
- andl $255, %ebx # X &= 255
- movl _perm+4(,%ebx,4), %esi # perm[X+1]
- movl _perm(,%ebx,4), %ecx # perm[X]
- cvttss2si %xmm2, %eax # Y = (int)y
- cvttss2si %xmm4, %edx # Z = (int)z
- cvtsi2ss %eax, %xmm3
- andl $255, %eax
- cvtsi2ss %edx, %xmm0
- addl %eax, %ecx # A  = perm[X] + Y
- addl %esi, %eax # B  = perm[X+1] + Y
- movl _perm(,%ecx,4), %ebx # AA = perm[A]
- movl _perm(,%eax,4), %esi # BA = perm[B]
- andl $255, %edx # Z &= 255
- subss %xmm6, %xmm5 # x -= floor-> fractional x
- movl _perm+4(,%ecx,4), %ecx # AB = perm[A+1]
- movaps %xmm2, %xmm6
- addl %edx, %ebx # AA + Z
- cvtsi2ss _perm(,%ebx,4), %xmm7 # hash perm[AA+Z]
- addl %edx, %esi # BA + Z
- addl %edx, %ecx # AB + Z
- addl _perm+4(,%eax,4), %edx # BB + Z
- subss %xmm3, %xmm6 # fractional y
- subss %xmm0, %xmm4 # fractional z
- cvtsi2ss _perm(,%ecx,4), %xmm1 # hash perm[AB+Z]
- cvtsi2ss _perm+4(,%ecx,4), %xmm3 # hash perm[AB+Z+1]
- cvtsi2ss _perm(,%edx,4), %xmm2 # hash perm[BB+Z]
- cvtsi2ss _perm+4(,%edx,4), %xmm0 # hash perm[BB+Z+1]
- movss %xmm4, 8(%esp) # spill fractional z
- cvtsi2ss _perm(,%esi,4), %xmm4 # hash perm[BA+Z]
- movss %xmm7, 40(%esp)
- cvtsi2ss _perm+4(,%ebx,4), %xmm7 # hash perm[AA+Z+1]
- movss %xmm1, 36(%esp)
- movaps %xmm5, %xmm1
- movss %xmm2, 28(%esp)
- # --- step 2: inlined grad() #1, hash = perm[BB+Z+1] ------------------
- cvttss2si %xmm0, %ecx
- andl $15, %ecx
- movss LC11, %xmm2 # xmm2 = 1.0f for corner offsets
- movss %xmm3, (%esp)
- movaps %xmm6, %xmm3
- movss %xmm4, 32(%esp)
- cmpl $7, %ecx
- setg %al
- cvtsi2ss _perm+4(,%esi,4), %xmm4 # hash perm[BA+Z+1]
- subss %xmm2, %xmm1 # x - 1
- subss %xmm2, %xmm3 # y - 1
- movss %xmm1, 4(%esp)
- movss 8(%esp), %xmm0
- movl 4(%esp), %esi
- movss %xmm3, 4(%esp)
- movl 4(%esp), %edx
- testb %al, %al
- movaps %xmm3, %xmm1
- movl %esi, %ebx
- subss %xmm2, %xmm0 # z - 1
- cmovne %edx, %ebx # u = (h < 8 ? x-1 : y-1)
- cmpl $3, %ecx
- jle L271
- movss %xmm0, 4(%esp)
- cmpl $12, %ecx
- sete %al
- cmpl $14, %ecx
- sete %dl
- orl %edx, %eax
- movl 4(%esp), %edx
- testb $1, %al
- cmovne %esi, %edx # v = (h==12||h==14 ? x-1 : z-1)
- movl %edx, 4(%esp)
- movss 4(%esp), %xmm1
- L271:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L274
- xorps LC12, %xmm0 # u = -u
- L274:
- movaps %xmm0, %xmm3
- subss %xmm1, %xmm0 # u' - v
- addss %xmm1, %xmm3 # u' + v
- movss %xmm0, 4(%esp)
- testb $2, %cl
- movl 4(%esp), %ecx
- movss %xmm3, 24(%esp)
- movaps %xmm6, %xmm0
- subss %xmm2, %xmm0
- cmove 24(%esp), %ecx # pick +v / -v form
- movss %xmm0, 4(%esp)
- movl 4(%esp), %esi
- movss %xmm5, 4(%esp)
- movl %ecx, 24(%esp) # corner value -> 24(%esp)
- # --- inlined grad() #2, hash = perm[AB+Z+1] --------------------------
- cvttss2si (%esp), %ecx
- movss 8(%esp), %xmm0
- andl $15, %ecx
- cmpl $7, %ecx
- setg %bl
- subss %xmm2, %xmm0
- testb %bl, %bl
- movl 4(%esp), %ebx
- movl %esi, 4(%esp)
- movss 4(%esp), %xmm1
- cmovne %esi, %ebx
- cmpl $3, %ecx
- jle L281
- movss %xmm5, 4(%esp)
- cmpl $12, %ecx
- sete %al
- flds 4(%esp)
- cmpl $14, %ecx
- sete %dl
- movss %xmm0, 4(%esp)
- orl %edx, %eax
- testb $1, %al
- flds 4(%esp)
- fcmovne %st(1), %st
- fstp %st(1)
- fstps 4(%esp)
- movss 4(%esp), %xmm1
- L281:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L284
- xorps LC12, %xmm0
- L284:
- movaps %xmm0, %xmm3
- subss %xmm1, %xmm0
- addss %xmm1, %xmm3
- movss %xmm0, 4(%esp)
- testb $2, %cl
- movl 4(%esp), %esi
- movss %xmm3, 20(%esp)
- # --- inlined grad() #3, hash = perm[BA+Z] ----------------------------
- cvttss2si %xmm4, %ecx
- movaps %xmm5, %xmm4
- cmove 20(%esp), %esi
- subss %xmm2, %xmm4
- andl $15, %ecx
- movss %xmm4, 4(%esp)
- cmpl $7, %ecx
- setg %bl
- movl %esi, 20(%esp) # corner value -> 20(%esp)
- movl 4(%esp), %esi
- testb %bl, %bl
- movss %xmm6, 4(%esp)
- movss 8(%esp), %xmm0
- movl 4(%esp), %ebx
- movaps %xmm6, %xmm1
- cmove %esi, %ebx
- cmpl $3, %ecx
- subss %xmm2, %xmm0
- jle L291
- movl %esi, 4(%esp)
- cmpl $12, %ecx
- sete %al
- flds 4(%esp)
- cmpl $14, %ecx
- sete %dl
- movss %xmm0, 4(%esp)
- orl %edx, %eax
- testb $1, %al
- flds 4(%esp)
- fcmovne %st(1), %st
- fstp %st(1)
- fstps 4(%esp)
- movss 4(%esp), %xmm1
- L291:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L294
- xorps LC12, %xmm0
- L294:
- movaps %xmm0, %xmm4
- subss %xmm1, %xmm0
- addss %xmm1, %xmm4
- movss %xmm4, 4(%esp)
- testb $2, %cl
- # --- inlined grad() #4, hash = perm[AA+Z+1] --------------------------
- cvttss2si %xmm7, %ecx
- flds 4(%esp)
- movss %xmm0, 4(%esp)
- movaps %xmm6, %xmm1
- flds 4(%esp)
- movss %xmm5, 4(%esp)
- movss 8(%esp), %xmm0
- fcmove %st(1), %st # finish corner #3 selection on x87
- fstp %st(1)
- andl $15, %ecx
- cmpl $7, %ecx
- setg %bl
- subss %xmm2, %xmm0
- testb %bl, %bl
- movl 4(%esp), %ebx
- movss %xmm6, 4(%esp)
- movl 4(%esp), %esi
- cmovne %esi, %ebx
- cmpl $3, %ecx
- jle L301
- movss %xmm5, 4(%esp)
- cmpl $12, %ecx
- sete %al
- flds 4(%esp)
- cmpl $14, %ecx
- sete %dl
- movss %xmm0, 4(%esp)
- orl %edx, %eax
- testb $1, %al
- flds 4(%esp)
- fcmovne %st(1), %st
- fstp %st(1)
- fstps 4(%esp)
- movss 4(%esp), %xmm1
- L301:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L304
- xorps LC12, %xmm0
- L304:
- movaps %xmm0, %xmm3
- subss %xmm1, %xmm0
- addss %xmm1, %xmm3
- movss %xmm0, 4(%esp)
- testb $2, %cl
- movl 4(%esp), %ecx
- movss %xmm3, 16(%esp)
- movaps %xmm5, %xmm7
- subss %xmm2, %xmm7
- cmove 16(%esp), %ecx
- movaps %xmm6, %xmm1
- subss %xmm2, %xmm1
- movss %xmm7, 4(%esp)
- movl 4(%esp), %esi
- movl %ecx, 16(%esp) # corner value -> 16(%esp)
- # --- inlined grad() #5, hash = perm[BB+Z] ----------------------------
- cvttss2si 28(%esp), %ecx
- movss %xmm1, 4(%esp)
- movl 4(%esp), %edx
- andl $15, %ecx
- cmpl $7, %ecx
- setg %bl
- testb %bl, %bl
- movl %esi, %ebx
- cmovne %edx, %ebx
- cmpl $3, %ecx
- jle L311
- cmpl $12, %ecx
- sete %al
- cmpl $14, %ecx
- sete %dl
- orl %edx, %eax
- testb $1, %al
- movl %esi, %eax
- cmove 8(%esp), %eax
- movl %eax, 4(%esp)
- movss 4(%esp), %xmm1
- L311:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L314
- xorps LC12, %xmm0
- L314:
- movaps %xmm0, %xmm4
- subss %xmm1, %xmm0
- addss %xmm1, %xmm4
- movss %xmm4, 4(%esp)
- testb $2, %cl
- flds 4(%esp)
- # --- inlined grad() #6, hash = perm[AB+Z] ----------------------------
- cvttss2si 36(%esp), %ecx
- movss %xmm0, 4(%esp)
- movaps %xmm6, %xmm0
- subss %xmm2, %xmm0
- flds 4(%esp)
- movss %xmm0, 4(%esp)
- movl 4(%esp), %esi
- movss %xmm5, 4(%esp)
- movaps %xmm0, %xmm1
- fcmove %st(1), %st # finish corner #5 selection on x87
- fstp %st(1)
- andl $15, %ecx
- cmpl $7, %ecx
- setg %bl
- testb %bl, %bl
- movl 4(%esp), %ebx
- cmovne %esi, %ebx
- cmpl $3, %ecx
- jle L321
- cmpl $12, %ecx
- sete %al
- cmpl $14, %ecx
- sete %dl
- orl %edx, %eax
- testb $1, %al
- movl 4(%esp), %eax
- cmove 8(%esp), %eax
- movl %eax, 4(%esp)
- movss 4(%esp), %xmm1
- L321:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L324
- xorps LC12, %xmm0
- L324:
- movaps %xmm0, %xmm7
- subss %xmm1, %xmm0
- addss %xmm1, %xmm7
- movss %xmm0, 4(%esp)
- testb $2, %cl
- movl 4(%esp), %ecx
- movss %xmm7, 12(%esp)
- movaps %xmm5, %xmm1
- subss %xmm2, %xmm1
- cmove 12(%esp), %ecx
- movss %xmm1, 4(%esp)
- movl 4(%esp), %ebx
- movss %xmm6, 4(%esp)
- movl 4(%esp), %esi
- movaps %xmm6, %xmm1
- movl %ecx, 12(%esp) # corner value -> 12(%esp)
- # --- inlined grad() #7, hash = perm[BA+Z] ----------------------------
- cvttss2si 32(%esp), %ecx
- andl $15, %ecx
- cmpl $7, %ecx
- setg %al
- testb %al, %al
- cmove %ebx, %esi
- cmpl $3, %ecx
- jle L331
- cmpl $12, %ecx
- sete %al
- cmpl $14, %ecx
- sete %dl
- orl %edx, %eax
- testb $1, %al
- movl %ebx, %edx
- cmove 8(%esp), %edx
- movl %edx, 4(%esp)
- movss 4(%esp), %xmm1
- L331:
- movl %esi, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L334
- xorps LC12, %xmm0
- L334:
- movaps %xmm0, %xmm2
- subss %xmm1, %xmm0
- addss %xmm1, %xmm2
- movss %xmm2, 4(%esp)
- testb $2, %cl
- movl 4(%esp), %esi
- movss %xmm0, 4(%esp)
- movaps %xmm6, %xmm1
- movl 4(%esp), %ecx
- movss %xmm5, 4(%esp)
- cmovne %ecx, %esi
- # --- inlined grad() #8, hash = perm[AA+Z] ----------------------------
- cvttss2si 40(%esp), %ecx
- andl $15, %ecx
- cmpl $7, %ecx
- setg %bl
- testb %bl, %bl
- movl 4(%esp), %ebx
- movss %xmm6, 4(%esp)
- movl 4(%esp), %eax
- cmovne %eax, %ebx
- cmpl $3, %ecx
- jle L341
- movss %xmm5, 4(%esp)
- cmpl $12, %ecx
- sete %al
- cmpl $14, %ecx
- sete %dl
- orl %edx, %eax
- testb $1, %al
- movl 4(%esp), %edx
- cmove 8(%esp), %edx
- movl %edx, 4(%esp)
- movss 4(%esp), %xmm1
- L341:
- movl %ebx, 4(%esp)
- testb $1, %cl
- movss 4(%esp), %xmm0
- je L344
- xorps LC12, %xmm0
- L344:
- # --- step 3: fade(u), fade(v), fade(w) = t*t*t*(t*(t*6-15)+10) -------
- # interleaved with finishing corner #8, then trilinear interpolation
- # of the 8 corner values spilled at 24/20/16/12(%esp) and registers.
- movss LC14, %xmm4 # 15.0f
- movaps %xmm0, %xmm2
- addss %xmm1, %xmm2
- movss %xmm2, 4(%esp)
- subss %xmm1, %xmm0
- movaps %xmm5, %xmm2
- movss LC13, %xmm1 # 6.0f
- mulss %xmm5, %xmm2 # x^2
- mulss %xmm5, %xmm2 # x^3
- flds 4(%esp)
- movss %xmm0, 4(%esp)
- movaps %xmm5, %xmm0
- movss LC15, %xmm3 # 10.0f
- mulss %xmm1, %xmm0 # 6x
- flds 4(%esp)
- subss %xmm4, %xmm0 # 6x - 15
- testb $2, %cl
- mulss %xmm5, %xmm0 # 6x^2 - 15x
- addss %xmm3, %xmm0 # 6x^2 - 15x + 10
- movaps %xmm6, %xmm5
- fcmove %st(1), %st # finish corner #8 selection
- fstp %st(1)
- mulss %xmm0, %xmm2 # xmm2 = u = fade(x)
- movaps %xmm6, %xmm0
- mulss %xmm1, %xmm0
- mulss %xmm6, %xmm5
- subss %xmm4, %xmm0
- mulss 8(%esp), %xmm1
- mulss %xmm6, %xmm5 # y^3
- fstps 4(%esp)
- mulss %xmm6, %xmm0
- addss %xmm3, %xmm0
- movss 8(%esp), %xmm6
- mulss %xmm0, %xmm5 # xmm5 = v = fade(y)
- subss %xmm4, %xmm1
- mulss 8(%esp), %xmm1
- movss 4(%esp), %xmm7
- mulss %xmm6, %xmm6
- mulss 8(%esp), %xmm6 # z^3
- addss %xmm3, %xmm1
- movl %esi, 4(%esp)
- movss 24(%esp), %xmm3
- mulss %xmm1, %xmm6 # xmm6 = w = fade(z)
- movss 4(%esp), %xmm1
- fstps 4(%esp)
- movss 4(%esp), %xmm0
- fstps 4(%esp)
- # trilinear lerp: lerp(w, lerp(v, lerp(u,...), ...), ...)
- subss %xmm7, %xmm1
- mulss %xmm2, %xmm1
- movss 4(%esp), %xmm4
- addss %xmm7, %xmm1 # lerp along x
- subss 12(%esp), %xmm0
- subss 20(%esp), %xmm3
- subss 16(%esp), %xmm4
- mulss %xmm2, %xmm0
- addss 12(%esp), %xmm0 # lerp along x
- mulss %xmm2, %xmm4
- mulss %xmm3, %xmm2
- addss 16(%esp), %xmm4 # lerp along x
- subss %xmm1, %xmm0
- mulss %xmm5, %xmm0
- addss 20(%esp), %xmm2 # lerp along x
- addss %xmm1, %xmm0 # lerp along y
- subss %xmm4, %xmm2
- mulss %xmm2, %xmm5
- addss %xmm5, %xmm4 # lerp along y
- subss %xmm0, %xmm4
- mulss %xmm4, %xmm6
- addss %xmm6, %xmm0 # lerp along z -> final result
- movss %xmm0, 24(%esp)
- flds 24(%esp) # return value in st(0)
- addl $44, %esp
- popl %ebx
- popl %esi
- ret
- # Coordinate scale used by the JNI entry point below:
- .section .rdata,"dr"
- .align 4
- LC17:
- .long 1008981770 # 0x3C23D70A = 0.00999999978f (~0.01f)
- # ---------------------------------------------------------------------
- # JNIEXPORT ... JNICALL Java_pack_Code_sse_noise(JNIEnv*, ...)
- # stdcall with 20 bytes of arguments (ret $20). After the prologue
- # (4 pushes + subl $28) the five 4-byte args sit at 48..64(%esp):
- #   48 = JNIEnv* (esi on the error path)
- #   56 = pointer checked for 16-byte alignment, then used as the
- #        float output buffer base
- #   64 = s, presumably log2 of the grid edge length
- #   52, 60 never read here (jclass and one more arg — names inferred;
- #   TODO confirm against the Java-side native declaration).
- #
- # Fills the buffer with _noise samples: count = (2^s)^3 elements,
- # iterated i = count-1 down to 0, loop unrolled x2 with a one-iteration
- # peel (L377) for odd trip counts. Each i is decomposed into three
- # coordinates: (i>>s)&mask, (i>>2s)&mask, (i>>4s)&mask with
- # mask = 2^s - 1, each scaled by 0.01f (LC17) before calling _noise.
- # NOTE(review): the third shift count is 4*s (20(%esp) = s<<2), not
- # 3*s, and the low s bits of i are never used — this looks suspicious;
- # verify against the original source.
- #
- # If (arg@56 & 15) != 0 the L376 block calls through the JNIEnv vtable
- # at offsets 24, 56 and 92 with 2, 3 and 2 args respectively —
- # consistent with FindClass(LC0) / ThrowNew(cls, LC1) /
- # DeleteLocalRef, TODO confirm; LC0/LC1 are defined elsewhere in the
- # file — then falls into the main loop anyway (jmp L359).
- # ---------------------------------------------------------------------
- .text
- .p2align 4,,15
- .globl _Java_pack_Code_sse_1noise@20
- .def _Java_pack_Code_sse_1noise@20; .scl 2; .type 32; .endef
- _Java_pack_Code_sse_1noise@20:
- pushl %ebp
- movl $1, %edx
- pushl %edi
- pushl %esi
- pushl %ebx
- subl $28, %esp
- movl 64(%esp), %eax # s
- movzbl 64(%esp), %ecx
- movl 64(%esp), %ebx
- addl %eax, %eax
- movl %eax, 24(%esp) # 2*s (second shift count)
- movl 56(%esp), %eax
- sall %cl, %edx # edx = 2^s = grid edge d
- movl %edx, %ebp
- leal -1(%edx), %edi # edi = mask = d - 1
- movl 48(%esp), %esi # esi = JNIEnv*
- imull %edx, %ebp # d^2
- andl $15, %eax # alignment check on arg@56
- sall $2, %ebx
- movl %ebx, 20(%esp) # 4*s (third shift count; see NOTE above)
- imull %edx, %ebp # ebp = d^3 = element count
- xorl %edx, %edx
- movl %edx, %ecx
- orl %eax, %ecx
- jne L376 # misaligned -> throw via JNI, then continue
- L359:
- movl 56(%esp), %esi
- movl %ebp, %ebx
- decl %ebx # i = count - 1
- movl %esi, 16(%esp) # 16(%esp) = output buffer base
- js L366 # count == 0: done
- # first (peeled) iteration, also computes whether count is odd
- movzbl 64(%esp), %ecx
- movl %ebx, %esi
- andl $1, %esi
- movl %esi, 12(%esp) # 12(%esp) = (i & 1), selects the odd-trip peel
- movl %ebx, %esi
- movl %ebx, %edx
- movl %ebx, %eax
- sarl %cl, %esi # (i >> s)
- andl %edi, %esi # ... & mask
- movzbl 24(%esp), %ecx
- cvtsi2ss %esi, %xmm0
- movl %ebp, %esi
- sarl %cl, %edx # (i >> 2s)
- andl %edi, %edx
- cvtsi2ss %edx, %xmm1
- movzbl 20(%esp), %ecx
- mulss LC17, %xmm0 # * 0.01f
- sarl %cl, %eax # (i >> 4s)
- andl %edi, %eax
- mulss LC17, %xmm1
- cvtsi2ss %eax, %xmm2
- movss %xmm0, (%esp) # noise arg x
- movss %xmm1, 4(%esp) # noise arg y
- mulss LC17, %xmm2
- movss %xmm2, 8(%esp) # noise arg z
- call _noise
- movl 16(%esp), %eax
- subl $2, %esi # esi = count - 2 = next i for the x2 loop
- fstps (%eax,%ebx,4) # buf[i] = result
- js L366
- movl 12(%esp), %ebx
- testl %ebx, %ebx
- jne L377 # odd trip count: peel one more iteration first
- .p2align 4,,15
- # main loop, unrolled x2: handles i and i-1 per pass, i -= 2
- L364:
- movzbl 64(%esp), %ecx
- movl %esi, %ebp
- movl %esi, %edx
- movl %esi, %eax
- sarl %cl, %ebp
- andl %edi, %ebp
- movzbl 24(%esp), %ecx
- cvtsi2ss %ebp, %xmm3
- sarl %cl, %edx
- movzbl 20(%esp), %ecx
- andl %edi, %edx
- cvtsi2ss %edx, %xmm1
- mulss LC17, %xmm3
- sarl %cl, %eax
- andl %edi, %eax
- mulss LC17, %xmm1
- cvtsi2ss %eax, %xmm2
- movss %xmm3, (%esp)
- movss %xmm1, 4(%esp)
- mulss LC17, %xmm2
- movss %xmm2, 8(%esp)
- call _noise
- movzbl 64(%esp), %ecx
- movl 16(%esp), %ebx
- fstps (%ebx,%esi,4) # buf[i]
- leal -1(%esi), %ebx # second element of the pair: i-1
- movl %ebx, %ebp
- sarl %cl, %ebp
- movzbl 24(%esp), %ecx
- movl %ebx, %edx
- movl %ebx, %eax
- andl %edi, %ebp
- cvtsi2ss %ebp, %xmm6
- sarl %cl, %edx
- movzbl 20(%esp), %ecx
- andl %edi, %edx
- cvtsi2ss %edx, %xmm7
- mulss LC17, %xmm6
- sarl %cl, %eax
- andl %edi, %eax
- mulss LC17, %xmm7
- cvtsi2ss %eax, %xmm0
- movss %xmm6, (%esp)
- movss %xmm7, 4(%esp)
- mulss LC17, %xmm0
- movss %xmm0, 8(%esp)
- call _noise
- movl 16(%esp), %ebp
- subl $2, %esi
- fstps (%ebp,%ebx,4) # buf[i-1]
- L375:
- jns L364
- L366:
- addl $28, %esp
- popl %ebx
- popl %esi
- popl %edi
- popl %ebp
- ret $20 # stdcall: callee pops the 20 arg bytes
- # one-iteration peel for odd trip counts, then rejoin the x2 loop
- L377:
- movzbl 64(%esp), %ecx
- movl %esi, %ebx
- movl %esi, %edx
- movl %esi, %eax
- sarl %cl, %ebx
- andl %edi, %ebx
- movzbl 24(%esp), %ecx
- cvtsi2ss %ebx, %xmm3
- sarl %cl, %edx
- movzbl 20(%esp), %ecx
- andl %edi, %edx
- cvtsi2ss %edx, %xmm4
- mulss LC17, %xmm3
- sarl %cl, %eax
- andl %edi, %eax
- mulss LC17, %xmm4
- cvtsi2ss %eax, %xmm5
- movss %xmm3, (%esp)
- movss %xmm4, 4(%esp)
- mulss LC17, %xmm5
- movss %xmm5, 8(%esp)
- call _noise
- movl 16(%esp), %edx
- fstps (%edx,%esi,4)
- movl %ebp, %esi
- subl $3, %esi # resume at count - 3 (peel consumed count-2)
- jmp L375
- # error path: indirect calls through the JNIEnv function table; the
- # callees are stdcall (they pop their own args), hence the subl
- # re-adjustments around each call. Arg counts 2 / 3 / 2 match
- # FindClass / ThrowNew / DeleteLocalRef — TODO confirm.
- L376:
- movl (%esi), %ebx # ebx = *env (function table)
- movl $LC0, %ecx # LC0: string defined elsewhere in the file
- movl %ecx, 4(%esp)
- movl %esi, (%esp)
- call *24(%ebx)
- movl $LC1, %edx # LC1: string defined elsewhere in the file
- movl %eax, %ebx # ebx = returned local ref
- movl (%esi), %eax
- subl $8, %esp
- movl %edx, 8(%esp)
- movl %ebx, 4(%esp)
- movl %esi, (%esp)
- call *56(%eax)
- movl (%esi), %eax
- subl $12, %esp
- movl %ebx, 4(%esp)
- movl %esi, (%esp)
- call *92(%eax)
- subl $8, %esp
- jmp L359
- # File-level data and linker directives:
- # _perm: 2048 bytes = 512 x 4-byte ints — the doubled permutation
- # table filled by _initPerlinNoise and indexed by _noise.
- .comm _perm, 2048 # 2048
- .def _memcpy; .scl 2; .type 32; .endef
- # DLL export directive for the JNI entry point (MinGW .drectve section)
- .section .drectve
- .ascii " -export:Java_pack_Code_sse_1noise@20"
Advertisement
Add Comment
Please sign in to add a comment.
Advertisement