	.file	"main.c"
	.text
	.p2align 4,,15
	.globl	_xorBuffer
	.def	_xorBuffer;	.scl	2;	.type	32;	.endef
_xorBuffer:
LFB18:
	.cfi_startproc
	pushl	%ebp
	.cfi_def_cfa_offset 8
	.cfi_offset 5, -8
	pushl	%edi
	.cfi_def_cfa_offset 12
	.cfi_offset 7, -12
	pushl	%esi
	.cfi_def_cfa_offset 16
	.cfi_offset 6, -16
	pushl	%ebx
	.cfi_def_cfa_offset 20
	.cfi_offset 3, -20
	subl	$28, %esp
	.cfi_def_cfa_offset 48
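# Arguments (cdecl, after the four pushes and the 28-byte frame):
# 48(%esp) = destination buffer, 52(%esp) = source buffer, 56(%esp) = byte count.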
	movl	56(%esp), %edx
	movl	48(%esp), %eax
	movl	52(%esp), %ecx
	testl	%edx, %edx
	je	L1
	movl	%edx, %ebx
	movl	%edx, 8(%esp)
	shrl	$4, %ebx
	movl	%ebx, %esi
	movl	%ebx, 4(%esp)
	sall	$4, %esi
	testl	%esi, %esi
	je	L3
	leal	16(%ecx), %edi
	cmpl	$15, %edx
	seta	15(%esp)
	cmpl	%edi, %eax
	leal	16(%eax), %ebp
	seta	%bl
	cmpl	%ebp, %ecx
	movl	%ebx, %edi
	seta	%bl
	movl	%ebx, %ebp
	orl	%ebp, %edi
	movl	%edi, %ebx
	testb	%bl, 15(%esp)
	je	L3
	movl	4(%esp), %ebx
	xorl	%edi, %edi
	xorl	%ebp, %ebp
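# L4: vectorized main loop - XOR 16 bytes per iteration (unaligned loads,
# vpxor, unaligned store back into the destination buffer).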
	.p2align 4,,10
L4:
	vmovdqu	(%ecx,%edi), %xmm1
	addl	$1, %ebp
	vmovdqu	(%eax,%edi), %xmm0
	vpxor	%xmm0, %xmm1, %xmm0
	vmovdqu	%xmm0, (%eax,%edi)
	addl	$16, %edi
	cmpl	%ebp, %ebx
	ja	L4
	addl	%esi, %eax
	addl	%esi, %ecx
	subl	%esi, %edx
	cmpl	%esi, 8(%esp)
	je	L1
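# L3/L5: scalar path - XOR one byte per iteration, covering whatever the
# vector loop did not handle (or the whole buffer when the vector path is skipped).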
L3:
	xorl	%ebx, %ebx
	movl	%ecx, %esi
	.p2align 4,,10
L5:
	movzbl	(%esi,%ebx), %ecx
	xorb	%cl, (%eax,%ebx)
	addl	$1, %ebx
	subl	$1, %edx
	jne	L5
L1:
	addl	$28, %esp
	.cfi_def_cfa_offset 20
	popl	%ebx
	.cfi_def_cfa_offset 16
	.cfi_restore 3
	popl	%esi
	.cfi_def_cfa_offset 12
	.cfi_restore 6
	popl	%edi
	.cfi_def_cfa_offset 8
	.cfi_restore 7
	popl	%ebp
	.cfi_def_cfa_offset 4
	.cfi_restore 5
	ret
	.cfi_endproc
LFE18:
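/* A rough, hand-reconstructed C equivalent of _xorBuffer above. Parameter
   names and exact types are assumptions, inferred from the argument slots
   and the two loops; this is a sketch, not the original source:

       void xorBuffer(unsigned char *dst, const unsigned char *src,
                      unsigned int len)
       {
           for (unsigned int i = 0; i < len; i++)
               dst[i] ^= src[i];   // GCC vectorizes this into the vpxor loop at L4
       }
*/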
	.def	___main;	.scl	2;	.type	32;	.endef
	.section .rdata,"dr"
LC0:
	.ascii "result = \0"
LC1:
	.ascii "%.2x\0"
	.section	.text.startup,"x"
	.p2align 4,,15
	.globl	_main
	.def	_main;	.scl	2;	.type	32;	.endef
_main:
LFB19:
	.cfi_startproc
	pushl	%ebp
	.cfi_def_cfa_offset 8
	.cfi_offset 5, -8
	movl	%esp, %ebp
	.cfi_def_cfa_register 5
	pushl	%edi
	pushl	%esi
	pushl	%ebx
	andl	$-16, %esp
	subl	$48, %esp
	.cfi_offset 3, -20
	.cfi_offset 6, -16
	.cfi_offset 7, -12
	call	___main
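# Allocate two 32-byte heap buffers; fill the first with 0xAA bytes
# (-1431655766 == 0xAAAAAAAA) and the second with 0xBB bytes
# (-1145324613 == 0xBBBBBBBB), then compute (-ptr) & 15 for the first buffer
# to see how many bytes must be peeled before it is 16-byte aligned.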
	movl	$32, (%esp)
	call	_malloc
	movl	$32, (%esp)
	movl	%eax, 44(%esp)
	call	_malloc
	movl	%eax, 40(%esp)
	movl	44(%esp), %eax
	movl	$-1431655766, (%eax)
	movl	$-1431655766, 4(%eax)
	movl	$-1431655766, 8(%eax)
	movl	$-1431655766, 12(%eax)
	movl	$-1431655766, 16(%eax)
	movl	$-1431655766, 20(%eax)
	movl	$-1431655766, 24(%eax)
	movl	$-1431655766, 28(%eax)
	movl	40(%esp), %eax
	movl	$-1145324613, (%eax)
	movl	$-1145324613, 4(%eax)
	movl	$-1145324613, 8(%eax)
	movl	$-1145324613, 12(%eax)
	movl	$-1145324613, 16(%eax)
	movl	$-1145324613, 20(%eax)
	movl	$-1145324613, 24(%eax)
	movl	$-1145324613, 28(%eax)
	movl	44(%esp), %eax
	negl	%eax
	andl	$15, %eax
	movl	%eax, 36(%esp)
	je	L17
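# L11: peel loop of the inlined XOR - process single bytes until the
# destination pointer is 16-byte aligned.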
	movl	%eax, %edi
	movl	40(%esp), %ecx
	xorl	%edx, %edx
	movl	44(%esp), %eax
	.p2align 4,,10
L11:
	movzbl	(%ecx), %esi
	addl	$1, %ecx
	movl	%esi, %ebx
	movl	$31, %esi
	xorb	%bl, (%eax)
	subl	%edx, %esi
	addl	$1, %edx
	addl	$1, %eax
	cmpl	%edx, %edi
	ja	L11
L10:
	movl	36(%esp), %edx
	movl	$32, 28(%esp)
	subl	%edx, 28(%esp)
	movl	28(%esp), %ebx
	shrl	$4, %ebx
	movl	%ebx, 24(%esp)
	sall	$4, %ebx
	testl	%ebx, %ebx
	movl	%ebx, 32(%esp)
	je	L12
	movl	40(%esp), %ebx
	movl	%eax, 36(%esp)
	movl	44(%esp), %edi
	movl	%ecx, 20(%esp)
	movl	24(%esp), %eax
	addl	%edx, %ebx
	movl	%ebx, 40(%esp)
	addl	%edx, %edi
	xorl	%ebx, %ebx
	movl	40(%esp), %ecx
	xorl	%edx, %edx
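# L13: vector loop of the inlined XOR - vpxor 16 bytes per iteration and store
# with vmovdqa, since the destination is now 16-byte aligned.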
	.p2align 4,,10
L13:
	vmovdqu	(%ecx,%edx), %xmm0
	addl	$1, %ebx
	vpxor	(%edi,%edx), %xmm0, %xmm0
	vmovdqa	%xmm0, (%edi,%edx)
	addl	$16, %edx
	cmpl	%eax, %ebx
	jb	L13
	movl	36(%esp), %eax
	movl	20(%esp), %ecx
	movl	32(%esp), %edx
	subl	32(%esp), %esi
	addl	32(%esp), %eax
	addl	32(%esp), %ecx
	cmpl	%edx, 28(%esp)
	je	L14
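# L12/L15: scalar tail for any bytes the vector loop left over.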
L12:
	xorl	%edx, %edx
	.p2align 4,,10
L15:
	movzbl	(%ecx,%edx), %ebx
	xorb	%bl, (%eax,%edx)
	addl	$1, %edx
	cmpl	%esi, %edx
	jne	L15
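# L14/L16: print "result = ", then each of the 32 result bytes as "%.2x",
# then a trailing newline.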
L14:
	movl	$LC0, (%esp)
	xorl	%ebx, %ebx
	call	_printf
	movl	44(%esp), %esi
	.p2align 4,,10
L16:
	movzbl	(%esi,%ebx), %eax
	movl	$LC1, (%esp)
	movl	%eax, 4(%esp)
	addl	$1, %ebx
	call	_printf
	cmpl	$32, %ebx
	jne	L16
	movl	$10, (%esp)
	call	_putchar
	leal	-12(%ebp), %esp
	xorl	%eax, %eax
	popl	%ebx
	.cfi_remember_state
	.cfi_restore 3
	popl	%esi
	.cfi_restore 6
	popl	%edi
	.cfi_restore 7
	popl	%ebp
	.cfi_def_cfa 4, 4
	.cfi_restore 5
	ret
L17:
	.cfi_restore_state
	movl	40(%esp), %ecx
	movl	$32, %esi
	movl	44(%esp), %eax
	jmp	L10
	.cfi_endproc
LFE19:
	.def	_putchar;	.scl	2;	.type	32;	.endef
	.def	_malloc;	.scl	2;	.type	32;	.endef
	.def	_printf;	.scl	2;	.type	32;	.endef
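/* A rough, hand-reconstructed C equivalent of _main above. Buffer names, the
   use of memset, and the explicit xorBuffer call are assumptions: the compiler
   has inlined the XOR and expanded the fills into direct constant stores.
   Sketch only, not the original source:

       #include <stdio.h>
       #include <stdlib.h>
       #include <string.h>

       int main(void)
       {
           unsigned char *a = malloc(32);
           unsigned char *b = malloc(32);
           memset(a, 0xAA, 32);          // the 0xAAAAAAAA stores above
           memset(b, 0xBB, 32);          // the 0xBBBBBBBB stores above
           xorBuffer(a, b, 32);          // inlined: peel loop L11, vector loop L13, tail L15
           printf("result = ");
           for (int i = 0; i < 32; i++)
               printf("%.2x", a[i]);     // each byte is 0xAA ^ 0xBB == 0x11
           putchar('\n');
           return 0;
       }
*/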