The key idea is to lower COPY nodes populating EFLAGS by scanning the uses of EFLAGS and introducing dedicated code to preserve the necessary state in a GPR. In the vast majority of cases, these uses are cmovCC and jCC instructions. For such cases, we can very easily save and restore the necessary information by simply inserting a setCC into a GPR where the original flags are live, and then testing that GPR directly to feed the cmov or conditional branch. However, things are a bit more tricky if arithmetic is using the flags. This patch handles the vast majority of cases that seem to come up in practice: adc, adcx, adox, rcl, and rcr; all without taking advantage of partially preserved EFLAGS as LLVM doesn't currently model that at all. There are a large number of operations that technically observe EFLAGS currently but shouldn't in this case -- they typically are using DF. Currently, they will not be handled by this approach. However, I have never seen this issue come up in practice. It is already pretty rare to have these patterns come up in practical code with LLVM. I had to resort to writing MIR tests to cover most of the logic in this pass already. I suspect even with its current amount of coverage of arithmetic users of EFLAGS it will be a significant improvement over the current use of pushf/popf. It will also produce substantially faster code in most of the common patterns. This patch also removes all of the old lowering for EFLAGS copies, and the hack that forced us to use a frame pointer when EFLAGS copies were found anywhere in a function so that the dynamic stack adjustment wasn't a problem. None of this is needed as we now lower all of these copies directly in MI and without requiring stack adjustments. Lots of thanks to Reid who came up with several aspects of this approach, and Craig who helped me work out a couple of things tripping me up while working on this. Differential Revision: https://reviews.llvm.org/D45146 llvm-svn: 329657
8188 lines
378 KiB
LLVM
8188 lines
378 KiB
LLVM
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
|
|
; RUN: llc < %s -mtriple=i386-unknown | FileCheck %s --check-prefix=X32
|
|
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64
|
|
|
|
define void @test_1024(i1024* %a, i1024* %b, i1024* %out) nounwind {
|
|
; X32-LABEL: test_1024:
|
|
; X32: # %bb.0:
|
|
; X32-NEXT: pushl %ebp
|
|
; X32-NEXT: pushl %ebx
|
|
; X32-NEXT: pushl %edi
|
|
; X32-NEXT: pushl %esi
|
|
; X32-NEXT: subl $1000, %esp # imm = 0x3E8
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 48(%eax), %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl 32(%edx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edi, %edi
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl 32(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 36(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl 36(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: leal (%ebx,%eax), %eax
|
|
; X32-NEXT: leal (%ecx,%ebp), %edx
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl (%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
|
|
; X32-NEXT: movl 16(%ebp), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %esi, %ecx
|
|
; X32-NEXT: adcl %ebx, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl (%ebp), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %ebp
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X32-NEXT: movl 4(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl %ebp, %ecx
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: movl %ebx, %ebp
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movzbl %cl, %eax
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 8(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: addl %edi, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
|
|
; X32-NEXT: movl 52(%ebp), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl %bl, %ebx
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl 56(%ebp), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %ebp
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 40(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: addl %esi, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 16(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ebx, %ebx
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl 20(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl %edi, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 24(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %ebx
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
|
|
; X32-NEXT: movl 20(%edi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl 24(%edi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %edi
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 4(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ecx, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movzbl %cl, %eax
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 8(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %ecx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: movl %esi, %edx
|
|
; X32-NEXT: movl %esi, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %edx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %eax, %edx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl %ecx, %edx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: adcl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %edx, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 40(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %edx
|
|
; X32-NEXT: addl %eax, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %ebp
|
|
; X32-NEXT: addl %edi, %edx
|
|
; X32-NEXT: adcl %ebx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X32-NEXT: movl 48(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl 52(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %edi, %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 56(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %ebx
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
|
|
; X32-NEXT: movl 64(%edi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %ecx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl 80(%edi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %ebp, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 80(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edi, %edi
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl 64(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %esi, %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movb {{[-0-9]+}}(%e{{[sb]}}p), %al # 1-byte Reload
|
|
; X32-NEXT: addb $255, %al
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 68(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 72(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %edx
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 84(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 88(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %esi
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl %ebp, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 84(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 88(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %ebx
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 68(%ecx), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 72(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %edx
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edi, %ebx
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %ecx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %ecx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 12(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %edi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: movl %ecx, %edx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl (%esp), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %edi, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl (%esp), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: setb %dl
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl %dl, %edx
|
|
; X32-NEXT: adcl %ebx, %edx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl (%esp), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 44(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %edx
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: addl %edi, %edx
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl %edi, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl %ebp, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: adcl %edi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 12(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %ebx
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: addl %eax, %edx
|
|
; X32-NEXT: movzbl %bl, %ebp
|
|
; X32-NEXT: adcl %ecx, %ebp
|
|
; X32-NEXT: movl %esi, %ecx
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %edi
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: addl %ebx, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 44(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %edi, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: movzbl %cl, %eax
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl %esi, %eax
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %edx
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %ebx, %edi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl %edx, %eax
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb %dl
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl %dl, %eax
|
|
; X32-NEXT: adcl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %esi, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl %ebp, %esi
|
|
; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: addb $255, {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 60(%eax), %esi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl (%esp), %edi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %ebp
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %ebp
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl (%esp), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %ebp
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %esi, %ecx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 28(%eax), %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %ebx, %esi
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %ebx
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 28(%eax), %ecx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %esi, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl %cl, %esi
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %esi, %ecx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebp, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl (%esp), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %ebp
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl (%esp), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl (%esp), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl (%esp), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: movzbl %cl, %eax
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl (%esp), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl (%esp), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %edi, %ecx
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl %esi, %edi
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebp, %ecx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 60(%eax), %ebp
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl (%esp), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl (%esp), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl %ebp, %edx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, %ebx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl (%esp), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %ebp
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %edi, %ecx
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, %ebp
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 76(%eax), %ecx
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %ecx, %edi
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl (%esp), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl (%esp), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl (%esp), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl (%esp), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, (%esp) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %edi, %ecx
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 92(%eax), %ebp
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl (%esp), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb (%esp) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl (%esp), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, (%esp) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, %ebp
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, (%esp) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl %esi, %ebp
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %esi, %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: imull %eax, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: imull %ebx, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: imull %edi, %esi
|
|
; X32-NEXT: addl %edx, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl %edi, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebp, %esi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: imull %ebp, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: imull %edi, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: imull %eax, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: adcl %ebx, %esi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X32-NEXT: movl 104(%esi), %ebp
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl 108(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %edi
|
|
; X32-NEXT: movl 96(%edi), %ebx
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl 100(%edi), %edi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: xorl %ecx, %ecx
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: addl %edi, %ecx
|
|
; X32-NEXT: adcl %ebx, %eax
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %esi
|
|
; X32-NEXT: movl 112(%esi), %edi
|
|
; X32-NEXT: imull %edi, %ebp
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl 116(%esi), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: imull %eax, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, %ebx
|
|
; X32-NEXT: movl 120(%esi), %eax
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: imull %esi, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 124(%ecx), %ecx
|
|
; X32-NEXT: imull %ebp, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ebp, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: imull %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: imull %ebp, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: imull %eax, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ebx, %esi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %esi, %edi
|
|
; X32-NEXT: adcl %ebp, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, %ecx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, %edi
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, %ebx
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl (%esp), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 92(%eax), %esi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %ebp
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 76(%eax), %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %ebp
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %ebp
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %esi, %ecx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %esi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebx, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ebp
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %ebp
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %esi
|
|
; X32-NEXT: adcl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl %ebp, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %ecx, %esi
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %esi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl %edi, %esi
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %ecx, %ebx
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %ebp, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl 96(%ecx), %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
; X32-NEXT: movl 100(%eax), %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %esi, %ecx
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %esi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %esi, %edi
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: addl %eax, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl %edi, %ebx
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp
|
|
; X32-NEXT: movl 104(%ebp), %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl 108(%ebp), %esi
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: addl %edi, %esi
|
|
; X32-NEXT: movzbl %bl, %eax
|
|
; X32-NEXT: adcl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: xorl %edx, %edx
|
|
; X32-NEXT: mull %edx
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl %edx, %eax
|
|
; X32-NEXT: addl %esi, %edi
|
|
; X32-NEXT: adcl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: adcl $0, %eax
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl %ebx, %esi
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ebx, %ebp
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %ecx
|
|
; X32-NEXT: imull %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %edx, %esi
|
|
; X32-NEXT: movl %esi, %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: imull %edi, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %esi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: imull %ecx, %esi
|
|
; X32-NEXT: addl %edx, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl %ebx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ebx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ebx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %edx
|
|
; X32-NEXT: movl 124(%edx), %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: imull %eax, %ecx
|
|
; X32-NEXT: movl 120(%edx), %esi
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: imull {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %edx, %esi
|
|
; X32-NEXT: movl 112(%edi), %ebp
|
|
; X32-NEXT: movl 116(%edi), %ebx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: imull %ebx, %edi
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: imull %ebp, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ebx, %ebp
|
|
; X32-NEXT: adcl %edi, %ecx
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl %bl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %edi, %ebx
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ecx, %ebp
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ebx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl $0, %ebx
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %ebx, %edi
|
|
; X32-NEXT: setb %bl
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: movl %esi, %ebp
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movzbl %bl, %edi
|
|
; X32-NEXT: adcl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %edi
|
|
; X32-NEXT: adcl %edx, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl $0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ecx, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: addl %ebp, %edi
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ecx, %eax
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %edx, %ecx
|
|
; X32-NEXT: addl %edi, %eax
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: adcl %esi, %ecx
|
|
; X32-NEXT: setb {{[-0-9]+}}(%e{{[sb]}}p) # 1-byte Folded Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: addl %ecx, %eax
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: addl %eax, %esi
|
|
; X32-NEXT: adcl %edx, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movzbl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X32-NEXT: adcl %eax, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl $0, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %ebp, %ecx
|
|
; X32-NEXT: imull %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: imull %ebp, %esi
|
|
; X32-NEXT: addl %edx, %esi
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: imull %ebx, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: imull %esi, %edi
|
|
; X32-NEXT: addl %edx, %edi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %ebx, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebx
|
|
; X32-NEXT: addl %esi, %ebx
|
|
; X32-NEXT: adcl $0, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebp
|
|
; X32-NEXT: movl %ebp, %esi
|
|
; X32-NEXT: movl %edx, %ebp
|
|
; X32-NEXT: addl %ebx, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl %edi, %ebp
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %esi
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %edx
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: imull %eax, %edi
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %edi, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: imull %ebx, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: imull %esi, %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: mull %edi
|
|
; X32-NEXT: addl %ecx, %edx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: imull %edi, %ecx
|
|
; X32-NEXT: addl %edx, %ecx
|
|
; X32-NEXT: addl %ebp, %eax
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %edi, %eax
|
|
; X32-NEXT: movl %edi, %ebp
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl %esi, %eax
|
|
; X32-NEXT: mull %ecx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %ecx
|
|
; X32-NEXT: addl %edi, %ecx
|
|
; X32-NEXT: adcl $0, %esi
|
|
; X32-NEXT: movl %ebp, %eax
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %edi
|
|
; X32-NEXT: movl %eax, %ebp
|
|
; X32-NEXT: addl %ecx, %ebp
|
|
; X32-NEXT: adcl %esi, %edi
|
|
; X32-NEXT: setb %cl
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: mull %ebx
|
|
; X32-NEXT: movl %edx, %esi
|
|
; X32-NEXT: movl %eax, %edx
|
|
; X32-NEXT: addl %edi, %edx
|
|
; X32-NEXT: movzbl %cl, %ecx
|
|
; X32-NEXT: adcl %ecx, %esi
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, %ebp
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: addl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %esi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ebp, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebp # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edi # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Folded Reload
|
|
; X32-NEXT: movl %ecx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %ebx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %edx # 4-byte Folded Reload
|
|
; X32-NEXT: adcl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
|
|
; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, (%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 4(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 8(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 12(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 16(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 20(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 24(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 28(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 32(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 36(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 40(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 44(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 48(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 52(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 56(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 60(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 64(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 68(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 72(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 76(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 80(%ecx)
|
|
; X32-NEXT: movl %ebp, 84(%ecx)
|
|
; X32-NEXT: movl %edi, 88(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 92(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 96(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 100(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 104(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 108(%ecx)
|
|
; X32-NEXT: movl {{[-0-9]+}}(%e{{[sb]}}p), %esi # 4-byte Reload
|
|
; X32-NEXT: movl %esi, 112(%ecx)
|
|
; X32-NEXT: movl %ebx, 116(%ecx)
|
|
; X32-NEXT: movl %edx, 120(%ecx)
|
|
; X32-NEXT: movl %eax, 124(%ecx)
|
|
; X32-NEXT: addl $1000, %esp # imm = 0x3E8
|
|
; X32-NEXT: popl %esi
|
|
; X32-NEXT: popl %edi
|
|
; X32-NEXT: popl %ebx
|
|
; X32-NEXT: popl %ebp
|
|
; X32-NEXT: retl
|
|
;
|
|
; X64-LABEL: test_1024:
|
|
; X64: # %bb.0:
|
|
; X64-NEXT: pushq %rbp
|
|
; X64-NEXT: pushq %r15
|
|
; X64-NEXT: pushq %r14
|
|
; X64-NEXT: pushq %r13
|
|
; X64-NEXT: pushq %r12
|
|
; X64-NEXT: pushq %rbx
|
|
; X64-NEXT: subq $352, %rsp # imm = 0x160
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq 48(%rdi), %r9
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq 40(%rdi), %rbp
|
|
; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq 32(%rdi), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdi, %r10
|
|
; X64-NEXT: xorl %r8d, %r8d
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %rbx
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rdi, %rbp
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movzbl %bl, %ebx
|
|
; X64-NEXT: addq %rax, %rbp
|
|
; X64-NEXT: adcq %rdx, %rbx
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rcx, %r12
|
|
; X64-NEXT: movq %rcx, %r8
|
|
; X64-NEXT: addq %rax, %r12
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq %rdi, %r9
|
|
; X64-NEXT: movq %rdi, (%rsp) # 8-byte Spill
|
|
; X64-NEXT: adcq %rdx, %rax
|
|
; X64-NEXT: addq %rbp, %r12
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq (%rsi), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: xorl %ebp, %ebp
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq 8(%rsi), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: xorl %r11d, %r11d
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: addq %rcx, %r15
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq %rdi, %r15
|
|
; X64-NEXT: adcq %rcx, %rbp
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: addq %rax, %rbp
|
|
; X64-NEXT: movzbl %bl, %ebx
|
|
; X64-NEXT: adcq %rdx, %rbx
|
|
; X64-NEXT: movq 16(%rsi), %rax
|
|
; X64-NEXT: movq %rsi, %r13
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdi, %r14
|
|
; X64-NEXT: addq %rax, %r14
|
|
; X64-NEXT: movq %rcx, %r11
|
|
; X64-NEXT: adcq %rdx, %r11
|
|
; X64-NEXT: addq %rbp, %r14
|
|
; X64-NEXT: adcq %rbx, %r11
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: movq %r8, %rbp
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: adcq %rcx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq (%r10), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: xorl %r8d, %r8d
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movq %rdi, %r9
|
|
; X64-NEXT: movq %rdx, %rax
|
|
; X64-NEXT: adcq %rcx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq 32(%r13), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: xorl %r8d, %r8d
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: movq %rbx, %rcx
|
|
; X64-NEXT: addq %r13, %rax
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: adcq %rdx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %r15, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r14, %r12
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %r11, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r11, %rdi
|
|
; X64-NEXT: movq 8(%r10), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %rsi, %r11
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq %rbx, %r11
|
|
; X64-NEXT: adcq %rsi, %rbp
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: addq %rax, %rbp
|
|
; X64-NEXT: movzbl %bl, %ebx
|
|
; X64-NEXT: adcq %rdx, %rbx
|
|
; X64-NEXT: movq 16(%r10), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rcx, %r8
|
|
; X64-NEXT: addq %rax, %r8
|
|
; X64-NEXT: movq %rsi, %r10
|
|
; X64-NEXT: adcq %rdx, %r10
|
|
; X64-NEXT: addq %rbp, %r8
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: adcq %rbx, %r10
|
|
; X64-NEXT: movq %rcx, %rdx
|
|
; X64-NEXT: movq %rcx, %r12
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: addq %r9, %rdx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r11, %r8
|
|
; X64-NEXT: adcq %r11, %r15
|
|
; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rax, %r14
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: adcq %r10, %rdi
|
|
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq 40(%rsi), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: xorl %r14d, %r14d
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: addq %r9, %rdi
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq %r13, %rdi
|
|
; X64-NEXT: adcq %r9, %rbp
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: addq %rax, %rbp
|
|
; X64-NEXT: movzbl %bl, %r11d
|
|
; X64-NEXT: adcq %rdx, %r11
|
|
; X64-NEXT: movq 48(%rsi), %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r13, %rbx
|
|
; X64-NEXT: addq %rax, %rbx
|
|
; X64-NEXT: movq %r9, %rsi
|
|
; X64-NEXT: adcq %rdx, %rsi
|
|
; X64-NEXT: addq %rbp, %rbx
|
|
; X64-NEXT: adcq %r11, %rsi
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: addq %r13, %r12
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rdi, %r8
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbx, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rsi, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
|
|
; X64-NEXT: movq %rdx, %rax
|
|
; X64-NEXT: addq %r13, %rax
|
|
; X64-NEXT: movq (%rsp), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %r9, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rax
|
|
; X64-NEXT: addq %r13, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq 56(%rax), %r11
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdi, %r10
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rsi, %rbx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rbx, %r8
|
|
; X64-NEXT: adcq %rbp, %rsi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdi, %r11
|
|
; X64-NEXT: addq %rsi, %rax
|
|
; X64-NEXT: movzbl %cl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
|
|
; X64-NEXT: addq %rax, %r15
|
|
; X64-NEXT: adcq %rdx, %r12
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rsi, %rbx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %r10
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbp, %rcx
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r13
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %rcx, %rsi
|
|
; X64-NEXT: movzbl %bl, %eax
|
|
; X64-NEXT: adcq %rax, %r13
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: addq %r9, %rsi
|
|
; X64-NEXT: adcq %r8, %r13
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: adcq $0, %r12
|
|
; X64-NEXT: movq %r10, %rbx
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq %rdi, %r9
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rcx, %rbp
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq 24(%rax), %rcx
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rcx, %rbx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rbp, %r8
|
|
; X64-NEXT: adcq %rdi, %rcx
|
|
; X64-NEXT: setb %dil
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %dil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: addq %r14, %rbp
|
|
; X64-NEXT: movq (%rsp), %rbx # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: adcq %r9, %rbx
|
|
; X64-NEXT: addq %rax, %rbp
|
|
; X64-NEXT: adcq %rdx, %rbx
|
|
; X64-NEXT: addq %rsi, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r13, %r8
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: adcq $0, %rbx
|
|
; X64-NEXT: addq %r15, %rbp
|
|
; X64-NEXT: adcq %r12, %rbx
|
|
; X64-NEXT: setb %r15b
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %r11, %rsi
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r11, %rdi
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %rdi, %r11
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r12, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %r8, %r12
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %sil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: addq %r14, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: adcq %r9, %r14
|
|
; X64-NEXT: addq %rax, %rcx
|
|
; X64-NEXT: adcq %rdx, %r14
|
|
; X64-NEXT: addq %rbp, %r13
|
|
; X64-NEXT: adcq %rbx, %r11
|
|
; X64-NEXT: movzbl %r15b, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: adcq $0, %r14
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq 24(%rax), %rcx
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, %r11
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rbx, %rbp
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: addq %rbp, %r15
|
|
; X64-NEXT: adcq %rsi, %rbx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movzbl %sil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
|
|
; X64-NEXT: addq %rax, %r8
|
|
; X64-NEXT: adcq %rdx, %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %rbx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %r11
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbp, %rdi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: movq %rsi, %rbp
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rdi, %rbx
|
|
; X64-NEXT: movzbl %cl, %eax
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: addq %r14, %rbx
|
|
; X64-NEXT: adcq %r15, %rsi
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq %rbp, %r14
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdi, %r15
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rcx, %rbp
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbp, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: adcq %rdi, %rcx
|
|
; X64-NEXT: setb %dil
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %dil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: addq %r13, %rdi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: adcq %r14, %rbp
|
|
; X64-NEXT: addq %rax, %rdi
|
|
; X64-NEXT: adcq %rdx, %rbp
|
|
; X64-NEXT: addq %rbx, %r9
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rsi, %r11
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq %r8, %rdi
|
|
; X64-NEXT: adcq %r10, %rbp
|
|
; X64-NEXT: setb %r9b
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r10, %rbx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: addq %rbx, %r15
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %bl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: movq %r10, %rcx
|
|
; X64-NEXT: addq %r13, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: movq %rbx, %rsi
|
|
; X64-NEXT: movq %rbx, %r12
|
|
; X64-NEXT: adcq %r14, %rsi
|
|
; X64-NEXT: addq %rax, %rcx
|
|
; X64-NEXT: adcq %rdx, %rsi
|
|
; X64-NEXT: addq %rdi, %r11
|
|
; X64-NEXT: adcq %rbp, %r15
|
|
; X64-NEXT: movzbl %r9b, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq $0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: movq %r8, %rbp
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rcx, %r11
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %rsi, %rcx
|
|
; X64-NEXT: adcq $0, %rbx
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rcx, %r8
|
|
; X64-NEXT: adcq %rbx, %rsi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdi, %r15
|
|
; X64-NEXT: addq %rsi, %rax
|
|
; X64-NEXT: movzbl %cl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq %r10, %r9
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r12, %r10
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
|
|
; X64-NEXT: addq %rax, %r9
|
|
; X64-NEXT: adcq %rdx, %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: movzbl %sil, %eax
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: addq %r14, %rbx
|
|
; X64-NEXT: adcq %r8, %r15
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %rbp, %rsi
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r14
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq %rdi, %r8
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %r14, %rcx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq 56(%rax), %rdi
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %rcx, %r14
|
|
; X64-NEXT: adcq %rbp, %rsi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdi, %r8
|
|
; X64-NEXT: addq %rsi, %rax
|
|
; X64-NEXT: movzbl %cl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: addq %r11, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: adcq %r13, %rsi
|
|
; X64-NEXT: addq %rax, %rcx
|
|
; X64-NEXT: adcq %rdx, %rsi
|
|
; X64-NEXT: addq %rbx, %r12
|
|
; X64-NEXT: adcq %r15, %r14
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: addq %r9, %rcx
|
|
; X64-NEXT: adcq %r10, %rsi
|
|
; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r9, %rbx
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq %r8, %rdi
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rbx, %r8
|
|
; X64-NEXT: adcq %r15, %r9
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: addq %r9, %rax
|
|
; X64-NEXT: movzbl %bl, %edi
|
|
; X64-NEXT: adcq %rdi, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: addq %r11, %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: adcq %r13, %rbp
|
|
; X64-NEXT: addq %rax, %r15
|
|
; X64-NEXT: adcq %rdx, %rbp
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
|
|
; X64-NEXT: addq %rcx, %rdx
|
|
; X64-NEXT: adcq %rsi, %r8
|
|
; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X64-NEXT: adcq %rax, %r15
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: addq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rdx
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
|
|
; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, %r10
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r11, %rbx
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rbx, %r12
|
|
; X64-NEXT: adcq %rdi, %rcx
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, %r9
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %bl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: addq %rax, %r8
|
|
; X64-NEXT: adcq %rdx, %rcx
|
|
; X64-NEXT: movq %rcx, %r14
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r11, %rbx
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %r13
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rdi, %rcx
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %rcx, %rdi
|
|
; X64-NEXT: movzbl %bl, %eax
|
|
; X64-NEXT: adcq %rax, %r11
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Folded Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq %r12, %r11
|
|
; X64-NEXT: adcq $0, %r8
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r14
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: movq %rsi, %r9
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rcx, %r10
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %r8, %rcx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: adcq %rsi, %rbx
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %r13, %r9
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movzbl %cl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: addq %r13, %rsi
|
|
; X64-NEXT: movq (%rsp), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: adcq %r14, %rcx
|
|
; X64-NEXT: addq %rax, %rsi
|
|
; X64-NEXT: adcq %rdx, %rcx
|
|
; X64-NEXT: addq %rdi, %r12
|
|
; X64-NEXT: adcq %r11, %r8
|
|
; X64-NEXT: movq %r8, %r11
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rcx, (%rsp) # 8-byte Spill
|
|
; X64-NEXT: setb {{[-0-9]+}}(%r{{[sb]}}p) # 1-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %rcx, %rdi
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: addq %rdi, %r10
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r8, %rax
|
|
; X64-NEXT: mulq %r9
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %bl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: addq %r13, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: adcq %r14, %rcx
|
|
; X64-NEXT: addq %rax, %rsi
|
|
; X64-NEXT: adcq %rdx, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq (%rsp), %r10 # 8-byte Folded Reload
|
|
; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: addq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
|
|
; X64-NEXT: adcq %r15, %r12
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbp, %r11
|
|
; X64-NEXT: movq %r11, (%rsp) # 8-byte Spill
|
|
; X64-NEXT: movzbl {{[-0-9]+}}(%r{{[sb]}}p), %eax # 1-byte Folded Reload
|
|
; X64-NEXT: adcq %rax, %r14
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq 64(%rcx), %r11
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rsi, %rbx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq 72(%rcx), %rsi
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, %rcx
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rbx, %r8
|
|
; X64-NEXT: adcq %rbp, %rsi
|
|
; X64-NEXT: setb %bl
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rcx, %r10
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %rsi, %rdi
|
|
; X64-NEXT: movzbl %bl, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: xorl %edx, %edx
|
|
; X64-NEXT: mulq %rdx
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: movq %rdx, %r14
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: addq %rax, %r12
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: adcq %rdx, %r15
|
|
; X64-NEXT: addq %rdi, %r12
|
|
; X64-NEXT: adcq %rcx, %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %r11, %rsi
|
|
; X64-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r9 # 8-byte Reload
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %r11, %rdi
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %r11
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %sil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: addq %rax, %rbx
|
|
; X64-NEXT: adcq %rdx, %r14
|
|
; X64-NEXT: addq %r13, %rbx
|
|
; X64-NEXT: adcq %r8, %r14
|
|
; X64-NEXT: adcq $0, %r12
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq 80(%rbp), %rdi
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %r8, %rcx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq 88(%rbp), %r10
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rcx, %r8
|
|
; X64-NEXT: adcq %rsi, %rbp
|
|
; X64-NEXT: setb %r11b
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %rbp, %rsi
|
|
; X64-NEXT: movzbl %r11b, %eax
|
|
; X64-NEXT: adcq %rax, %rcx
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: xorl %edx, %edx
|
|
; X64-NEXT: mulq %rdx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: addq %rax, %rbp
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: adcq %rdx, %rax
|
|
; X64-NEXT: addq %rsi, %rbp
|
|
; X64-NEXT: adcq %rcx, %rax
|
|
; X64-NEXT: addq %rbx, %r13
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r14, %r8
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: adcq $0, %rax
|
|
; X64-NEXT: addq %r12, %rbp
|
|
; X64-NEXT: movq %rbp, %r8
|
|
; X64-NEXT: adcq %r15, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: setb %r14b
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %r15
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r15, %rbx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %sil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: addq %r9, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: addq %rax, %rsi
|
|
; X64-NEXT: adcq %rdx, %rcx
|
|
; X64-NEXT: addq %r8, %r12
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r11, %rbx
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movzbl %r14b, %eax
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: imulq %rax, %r10
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %r10, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: imulq %rbp, %rdi
|
|
; X64-NEXT: addq %rdx, %rdi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: imulq %r11, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %rsi, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: imulq %rcx, %rax
|
|
; X64-NEXT: addq %rdx, %rax
|
|
; X64-NEXT: addq %r8, %r9
|
|
; X64-NEXT: adcq %rdi, %rax
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %rcx, %rdi
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: addq %rbx, %r15
|
|
; X64-NEXT: adcq %rsi, %rdi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r11, %rax
|
|
; X64-NEXT: mulq %rbp
|
|
; X64-NEXT: movq %rdx, %r12
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: addq %rdi, %r13
|
|
; X64-NEXT: movzbl %cl, %eax
|
|
; X64-NEXT: adcq %rax, %r12
|
|
; X64-NEXT: addq %r9, %r13
|
|
; X64-NEXT: adcq %r8, %r12
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
|
|
; X64-NEXT: movq 120(%rdx), %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
|
|
; X64-NEXT: imulq %r10, %rcx
|
|
; X64-NEXT: movq 112(%rdx), %rsi
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %r10, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: addq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: imulq %r8, %rsi
|
|
; X64-NEXT: addq %rdx, %rsi
|
|
; X64-NEXT: movq 96(%rbp), %rdi
|
|
; X64-NEXT: movq 104(%rbp), %rbx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: imulq %rbx, %rcx
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: imulq %rdi, %rax
|
|
; X64-NEXT: addq %rdx, %rax
|
|
; X64-NEXT: addq %r11, %r9
|
|
; X64-NEXT: adcq %rsi, %rax
|
|
; X64-NEXT: movq %rax, %r11
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r10
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rcx, %rbp
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rdi
|
|
; X64-NEXT: addq %rbp, %rdi
|
|
; X64-NEXT: adcq %rsi, %rcx
|
|
; X64-NEXT: setb %sil
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %sil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: addq %r9, %rax
|
|
; X64-NEXT: adcq %r11, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq %r15, %rdi
|
|
; X64-NEXT: adcq %r13, %rax
|
|
; X64-NEXT: adcq %r12, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r14, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq 80(%rsi), %rdi
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %r8
|
|
; X64-NEXT: movq 88(%rsi), %rax
|
|
; X64-NEXT: movq %rsi, %r9
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rcx, %r11
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %r8, %rbx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r14
|
|
; X64-NEXT: addq %rbx, %r14
|
|
; X64-NEXT: adcq %rbp, %rcx
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: movzbl %r8b, %eax
|
|
; X64-NEXT: adcq %rax, %rbp
|
|
; X64-NEXT: movq %rdi, %rax
|
|
; X64-NEXT: xorl %ecx, %ecx
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
|
|
; X64-NEXT: addq %r12, %rsi
|
|
; X64-NEXT: movq %rdx, %r10
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Reload
|
|
; X64-NEXT: adcq %r8, %r10
|
|
; X64-NEXT: addq %rbx, %rsi
|
|
; X64-NEXT: adcq %rbp, %r10
|
|
; X64-NEXT: movq 64(%r9), %r13
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq 72(%r9), %r9
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rbp
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: adcq $0, %rbp
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %rbp, %rcx
|
|
; X64-NEXT: setb %r11b
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: mulq %r15
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rcx, %rbp
|
|
; X64-NEXT: movzbl %r11b, %eax
|
|
; X64-NEXT: adcq %rax, %rbx
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: xorl %ecx, %ecx
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %r11
|
|
; X64-NEXT: movq %rax, %r15
|
|
; X64-NEXT: movq %r12, %rcx
|
|
; X64-NEXT: addq %rax, %rcx
|
|
; X64-NEXT: adcq %rdx, %r8
|
|
; X64-NEXT: addq %rbp, %rcx
|
|
; X64-NEXT: adcq %rbx, %r8
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r14, %r8
|
|
; X64-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rsi
|
|
; X64-NEXT: adcq $0, %r10
|
|
; X64-NEXT: movq %r13, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rdi
|
|
; X64-NEXT: movq %rdi, %r8
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rcx, %rbp
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %r13, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbp, %rax
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: adcq %rdi, %rcx
|
|
; X64-NEXT: setb %dil
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %dil, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: addq %r14, %r15
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
|
|
; X64-NEXT: adcq %r13, %r11
|
|
; X64-NEXT: addq %rax, %r15
|
|
; X64-NEXT: adcq %rdx, %r11
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Folded Reload
|
|
; X64-NEXT: movq %r12, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %r15
|
|
; X64-NEXT: adcq $0, %r11
|
|
; X64-NEXT: addq %rsi, %r15
|
|
; X64-NEXT: adcq %r10, %r11
|
|
; X64-NEXT: setb %r10b
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %r8, %r12
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: addq %rcx, %rbx
|
|
; X64-NEXT: adcq $0, %rdi
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: addq %rbx, %rax
|
|
; X64-NEXT: movq %rax, %rbx
|
|
; X64-NEXT: adcq %rdi, %rcx
|
|
; X64-NEXT: setb %r8b
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: mulq %rsi
|
|
; X64-NEXT: movq %rsi, %rdi
|
|
; X64-NEXT: addq %rcx, %rax
|
|
; X64-NEXT: movzbl %r8b, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: addq %r14, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: adcq %r13, %rcx
|
|
; X64-NEXT: addq %rax, %rsi
|
|
; X64-NEXT: adcq %rdx, %rcx
|
|
; X64-NEXT: addq %r15, %r9
|
|
; X64-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq %r11, %rbx
|
|
; X64-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movzbl %r10b, %eax
|
|
; X64-NEXT: adcq %rax, %rsi
|
|
; X64-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: movq 96(%rbp), %rcx
|
|
; X64-NEXT: imulq %rcx, %rdi
|
|
; X64-NEXT: movq %rcx, %rax
|
|
; X64-NEXT: movq %r12, %rsi
|
|
; X64-NEXT: mulq %r12
|
|
; X64-NEXT: movq %rax, %r9
|
|
; X64-NEXT: addq %rdi, %rdx
|
|
; X64-NEXT: movq 104(%rbp), %r8
|
|
; X64-NEXT: imulq %r8, %rsi
|
|
; X64-NEXT: addq %rdx, %rsi
|
|
; X64-NEXT: movq %rsi, %r11
|
|
; X64-NEXT: movq 112(%rbp), %rax
|
|
; X64-NEXT: movq %rbp, %rdi
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: imulq %rbp, %rsi
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: mulq %rbx
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: addq %rsi, %rdx
|
|
; X64-NEXT: movq 120(%rdi), %rdi
|
|
; X64-NEXT: imulq %rbx, %rdi
|
|
; X64-NEXT: addq %rdx, %rdi
|
|
; X64-NEXT: addq %r9, %r10
|
|
; X64-NEXT: adcq %r11, %rdi
|
|
; X64-NEXT: movq %rbx, %rax
|
|
; X64-NEXT: movq %rbx, %rsi
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %rbp, %rax
|
|
; X64-NEXT: movq %rbp, %r9
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rdx, %rcx
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rbx, %rbp
|
|
; X64-NEXT: adcq $0, %rcx
|
|
; X64-NEXT: movq %rsi, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rsi
|
|
; X64-NEXT: movq %rax, %r12
|
|
; X64-NEXT: addq %rbp, %r12
|
|
; X64-NEXT: adcq %rcx, %rsi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r9, %rax
|
|
; X64-NEXT: mulq %r8
|
|
; X64-NEXT: movq %rdx, %rbx
|
|
; X64-NEXT: movq %rax, %rbp
|
|
; X64-NEXT: addq %rsi, %rbp
|
|
; X64-NEXT: movzbl %cl, %eax
|
|
; X64-NEXT: adcq %rax, %rbx
|
|
; X64-NEXT: addq %r10, %rbp
|
|
; X64-NEXT: adcq %rdi, %rbx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: imulq %rax, %rsi
|
|
; X64-NEXT: movq %rax, %r13
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: mulq %rcx
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: addq %rsi, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r11 # 8-byte Reload
|
|
; X64-NEXT: imulq %r11, %rcx
|
|
; X64-NEXT: addq %rdx, %rcx
|
|
; X64-NEXT: movq %rcx, %r9
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r15 # 8-byte Reload
|
|
; X64-NEXT: imulq %r15, %rcx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r14 # 8-byte Reload
|
|
; X64-NEXT: mulq %r14
|
|
; X64-NEXT: movq %rax, %r10
|
|
; X64-NEXT: addq %rcx, %rdx
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
|
|
; X64-NEXT: imulq %r14, %rax
|
|
; X64-NEXT: addq %rdx, %rax
|
|
; X64-NEXT: addq %r8, %r10
|
|
; X64-NEXT: adcq %r9, %rax
|
|
; X64-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %r8
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %r13
|
|
; X64-NEXT: movq %rdx, %r9
|
|
; X64-NEXT: movq %rax, %rcx
|
|
; X64-NEXT: addq %rdi, %rcx
|
|
; X64-NEXT: adcq $0, %r9
|
|
; X64-NEXT: movq %r14, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: movq %rdx, %rdi
|
|
; X64-NEXT: movq %rax, %rsi
|
|
; X64-NEXT: addq %rcx, %rsi
|
|
; X64-NEXT: adcq %r9, %rdi
|
|
; X64-NEXT: setb %cl
|
|
; X64-NEXT: movq %r15, %rax
|
|
; X64-NEXT: mulq %r11
|
|
; X64-NEXT: addq %rdi, %rax
|
|
; X64-NEXT: movzbl %cl, %ecx
|
|
; X64-NEXT: adcq %rcx, %rdx
|
|
; X64-NEXT: addq %r10, %rax
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq %r12, %rsi
|
|
; X64-NEXT: adcq %rbp, %rax
|
|
; X64-NEXT: adcq %rbx, %rdx
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rcx, %r9
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Folded Reload
|
|
; X64-NEXT: movq %rdi, %r10
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Folded Reload
|
|
; X64-NEXT: adcq (%rsp), %rbx # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %r8 # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
|
|
; X64-NEXT: adcq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Folded Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, (%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 8(%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 16(%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 24(%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 32(%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 40(%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 48(%rcx)
|
|
; X64-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
|
|
; X64-NEXT: movq %rdi, 56(%rcx)
|
|
; X64-NEXT: movq %r9, 64(%rcx)
|
|
; X64-NEXT: movq %r10, 72(%rcx)
|
|
; X64-NEXT: movq %rbp, 80(%rcx)
|
|
; X64-NEXT: movq %rbx, 88(%rcx)
|
|
; X64-NEXT: movq %r8, 96(%rcx)
|
|
; X64-NEXT: movq %rsi, 104(%rcx)
|
|
; X64-NEXT: movq %rax, 112(%rcx)
|
|
; X64-NEXT: movq %rdx, 120(%rcx)
|
|
; X64-NEXT: addq $352, %rsp # imm = 0x160
|
|
; X64-NEXT: popq %rbx
|
|
; X64-NEXT: popq %r12
|
|
; X64-NEXT: popq %r13
|
|
; X64-NEXT: popq %r14
|
|
; X64-NEXT: popq %r15
|
|
; X64-NEXT: popq %rbp
|
|
; X64-NEXT: retq
|
|
%av = load i1024, i1024* %a
|
|
%bv = load i1024, i1024* %b
|
|
%r = mul i1024 %av, %bv
|
|
store i1024 %r, i1024* %out
|
|
ret void
|
|
}
|