; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d < %s \
; RUN:   | FileCheck --check-prefix=LA64-FPELIM %s
; RUN: llc --mtriple=loongarch64 --mattr=+d --target-abi=lp64d \
; RUN:   --frame-pointer=all < %s \
; RUN:   | FileCheck --check-prefix=LA64-WITHFP %s

declare void @llvm.va_start(ptr)
declare void @llvm.va_end(ptr)

declare void @notdead(ptr)

;; va1: reads one i64 vararg through a manually-managed va_list (va_start,
;; load current arg pointer, bump by 8, va_end). Checks that the incoming
;; GAR registers a2-a7 are spilled to the vararg save area and that the
;; va_list is initialized to point at it, in both frame-pointer modes.
define i64 @va1(ptr %fmt, ...) {
; LA64-FPELIM-LABEL: va1:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -80
; LA64-FPELIM-NEXT: .cfi_def_cfa_offset 80
; LA64-FPELIM-NEXT: move $a0, $a1
; LA64-FPELIM-NEXT: st.d $a7, $sp, 72
; LA64-FPELIM-NEXT: st.d $a6, $sp, 64
; LA64-FPELIM-NEXT: st.d $a5, $sp, 56
; LA64-FPELIM-NEXT: st.d $a4, $sp, 48
; LA64-FPELIM-NEXT: st.d $a3, $sp, 40
; LA64-FPELIM-NEXT: st.d $a2, $sp, 32
; LA64-FPELIM-NEXT: addi.d $a1, $sp, 32
; LA64-FPELIM-NEXT: st.d $a1, $sp, 8
; LA64-FPELIM-NEXT: st.d $a0, $sp, 24
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 80
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va1:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -96
; LA64-WITHFP-NEXT: .cfi_def_cfa_offset 96
; LA64-WITHFP-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: .cfi_offset 1, -72
; LA64-WITHFP-NEXT: .cfi_offset 22, -80
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 32
; LA64-WITHFP-NEXT: .cfi_def_cfa 22, 64
; LA64-WITHFP-NEXT: move $a0, $a1
; LA64-WITHFP-NEXT: st.d $a7, $fp, 56
; LA64-WITHFP-NEXT: st.d $a6, $fp, 48
; LA64-WITHFP-NEXT: st.d $a5, $fp, 40
; LA64-WITHFP-NEXT: st.d $a4, $fp, 32
; LA64-WITHFP-NEXT: st.d $a3, $fp, 24
; LA64-WITHFP-NEXT: st.d $a2, $fp, 16
; LA64-WITHFP-NEXT: addi.d $a1, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, -24
; LA64-WITHFP-NEXT: st.d $a0, $fp, 8
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 96
; LA64-WITHFP-NEXT: ret
  %va = alloca ptr, align 8
  call void @llvm.va_start(ptr %va)
  %argp.cur = load ptr, ptr %va, align 8
  %argp.next = getelementptr inbounds i64, ptr %argp.cur, i32 1
  store ptr %argp.next, ptr %va, align 8
  %1 = load i64, ptr %argp.cur, align 8
  call void @llvm.va_end(ptr %va)
  ret i64 %1
}
;; va1_va_arg: same as va1 but using the va_arg instruction instead of a
;; hand-rolled va_list walk; nounwind, so no CFI directives are expected.
define i64 @va1_va_arg(ptr %fmt, ...) nounwind {
; LA64-FPELIM-LABEL: va1_va_arg:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -80
; LA64-FPELIM-NEXT: move $a0, $a1
; LA64-FPELIM-NEXT: st.d $a7, $sp, 72
; LA64-FPELIM-NEXT: st.d $a6, $sp, 64
; LA64-FPELIM-NEXT: st.d $a5, $sp, 56
; LA64-FPELIM-NEXT: st.d $a4, $sp, 48
; LA64-FPELIM-NEXT: st.d $a3, $sp, 40
; LA64-FPELIM-NEXT: st.d $a2, $sp, 32
; LA64-FPELIM-NEXT: addi.d $a1, $sp, 32
; LA64-FPELIM-NEXT: st.d $a1, $sp, 8
; LA64-FPELIM-NEXT: st.d $a0, $sp, 24
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 80
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va1_va_arg:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -96
; LA64-WITHFP-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 32
; LA64-WITHFP-NEXT: move $a0, $a1
; LA64-WITHFP-NEXT: st.d $a7, $fp, 56
; LA64-WITHFP-NEXT: st.d $a6, $fp, 48
; LA64-WITHFP-NEXT: st.d $a5, $fp, 40
; LA64-WITHFP-NEXT: st.d $a4, $fp, 32
; LA64-WITHFP-NEXT: st.d $a3, $fp, 24
; LA64-WITHFP-NEXT: st.d $a2, $fp, 16
; LA64-WITHFP-NEXT: addi.d $a1, $fp, 16
; LA64-WITHFP-NEXT: st.d $a1, $fp, -24
; LA64-WITHFP-NEXT: st.d $a0, $fp, 8
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 96
; LA64-WITHFP-NEXT: ret
  %va = alloca ptr, align 8
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i64
  call void @llvm.va_end(ptr %va)
  ret i64 %1
}

;; Ensure the adjustment when restoring the stack pointer using the frame
;; pointer is correct

;; va1_va_arg_alloca: a dynamic alloca sized by the vararg forces both
;; configurations to keep a frame pointer; sp is restored via
;; `addi.d $sp, $fp, -32` after the call rather than by a constant bump.
define i64 @va1_va_arg_alloca(ptr %fmt, ...) nounwind {
; LA64-FPELIM-LABEL: va1_va_arg_alloca:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -96
; LA64-FPELIM-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: addi.d $fp, $sp, 32
; LA64-FPELIM-NEXT: move $s0, $a1
; LA64-FPELIM-NEXT: st.d $a7, $fp, 56
; LA64-FPELIM-NEXT: st.d $a6, $fp, 48
; LA64-FPELIM-NEXT: st.d $a5, $fp, 40
; LA64-FPELIM-NEXT: st.d $a4, $fp, 32
; LA64-FPELIM-NEXT: st.d $a3, $fp, 24
; LA64-FPELIM-NEXT: st.d $a2, $fp, 16
; LA64-FPELIM-NEXT: addi.d $a0, $fp, 16
; LA64-FPELIM-NEXT: st.d $a0, $fp, -32
; LA64-FPELIM-NEXT: addi.d $a0, $a1, 15
; LA64-FPELIM-NEXT: addi.w $a1, $zero, -16
; LA64-FPELIM-NEXT: and $a0, $a0, $a1
; LA64-FPELIM-NEXT: st.d $s0, $fp, 8
; LA64-FPELIM-NEXT: sub.d $a0, $sp, $a0
; LA64-FPELIM-NEXT: move $sp, $a0
; LA64-FPELIM-NEXT: bl %plt(notdead)
; LA64-FPELIM-NEXT: move $a0, $s0
; LA64-FPELIM-NEXT: addi.d $sp, $fp, -32
; LA64-FPELIM-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 96
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va1_va_arg_alloca:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -96
; LA64-WITHFP-NEXT: st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $s0, $sp, 8 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 32
; LA64-WITHFP-NEXT: move $s0, $a1
; LA64-WITHFP-NEXT: st.d $a7, $fp, 56
; LA64-WITHFP-NEXT: st.d $a6, $fp, 48
; LA64-WITHFP-NEXT: st.d $a5, $fp, 40
; LA64-WITHFP-NEXT: st.d $a4, $fp, 32
; LA64-WITHFP-NEXT: st.d $a3, $fp, 24
; LA64-WITHFP-NEXT: st.d $a2, $fp, 16
; LA64-WITHFP-NEXT: addi.d $a0, $fp, 16
; LA64-WITHFP-NEXT: st.d $a0, $fp, -32
; LA64-WITHFP-NEXT: addi.d $a0, $a1, 15
; LA64-WITHFP-NEXT: addi.w $a1, $zero, -16
; LA64-WITHFP-NEXT: and $a0, $a0, $a1
; LA64-WITHFP-NEXT: st.d $s0, $fp, 8
; LA64-WITHFP-NEXT: sub.d $a0, $sp, $a0
; LA64-WITHFP-NEXT: move $sp, $a0
; LA64-WITHFP-NEXT: bl %plt(notdead)
; LA64-WITHFP-NEXT: move $a0, $s0
; LA64-WITHFP-NEXT: addi.d $sp, $fp, -32
; LA64-WITHFP-NEXT: ld.d $s0, $sp, 8 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 96
; LA64-WITHFP-NEXT: ret
  %va = alloca ptr, align 8
  call void @llvm.va_start(ptr %va)
  %1 = va_arg ptr %va, i64
  %2 = alloca i8, i64 %1
  call void @notdead(ptr %2)
  call void @llvm.va_end(ptr %va)
  ret i64 %1
}
;; va1_caller: calls the variadic @va1 with a double and an i64; the fp128/
;; double vararg is materialized into GARs (lu52i.d builds 1.0's bit pattern
;; in $a1), since varargs are never passed in FPRs under lp64d.
define void @va1_caller() nounwind {
; LA64-FPELIM-LABEL: va1_caller:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -16
; LA64-FPELIM-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: lu52i.d $a1, $zero, 1023
; LA64-FPELIM-NEXT: ori $a2, $zero, 2
; LA64-FPELIM-NEXT: bl %plt(va1)
; LA64-FPELIM-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 16
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va1_caller:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -16
; LA64-WITHFP-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 16
; LA64-WITHFP-NEXT: lu52i.d $a1, $zero, 1023
; LA64-WITHFP-NEXT: ori $a2, $zero, 2
; LA64-WITHFP-NEXT: bl %plt(va1)
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 16
; LA64-WITHFP-NEXT: ret
  %1 = call i64 (ptr, ...) @va1(ptr undef, double 1.0, i64 2)
  ret void
}

;; Ensure a named 2*GRLen argument is passed in a1 and a2, while the
;; vararg long double is passed in a4 and a5 (rather than a3 and a4)

declare i64 @va_aligned_register(i64 %a, i128 %b, ...)

;; va_aligned_register_caller: the vararg fp128 must go in an even/odd GAR
;; pair ($a4/$a5, skipping $a3) because 2*GRLen varargs are register-pair
;; aligned; named args land in $a0 (i64 2) and $a1/$a2 (i128 1111).
define void @va_aligned_register_caller() nounwind {
; LA64-FPELIM-LABEL: va_aligned_register_caller:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -16
; LA64-FPELIM-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: lu12i.w $a0, 335544
; LA64-FPELIM-NEXT: ori $a0, $a0, 1311
; LA64-FPELIM-NEXT: lu32i.d $a0, 335544
; LA64-FPELIM-NEXT: lu52i.d $a4, $a0, -328
; LA64-FPELIM-NEXT: lu12i.w $a0, -503317
; LA64-FPELIM-NEXT: ori $a0, $a0, 2129
; LA64-FPELIM-NEXT: lu32i.d $a0, 37355
; LA64-FPELIM-NEXT: lu52i.d $a5, $a0, 1024
; LA64-FPELIM-NEXT: ori $a0, $zero, 2
; LA64-FPELIM-NEXT: ori $a1, $zero, 1111
; LA64-FPELIM-NEXT: move $a2, $zero
; LA64-FPELIM-NEXT: bl %plt(va_aligned_register)
; LA64-FPELIM-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 16
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va_aligned_register_caller:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -16
; LA64-WITHFP-NEXT: st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 0 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 16
; LA64-WITHFP-NEXT: lu12i.w $a0, 335544
; LA64-WITHFP-NEXT: ori $a0, $a0, 1311
; LA64-WITHFP-NEXT: lu32i.d $a0, 335544
; LA64-WITHFP-NEXT: lu52i.d $a4, $a0, -328
; LA64-WITHFP-NEXT: lu12i.w $a0, -503317
; LA64-WITHFP-NEXT: ori $a0, $a0, 2129
; LA64-WITHFP-NEXT: lu32i.d $a0, 37355
; LA64-WITHFP-NEXT: lu52i.d $a5, $a0, 1024
; LA64-WITHFP-NEXT: ori $a0, $zero, 2
; LA64-WITHFP-NEXT: ori $a1, $zero, 1111
; LA64-WITHFP-NEXT: move $a2, $zero
; LA64-WITHFP-NEXT: bl %plt(va_aligned_register)
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 0 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 16
; LA64-WITHFP-NEXT: ret
  %1 = call i64 (i64, i128, ...) @va_aligned_register(i64 2, i128 1111,
      fp128 0xLEB851EB851EB851F400091EB851EB851)
  ret void
}

;; Check 2*GRLen values are aligned appropriately when passed on the stack
;; in a vararg call

declare i32 @va_aligned_stack_callee(i32, ...)

;; va_aligned_stack_caller: overflows the GARs so 2*GRLen varargs spill to
;; the outgoing-argument area; the fp128/i128 spill slots must be 16-byte
;; aligned and the i256 is passed indirectly via a stack temporary.
define void @va_aligned_stack_caller() nounwind {
; LA64-FPELIM-LABEL: va_aligned_stack_caller:
; LA64-FPELIM: # %bb.0:
; LA64-FPELIM-NEXT: addi.d $sp, $sp, -112
; LA64-FPELIM-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill
; LA64-FPELIM-NEXT: ori $a0, $zero, 17
; LA64-FPELIM-NEXT: st.d $a0, $sp, 48
; LA64-FPELIM-NEXT: ori $a0, $zero, 16
; LA64-FPELIM-NEXT: st.d $a0, $sp, 40
; LA64-FPELIM-NEXT: ori $a0, $zero, 15
; LA64-FPELIM-NEXT: st.d $a0, $sp, 32
; LA64-FPELIM-NEXT: ori $a0, $zero, 14
; LA64-FPELIM-NEXT: st.d $a0, $sp, 0
; LA64-FPELIM-NEXT: lu12i.w $a0, -503317
; LA64-FPELIM-NEXT: ori $a0, $a0, 2129
; LA64-FPELIM-NEXT: lu32i.d $a0, 37355
; LA64-FPELIM-NEXT: lu52i.d $a0, $a0, 1024
; LA64-FPELIM-NEXT: st.d $a0, $sp, 24
; LA64-FPELIM-NEXT: lu12i.w $a0, 335544
; LA64-FPELIM-NEXT: ori $a0, $a0, 1311
; LA64-FPELIM-NEXT: lu32i.d $a0, 335544
; LA64-FPELIM-NEXT: lu52i.d $a0, $a0, -328
; LA64-FPELIM-NEXT: st.d $a0, $sp, 16
; LA64-FPELIM-NEXT: ori $a0, $zero, 1000
; LA64-FPELIM-NEXT: st.d $a0, $sp, 64
; LA64-FPELIM-NEXT: st.d $zero, $sp, 88
; LA64-FPELIM-NEXT: st.d $zero, $sp, 80
; LA64-FPELIM-NEXT: st.d $zero, $sp, 72
; LA64-FPELIM-NEXT: ori $a1, $zero, 11
; LA64-FPELIM-NEXT: addi.d $a2, $sp, 64
; LA64-FPELIM-NEXT: ori $a3, $zero, 12
; LA64-FPELIM-NEXT: ori $a4, $zero, 13
; LA64-FPELIM-NEXT: ori $a0, $zero, 1
; LA64-FPELIM-NEXT: move $a6, $zero
; LA64-FPELIM-NEXT: move $a7, $a0
; LA64-FPELIM-NEXT: bl %plt(va_aligned_stack_callee)
; LA64-FPELIM-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload
; LA64-FPELIM-NEXT: addi.d $sp, $sp, 112
; LA64-FPELIM-NEXT: ret
;
; LA64-WITHFP-LABEL: va_aligned_stack_caller:
; LA64-WITHFP: # %bb.0:
; LA64-WITHFP-NEXT: addi.d $sp, $sp, -112
; LA64-WITHFP-NEXT: st.d $ra, $sp, 104 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: st.d $fp, $sp, 96 # 8-byte Folded Spill
; LA64-WITHFP-NEXT: addi.d $fp, $sp, 112
; LA64-WITHFP-NEXT: ori $a0, $zero, 17
; LA64-WITHFP-NEXT: st.d $a0, $sp, 48
; LA64-WITHFP-NEXT: ori $a0, $zero, 16
; LA64-WITHFP-NEXT: st.d $a0, $sp, 40
; LA64-WITHFP-NEXT: ori $a0, $zero, 15
; LA64-WITHFP-NEXT: st.d $a0, $sp, 32
; LA64-WITHFP-NEXT: ori $a0, $zero, 14
; LA64-WITHFP-NEXT: st.d $a0, $sp, 0
; LA64-WITHFP-NEXT: lu12i.w $a0, -503317
; LA64-WITHFP-NEXT: ori $a0, $a0, 2129
; LA64-WITHFP-NEXT: lu32i.d $a0, 37355
; LA64-WITHFP-NEXT: lu52i.d $a0, $a0, 1024
; LA64-WITHFP-NEXT: st.d $a0, $sp, 24
; LA64-WITHFP-NEXT: lu12i.w $a0, 335544
; LA64-WITHFP-NEXT: ori $a0, $a0, 1311
; LA64-WITHFP-NEXT: lu32i.d $a0, 335544
; LA64-WITHFP-NEXT: lu52i.d $a0, $a0, -328
; LA64-WITHFP-NEXT: st.d $a0, $sp, 16
; LA64-WITHFP-NEXT: ori $a0, $zero, 1000
; LA64-WITHFP-NEXT: st.d $a0, $fp, -48
; LA64-WITHFP-NEXT: st.d $zero, $fp, -24
; LA64-WITHFP-NEXT: st.d $zero, $fp, -32
; LA64-WITHFP-NEXT: st.d $zero, $fp, -40
; LA64-WITHFP-NEXT: ori $a1, $zero, 11
; LA64-WITHFP-NEXT: addi.d $a2, $fp, -48
; LA64-WITHFP-NEXT: ori $a3, $zero, 12
; LA64-WITHFP-NEXT: ori $a4, $zero, 13
; LA64-WITHFP-NEXT: ori $a0, $zero, 1
; LA64-WITHFP-NEXT: move $a6, $zero
; LA64-WITHFP-NEXT: move $a7, $a0
; LA64-WITHFP-NEXT: bl %plt(va_aligned_stack_callee)
; LA64-WITHFP-NEXT: ld.d $fp, $sp, 96 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: ld.d $ra, $sp, 104 # 8-byte Folded Reload
; LA64-WITHFP-NEXT: addi.d $sp, $sp, 112
; LA64-WITHFP-NEXT: ret
  %1 = call i32 (i32, ...) @va_aligned_stack_callee(i32 1, i32 11,
      i256 1000, i32 12, i32 13, i128 18446744073709551616, i32 14,
      fp128 0xLEB851EB851EB851F400091EB851EB851, i64 15,
      [2 x i64] [i64 16, i64 17])
  ret void
}