; llvm-project/llvm/test/CodeGen/LoongArch/ir-instruction/atomicrmw.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 < %s | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 < %s | FileCheck %s --check-prefix=LA64
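; This file exercises atomicrmw lowering for xchg/add/sub/nand/and/or/xor at
; i8/i16/i32/i64 widths, each with acquire and monotonic orderings. Patterns
; to look for below:
; * Sub-word (i8/i16) operations are expanded to a word-aligned ll.w/sc.w
;   retry loop that shifts the operand and a lane mask into position, so only
;   the addressed byte or halfword is modified.
; * LA64 folds an operation into a single am*_db.{w,d} instruction where one
;   exists; the _db variants carry the required barrier semantics.
; * LA32 has no 64-bit LL/SC pair, so i64 operations become libatomic calls
;   (__atomic_exchange_8, __atomic_fetch_add_8, ...).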
define i8 @atomicrmw_xchg_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: addi.w $a5, $a1, 0
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB0_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB0_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: addi.w $a5, $a1, 0
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB0_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_xchg_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: addi.w $a5, $a1, 0
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB1_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB1_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: addi.w $a5, $a1, 0
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB1_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i16 %b acquire
ret i16 %1
}
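; For naturally word-sized operands no masking is needed: LA32 emits a plain
; ll.w/sc.w retry loop, while LA64 collapses the whole operation into a
; single amswap_db.w.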
define i32 @atomicrmw_xchg_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB2_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: move $a3, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB2_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amswap_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i32 %b acquire
ret i32 %1
}
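; LA32 lowers i64 atomics to a libatomic call; the "ori $a3, $zero, 2" below
; passes memory ordering 2 (acquire) as the final argument.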
define i64 @atomicrmw_xchg_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_exchange_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amswap_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i64 %b acquire
ret i64 %1
}
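; add/sub reuse the same masked-loop expansion for sub-word types: the
; xor/and/xor sequence splices the computed byte or halfword back into the
; loaded word before the sc.w.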
define i8 @atomicrmw_add_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: add.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB4_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB4_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: add.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB4_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_add_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: add.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB5_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB5_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: add.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB5_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i16 %b acquire
ret i16 %1
}
define i32 @atomicrmw_add_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB6_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: add.w $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB6_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amadd_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i32 %b acquire
ret i32 %1
}
define i64 @atomicrmw_add_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_fetch_add_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amadd_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i64 %b acquire
ret i64 %1
}
define i8 @atomicrmw_sub_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: sub.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB8_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB8_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: sub.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB8_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_sub_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: sub.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB9_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB9_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: sub.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB9_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i16 %b acquire
ret i16 %1
}
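; There is no amsub instruction, so for i32/i64 sub LA64 negates the operand
; and reuses amadd_db.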
define i32 @atomicrmw_sub_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: sub.w $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB10_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: sub.w $a2, $zero, $a1
; LA64-NEXT: amadd_db.w $a1, $a2, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i32 %b acquire
ret i32 %1
}
define i64 @atomicrmw_sub_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_fetch_sub_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: sub.d $a2, $zero, $a1
; LA64-NEXT: amadd_db.d $a1, $a2, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i64 %b acquire
ret i64 %1
}
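; nand has no am* equivalent either, so LA64 keeps an ll/sc retry loop even
; for the i32/i64 cases below.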
define i8 @atomicrmw_nand_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: and $a5, $a4, $a1
; LA32-NEXT: nor $a5, $a5, $zero
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB12_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB12_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: and $a5, $a4, $a1
; LA64-NEXT: nor $a5, $a5, $zero
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB12_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_nand_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: and $a5, $a4, $a1
; LA32-NEXT: nor $a5, $a5, $zero
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB13_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB13_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: and $a5, $a4, $a1
; LA64-NEXT: nor $a5, $a5, $zero
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB13_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i16 %b acquire
ret i16 %1
}
define i32 @atomicrmw_nand_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: and $a3, $a2, $a1
; LA32-NEXT: nor $a3, $a3, $zero
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB14_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: .LBB14_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.w $a2, $a0, 0
; LA64-NEXT: and $a3, $a2, $a1
; LA64-NEXT: nor $a3, $a3, $zero
; LA64-NEXT: sc.w $a3, $a0, 0
; LA64-NEXT: beqz $a3, .LBB14_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i32 %b acquire
ret i32 %1
}
define i64 @atomicrmw_nand_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_fetch_nand_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: dbar 0
; LA64-NEXT: ll.d $a2, $a0, 0
; LA64-NEXT: and $a3, $a2, $a1
; LA64-NEXT: nor $a3, $a3, $zero
; LA64-NEXT: sc.d $a3, $a0, 0
; LA64-NEXT: beqz $a3, .LBB15_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i64 %b acquire
ret i64 %1
}
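; For sub-word and, the shifted operand is combined with the inverted lane
; mask (orn), setting every bit outside the addressed lane to 1 so that a
; word-wide and leaves the neighboring bytes untouched. This lets LA64 use a
; single amand_db.w with no retry loop.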
define i8 @atomicrmw_and_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: slli.w $a2, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a2
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a2
; LA32-NEXT: orn $a1, $a1, $a3
; LA32-NEXT: addi.w $a3, $zero, -4
; LA32-NEXT: and $a0, $a0, $a3
; LA32-NEXT: .LBB16_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a3, $a0, 0
; LA32-NEXT: and $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a0, 0
; LA32-NEXT: beqz $a4, .LBB16_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: slli.d $a2, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a2
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a2
; LA64-NEXT: orn $a1, $a1, $a3
; LA64-NEXT: addi.w $a3, $zero, -4
; LA64-NEXT: and $a0, $a0, $a3
; LA64-NEXT: amand_db.w $a3, $a1, $a0
; LA64-NEXT: srl.w $a0, $a3, $a2
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_and_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: lu12i.w $a2, 15
; LA32-NEXT: ori $a2, $a2, 4095
; LA32-NEXT: slli.w $a3, $a0, 3
; LA32-NEXT: sll.w $a2, $a2, $a3
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a3
; LA32-NEXT: orn $a1, $a1, $a2
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a0, $a0, $a2
; LA32-NEXT: .LBB17_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: and $a4, $a2, $a1
; LA32-NEXT: sc.w $a4, $a0, 0
; LA32-NEXT: beqz $a4, .LBB17_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a2, $a3
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: lu12i.w $a2, 15
; LA64-NEXT: ori $a2, $a2, 4095
; LA64-NEXT: slli.d $a3, $a0, 3
; LA64-NEXT: sll.w $a2, $a2, $a3
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a3
; LA64-NEXT: orn $a1, $a1, $a2
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a0, $a0, $a2
; LA64-NEXT: amand_db.w $a2, $a1, $a0
; LA64-NEXT: srl.w $a0, $a2, $a3
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i16 %b acquire
ret i16 %1
}
define i32 @atomicrmw_and_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB18_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: and $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB18_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amand_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i32 %b acquire
ret i32 %1
}
define i64 @atomicrmw_and_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_fetch_and_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amand_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i64 %b acquire
ret i64 %1
}
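; or/xor need no mask fixup: the zero bits outside the shifted operand's lane
; already leave the rest of the word unchanged, so LA64 can use amor_db.w and
; amxor_db.w directly for i8 and i16 as well.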
define i8 @atomicrmw_or_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB20_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: or $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB20_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_or_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB21_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: or $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB21_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i16 %b acquire
ret i16 %1
}
define i32 @atomicrmw_or_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB22_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: or $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB22_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amor_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i32 %b acquire
ret i32 %1
}
define i64 @atomicrmw_or_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_fetch_or_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amor_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i64 %b acquire
ret i64 %1
}
define i8 @atomicrmw_xor_i8_acquire(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i8_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB24_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: xor $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB24_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i8_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amxor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i8 %b acquire
ret i8 %1
}
define i16 @atomicrmw_xor_i16_acquire(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i16_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB25_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: xor $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB25_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i16_acquire:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amxor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i16 %b acquire
ret i16 %1
}
define i32 @atomicrmw_xor_i32_acquire(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i32_acquire:
; LA32: # %bb.0:
; LA32-NEXT: .LBB26_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: dbar 0
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: xor $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB26_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i32_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amxor_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i32 %b acquire
ret i32 %1
}
define i64 @atomicrmw_xor_i64_acquire(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i64_acquire:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: ori $a3, $zero, 2
; LA32-NEXT: bl %plt(__atomic_fetch_xor_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i64_acquire:
; LA64: # %bb.0:
; LA64-NEXT: amxor_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i64 %b acquire
ret i64 %1
}
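; The monotonic variants below mirror the acquire tests above; the only
; differences are the missing "dbar 0" at the head of each LL/SC loop and a
; libcall ordering argument of 0 (relaxed, via "move $a3, $zero") instead
; of 2.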
define i8 @atomicrmw_xchg_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: addi.w $a5, $a1, 0
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB28_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB28_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: addi.w $a5, $a1, 0
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB28_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_xchg_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: addi.w $a5, $a1, 0
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB29_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB29_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: addi.w $a5, $a1, 0
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB29_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_xchg_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB30_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: move $a3, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB30_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amswap_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_xchg_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xchg_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_exchange_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xchg_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amswap_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xchg ptr %a, i64 %b monotonic
ret i64 %1
}
define i8 @atomicrmw_add_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: add.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB32_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB32_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: add.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB32_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_add_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: add.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB33_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB33_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: add.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB33_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_add_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB34_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: add.w $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB34_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amadd_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_add_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_add_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_fetch_add_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_add_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amadd_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw add ptr %a, i64 %b monotonic
ret i64 %1
}
define i8 @atomicrmw_sub_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: sub.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB36_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB36_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: sub.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB36_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_sub_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: sub.w $a5, $a4, $a1
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB37_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB37_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: sub.w $a5, $a4, $a1
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB37_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_sub_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB38_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: sub.w $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB38_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: sub.w $a2, $zero, $a1
; LA64-NEXT: amadd_db.w $a1, $a2, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_sub_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_sub_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_fetch_sub_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_sub_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: sub.d $a2, $zero, $a1
; LA64-NEXT: amadd_db.d $a1, $a2, $a0
; LA64-NEXT: move $a0, $a1
; LA64-NEXT: ret
%1 = atomicrmw sub ptr %a, i64 %b monotonic
ret i64 %1
}
define i8 @atomicrmw_nand_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: and $a5, $a4, $a1
; LA32-NEXT: nor $a5, $a5, $zero
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB40_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB40_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: and $a5, $a4, $a1
; LA64-NEXT: nor $a5, $a5, $zero
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB40_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_nand_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: lu12i.w $a3, 15
; LA32-NEXT: ori $a3, $a3, 4095
; LA32-NEXT: sll.w $a3, $a3, $a0
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a4, $a2, 0
; LA32-NEXT: and $a5, $a4, $a1
; LA32-NEXT: nor $a5, $a5, $zero
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: and $a5, $a5, $a3
; LA32-NEXT: xor $a5, $a4, $a5
; LA32-NEXT: sc.w $a5, $a2, 0
; LA32-NEXT: beqz $a5, .LBB41_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a4, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: lu12i.w $a3, 15
; LA64-NEXT: ori $a3, $a3, 4095
; LA64-NEXT: sll.w $a3, $a3, $a0
; LA64-NEXT: addi.w $a3, $a3, 0
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: addi.w $a1, $a1, 0
; LA64-NEXT: .LBB41_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a4, $a2, 0
; LA64-NEXT: and $a5, $a4, $a1
; LA64-NEXT: nor $a5, $a5, $zero
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: and $a5, $a5, $a3
; LA64-NEXT: xor $a5, $a4, $a5
; LA64-NEXT: sc.w $a5, $a2, 0
; LA64-NEXT: beqz $a5, .LBB41_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: srl.w $a0, $a4, $a0
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_nand_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: and $a3, $a2, $a1
; LA32-NEXT: nor $a3, $a3, $zero
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB42_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: .LBB42_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.w $a2, $a0, 0
; LA64-NEXT: and $a3, $a2, $a1
; LA64-NEXT: nor $a3, $a3, $zero
; LA64-NEXT: sc.w $a3, $a0, 0
; LA64-NEXT: beqz $a3, .LBB42_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_nand_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_nand_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_fetch_nand_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_nand_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: .LBB43_1: # =>This Inner Loop Header: Depth=1
; LA64-NEXT: ll.d $a2, $a0, 0
; LA64-NEXT: and $a3, $a2, $a1
; LA64-NEXT: nor $a3, $a3, $zero
; LA64-NEXT: sc.d $a3, $a0, 0
; LA64-NEXT: beqz $a3, .LBB43_1
; LA64-NEXT: # %bb.2:
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw nand ptr %a, i64 %b monotonic
ret i64 %1
}
define i8 @atomicrmw_and_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: slli.w $a2, $a0, 3
; LA32-NEXT: ori $a3, $zero, 255
; LA32-NEXT: sll.w $a3, $a3, $a2
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a2
; LA32-NEXT: orn $a1, $a1, $a3
; LA32-NEXT: addi.w $a3, $zero, -4
; LA32-NEXT: and $a0, $a0, $a3
; LA32-NEXT: .LBB44_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a3, $a0, 0
; LA32-NEXT: and $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a0, 0
; LA32-NEXT: beqz $a4, .LBB44_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: slli.d $a2, $a0, 3
; LA64-NEXT: ori $a3, $zero, 255
; LA64-NEXT: sll.w $a3, $a3, $a2
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a2
; LA64-NEXT: orn $a1, $a1, $a3
; LA64-NEXT: addi.w $a3, $zero, -4
; LA64-NEXT: and $a0, $a0, $a3
; LA64-NEXT: amand_db.w $a3, $a1, $a0
; LA64-NEXT: srl.w $a0, $a3, $a2
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_and_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: lu12i.w $a2, 15
; LA32-NEXT: ori $a2, $a2, 4095
; LA32-NEXT: slli.w $a3, $a0, 3
; LA32-NEXT: sll.w $a2, $a2, $a3
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a3
; LA32-NEXT: orn $a1, $a1, $a2
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a0, $a0, $a2
; LA32-NEXT: .LBB45_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: and $a4, $a2, $a1
; LA32-NEXT: sc.w $a4, $a0, 0
; LA32-NEXT: beqz $a4, .LBB45_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a2, $a3
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: lu12i.w $a2, 15
; LA64-NEXT: ori $a2, $a2, 4095
; LA64-NEXT: slli.d $a3, $a0, 3
; LA64-NEXT: sll.w $a2, $a2, $a3
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a3
; LA64-NEXT: orn $a1, $a1, $a2
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a0, $a0, $a2
; LA64-NEXT: amand_db.w $a2, $a1, $a0
; LA64-NEXT: srl.w $a0, $a2, $a3
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_and_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB46_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: and $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB46_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amand_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_and_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_and_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_fetch_and_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_and_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amand_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw and ptr %a, i64 %b monotonic
ret i64 %1
}
define i8 @atomicrmw_or_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB48_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: or $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB48_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_or_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB49_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: or $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB49_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_or_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB50_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: or $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB50_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amor_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_or_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_or_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_fetch_or_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_or_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amor_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw or ptr %a, i64 %b monotonic
ret i64 %1
}
define i8 @atomicrmw_xor_i8_monotonic(ptr %a, i8 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i8_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: andi $a1, $a1, 255
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB52_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: xor $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB52_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i8_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: andi $a1, $a1, 255
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amxor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i8 %b monotonic
ret i8 %1
}
define i16 @atomicrmw_xor_i16_monotonic(ptr %a, i16 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i16_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $a2, $zero, -4
; LA32-NEXT: and $a2, $a0, $a2
; LA32-NEXT: slli.w $a0, $a0, 3
; LA32-NEXT: bstrpick.w $a1, $a1, 15, 0
; LA32-NEXT: sll.w $a1, $a1, $a0
; LA32-NEXT: .LBB53_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a3, $a2, 0
; LA32-NEXT: xor $a4, $a3, $a1
; LA32-NEXT: sc.w $a4, $a2, 0
; LA32-NEXT: beqz $a4, .LBB53_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: srl.w $a0, $a3, $a0
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i16_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: addi.w $a2, $zero, -4
; LA64-NEXT: and $a2, $a0, $a2
; LA64-NEXT: slli.d $a0, $a0, 3
; LA64-NEXT: bstrpick.d $a1, $a1, 15, 0
; LA64-NEXT: sll.w $a1, $a1, $a0
; LA64-NEXT: amxor_db.w $a3, $a1, $a2
; LA64-NEXT: srl.w $a0, $a3, $a0
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i16 %b monotonic
ret i16 %1
}
define i32 @atomicrmw_xor_i32_monotonic(ptr %a, i32 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i32_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: .LBB54_1: # =>This Inner Loop Header: Depth=1
; LA32-NEXT: ll.w $a2, $a0, 0
; LA32-NEXT: xor $a3, $a2, $a1
; LA32-NEXT: sc.w $a3, $a0, 0
; LA32-NEXT: beqz $a3, .LBB54_1
; LA32-NEXT: # %bb.2:
; LA32-NEXT: move $a0, $a2
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i32_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amxor_db.w $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i32 %b monotonic
ret i32 %1
}
define i64 @atomicrmw_xor_i64_monotonic(ptr %a, i64 %b) nounwind {
; LA32-LABEL: atomicrmw_xor_i64_monotonic:
; LA32: # %bb.0:
; LA32-NEXT: addi.w $sp, $sp, -16
; LA32-NEXT: st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT: move $a3, $zero
; LA32-NEXT: bl %plt(__atomic_fetch_xor_8)
; LA32-NEXT: ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT: addi.w $sp, $sp, 16
; LA32-NEXT: ret
;
; LA64-LABEL: atomicrmw_xor_i64_monotonic:
; LA64: # %bb.0:
; LA64-NEXT: amxor_db.d $a2, $a1, $a0
; LA64-NEXT: move $a0, $a2
; LA64-NEXT: ret
%1 = atomicrmw xor ptr %a, i64 %b monotonic
ret i64 %1
}