; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX4
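
; This file tests fixed-length vector codegen for sign- and zero-extending
; loads (i1/i8/i16/i32 elements widened to larger integer vectors) and for
; truncating stores, with the fixed-length LMUL capped at 1 (LMULMAX1) and
; at 4 (LMULMAX4).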

define <2 x i16> @sextload_v2i1_v2i16(<2 x i1>* %x) {
; CHECK-LABEL: sextload_v2i1_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, -1, v0
; CHECK-NEXT: ret
  %y = load <2 x i1>, <2 x i1>* %x
  %z = sext <2 x i1> %y to <2 x i16>
  ret <2 x i16> %z
}

define <2 x i16> @sextload_v2i8_v2i16(<2 x i8>* %x) {
; CHECK-LABEL: sextload_v2i8_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i8>, <2 x i8>* %x
  %z = sext <2 x i8> %y to <2 x i16>
  ret <2 x i16> %z
}

define <2 x i16> @zextload_v2i8_v2i16(<2 x i8>* %x) {
; CHECK-LABEL: zextload_v2i8_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i8>, <2 x i8>* %x
  %z = zext <2 x i8> %y to <2 x i16>
  ret <2 x i16> %z
}

define <2 x i32> @sextload_v2i8_v2i32(<2 x i8>* %x) {
; CHECK-LABEL: sextload_v2i8_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf4 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i8>, <2 x i8>* %x
  %z = sext <2 x i8> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i32> @zextload_v2i8_v2i32(<2 x i8>* %x) {
; CHECK-LABEL: zextload_v2i8_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i8>, <2 x i8>* %x
  %z = zext <2 x i8> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i64> @sextload_v2i8_v2i64(<2 x i8>* %x) {
; CHECK-LABEL: sextload_v2i8_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf8 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i8>, <2 x i8>* %x
  %z = sext <2 x i8> %y to <2 x i64>
  ret <2 x i64> %z
}

define <2 x i64> @zextload_v2i8_v2i64(<2 x i8>* %x) {
; CHECK-LABEL: zextload_v2i8_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vzext.vf8 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i8>, <2 x i8>* %x
  %z = zext <2 x i8> %y to <2 x i64>
  ret <2 x i64> %z
}

define <4 x i16> @sextload_v4i8_v4i16(<4 x i8>* %x) {
; CHECK-LABEL: sextload_v4i8_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <4 x i8>, <4 x i8>* %x
  %z = sext <4 x i8> %y to <4 x i16>
  ret <4 x i16> %z
}

define <4 x i16> @zextload_v4i8_v4i16(<4 x i8>* %x) {
; CHECK-LABEL: zextload_v4i8_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <4 x i8>, <4 x i8>* %x
  %z = zext <4 x i8> %y to <4 x i16>
  ret <4 x i16> %z
}

define <4 x i32> @sextload_v4i8_v4i32(<4 x i8>* %x) {
; CHECK-LABEL: sextload_v4i8_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf4 v8, v9
; CHECK-NEXT: ret
  %y = load <4 x i8>, <4 x i8>* %x
  %z = sext <4 x i8> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i32> @zextload_v4i8_v4i32(<4 x i8>* %x) {
; CHECK-LABEL: zextload_v4i8_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
  %y = load <4 x i8>, <4 x i8>* %x
  %z = zext <4 x i8> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i64> @sextload_v4i8_v4i64(<4 x i8>* %x) {
; LMULMAX1-LABEL: sextload_v4i8_v4i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v9, v8
; LMULMAX1-NEXT: vsext.vf8 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v4i8_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX4-NEXT: vle8.v v10, (a0)
; LMULMAX4-NEXT: vsext.vf8 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <4 x i8>, <4 x i8>* %x
  %z = sext <4 x i8> %y to <4 x i64>
  ret <4 x i64> %z
}

define <4 x i64> @zextload_v4i8_v4i64(<4 x i8>* %x) {
; LMULMAX1-LABEL: zextload_v4i8_v4i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v9, v8
; LMULMAX1-NEXT: vzext.vf8 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v4i8_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX4-NEXT: vle8.v v10, (a0)
; LMULMAX4-NEXT: vzext.vf8 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <4 x i8>, <4 x i8>* %x
  %z = zext <4 x i8> %y to <4 x i64>
  ret <4 x i64> %z
}

define <8 x i16> @sextload_v8i8_v8i16(<8 x i8>* %x) {
; CHECK-LABEL: sextload_v8i8_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <8 x i8>, <8 x i8>* %x
  %z = sext <8 x i8> %y to <8 x i16>
  ret <8 x i16> %z
}

define <8 x i16> @zextload_v8i8_v8i16(<8 x i8>* %x) {
; CHECK-LABEL: zextload_v8i8_v8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <8 x i8>, <8 x i8>* %x
  %z = zext <8 x i8> %y to <8 x i16>
  ret <8 x i16> %z
}

define <8 x i32> @sextload_v8i8_v8i32(<8 x i8>* %x) {
; LMULMAX1-LABEL: sextload_v8i8_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v8i8_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vle8.v v10, (a0)
; LMULMAX4-NEXT: vsext.vf4 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <8 x i8>, <8 x i8>* %x
  %z = sext <8 x i8> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i32> @zextload_v8i8_v8i32(<8 x i8>* %x) {
; LMULMAX1-LABEL: zextload_v8i8_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v8i8_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vle8.v v10, (a0)
; LMULMAX4-NEXT: vzext.vf4 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <8 x i8>, <8 x i8>* %x
  %z = zext <8 x i8> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i64> @sextload_v8i8_v8i64(<8 x i8>* %x) {
; LMULMAX1-LABEL: sextload_v8i8_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; LMULMAX1-NEXT: vle8.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v11, v8
; LMULMAX1-NEXT: vsext.vf8 v8, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v8i8_v8i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle8.v v12, (a0)
; LMULMAX4-NEXT: vsext.vf8 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <8 x i8>, <8 x i8>* %x
  %z = sext <8 x i8> %y to <8 x i64>
  ret <8 x i64> %z
}

define <8 x i64> @zextload_v8i8_v8i64(<8 x i8>* %x) {
; LMULMAX1-LABEL: zextload_v8i8_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; LMULMAX1-NEXT: vle8.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v11, v8
; LMULMAX1-NEXT: vzext.vf8 v8, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v8i8_v8i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle8.v v12, (a0)
; LMULMAX4-NEXT: vzext.vf8 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <8 x i8>, <8 x i8>* %x
  %z = zext <8 x i8> %y to <8 x i64>
  ret <8 x i64> %z
}

define <16 x i16> @sextload_v16i8_v16i16(<16 x i8>* %x) {
; LMULMAX1-LABEL: sextload_v16i8_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v16i8_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX4-NEXT: vle8.v v10, (a0)
; LMULMAX4-NEXT: vsext.vf2 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <16 x i8>, <16 x i8>* %x
  %z = sext <16 x i8> %y to <16 x i16>
  ret <16 x i16> %z
}

define <16 x i16> @zextload_v16i8_v16i16(<16 x i8>* %x) {
; LMULMAX1-LABEL: zextload_v16i8_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT: vle8.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 8
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v16i8_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX4-NEXT: vle8.v v10, (a0)
; LMULMAX4-NEXT: vzext.vf2 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <16 x i8>, <16 x i8>* %x
  %z = zext <16 x i8> %y to <16 x i16>
  ret <16 x i16> %z
}

define <16 x i32> @sextload_v16i8_v16i32(<16 x i8>* %x) {
; LMULMAX1-LABEL: sextload_v16i8_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT: vle8.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v16i8_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle8.v v12, (a0)
; LMULMAX4-NEXT: vsext.vf4 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <16 x i8>, <16 x i8>* %x
  %z = sext <16 x i8> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i32> @zextload_v16i8_v16i32(<16 x i8>* %x) {
; LMULMAX1-LABEL: zextload_v16i8_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT: vle8.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v11, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v16i8_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle8.v v12, (a0)
; LMULMAX4-NEXT: vzext.vf4 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <16 x i8>, <16 x i8>* %x
  %z = zext <16 x i8> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i64> @sextload_v16i8_v16i64(<16 x i8>* %x) {
; LMULMAX1-LABEL: sextload_v16i8_v16i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT: vle8.v v16, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v12, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v9, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v16, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v10, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v14, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v13, v14
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v14, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v15, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v11, v15
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf8 v15, v8
; LMULMAX1-NEXT: vsext.vf8 v8, v16
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v16i8_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX4-NEXT: vle8.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vsext.vf8 v12, v8
; LMULMAX4-NEXT: vsext.vf8 v8, v16
; LMULMAX4-NEXT: ret
  %y = load <16 x i8>, <16 x i8>* %x
  %z = sext <16 x i8> %y to <16 x i64>
  ret <16 x i64> %z
}

define <16 x i64> @zextload_v16i8_v16i64(<16 x i8>* %x) {
; LMULMAX1-LABEL: zextload_v16i8_v16i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX1-NEXT: vle8.v v16, (a0)
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v12, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v10, v16, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v9, v10
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v16, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v10, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v14, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v13, v14
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v14, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v15, v11, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v11, v15
; LMULMAX1-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf8 v15, v8
; LMULMAX1-NEXT: vzext.vf8 v8, v16
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v16i8_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX4-NEXT: vle8.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vzext.vf8 v12, v8
; LMULMAX4-NEXT: vzext.vf8 v8, v16
; LMULMAX4-NEXT: ret
  %y = load <16 x i8>, <16 x i8>* %x
  %z = zext <16 x i8> %y to <16 x i64>
  ret <16 x i64> %z
}
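
; The truncating stores below narrow one power-of-two element-size step at a
; time using vnsrl.wi with a shift of 0; the i8 -> i1 case instead rebuilds a
; mask with vand.vi/vmsne.vi and stores it with vsm.v.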

define void @truncstore_v2i8_v2i1(<2 x i8> %x, <2 x i1>* %z) {
; CHECK-LABEL: truncstore_v2i8_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vsetivli zero, 2, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v9, v8, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v8, v9, 0
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i8> %x to <2 x i1>
  store <2 x i1> %y, <2 x i1>* %z
  ret void
}

define void @truncstore_v2i16_v2i8(<2 x i16> %x, <2 x i8>* %z) {
; CHECK-LABEL: truncstore_v2i16_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i16> %x to <2 x i8>
  store <2 x i8> %y, <2 x i8>* %z
  ret void
}

define <2 x i32> @sextload_v2i16_v2i32(<2 x i16>* %x) {
; CHECK-LABEL: sextload_v2i16_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i16>, <2 x i16>* %x
  %z = sext <2 x i16> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i32> @zextload_v2i16_v2i32(<2 x i16>* %x) {
; CHECK-LABEL: zextload_v2i16_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i16>, <2 x i16>* %x
  %z = zext <2 x i16> %y to <2 x i32>
  ret <2 x i32> %z
}

define <2 x i64> @sextload_v2i16_v2i64(<2 x i16>* %x) {
; CHECK-LABEL: sextload_v2i16_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vsext.vf4 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i16>, <2 x i16>* %x
  %z = sext <2 x i16> %y to <2 x i64>
  ret <2 x i64> %z
}

define <2 x i64> @zextload_v2i16_v2i64(<2 x i16>* %x) {
; CHECK-LABEL: zextload_v2i16_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vzext.vf4 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i16>, <2 x i16>* %x
  %z = zext <2 x i16> %y to <2 x i64>
  ret <2 x i64> %z
}

define void @truncstore_v4i16_v4i8(<4 x i16> %x, <4 x i8>* %z) {
; CHECK-LABEL: truncstore_v4i16_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <4 x i16> %x to <4 x i8>
  store <4 x i8> %y, <4 x i8>* %z
  ret void
}

define <4 x i32> @sextload_v4i16_v4i32(<4 x i16>* %x) {
; CHECK-LABEL: sextload_v4i16_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <4 x i16>, <4 x i16>* %x
  %z = sext <4 x i16> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i32> @zextload_v4i16_v4i32(<4 x i16>* %x) {
; CHECK-LABEL: zextload_v4i16_v4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <4 x i16>, <4 x i16>* %x
  %z = zext <4 x i16> %y to <4 x i32>
  ret <4 x i32> %z
}

define <4 x i64> @sextload_v4i16_v4i64(<4 x i16>* %x) {
; LMULMAX1-LABEL: sextload_v4i16_v4i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v4i16_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX4-NEXT: vle16.v v10, (a0)
; LMULMAX4-NEXT: vsext.vf4 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <4 x i16>, <4 x i16>* %x
  %z = sext <4 x i16> %y to <4 x i64>
  ret <4 x i64> %z
}

define <4 x i64> @zextload_v4i16_v4i64(<4 x i16>* %x) {
; LMULMAX1-LABEL: zextload_v4i16_v4i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v4i16_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX4-NEXT: vle16.v v10, (a0)
; LMULMAX4-NEXT: vzext.vf4 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <4 x i16>, <4 x i16>* %x
  %z = zext <4 x i16> %y to <4 x i64>
  ret <4 x i64> %z
}

define void @truncstore_v8i16_v8i8(<8 x i16> %x, <8 x i8>* %z) {
; CHECK-LABEL: truncstore_v8i16_v8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <8 x i16> %x to <8 x i8>
  store <8 x i8> %y, <8 x i8>* %z
  ret void
}

define <8 x i32> @sextload_v8i16_v8i32(<8 x i16>* %x) {
; LMULMAX1-LABEL: sextload_v8i16_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v8i16_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vle16.v v10, (a0)
; LMULMAX4-NEXT: vsext.vf2 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <8 x i16>, <8 x i16>* %x
  %z = sext <8 x i16> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i32> @zextload_v8i16_v8i32(<8 x i16>* %x) {
; LMULMAX1-LABEL: zextload_v8i16_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v8i16_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vle16.v v10, (a0)
; LMULMAX4-NEXT: vzext.vf2 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <8 x i16>, <8 x i16>* %x
  %z = zext <8 x i16> %y to <8 x i32>
  ret <8 x i32> %z
}

define <8 x i64> @sextload_v8i16_v8i64(<8 x i16>* %x) {
; LMULMAX1-LABEL: sextload_v8i16_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v8i16_v8i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle16.v v12, (a0)
; LMULMAX4-NEXT: vsext.vf4 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <8 x i16>, <8 x i16>* %x
  %z = sext <8 x i16> %y to <8 x i64>
  ret <8 x i64> %z
}

define <8 x i64> @zextload_v8i16_v8i64(<8 x i16>* %x) {
; LMULMAX1-LABEL: zextload_v8i16_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v11, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v8i16_v8i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle16.v v12, (a0)
; LMULMAX4-NEXT: vzext.vf4 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <8 x i16>, <8 x i16>* %x
  %z = zext <8 x i16> %y to <8 x i64>
  ret <8 x i64> %z
}

define void @truncstore_v16i16_v16i8(<16 x i16> %x, <16 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v16i16_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i16_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX4-NEXT: vse8.v v10, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <16 x i16> %x to <16 x i8>
  store <16 x i8> %y, <16 x i8>* %z
  ret void
}

define <16 x i32> @sextload_v16i16_v16i32(<16 x i16>* %x) {
; LMULMAX1-LABEL: sextload_v16i16_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v11, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v16i16_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle16.v v12, (a0)
; LMULMAX4-NEXT: vsext.vf2 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <16 x i16>, <16 x i16>* %x
  %z = sext <16 x i16> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i32> @zextload_v16i16_v16i32(<16 x i16>* %x) {
; LMULMAX1-LABEL: zextload_v16i16_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v10, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v11, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v16i16_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle16.v v12, (a0)
; LMULMAX4-NEXT: vzext.vf2 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <16 x i16>, <16 x i16>* %x
  %z = zext <16 x i16> %y to <16 x i32>
  ret <16 x i32> %z
}

define <16 x i64> @sextload_v16i16_v16i64(<16 x i16>* %x) {
; LMULMAX1-LABEL: sextload_v16i16_v16i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle16.v v16, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v15, v16, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v14, v15
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v11, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v13, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v15, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf4 v15, v8
; LMULMAX1-NEXT: vsext.vf4 v8, v12
; LMULMAX1-NEXT: vsext.vf4 v12, v16
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v16i16_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX4-NEXT: vle16.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vsext.vf4 v12, v8
; LMULMAX4-NEXT: vsext.vf4 v8, v16
; LMULMAX4-NEXT: ret
  %y = load <16 x i16>, <16 x i16>* %x
  %z = sext <16 x i16> %y to <16 x i64>
  ret <16 x i64> %z
}

define <16 x i64> @zextload_v16i16_v16i64(<16 x i16>* %x) {
; LMULMAX1-LABEL: zextload_v16i16_v16i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-NEXT: vle16.v v12, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle16.v v16, (a0)
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v10, v8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v15, v16, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v14, v15
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v11, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v9, v11
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v11, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v13, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v15, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf4 v15, v8
; LMULMAX1-NEXT: vzext.vf4 v8, v12
; LMULMAX1-NEXT: vzext.vf4 v12, v16
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v16i16_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX4-NEXT: vle16.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m2, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vzext.vf4 v12, v8
; LMULMAX4-NEXT: vzext.vf4 v8, v16
; LMULMAX4-NEXT: ret
  %y = load <16 x i16>, <16 x i16>* %x
  %z = zext <16 x i16> %y to <16 x i64>
  ret <16 x i64> %z
}

define void @truncstore_v2i32_v2i8(<2 x i32> %x, <2 x i8>* %z) {
; CHECK-LABEL: truncstore_v2i32_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i32> %x to <2 x i8>
  store <2 x i8> %y, <2 x i8>* %z
  ret void
}

define void @truncstore_v2i32_v2i16(<2 x i32> %x, <2 x i16>* %z) {
; CHECK-LABEL: truncstore_v2i32_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i32> %x to <2 x i16>
  store <2 x i16> %y, <2 x i16>* %z
  ret void
}

define <2 x i64> @sextload_v2i32_v2i64(<2 x i32>* %x) {
; CHECK-LABEL: sextload_v2i32_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle32.v v9, (a0)
; CHECK-NEXT: vsext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i32>, <2 x i32>* %x
  %z = sext <2 x i32> %y to <2 x i64>
  ret <2 x i64> %z
}

define <2 x i64> @zextload_v2i32_v2i64(<2 x i32>* %x) {
; CHECK-LABEL: zextload_v2i32_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle32.v v9, (a0)
; CHECK-NEXT: vzext.vf2 v8, v9
; CHECK-NEXT: ret
  %y = load <2 x i32>, <2 x i32>* %x
  %z = zext <2 x i32> %y to <2 x i64>
  ret <2 x i64> %z
}

define void @truncstore_v4i32_v4i8(<4 x i32> %x, <4 x i8>* %z) {
; CHECK-LABEL: truncstore_v4i32_v4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <4 x i32> %x to <4 x i8>
  store <4 x i8> %y, <4 x i8>* %z
  ret void
}

define void @truncstore_v4i32_v4i16(<4 x i32> %x, <4 x i16>* %z) {
; CHECK-LABEL: truncstore_v4i32_v4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <4 x i32> %x to <4 x i16>
  store <4 x i16> %y, <4 x i16>* %z
  ret void
}

define <4 x i64> @sextload_v4i32_v4i64(<4 x i32>* %x) {
; LMULMAX1-LABEL: sextload_v4i32_v4i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v4i32_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX4-NEXT: vle32.v v10, (a0)
; LMULMAX4-NEXT: vsext.vf2 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <4 x i32>, <4 x i32>* %x
  %z = sext <4 x i32> %y to <4 x i64>
  ret <4 x i64> %z
}

define <4 x i64> @zextload_v4i32_v4i64(<4 x i32>* %x) {
; LMULMAX1-LABEL: zextload_v4i32_v4i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v4i32_v4i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; LMULMAX4-NEXT: vle32.v v10, (a0)
; LMULMAX4-NEXT: vzext.vf2 v8, v10
; LMULMAX4-NEXT: ret
  %y = load <4 x i32>, <4 x i32>* %x
  %z = zext <4 x i32> %y to <4 x i64>
  ret <4 x i64> %z
}

define void @truncstore_v8i32_v8i8(<8 x i32> %x, <8 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v8i32_v8i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i32_v8i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <8 x i32> %x to <8 x i8>
  store <8 x i8> %y, <8 x i8>* %z
  ret void
}

define void @truncstore_v8i32_v8i16(<8 x i32> %x, <8 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v8i32_v8i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i32_v8i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX4-NEXT: vse16.v v10, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <8 x i32> %x to <8 x i16>
  store <8 x i16> %y, <8 x i16>* %z
  ret void
}

define <8 x i64> @sextload_v8i32_v8i64(<8 x i32>* %x) {
; LMULMAX1-LABEL: sextload_v8i32_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v11, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v8i32_v8i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v12, (a0)
; LMULMAX4-NEXT: vsext.vf2 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <8 x i32>, <8 x i32>* %x
  %z = sext <8 x i32> %y to <8 x i64>
  ret <8 x i64> %z
}

define <8 x i64> @zextload_v8i32_v8i64(<8 x i32>* %x) {
; LMULMAX1-LABEL: zextload_v8i32_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v11, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v12
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v8i32_v8i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v12, (a0)
; LMULMAX4-NEXT: vzext.vf2 v8, v12
; LMULMAX4-NEXT: ret
  %y = load <8 x i32>, <8 x i32>* %x
  %z = zext <8 x i32> %y to <8 x i64>
  ret <8 x i64> %z
}

define void @truncstore_v16i32_v16i8(<16 x i32> %x, <16 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v16i32_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i32_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <16 x i32> %x to <16 x i8>
  store <16 x i8> %y, <16 x i8>* %z
  ret void
}

define void @truncstore_v16i32_v16i16(<16 x i32> %x, <16 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v16i32_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i32_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
; LMULMAX4-NEXT: vse16.v v12, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <16 x i32> %x to <16 x i16>
  store <16 x i16> %y, <16 x i16>* %z
  ret void
}

define <16 x i64> @sextload_v16i32_v16i64(<16 x i32>* %x) {
; LMULMAX1-LABEL: sextload_v16i32_v16i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 48
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vle32.v v16, (a1)
; LMULMAX1-NEXT: addi a1, a0, 32
; LMULMAX1-NEXT: vle32.v v14, (a1)
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v9, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v11, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v14, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v13, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vsext.vf2 v15, v8
; LMULMAX1-NEXT: vsext.vf2 v8, v10
; LMULMAX1-NEXT: vsext.vf2 v10, v12
; LMULMAX1-NEXT: vsext.vf2 v12, v14
; LMULMAX1-NEXT: vsext.vf2 v14, v16
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: sextload_v16i32_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vsext.vf2 v12, v8
; LMULMAX4-NEXT: vsext.vf2 v8, v16
; LMULMAX4-NEXT: ret
  %y = load <16 x i32>, <16 x i32>* %x
  %z = sext <16 x i32> %y to <16 x i64>
  ret <16 x i64> %z
}

define <16 x i64> @zextload_v16i32_v16i64(<16 x i32>* %x) {
; LMULMAX1-LABEL: zextload_v16i32_v16i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 48
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vle32.v v16, (a1)
; LMULMAX1-NEXT: addi a1, a0, 32
; LMULMAX1-NEXT: vle32.v v14, (a1)
; LMULMAX1-NEXT: vle32.v v10, (a0)
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vle32.v v12, (a0)
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v10, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v9, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v12, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v11, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v14, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v13, v8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; LMULMAX1-NEXT: vslidedown.vi v8, v16, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; LMULMAX1-NEXT: vzext.vf2 v15, v8
; LMULMAX1-NEXT: vzext.vf2 v8, v10
; LMULMAX1-NEXT: vzext.vf2 v10, v12
; LMULMAX1-NEXT: vzext.vf2 v12, v14
; LMULMAX1-NEXT: vzext.vf2 v14, v16
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: zextload_v16i32_v16i64:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; LMULMAX4-NEXT: vle32.v v16, (a0)
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m4, ta, ma
; LMULMAX4-NEXT: vslidedown.vi v8, v16, 8
; LMULMAX4-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; LMULMAX4-NEXT: vzext.vf2 v12, v8
; LMULMAX4-NEXT: vzext.vf2 v8, v16
; LMULMAX4-NEXT: ret
  %y = load <16 x i32>, <16 x i32>* %x
  %z = zext <16 x i32> %y to <16 x i64>
  ret <16 x i64> %z
}

define void @truncstore_v2i64_v2i8(<2 x i64> %x, <2 x i8>* %z) {
; CHECK-LABEL: truncstore_v2i64_v2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i64> %x to <2 x i8>
  store <2 x i8> %y, <2 x i8>* %z
  ret void
}

define void @truncstore_v2i64_v2i16(<2 x i64> %x, <2 x i16>* %z) {
; CHECK-LABEL: truncstore_v2i64_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse16.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i64> %x to <2 x i16>
  store <2 x i16> %y, <2 x i16>* %z
  ret void
}

define void @truncstore_v2i64_v2i32(<2 x i64> %x, <2 x i32>* %z) {
; CHECK-LABEL: truncstore_v2i64_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: ret
  %y = trunc <2 x i64> %x to <2 x i32>
  store <2 x i32> %y, <2 x i32>* %z
  ret void
}

define void @truncstore_v4i64_v4i8(<4 x i64> %x, <4 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v4i64_v4i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf4, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v4i64_v4i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <4 x i64> %x to <4 x i8>
  store <4 x i8> %y, <4 x i8>* %z
  ret void
}

define void @truncstore_v4i64_v4i16(<4 x i64> %x, <4 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v4i64_v4i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v4i64_v4i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v10, 0
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <4 x i64> %x to <4 x i16>
  store <4 x i16> %y, <4 x i16>* %z
  ret void
}

define void @truncstore_v4i64_v4i32(<4 x i64> %x, <4 x i32>* %z) {
; LMULMAX1-LABEL: truncstore_v4i64_v4i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v4i64_v4i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v10, v8, 0
; LMULMAX4-NEXT: vse32.v v10, (a0)
; LMULMAX4-NEXT: ret
  %y = trunc <4 x i64> %x to <4 x i32>
  store <4 x i32> %y, <4 x i32>* %z
  ret void
}
|
|
|
|
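; For <8 x i64> to <8 x i8> under LMULMAX1, the four source registers
; (v8-v11) are narrowed 64->8 one at a time and inserted at element offsets
; 2, 4 and 6 by tail-undisturbed slides with increasing VL; under LMULMAX4
; the m4 source is narrowed in three whole-vector steps.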
define void @truncstore_v8i64_v8i8(<8 x i64> %x, <8 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v8i64_v8i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, mf2, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i64_v8i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i8>
store <8 x i8> %y, <8 x i8>* %z
ret void
}

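; <8 x i64> to <8 x i16> follows the same insertion scheme with two
; narrowing steps per source register, or two whole-vector steps under
; LMULMAX4.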
define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v8i64_v8i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i64_v8i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v12, 0
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i16>
store <8 x i16> %y, <8 x i16>* %z
ret void
}

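; The <8 x i32> result no longer fits a single LMULMAX1 register, so two m1
; results are assembled and stored separately at a0 and a0+16.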
define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %z) {
; LMULMAX1-LABEL: truncstore_v8i64_v8i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v10, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v8i64_v8i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <8 x i64> %x to <8 x i32>
store <8 x i32> %y, <8 x i32>* %z
ret void
}

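; <16 x i64> to <16 x i8>: the LMULMAX1 lowering narrows all eight source
; registers (v8-v15) and slides them in two elements at a time; the LMULMAX4
; lowering narrows each m4 half down to e8 and joins them with a single
; vslideup.vi by 8.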
define void @truncstore_v16i64_v16i8(<16 x i64> %x, <16 x i8>* %z) {
; LMULMAX1-LABEL: truncstore_v16i64_v16i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v12, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 10, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 8
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 10
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 12
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 14
; LMULMAX1-NEXT: vse8.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i64_v16i8:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v12, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX4-NEXT: vsetivli zero, 16, e8, m1, tu, ma
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse8.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i8>
store <16 x i8> %y, <16 x i8>* %z
ret void
}

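; For <16 x i64> to <16 x i16> the LMULMAX1 result spans two m1 registers
; (v8 and v10), each assembled independently and stored at a0 and a0+16.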
define void @truncstore_v16i64_v16i16(<16 x i64> %x, <16 x i16>* %z) {
; LMULMAX1-LABEL: truncstore_v16i64_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v10, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 6
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v10, v12, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v14, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v10, v9, 4
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0
; LMULMAX1-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vsetivli zero, 8, e16, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v10, v9, 6
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v10, (a1)
; LMULMAX1-NEXT: vse16.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i64_v16i16:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v12, v16, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v14, v8, 0
; LMULMAX4-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v8, v14, 0
; LMULMAX4-NEXT: vsetivli zero, 16, e16, m2, tu, ma
; LMULMAX4-NEXT: vslideup.vi v8, v12, 8
; LMULMAX4-NEXT: vse16.v v8, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i16>
store <16 x i16> %y, <16 x i16>* %z
ret void
}

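; For <16 x i64> to <16 x i32> the LMULMAX1 result spans four m1 registers
; stored at 16-byte offsets; under LMULMAX4 one vslideup.vi at m4 joins the
; two narrowed halves.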
define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) {
; LMULMAX1-LABEL: truncstore_v16i64_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v9, 0
; LMULMAX1-NEXT: vnsrl.wi v8, v8, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v8, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v11, 0
; LMULMAX1-NEXT: vnsrl.wi v10, v10, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v10, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v13, 0
; LMULMAX1-NEXT: vnsrl.wi v11, v12, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v11, v9, 2
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vnsrl.wi v9, v15, 0
; LMULMAX1-NEXT: vnsrl.wi v12, v14, 0
; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; LMULMAX1-NEXT: vslideup.vi v12, v9, 2
; LMULMAX1-NEXT: addi a1, a0, 48
; LMULMAX1-NEXT: vse32.v v12, (a1)
; LMULMAX1-NEXT: addi a1, a0, 32
; LMULMAX1-NEXT: vse32.v v11, (a1)
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v10, (a1)
; LMULMAX1-NEXT: vse32.v v8, (a0)
; LMULMAX1-NEXT: ret
;
; LMULMAX4-LABEL: truncstore_v16i64_v16i32:
; LMULMAX4: # %bb.0:
; LMULMAX4-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX4-NEXT: vnsrl.wi v16, v12, 0
; LMULMAX4-NEXT: vnsrl.wi v12, v8, 0
; LMULMAX4-NEXT: vsetivli zero, 16, e32, m4, tu, ma
; LMULMAX4-NEXT: vslideup.vi v12, v16, 8
; LMULMAX4-NEXT: vse32.v v12, (a0)
; LMULMAX4-NEXT: ret
%y = trunc <16 x i64> %x to <16 x i32>
store <16 x i32> %y, <16 x i32>* %z
ret void
}