; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-unknown-linux -mcpu=skylake-avx512 -passes=slp-vectorizer -S | FileCheck %s
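; Check that scalar calls to the fast-math llvm.sin.f64 / llvm.sqrt.f64 intrinsics
; on elements loaded from @src are SLP-vectorized into <2 x double> calls to
; llvm.sin.v2f64 / llvm.sqrt.v2f64, with the summed results stored to @dst as a
; single <2 x double> store.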

@src = common global [8 x double] zeroinitializer, align 64
@dst = common global [8 x double] zeroinitializer, align 64

declare double @llvm.sqrt.f64(double)
declare double @llvm.sin.f64(double)

define void @test() {
; CHECK-LABEL: @test(
; CHECK-NEXT:    [[A0:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 0), align 8
; CHECK-NEXT:    [[A1:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 1), align 8
; CHECK-NEXT:    [[A2:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 2), align 8
; CHECK-NEXT:    [[A3:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 3), align 8
; CHECK-NEXT:    [[A4:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 4), align 8
; CHECK-NEXT:    [[A5:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 5), align 8
; CHECK-NEXT:    [[A6:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 6), align 8
; CHECK-NEXT:    [[A7:%.*]] = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 7), align 8
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> poison, double [[A2]], i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <2 x double> [[TMP1]], double [[A6]], i32 1
; CHECK-NEXT:    [[TMP3:%.*]] = call fast <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP2]])
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <2 x double> poison, double [[A3]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <2 x double> [[TMP4]], double [[A7]], i32 1
; CHECK-NEXT:    [[TMP6:%.*]] = call fast <2 x double> @llvm.sin.v2f64(<2 x double> [[TMP5]])
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <2 x double> poison, double [[A0]], i32 0
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <2 x double> [[TMP7]], double [[A4]], i32 1
; CHECK-NEXT:    [[TMP9:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP8]])
; CHECK-NEXT:    [[TMP10:%.*]] = insertelement <2 x double> poison, double [[A1]], i32 0
; CHECK-NEXT:    [[TMP11:%.*]] = insertelement <2 x double> [[TMP10]], double [[A5]], i32 1
; CHECK-NEXT:    [[TMP12:%.*]] = call fast <2 x double> @llvm.sqrt.v2f64(<2 x double> [[TMP11]])
; CHECK-NEXT:    [[TMP13:%.*]] = fadd fast <2 x double> [[TMP9]], [[TMP6]]
; CHECK-NEXT:    [[TMP14:%.*]] = fadd fast <2 x double> [[TMP3]], [[TMP12]]
; CHECK-NEXT:    [[TMP15:%.*]] = fadd fast <2 x double> [[TMP13]], [[TMP14]]
; CHECK-NEXT:    store <2 x double> [[TMP15]], <2 x double>* bitcast ([8 x double]* @dst to <2 x double>*), align 8
; CHECK-NEXT:    ret void
;
  %a0 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 0), align 8
  %a1 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 1), align 8
  %a2 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 2), align 8
  %a3 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 3), align 8
  %a4 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 4), align 8
  %a5 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 5), align 8
  %a6 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 6), align 8
  %a7 = load double, double* getelementptr inbounds ([8 x double], [8 x double]* @src, i32 0, i64 7), align 8
  %sin0 = call fast double @llvm.sin.f64(double %a2)
  %sin1 = call fast double @llvm.sin.f64(double %a3)
  %sqrt0 = call fast double @llvm.sqrt.f64(double %a0)
  %sqrt1 = call fast double @llvm.sqrt.f64(double %a1)
  %sin2 = call fast double @llvm.sin.f64(double %a6)
  %sin3 = call fast double @llvm.sin.f64(double %a7)
  %sqrt2 = call fast double @llvm.sqrt.f64(double %a4)
  %sqrt3 = call fast double @llvm.sqrt.f64(double %a5)
  %res1 = fadd fast double %sqrt0, %sin1
  %res2 = fadd fast double %sin0, %sqrt1
  %res00 = fadd fast double %res1, %res2
  %res3 = fadd fast double %sqrt2, %sin3
  %res4 = fadd fast double %sin2, %sqrt3
  %res01 = fadd fast double %res3, %res4
  store double %res00, double* getelementptr inbounds ([8 x double], [8 x double]* @dst, i32 0, i64 0), align 8
  store double %res01, double* getelementptr inbounds ([8 x double], [8 x double]* @dst, i32 0, i64 1), align 8
  ret void
}