; llvm-project/llvm/test/CodeGen/AMDGPU/implicit-arg-v5-opt.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -mtriple=amdgcn-amd-amdhsa -S -passes=amdgpu-lower-kernel-attributes,instcombine %s | FileCheck -enable-var-scope -check-prefix=GCN %s
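
; Test folding of loads from the code object v5 implicit kernel argument area
; by the amdgpu-lower-kernel-attributes pass. In COV5 the hidden arguments
; hold the block counts at byte offsets 0/4/8, the group sizes at 12/14/16,
; and the remainders (trailing partial workgroup sizes) at 18/20/22.

; The get_local_size_* kernels use the OpenCL get_local_size() pattern: select
; the remainder offset when the workgroup id addresses the trailing partial
; group, and the full group-size offset otherwise. With
; "uniform-work-group-size"="true" there is no partial group, so the select
; folds to the group-size offset.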
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_local_size_x(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_local_size_x(
; GCN-NEXT: [[IMPLICITARG_PTR:%.*]] = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT: [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[IMPLICITARG_PTR]], i64 12
; GCN-NEXT: [[BC_GEP_LOCAL_SIZE:%.*]] = bitcast i8 addrspace(4)* [[GEP_LOCAL_SIZE]] to i16 addrspace(4)*
; GCN-NEXT: [[LOCAL_SIZE:%.*]] = load i16, i16 addrspace(4)* [[BC_GEP_LOCAL_SIZE]], align 4
; GCN-NEXT: store i16 [[LOCAL_SIZE]], i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%group.id = tail call i32 @llvm.amdgcn.workgroup.id.x()
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%bc.block.count.x = bitcast i8 addrspace(4)* %implicitarg.ptr to i32 addrspace(4)*
%block.count.x = load i32, i32 addrspace(4)* %bc.block.count.x, align 4
%cmp.id.count = icmp ult i32 %group.id, %block.count.x
%local.size.offset = select i1 %cmp.id.count, i64 12, i64 18
%gep.local.size = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 %local.size.offset
%bc.gep.local.size = bitcast i8 addrspace(4)* %gep.local.size to i16 addrspace(4)*
%local.size = load i16, i16 addrspace(4)* %bc.gep.local.size, align 2
store i16 %local.size, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_local_size_y(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_local_size_y(
; GCN-NEXT: [[IMPLICITARG_PTR:%.*]] = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT: [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[IMPLICITARG_PTR]], i64 14
; GCN-NEXT: [[BC_GEP_LOCAL_SIZE:%.*]] = bitcast i8 addrspace(4)* [[GEP_LOCAL_SIZE]] to i16 addrspace(4)*
; GCN-NEXT: [[LOCAL_SIZE:%.*]] = load i16, i16 addrspace(4)* [[BC_GEP_LOCAL_SIZE]], align 2
; GCN-NEXT: store i16 [[LOCAL_SIZE]], i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%group.id = tail call i32 @llvm.amdgcn.workgroup.id.y()
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.block.count.y = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 4
%bc.block.count.y = bitcast i8 addrspace(4)* %gep.block.count.y to i32 addrspace(4)*
%block.count.y = load i32, i32 addrspace(4)* %bc.block.count.y, align 4
%cmp.id.count = icmp ult i32 %group.id, %block.count.y
%local.size.offset = select i1 %cmp.id.count, i64 14, i64 20
%gep.local.size = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 %local.size.offset
%bc.gep.local.size = bitcast i8 addrspace(4)* %gep.local.size to i16 addrspace(4)*
%local.size = load i16, i16 addrspace(4)* %bc.gep.local.size, align 2
store i16 %local.size, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_local_size_z(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_local_size_z(
; GCN-NEXT: [[IMPLICITARG_PTR:%.*]] = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT: [[GEP_LOCAL_SIZE:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[IMPLICITARG_PTR]], i64 16
; GCN-NEXT: [[BC_GEP_LOCAL_SIZE:%.*]] = bitcast i8 addrspace(4)* [[GEP_LOCAL_SIZE]] to i16 addrspace(4)*
; GCN-NEXT: [[LOCAL_SIZE:%.*]] = load i16, i16 addrspace(4)* [[BC_GEP_LOCAL_SIZE]], align 4
; GCN-NEXT: store i16 [[LOCAL_SIZE]], i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%group.id = tail call i32 @llvm.amdgcn.workgroup.id.z()
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.block.count.z = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 8
%bc.block.count.z = bitcast i8 addrspace(4)* %gep.block.count.z to i32 addrspace(4)*
%block.count.z = load i32, i32 addrspace(4)* %bc.block.count.z, align 4
%cmp.id.count = icmp ult i32 %group.id, %block.count.z
%local.size.offset = select i1 %cmp.id.count, i64 16, i64 22
%gep.local.size = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 %local.size.offset
%bc.gep.local.size = bitcast i8 addrspace(4)* %gep.local.size to i16 addrspace(4)*
%local.size = load i16, i16 addrspace(4)* %bc.gep.local.size, align 2
store i16 %local.size, i16 addrspace(1)* %out
ret void
}
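
; With "uniform-work-group-size"="true" every workgroup is full, so the hidden
; remainder fields are known to be zero and these loads fold to constant 0.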
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_remainder_x(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_remainder_x(
; GCN-NEXT: store i16 0, i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.x = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 18
%bc.x = bitcast i8 addrspace(4)* %gep.x to i16 addrspace(4)*
%remainder.x = load i16, i16 addrspace(4)* %bc.x, align 2
store i16 %remainder.x, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_remainder_y(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_remainder_y(
; GCN-NEXT: store i16 0, i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.y = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 20
%bc.y = bitcast i8 addrspace(4)* %gep.y to i16 addrspace(4)*
%remainder.y = load i16, i16 addrspace(4)* %bc.y, align 2
store i16 %remainder.y, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_remainder_z(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_remainder_z(
; GCN-NEXT: store i16 0, i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.z = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 22
%bc.z = bitcast i8 addrspace(4)* %gep.z to i16 addrspace(4)*
%remainder.z = load i16, i16 addrspace(4)* %bc.z, align 2
store i16 %remainder.z, i16 addrspace(1)* %out
ret void
}
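
; Direct group-size loads cannot be folded to a constant without
; !reqd_work_group_size; only the load alignment is refined, based on the
; known 4-byte alignment of the implicit argument pointer.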
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_x(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_work_group_size_x(
; GCN-NEXT: [[IMPLICITARG_PTR:%.*]] = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT: [[GEP_X:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[IMPLICITARG_PTR]], i64 12
; GCN-NEXT: [[BC_X:%.*]] = bitcast i8 addrspace(4)* [[GEP_X]] to i16 addrspace(4)*
; GCN-NEXT: [[GROUP_SIZE_X:%.*]] = load i16, i16 addrspace(4)* [[BC_X]], align 4
; GCN-NEXT: store i16 [[GROUP_SIZE_X]], i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.x = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 12
%bc.x = bitcast i8 addrspace(4)* %gep.x to i16 addrspace(4)*
%group.size.x = load i16, i16 addrspace(4)* %bc.x, align 2
store i16 %group.size.x, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_y(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_work_group_size_y(
; GCN-NEXT: [[IMPLICITARG_PTR:%.*]] = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT: [[GEP_Y:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[IMPLICITARG_PTR]], i64 14
; GCN-NEXT: [[BC_Y:%.*]] = bitcast i8 addrspace(4)* [[GEP_Y]] to i16 addrspace(4)*
; GCN-NEXT: [[GROUP_SIZE_Y:%.*]] = load i16, i16 addrspace(4)* [[BC_Y]], align 2
; GCN-NEXT: store i16 [[GROUP_SIZE_Y]], i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.y = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 14
%bc.y = bitcast i8 addrspace(4)* %gep.y to i16 addrspace(4)*
%group.size.y = load i16, i16 addrspace(4)* %bc.y, align 2
store i16 %group.size.y, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_z(i16 addrspace(1)* %out) #0 {
; GCN-LABEL: @get_work_group_size_z(
; GCN-NEXT: [[IMPLICITARG_PTR:%.*]] = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
; GCN-NEXT: [[GEP_Z:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[IMPLICITARG_PTR]], i64 16
; GCN-NEXT: [[BC_Z:%.*]] = bitcast i8 addrspace(4)* [[GEP_Z]] to i16 addrspace(4)*
; GCN-NEXT: [[GROUP_SIZE_Z:%.*]] = load i16, i16 addrspace(4)* [[BC_Z]], align 4
; GCN-NEXT: store i16 [[GROUP_SIZE_Z]], i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.z = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 16
%bc.z = bitcast i8 addrspace(4)* %gep.z to i16 addrspace(4)*
%group.size.z = load i16, i16 addrspace(4)* %bc.z, align 2
store i16 %group.size.z, i16 addrspace(1)* %out
ret void
}
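
; With !reqd_work_group_size the group sizes are compile-time constants, so
; the loads fold to the metadata values (8, 16, 2).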
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_x_reqd(i16 addrspace(1)* %out) #0 !reqd_work_group_size !0 {
; GCN-LABEL: @get_work_group_size_x_reqd(
; GCN-NEXT: store i16 8, i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.x = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 12
%bc.x = bitcast i8 addrspace(4)* %gep.x to i16 addrspace(4)*
%group.size.x = load i16, i16 addrspace(4)* %bc.x, align 2
store i16 %group.size.x, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_y_reqd(i16 addrspace(1)* %out) #0 !reqd_work_group_size !0 {
; GCN-LABEL: @get_work_group_size_y_reqd(
; GCN-NEXT: store i16 16, i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.y = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 14
%bc.y = bitcast i8 addrspace(4)* %gep.y to i16 addrspace(4)*
%group.size.y = load i16, i16 addrspace(4)* %bc.y, align 2
store i16 %group.size.y, i16 addrspace(1)* %out
ret void
}
; Function Attrs: mustprogress nofree norecurse nosync nounwind readnone willreturn
define amdgpu_kernel void @get_work_group_size_z_reqd(i16 addrspace(1)* %out) #0 !reqd_work_group_size !0 {
; GCN-LABEL: @get_work_group_size_z_reqd(
; GCN-NEXT: store i16 2, i16 addrspace(1)* [[OUT:%.*]], align 2
; GCN-NEXT: ret void
;
%implicitarg.ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
%gep.z = getelementptr inbounds i8, i8 addrspace(4)* %implicitarg.ptr, i64 16
%bc.z = bitcast i8 addrspace(4)* %gep.z to i16 addrspace(4)*
%group.size.z = load i16, i16 addrspace(4)* %bc.z, align 2
store i16 %group.size.z, i16 addrspace(1)* %out
ret void
}
declare i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
declare i32 @llvm.amdgcn.workgroup.id.x() #1
declare i32 @llvm.amdgcn.workgroup.id.y() #1
declare i32 @llvm.amdgcn.workgroup.id.z() #1
!llvm.module.flags = !{!1}
attributes #0 = { nounwind "uniform-work-group-size"="true" }
attributes #1 = { nounwind readnone speculatable }
!0 = !{i32 8, i32 16, i32 2}
!1 = !{i32 1, !"amdgpu_code_object_version", i32 500}