; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX7 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX8 %s
; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX10 %s

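; Tests GlobalISel instruction selection for the llvm.amdgcn.div.scale.f32 and
; llvm.amdgcn.div.scale.f64 intrinsics with vector, scalar, inline-immediate,
; fabs and undef operands on GFX7, GFX8 and GFX10.
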
define amdgpu_kernel void @test_div_scale_f32_1(float addrspace(1)* %out, float addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f32_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 offset:4
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], v0, v0, v2
; GFX7-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: flat_load_dword v1, v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v1, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:4
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, v0, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_2(float addrspace(1)* %out, float addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f32_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 offset:4
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], v2, v0, v2
; GFX7-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: flat_load_dword v1, v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v0, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:4
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, v1, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_1(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f64_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s2
; GFX7-NEXT: v_mov_b32_e32 v1, s3
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT: v_add_i32_e32 v2, vcc, 8, v0
; GFX7-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[2:3], v[2:3], v[0:1]
; GFX7-NEXT: v_mov_b32_e32 v3, s1
; GFX7-NEXT: v_mov_b32_e32 v2, s0
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 8, v0
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[2:3], v[2:3], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v2, s[2:3] offset:8
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s2, v[2:3], v[2:3], v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load volatile double, double addrspace(1)* %gep.0, align 8
  %b = load volatile double, double addrspace(1)* %gep.1, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 false)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_2(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f64_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xd
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s2
; GFX7-NEXT: v_mov_b32_e32 v1, s3
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT: v_add_i32_e32 v2, vcc, 8, v0
; GFX7-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX7-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[0:1], v[2:3], v[0:1]
; GFX7-NEXT: v_mov_b32_e32 v3, s1
; GFX7-NEXT: v_mov_b32_e32 v2, s0
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 8, v0
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[0:1], v[2:3], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x34
; GFX10-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dwordx2 v[0:1], v2, s[2:3]
; GFX10-NEXT: global_load_dwordx2 v[2:3], v2, s[2:3] offset:8
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s2, v[0:1], v[2:3], v[0:1]
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1

  %a = load volatile double, double addrspace(1)* %gep.0, align 8
  %b = load volatile double, double addrspace(1)* %gep.1, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

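; In several of the following kernels an [8 x i32] padding argument pushes the
; scalar operand to a higher kernarg offset (e.g. 0x54 on GFX8/GFX10 instead of
; 0x34), so SGPR operands are exercised from different argument positions.
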
define amdgpu_kernel void @test_div_scale_f32_scalar_num_1(float addrspace(1)* %out, float addrspace(1)* %in, [8 x i32], float %a) {
; GFX7-LABEL: test_div_scale_f32_scalar_num_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dword s8, s[0:1], 0x15
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s2, 0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[0:1], s[6:7]
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b64 s[6:7], s[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[0:1], v0, v0, s8
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_scalar_num_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dword s0, s[0:1], 0x54
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[0:1], v0, v0, s0
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_scalar_num_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_load_dword s0, s[0:1], 0x54
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s0, v0, v0, s0
; GFX10-NEXT: global_store_dword v1, v0, s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr float, float addrspace(1)* %in, i32 %tid

  %b = load float, float addrspace(1)* %gep, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_scalar_num_2(float addrspace(1)* %out, float addrspace(1)* %in, float %a) {
; GFX7-LABEL: test_div_scale_f32_scalar_num_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dword s8, s[0:1], 0xd
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s2, 0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[0:1], s[6:7]
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b64 s[6:7], s[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[0:1], s8, v0, s8
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_scalar_num_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dword s0, s[0:1], 0x34
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[0:1], s0, v0, s0
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_scalar_num_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_load_dword s0, s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s0, s0, v0, s0
; GFX10-NEXT: global_store_dword v1, v0, s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr float, float addrspace(1)* %in, i32 %tid

  %b = load float, float addrspace(1)* %gep, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_scalar_den_1(float addrspace(1)* %out, float addrspace(1)* %in, float %b) {
; GFX7-LABEL: test_div_scale_f32_scalar_den_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dword s8, s[0:1], 0xd
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s2, 0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[0:1], s[6:7]
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b64 s[6:7], s[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[0:1], s8, s8, v0
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_scalar_den_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dword s0, s[0:1], 0x34
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[0:1], s0, s0, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_scalar_den_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_load_dword s0, s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s0, s0, s0, v0
; GFX10-NEXT: global_store_dword v1, v0, s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr float, float addrspace(1)* %in, i32 %tid

  %a = load float, float addrspace(1)* %gep, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_scalar_den_2(float addrspace(1)* %out, float addrspace(1)* %in, float %b) {
; GFX7-LABEL: test_div_scale_f32_scalar_den_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dword s8, s[0:1], 0xd
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s2, 0
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[0:1], s[6:7]
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b64 s[6:7], s[2:3]
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[0:1], v0, s8, v0
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_scalar_den_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dword s0, s[0:1], 0x34
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[0:1], v0, s0, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_scalar_den_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_load_dword s0, s[0:1], 0x34
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s0, v0, s0, v0
; GFX10-NEXT: global_store_dword v1, v0, s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr float, float addrspace(1)* %in, i32 %tid

  %a = load float, float addrspace(1)* %gep, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_scalar_num_1(double addrspace(1)* %out, double addrspace(1)* %in, [8 x i32], double %a) {
; GFX7-LABEL: test_div_scale_f64_scalar_num_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x15
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v1, s7
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX7-NEXT: v_mov_b32_e32 v2, s4
; GFX7-NEXT: v_mov_b32_e32 v3, s5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[0:1], v[0:1], v[0:1], s[0:1]
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_scalar_num_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: v_mov_b32_e32 v3, s5
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[0:1], v[0:1], v[0:1], s[0:1]
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_scalar_num_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dwordx2 v[0:1], v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s0, v[0:1], v[0:1], s[0:1]
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr double, double addrspace(1)* %in, i32 %tid

  %b = load double, double addrspace(1)* %gep, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 false)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_scalar_num_2(double addrspace(1)* %out, double addrspace(1)* %in, [8 x i32], double %a) {
; GFX7-LABEL: test_div_scale_f64_scalar_num_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x15
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v1, s7
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX7-NEXT: v_mov_b32_e32 v2, s4
; GFX7-NEXT: v_mov_b32_e32 v3, s5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[0:1], s[0:1], v[0:1], s[0:1]
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_scalar_num_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: v_mov_b32_e32 v3, s5
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[0:1], s[0:1], v[0:1], s[0:1]
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_scalar_num_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dwordx2 v[0:1], v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s0, s[0:1], v[0:1], s[0:1]
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr double, double addrspace(1)* %in, i32 %tid

  %b = load double, double addrspace(1)* %gep, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_scalar_den_1(double addrspace(1)* %out, double addrspace(1)* %in, [8 x i32], double %b) {
; GFX7-LABEL: test_div_scale_f64_scalar_den_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x15
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v1, s7
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX7-NEXT: v_mov_b32_e32 v2, s4
; GFX7-NEXT: v_mov_b32_e32 v3, s5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[0:1], s[0:1], s[0:1], v[0:1]
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_scalar_den_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: v_mov_b32_e32 v3, s5
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[0:1], s[0:1], s[0:1], v[0:1]
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_scalar_den_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dwordx2 v[0:1], v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s0, s[0:1], s[0:1], v[0:1]
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr double, double addrspace(1)* %in, i32 %tid

  %a = load double, double addrspace(1)* %gep, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 false)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_scalar_den_2(double addrspace(1)* %out, double addrspace(1)* %in, [8 x i32], double %b) {
; GFX7-LABEL: test_div_scale_f64_scalar_den_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x15
; GFX7-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s6
; GFX7-NEXT: v_add_i32_e32 v0, vcc, v0, v2
; GFX7-NEXT: v_mov_b32_e32 v1, s7
; GFX7-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX7-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX7-NEXT: v_mov_b32_e32 v2, s4
; GFX7-NEXT: v_mov_b32_e32 v3, s5
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[0:1], v[0:1], s[0:1], v[0:1]
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_scalar_den_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 3, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s6
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_mov_b32_e32 v1, s7
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GFX8-NEXT: v_mov_b32_e32 v2, s4
; GFX8-NEXT: v_mov_b32_e32 v3, s5
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[0:1], v[0:1], s[0:1], v[0:1]
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_scalar_den_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x54
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dwordx2 v[0:1], v0, s[6:7]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s0, v[0:1], s[0:1], v[0:1]
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[4:5]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr double, double addrspace(1)* %in, i32 %tid

  %a = load double, double addrspace(1)* %gep, align 8

  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_all_scalar_1(float addrspace(1)* %out, [8 x i32], float %a, [8 x i32], float %b) {
; GFX7-LABEL: test_div_scale_f32_all_scalar_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX7-NEXT: s_load_dword s2, s[0:1], 0x13
; GFX7-NEXT: s_load_dword s0, s[0:1], 0x1c
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_div_scale_f32 v0, s[0:1], v0, v0, s2
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_all_scalar_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s2, s[0:1], 0x4c
; GFX8-NEXT: s_load_dword s3, s[0:1], 0x70
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s3
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v0, v0, s2
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_all_scalar_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x2
; GFX10-NEXT: s_load_dword s2, s[0:1], 0x4c
; GFX10-NEXT: s_load_dword s3, s[0:1], 0x70
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, s3, s3, s2
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_all_scalar_2(float addrspace(1)* %out, [8 x i32], float %a, [8 x i32], float %b) {
; GFX7-LABEL: test_div_scale_f32_all_scalar_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; GFX7-NEXT: s_load_dword s2, s[0:1], 0x13
; GFX7-NEXT: s_load_dword s0, s[0:1], 0x1c
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s0
; GFX7-NEXT: v_div_scale_f32 v0, s[0:1], s2, v0, s2
; GFX7-NEXT: buffer_store_dword v0, off, s[4:7], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_all_scalar_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dword s2, s[0:1], 0x4c
; GFX8-NEXT: s_load_dword s3, s[0:1], 0x70
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s3
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], s2, v0, s2
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_all_scalar_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x2
; GFX10-NEXT: s_load_dword s2, s[0:1], 0x4c
; GFX10-NEXT: s_load_dword s3, s[0:1], 0x70
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, s2, s3, s2
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b, i1 true)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_all_scalar_1(double addrspace(1)* %out, [8 x i32], double %a, [8 x i32], double %b) {
; GFX7-LABEL: test_div_scale_f64_all_scalar_1:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x1d
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s4
; GFX7-NEXT: v_mov_b32_e32 v1, s5
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[0:1], v[0:1], s[2:3]
; GFX7-NEXT: v_mov_b32_e32 v3, s1
; GFX7-NEXT: v_mov_b32_e32 v2, s0
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_all_scalar_1:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x74
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[0:1], v[0:1], s[2:3]
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_all_scalar_1:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x2
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; GFX10-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x74
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s2, s[4:5], s[4:5], s[2:3]
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 false)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_all_scalar_2(double addrspace(1)* %out, [8 x i32], double %a, [8 x i32], double %b) {
; GFX7-LABEL: test_div_scale_f64_all_scalar_2:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; GFX7-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x1d
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v0, s4
; GFX7-NEXT: v_mov_b32_e32 v1, s5
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[2:3], s[2:3], v[0:1], s[2:3]
; GFX7-NEXT: v_mov_b32_e32 v3, s1
; GFX7-NEXT: v_mov_b32_e32 v2, s0
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_all_scalar_2:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x74
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s4
; GFX8-NEXT: v_mov_b32_e32 v1, s5
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[2:3], s[2:3], v[0:1], s[2:3]
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_all_scalar_2:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_clause 0x2
; GFX10-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x4c
; GFX10-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x74
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f64 v[0:1], s2, s[2:3], s[4:5], s[2:3]
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double %a, double %b, i1 true)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_inline_imm_num(float addrspace(1)* %out, float addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f32_inline_imm_num:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], v0, v0, 1.0
; GFX7-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_inline_imm_num:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v0, v0, 1.0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_inline_imm_num:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, v0, v0, 1.0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %a = load float, float addrspace(1)* %gep.0, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float 1.0, float %a, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_inline_imm_den(float addrspace(1)* %out, float addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f32_inline_imm_den:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], 2.0, 2.0, v0
; GFX7-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_inline_imm_den:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], 2.0, 2.0, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_inline_imm_den:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_load_dword v0, v0, s[2:3]
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, 2.0, 2.0, v0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %a = load float, float addrspace(1)* %gep.0, align 4

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float 2.0, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f32_fabs_num:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 offset:4
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt vmcnt(1)
; GFX7-NEXT: v_and_b32_e32 v1, 0x7fffffff, v2
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], v0, v0, v1
; GFX7-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_fabs_num:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: flat_load_dword v1, v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(1)
; GFX8-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v1, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_fabs_num:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:4
; GFX10-NEXT: s_waitcnt vmcnt(1)
; GFX10-NEXT: v_and_b32_e32 v1, 0x7fffffff, v1
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, v0, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %a.fabs = call float @llvm.fabs.f32(float %a)

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a.fabs, float %b, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) {
; GFX7-LABEL: test_div_scale_f32_fabs_den:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
; GFX7-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX7-NEXT: v_mov_b32_e32 v1, 0
; GFX7-NEXT: s_mov_b32 s6, 0
; GFX7-NEXT: s_mov_b32 s7, 0xf000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: s_mov_b64 s[4:5], s[2:3]
; GFX7-NEXT: buffer_load_dword v2, v[0:1], s[4:7], 0 addr64
; GFX7-NEXT: buffer_load_dword v0, v[0:1], s[4:7], 0 addr64 offset:4
; GFX7-NEXT: s_mov_b32 s6, -1
; GFX7-NEXT: s_waitcnt vmcnt(0)
; GFX7-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], v0, v0, v2
; GFX7-NEXT: s_mov_b64 s[2:3], s[6:7]
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_fabs_den:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX8-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s2
; GFX8-NEXT: v_mov_b32_e32 v1, s3
; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; GFX8-NEXT: v_add_u32_e32 v2, vcc, 4, v0
; GFX8-NEXT: v_addc_u32_e32 v3, vcc, 0, v1, vcc
; GFX8-NEXT: flat_load_dword v0, v[0:1]
; GFX8-NEXT: flat_load_dword v1, v[2:3]
; GFX8-NEXT: s_waitcnt vmcnt(0)
; GFX8-NEXT: v_and_b32_e32 v1, 0x7fffffff, v1
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v1, v1, v0
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_fabs_den:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
; GFX10-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_clause 0x1
; GFX10-NEXT: global_load_dword v1, v0, s[2:3]
; GFX10-NEXT: global_load_dword v0, v0, s[2:3] offset:4
; GFX10-NEXT: s_waitcnt vmcnt(0)
; GFX10-NEXT: v_and_b32_e32 v0, 0x7fffffff, v0
; GFX10-NEXT: v_div_scale_f32 v0, s2, v0, v0, v1
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1

  %a = load volatile float, float addrspace(1)* %gep.0, align 4
  %b = load volatile float, float addrspace(1)* %gep.1, align 4

  %b.fabs = call float @llvm.fabs.f32(float %b)

  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float %a, float %b.fabs, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

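; The remaining kernels use constant (non-inline-immediate) and undef operand
; combinations.
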
define amdgpu_kernel void @test_div_scale_f32_val_undef_val(float addrspace(1)* %out) #0 {
; GFX7-LABEL: test_div_scale_f32_val_undef_val:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: v_mov_b32_e32 v0, 0x41000000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], s0, s0, v0
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_val_undef_val:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_mov_b32_e32 v0, 0x41000000
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], s0, s0, v0
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_val_undef_val:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, s0, s0, 0x41000000
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float 8.0, float undef, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_undef_val_val(float addrspace(1)* %out) #0 {
; GFX7-LABEL: test_div_scale_f32_undef_val_val:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: v_mov_b32_e32 v0, 0x41000000
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], v0, v0, s0
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_undef_val_val:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_mov_b32_e32 v0, 0x41000000
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], v0, v0, s0
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_undef_val_val:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v0, 0x41000000
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, v0, v0, s0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float undef, float 8.0, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f32_undef_undef_val(float addrspace(1)* %out) #0 {
; GFX7-LABEL: test_div_scale_f32_undef_undef_val:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_div_scale_f32 v0, s[2:3], s0, s0, s0
; GFX7-NEXT: s_mov_b32 s2, -1
; GFX7-NEXT: s_mov_b32 s3, 0xf000
; GFX7-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f32_undef_undef_val:
; GFX8: ; %bb.0:
; GFX8-NEXT: v_div_scale_f32 v2, s[2:3], s0, s0, s0
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v0, s0
; GFX8-NEXT: v_mov_b32_e32 v1, s1
; GFX8-NEXT: flat_store_dword v[0:1], v2
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f32_undef_undef_val:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: v_mov_b32_e32 v1, 0
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: v_div_scale_f32 v0, s2, s0, s0, s0
; GFX10-NEXT: global_store_dword v1, v0, s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { float, i1 } @llvm.amdgcn.div.scale.f32(float undef, float undef, i1 false)
  %result0 = extractvalue { float, i1 } %result, 0
  store float %result0, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @test_div_scale_f64_val_undef_val(double addrspace(1)* %out) #0 {
; GFX7-LABEL: test_div_scale_f64_val_undef_val:
; GFX7: ; %bb.0:
; GFX7-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7-NEXT: s_mov_b32 s2, 0
; GFX7-NEXT: s_mov_b32 s3, 0x40200000
; GFX7-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[0:1], v[0:1], s[2:3]
; GFX7-NEXT: s_waitcnt lgkmcnt(0)
; GFX7-NEXT: v_mov_b32_e32 v3, s1
; GFX7-NEXT: v_mov_b32_e32 v2, s0
; GFX7-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX7-NEXT: s_endpgm
;
; GFX8-LABEL: test_div_scale_f64_val_undef_val:
; GFX8: ; %bb.0:
; GFX8-NEXT: s_mov_b32 s2, 0
; GFX8-NEXT: s_mov_b32 s3, 0x40200000
; GFX8-NEXT: v_div_scale_f64 v[0:1], s[2:3], v[0:1], v[0:1], s[2:3]
; GFX8-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: v_mov_b32_e32 v3, s1
; GFX8-NEXT: v_mov_b32_e32 v2, s0
; GFX8-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GFX8-NEXT: s_endpgm
;
; GFX10-LABEL: test_div_scale_f64_val_undef_val:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_mov_b32 s2, 0
; GFX10-NEXT: s_mov_b32 s3, 0x40200000
; GFX10-NEXT: v_mov_b32_e32 v2, 0
; GFX10-NEXT: v_div_scale_f64 v[0:1], s2, s[0:1], s[0:1], s[2:3]
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: global_store_dwordx2 v2, v[0:1], s[0:1]
; GFX10-NEXT: s_endpgm
  %result = call { double, i1 } @llvm.amdgcn.div.scale.f64(double 8.0, double undef, i1 false)
  %result0 = extractvalue { double, i1 } %result, 0
  store double %result0, double addrspace(1)* %out, align 8
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare { float, i1 } @llvm.amdgcn.div.scale.f32(float, float, i1) #1
declare { double, i1 } @llvm.amdgcn.div.scale.f64(double, double, i1) #1
declare float @llvm.fabs.f32(float) #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }