@@ -2903,15 +2903,12 @@ define amdgpu_ps <4 x float> @getresinfo_dmask0(<8 x i32> inreg %rsrc, <4 x floa
  ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX9: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX9: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
- ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX9: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.getresinfo.1d), 0, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0
- ; GFX9: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; GFX9: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
- ; GFX9: $vgpr1 = COPY [[DEF]](s32)
- ; GFX9: $vgpr2 = COPY [[DEF]](s32)
- ; GFX9: $vgpr3 = COPY [[DEF]](s32)
+ ; GFX9: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
+ ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<4 x s32>)
+ ; GFX9: $vgpr0 = COPY [[UV]](s32)
+ ; GFX9: $vgpr1 = COPY [[UV1]](s32)
+ ; GFX9: $vgpr2 = COPY [[UV2]](s32)
+ ; GFX9: $vgpr3 = COPY [[UV3]](s32)
  ; GFX9: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
  ; GFX10NSA-LABEL: name: getresinfo_dmask0
  ; GFX10NSA: bb.1.main_body:
@@ -2925,15 +2922,12 @@ define amdgpu_ps <4 x float> @getresinfo_dmask0(<8 x i32> inreg %rsrc, <4 x floa
  ; GFX10NSA: [[COPY6:%[0-9]+]]:_(s32) = COPY $sgpr8
  ; GFX10NSA: [[COPY7:%[0-9]+]]:_(s32) = COPY $sgpr9
  ; GFX10NSA: [[COPY8:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
- ; GFX10NSA: [[BUILD_VECTOR:%[0-9]+]]:_(<8 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32), [[COPY4]](s32), [[COPY5]](s32), [[COPY6]](s32), [[COPY7]](s32)
- ; GFX10NSA: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY8]](<2 x s16>)
- ; GFX10NSA: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
- ; GFX10NSA: [[AMDGPU_INTRIN_IMAGE_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_INTRIN_IMAGE_LOAD intrinsic(@llvm.amdgcn.image.getresinfo.1d), 0, [[TRUNC]](s16), [[BUILD_VECTOR]](<8 x s32>), 0, 0
- ; GFX10NSA: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
- ; GFX10NSA: $vgpr0 = COPY [[AMDGPU_INTRIN_IMAGE_LOAD]](s32)
- ; GFX10NSA: $vgpr1 = COPY [[DEF]](s32)
- ; GFX10NSA: $vgpr2 = COPY [[DEF]](s32)
- ; GFX10NSA: $vgpr3 = COPY [[DEF]](s32)
+ ; GFX10NSA: [[DEF:%[0-9]+]]:_(<4 x s32>) = G_IMPLICIT_DEF
+ ; GFX10NSA: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[DEF]](<4 x s32>)
+ ; GFX10NSA: $vgpr0 = COPY [[UV]](s32)
+ ; GFX10NSA: $vgpr1 = COPY [[UV1]](s32)
+ ; GFX10NSA: $vgpr2 = COPY [[UV2]](s32)
+ ; GFX10NSA: $vgpr3 = COPY [[UV3]](s32)
  ; GFX10NSA: SI_RETURN_TO_EPILOG implicit $vgpr0, implicit $vgpr1, implicit $vgpr2, implicit $vgpr3
 main_body:
   %mip = extractelement <2 x i16> %coords, i32 0